Print this page
rev 4525 : 8014611: reserve_and_align() assumptions are invalid on windows
Summary: also reviewed by ron.durbin@oracle.com, thomas.schatzl@oracle.com
Reviewed-by: dcubed, brutisso
Split |
Split |
Close |
Expand all |
Collapse all |
--- old/src/share/vm/runtime/os.hpp
+++ new/src/share/vm/runtime/os.hpp
1 1 /*
2 2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_RUNTIME_OS_HPP
26 26 #define SHARE_VM_RUNTIME_OS_HPP
27 27
28 28 #include "jvmtifiles/jvmti.h"
29 29 #include "runtime/atomic.hpp"
30 30 #include "runtime/extendedPC.hpp"
31 31 #include "runtime/handles.hpp"
32 32 #include "utilities/top.hpp"
33 33 #ifdef TARGET_OS_FAMILY_linux
34 34 # include "jvm_linux.h"
35 35 #endif
36 36 #ifdef TARGET_OS_FAMILY_solaris
37 37 # include "jvm_solaris.h"
38 38 #endif
39 39 #ifdef TARGET_OS_FAMILY_windows
40 40 # include "jvm_windows.h"
41 41 #endif
42 42 #ifdef TARGET_OS_FAMILY_bsd
43 43 # include "jvm_bsd.h"
44 44 #endif
45 45
46 46 // os defines the interface to operating system; this includes traditional
47 47 // OS services (time, I/O) as well as other functionality with system-
48 48 // dependent code.
49 49
50 50 typedef void (*dll_func)(...);
51 51
52 52 class Thread;
53 53 class JavaThread;
54 54 class Event;
55 55 class DLL;
56 56 class FileHandle;
57 57 template<class E> class GrowableArray;
58 58
59 59 // %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose
60 60
61 61 // Platform-independent error return values from OS functions
62 62 enum OSReturn {
63 63 OS_OK = 0, // Operation was successful
64 64 OS_ERR = -1, // Operation failed
65 65 OS_INTRPT = -2, // Operation was interrupted
66 66 OS_TIMEOUT = -3, // Operation timed out
67 67 OS_NOMEM = -5, // Operation failed for lack of memory
68 68 OS_NORESOURCE = -6 // Operation failed for lack of nonmemory resource
69 69 };
70 70
71 71 enum ThreadPriority { // JLS 20.20.1-3
72 72 NoPriority = -1, // Initial non-priority value
73 73 MinPriority = 1, // Minimum priority
74 74 NormPriority = 5, // Normal (non-daemon) priority
75 75 NearMaxPriority = 9, // High priority, used for VMThread
76 76 MaxPriority = 10, // Highest priority, used for WatcherThread
77 77 // ensures that VMThread doesn't starve profiler
78 78 CriticalPriority = 11 // Critical thread priority
79 79 };
80 80
81 81 // Typedef for structured exception handling support
82 82 typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
83 83
84 84 class os: AllStatic {
85 85 public:
86 86 enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
87 87
88 88 private:
89 89 static OSThread* _starting_thread;
90 90 static address _polling_page;
91 91 static volatile int32_t * _mem_serialize_page;
92 92 static uintptr_t _serialize_page_mask;
93 93 public:
94 94 static size_t _page_sizes[page_sizes_max];
95 95
96 96 private:
 // Seed the _page_sizes table: slot 0 holds the platform default page
 // size and slot 1 holds the 0 sentinel that terminates the table
 // (the table has room for page_sizes_max entries, 8 plus sentinel).
  97  97   static void init_page_sizes(size_t default_page_size) {
  98  98     _page_sizes[0] = default_page_size;
  99  99     _page_sizes[1] = 0; // sentinel
 100 100   }
101 101
102 102 static char* pd_reserve_memory(size_t bytes, char* addr = 0,
103 103 size_t alignment_hint = 0);
104 104 static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr);
105 105 static void pd_split_reserved_memory(char *base, size_t size,
106 106 size_t split, bool realloc);
107 107 static bool pd_commit_memory(char* addr, size_t bytes, bool executable = false);
108 108 static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
109 109 bool executable = false);
110 110 static bool pd_uncommit_memory(char* addr, size_t bytes);
111 111 static bool pd_release_memory(char* addr, size_t bytes);
112 112
113 113 static char* pd_map_memory(int fd, const char* file_name, size_t file_offset,
114 114 char *addr, size_t bytes, bool read_only = false,
115 115 bool allow_exec = false);
116 116 static char* pd_remap_memory(int fd, const char* file_name, size_t file_offset,
117 117 char *addr, size_t bytes, bool read_only,
118 118 bool allow_exec);
119 119 static bool pd_unmap_memory(char *addr, size_t bytes);
120 120 static void pd_free_memory(char *addr, size_t bytes, size_t alignment_hint);
121 121 static void pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint);
122 122
123 123
124 124 public:
125 125 static void init(void); // Called before command line parsing
126 126 static jint init_2(void); // Called after command line parsing
 // OS-specific global initialization hook; simply delegates to the
 // extension point init_globals_ext() (brought in via the
 // "runtime/os_ext.hpp" include at the bottom of this class).
 127 127   static void init_globals(void) { // Called from init_globals() in init.cpp
 128 128     init_globals_ext();
 129 129   }
130 130 static void init_3(void); // Called at the end of vm init
131 131
132 132 // File names are case-insensitive on windows only
133 133 // Override me as needed
134 134 static int file_name_strcmp(const char* s1, const char* s2);
135 135
136 136 static bool getenv(const char* name, char* buffer, int len);
137 137 static bool have_special_privileges();
138 138
139 139 static jlong javaTimeMillis();
140 140 static jlong javaTimeNanos();
141 141 static void javaTimeNanos_info(jvmtiTimerInfo *info_ptr);
142 142 static void run_periodic_checks();
143 143
144 144
145 145 // Returns the elapsed time in seconds since the vm started.
146 146 static double elapsedTime();
147 147
148 148 // Returns real time in seconds since an arbitrary point
149 149 // in the past.
150 150 static bool getTimesSecs(double* process_real_time,
151 151 double* process_user_time,
152 152 double* process_system_time);
153 153
154 154 // Interface to the performance counter
155 155 static jlong elapsed_counter();
156 156 static jlong elapsed_frequency();
157 157
158 158 // The "virtual time" of a thread is the amount of time a thread has
159 159 // actually run. The first function indicates whether the OS supports
160 160 // this functionality for the current thread, and if so:
161 161 // * the second enables vtime tracking (if that is required).
162 162 // * the third tells whether vtime is enabled.
163 163 // * the fourth returns the elapsed virtual time for the current
164 164 // thread.
165 165 static bool supports_vtime();
166 166 static bool enable_vtime();
167 167 static bool vtime_enabled();
168 168 static double elapsedVTime();
169 169
170 170 // Return current local time in a string (YYYY-MM-DD HH:MM:SS).
171 171 // It is MT safe, but not async-safe, as reading time zone
172 172 // information may require a lock on some platforms.
173 173 static char* local_time_string(char *buf, size_t buflen);
174 174 static struct tm* localtime_pd (const time_t* clock, struct tm* res);
175 175 // Fill in buffer with current local time as an ISO-8601 string.
176 176 // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz.
177 177 // Returns buffer, or NULL if it failed.
178 178 static char* iso8601_time(char* buffer, size_t buffer_length);
179 179
180 180 // Interface for detecting multiprocessor system
 181 181   static inline bool is_MP() {
 // _processor_count is established during startup via set_processor_count();
 // calling is_MP() before that point is a programming error.
 182 182     assert(_processor_count > 0, "invalid processor count");
 183 183     return _processor_count > 1;
 184 184   }
185 185 static julong available_memory();
186 186 static julong physical_memory();
187 187 static julong allocatable_physical_memory(julong size);
188 188 static bool is_server_class_machine();
189 189
190 190 // number of CPUs
 // Returns the cached processor count recorded at VM startup; this is a
 // static snapshot — see active_processor_count() below for the dynamic value.
 191 191   static int processor_count() {
 192 192     return _processor_count;
 193 193   }
 // Records the processor count; must be called (with a positive count)
 // before is_MP() or processor_count() are used.
 194 194   static void set_processor_count(int count) { _processor_count = count; }
195 195
196 196 // Returns the number of CPUs this process is currently allowed to run on.
197 197 // Note that on some OSes this can change dynamically.
198 198 static int active_processor_count();
199 199
200 200 // Bind processes to processors.
201 201 // This is a two step procedure:
202 202 // first you generate a distribution of processes to processors,
203 203 // then you bind processes according to that distribution.
204 204 // Compute a distribution for number of processes to processors.
205 205 // Stores the processor id's into the distribution array argument.
206 206 // Returns true if it worked, false if it didn't.
207 207 static bool distribute_processes(uint length, uint* distribution);
208 208 // Binds the current process to a processor.
209 209 // Returns true if it worked, false if it didn't.
210 210 static bool bind_to_processor(uint processor_id);
211 211
212 212 // Give a name to the current thread.
213 213 static void set_native_thread_name(const char *name);
214 214
215 215 // Interface for stack banging (predetect possible stack overflow for
216 216 // exception processing) There are guard pages, and above that shadow
217 217 // pages for stack overflow checking.
218 218 static bool uses_stack_guard_pages();
219 219 static bool allocate_stack_guard_pages();
220 220 static void bang_stack_shadow_pages();
221 221 static bool stack_shadow_pages_available(Thread *thread, methodHandle method);
222 222
223 223 // OS interface to Virtual Memory
224 224
225 225 // Return the default page size.
226 226 static int vm_page_size();
227 227
228 228 // Return the page size to use for a region of memory. The min_pages argument
229 229 // is a hint intended to limit fragmentation; it says the returned page size
230 230 // should be <= region_max_size / min_pages. Because min_pages is a hint,
231 231 // this routine may return a size larger than region_max_size / min_pages.
232 232 //
233 233 // The current implementation ignores min_pages if a larger page size is an
234 234 // exact multiple of both region_min_size and region_max_size. This allows
235 235 // larger pages to be used when doing so would not cause fragmentation; in
236 236 // particular, a single page can be used when region_min_size ==
237 237 // region_max_size == a supported page size.
238 238 static size_t page_size_for_region(size_t region_min_size,
239 239 size_t region_max_size,
240 240 uint min_pages);
241 241
242 242 // Methods for tracing page sizes returned by the above method; enabled by
243 243 // TracePageSizes. The region_{min,max}_size parameters should be the values
244 244 // passed to page_size_for_region() and page_size should be the result of that
245 245 // call. The (optional) base and size parameters should come from the
246 246 // ReservedSpace base() and size() methods.
247 247 static void trace_page_sizes(const char* str, const size_t* page_sizes,
248 248 int count) PRODUCT_RETURN;
249 249 static void trace_page_sizes(const char* str, const size_t region_min_size,
250 250 const size_t region_max_size,
251 251 const size_t page_size,
252 252 const char* base = NULL,
253 253 const size_t size = 0) PRODUCT_RETURN;
254 254
255 255 static int vm_allocation_granularity();
256 256 static char* reserve_memory(size_t bytes, char* addr = 0,
257 257 size_t alignment_hint = 0);
258 258 static char* reserve_memory(size_t bytes, char* addr,
↓ open down ↓ |
258 lines elided |
↑ open up ↑ |
259 259 size_t alignment_hint, MEMFLAGS flags);
260 260 static char* reserve_memory_aligned(size_t size, size_t alignment);
261 261 static char* attempt_reserve_memory_at(size_t bytes, char* addr);
262 262 static void split_reserved_memory(char *base, size_t size,
263 263 size_t split, bool realloc);
264 264 static bool commit_memory(char* addr, size_t bytes, bool executable = false);
265 265 static bool commit_memory(char* addr, size_t size, size_t alignment_hint,
266 266 bool executable = false);
267 267 static bool uncommit_memory(char* addr, size_t bytes);
268 268 static bool release_memory(char* addr, size_t bytes);
269 + static bool can_release_partial_region();
270 + static bool release_or_uncommit_partial_region(char* addr, size_t bytes);
269 271
270 272 enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
271 273 static bool protect_memory(char* addr, size_t bytes, ProtType prot,
272 274 bool is_committed = true);
273 275
274 276 static bool guard_memory(char* addr, size_t bytes);
275 277 static bool unguard_memory(char* addr, size_t bytes);
276 278 static bool create_stack_guard_pages(char* addr, size_t bytes);
277 279 static bool pd_create_stack_guard_pages(char* addr, size_t bytes);
278 280 static bool remove_stack_guard_pages(char* addr, size_t bytes);
279 281
280 282 static char* map_memory(int fd, const char* file_name, size_t file_offset,
281 283 char *addr, size_t bytes, bool read_only = false,
282 284 bool allow_exec = false);
283 285 static char* remap_memory(int fd, const char* file_name, size_t file_offset,
284 286 char *addr, size_t bytes, bool read_only,
285 287 bool allow_exec);
286 288 static bool unmap_memory(char *addr, size_t bytes);
287 289 static void free_memory(char *addr, size_t bytes, size_t alignment_hint);
288 290 static void realign_memory(char *addr, size_t bytes, size_t alignment_hint);
289 291
290 292 // NUMA-specific interface
291 293 static bool numa_has_static_binding();
292 294 static bool numa_has_group_homing();
293 295 static void numa_make_local(char *addr, size_t bytes, int lgrp_hint);
294 296 static void numa_make_global(char *addr, size_t bytes);
295 297 static size_t numa_get_groups_num();
296 298 static size_t numa_get_leaf_groups(int *ids, size_t size);
297 299 static bool numa_topology_changed();
298 300 static int numa_get_group_id();
299 301
300 302 // Page manipulation
301 303 struct page_info {
302 304 size_t size;
303 305 int lgrp_id;
304 306 };
305 307 static bool get_page_info(char *start, page_info* info);
306 308 static char* scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found);
307 309
308 310 static char* non_memory_address_word();
309 311 // reserve, commit and pin the entire memory region
310 312 static char* reserve_memory_special(size_t size, char* addr = NULL,
311 313 bool executable = false);
312 314 static bool release_memory_special(char* addr, size_t bytes);
313 315 static void large_page_init();
314 316 static size_t large_page_size();
315 317 static bool can_commit_large_page_memory();
316 318 static bool can_execute_large_page_memory();
317 319
318 320 // OS interface to polling page
 // Accessors for the VM's polling page (_polling_page).
 319 321   static address  get_polling_page()              { return _polling_page; }
 320 322   static void     set_polling_page(address page)  { _polling_page = page; }
 // A "poll address" is any address within the single polling page
 // [_polling_page, _polling_page + vm_page_size()).
 321 323   static bool     is_poll_address(address addr)   { return addr >= _polling_page && addr < (_polling_page + os::vm_page_size()); }
322 324 static void make_polling_page_unreadable();
323 325 static void make_polling_page_readable();
324 326
325 327 // Routines used to serialize the thread state without using membars
326 328 static void serialize_thread_states();
327 329
328 330 // Since we write to the serialize page from every thread, we
329 331 // want stores to be on unique cache lines whenever possible
330 332 // in order to minimize CPU cross talk. We pre-compute the
331 333 // amount to shift the thread* to make this offset unique to
332 334 // each thread.
 // Right-shift applied to a thread* when hashing it to an offset within
 // the serialization page; see write_memory_serialize_page().
 333 335   static int get_serialize_page_shift_count() {
 334 336     return SerializePageShiftCount;
 335 337   }
336 338
 // Records the mask applied (after shifting) to confine a thread's
 // serialize-page offset to the page; see write_memory_serialize_page().
 337 339   static void set_serialize_page_mask(uintptr_t mask) {
 338 340     _serialize_page_mask = mask;
 339 341   }
340 342
341 343 static unsigned int get_serialize_page_mask() {
342 344 return _serialize_page_mask;
343 345 }
344 346
345 347 static void set_memory_serialize_page(address page);
346 348
 // Base address of the shared thread-state serialization page
 // (installed via set_memory_serialize_page()).
 347 349   static address get_memory_serialize_page() {
 348 350     return (address)_mem_serialize_page;
 349 351   }
350 352
351 353 static inline void write_memory_serialize_page(JavaThread *thread) {
352 354 uintptr_t page_offset = ((uintptr_t)thread >>
353 355 get_serialize_page_shift_count()) &
354 356 get_serialize_page_mask();
355 357 *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1;
356 358 }
357 359
358 360 static bool is_memory_serialize_page(JavaThread *thread, address addr) {
359 361 if (UseMembar) return false;
360 362 // Previously this function calculated the exact address of this
361 363 // thread's serialize page, and checked if the faulting address
362 364 // was equal. However, some platforms mask off faulting addresses
363 365 // to the page size, so now we just check that the address is
364 366 // within the page. This makes the thread argument unnecessary,
365 367 // but we retain the NULL check to preserve existing behaviour.
366 368 if (thread == NULL) return false;
367 369 address page = (address) _mem_serialize_page;
368 370 return addr >= page && addr < (page + os::vm_page_size());
369 371 }
370 372
371 373 static void block_on_serialize_page_trap();
372 374
373 375 // threads
374 376
375 377 enum ThreadType {
376 378 vm_thread,
377 379 cgc_thread, // Concurrent GC thread
378 380 pgc_thread, // Parallel GC thread
379 381 java_thread,
380 382 compiler_thread,
381 383 watcher_thread,
382 384 os_thread
383 385 };
384 386
385 387 static bool create_thread(Thread* thread,
386 388 ThreadType thr_type,
387 389 size_t stack_size = 0);
388 390 static bool create_main_thread(JavaThread* thread);
389 391 static bool create_attached_thread(JavaThread* thread);
390 392 static void pd_start_thread(Thread* thread);
391 393 static void start_thread(Thread* thread);
392 394
393 395 static void initialize_thread(Thread* thr);
394 396 static void free_thread(OSThread* osthread);
395 397
396 398 // thread id on Linux/64bit is 64bit, on Windows and Solaris, it's 32bit
397 399 static intx current_thread_id();
398 400 static int current_process_id();
399 401 static int sleep(Thread* thread, jlong ms, bool interruptable);
400 402 static int naked_sleep();
401 403 static void infinite_sleep(); // never returns, use with CAUTION
402 404 static void yield(); // Yields to all threads with same priority
403 405 enum YieldResult {
404 406 YIELD_SWITCHED = 1, // caller descheduled, other ready threads exist & ran
405 407 YIELD_NONEREADY = 0, // No other runnable/ready threads.
406 408 // platform-specific yield return immediately
407 409 YIELD_UNKNOWN = -1 // Unknown: platform doesn't support _SWITCHED or _NONEREADY
408 410 // YIELD_SWITCHED and YIELD_NONREADY imply the platform supports a "strong"
409 411 // yield that can be used in lieu of blocking.
410 412 } ;
411 413 static YieldResult NakedYield () ;
412 414 static void yield_all(int attempts = 0); // Yields to all other threads including lower priority
413 415 static void loop_breaker(int attempts); // called from within tight loops to possibly influence time-sharing
414 416 static OSReturn set_priority(Thread* thread, ThreadPriority priority);
415 417 static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority);
416 418
417 419 static void interrupt(Thread* thread);
418 420 static bool is_interrupted(Thread* thread, bool clear_interrupted);
419 421
420 422 static int pd_self_suspend_thread(Thread* thread);
421 423
422 424 static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp);
423 425 static frame fetch_frame_from_context(void* ucVoid);
424 426
425 427 static ExtendedPC get_thread_pc(Thread *thread);
426 428 static void breakpoint();
427 429
428 430 static address current_stack_pointer();
429 431 static address current_stack_base();
430 432 static size_t current_stack_size();
431 433
432 434 static void verify_stack_alignment() PRODUCT_RETURN;
433 435
434 436 static int message_box(const char* title, const char* message);
435 437 static char* do_you_want_to_debug(const char* message);
436 438
437 439 // run cmd in a separate process and return its exit code; or -1 on failures
438 440 static int fork_and_exec(char *cmd);
439 441
440 442 // Set file to send error reports.
441 443 static void set_error_file(const char *logfile);
442 444
443 445 // os::exit() is merged with vm_exit()
444 446 // static void exit(int num);
445 447
446 448 // Terminate the VM, but don't exit the process
447 449 static void shutdown();
448 450
449 451 // Terminate with an error. Default is to generate a core file on platforms
450 452 // that support such things. This calls shutdown() and then aborts.
451 453 static void abort(bool dump_core = true);
452 454
453 455 // Die immediately, no exit hook, no abort hook, no cleanup.
454 456 static void die();
455 457
456 458 // File i/o operations
457 459 static const int default_file_open_flags();
458 460 static int open(const char *path, int oflag, int mode);
459 461 static int close(int fd);
460 462 static jlong lseek(int fd, jlong offset, int whence);
461 463 static char* native_path(char *path);
462 464 static int ftruncate(int fd, jlong length);
463 465 static int fsync(int fd);
464 466 static int available(int fd, jlong *bytes);
465 467
466 468 //File i/o operations
467 469
468 470 static size_t read(int fd, void *buf, unsigned int nBytes);
469 471 static size_t restartable_read(int fd, void *buf, unsigned int nBytes);
470 472 static size_t write(int fd, const void *buf, unsigned int nBytes);
471 473
472 474 // Reading directories.
473 475 static DIR* opendir(const char* dirname);
474 476 static int readdir_buf_size(const char *path);
475 477 static struct dirent* readdir(DIR* dirp, dirent* dbuf);
476 478 static int closedir(DIR* dirp);
477 479
478 480 // Dynamic library extension
479 481 static const char* dll_file_extension();
480 482
481 483 static const char* get_temp_directory();
482 484 static const char* get_current_directory(char *buf, int buflen);
483 485
484 486 // Builds a platform-specific full library path given a ld path and lib name
485 487 static void dll_build_name(char* buffer, size_t size,
486 488 const char* pathname, const char* fname);
487 489
488 490 // Symbol lookup, find nearest function name; basically it implements
489 491 // dladdr() for all platforms. Name of the nearest function is copied
490 492 // to buf. Distance from its base address is returned as offset.
491 493 // If function name is not found, buf[0] is set to '\0' and offset is
492 494 // set to -1.
493 495 static bool dll_address_to_function_name(address addr, char* buf,
494 496 int buflen, int* offset);
495 497
496 498 // Locate DLL/DSO. On success, full path of the library is copied to
497 499 // buf, and offset is set to be the distance between addr and the
498 500 // library's base address. On failure, buf[0] is set to '\0' and
499 501 // offset is set to -1.
500 502 static bool dll_address_to_library_name(address addr, char* buf,
501 503 int buflen, int* offset);
502 504
503 505 // Find out whether the pc is in the static code for jvm.dll/libjvm.so.
504 506 static bool address_is_in_vm(address addr);
505 507
506 508 // Loads .dll/.so and
507 509 // in case of error it checks if .dll/.so was built for the
508 510 // same architecture as Hotspot is running on
509 511 static void* dll_load(const char *name, char *ebuf, int ebuflen);
510 512
511 513 // lookup symbol in a shared library
512 514 static void* dll_lookup(void* handle, const char* name);
513 515
514 516 // Unload library
515 517 static void dll_unload(void *lib);
516 518
517 519 // Print out system information; they are called by fatal error handler.
518 520 // Output format may be different on different platforms.
519 521 static void print_os_info(outputStream* st);
520 522 static void print_os_info_brief(outputStream* st);
521 523 static void print_cpu_info(outputStream* st);
522 524 static void pd_print_cpu_info(outputStream* st);
523 525 static void print_memory_info(outputStream* st);
524 526 static void print_dll_info(outputStream* st);
525 527 static void print_environment_variables(outputStream* st, const char** env_list, char* buffer, int len);
526 528 static void print_context(outputStream* st, void* context);
527 529 static void print_register_info(outputStream* st, void* context);
528 530 static void print_siginfo(outputStream* st, void* siginfo);
529 531 static void print_signal_handlers(outputStream* st, char* buf, size_t buflen);
530 532 static void print_date_and_time(outputStream* st);
531 533
532 534 static void print_location(outputStream* st, intptr_t x, bool verbose = false);
533 535 static size_t lasterror(char *buf, size_t len);
534 536 static int get_last_error();
535 537
536 538 // Determines whether the calling process is being debugged by a user-mode debugger.
537 539 static bool is_debugger_attached();
538 540
539 541 // wait for a key press if PauseAtExit is set
540 542 static void wait_for_keypress_at_exit(void);
541 543
542 544 // The following two functions are used by fatal error handler to trace
543 545 // native (C) frames. They are not part of frame.hpp/frame.cpp because
544 546 // frame.hpp/cpp assume thread is JavaThread, and also because different
545 547 // OS/compiler may have different convention or provide different API to
546 548 // walk C frames.
547 549 //
548 550 // We don't attempt to become a debugger, so we only follow frames if that
549 551 // does not require a lookup in the unwind table, which is part of the binary
550 552 // file but may be unsafe to read after a fatal error. So on x86, we can
551 553 // only walk stack if %ebp is used as frame pointer; on ia64, it's not
552 554 // possible to walk C stack without having the unwind table.
553 555 static bool is_first_C_frame(frame *fr);
554 556 static frame get_sender_for_C_frame(frame *fr);
555 557
556 558 // return current frame. pc() and sp() are set to NULL on failure.
557 559 static frame current_frame();
558 560
559 561 static void print_hex_dump(outputStream* st, address start, address end, int unitsize);
560 562
561 563 // returns a string to describe the exception/signal;
562 564 // returns NULL if exception_code is not an OS exception/signal.
563 565 static const char* exception_name(int exception_code, char* buf, size_t buflen);
564 566
565 567 // Returns native Java library, loads if necessary
566 568 static void* native_java_library();
567 569
568 570 // Fills in path to jvm.dll/libjvm.so (used by the Disassembler)
569 571 static void jvm_path(char *buf, jint buflen);
570 572
571 573 // Returns true if we are running in a headless jre.
572 574 static bool is_headless_jre();
573 575
574 576 // JNI names
575 577 static void print_jni_name_prefix_on(outputStream* st, int args_size);
576 578 static void print_jni_name_suffix_on(outputStream* st, int args_size);
577 579
578 580 // File conventions
579 581 static const char* file_separator();
580 582 static const char* line_separator();
581 583 static const char* path_separator();
582 584
583 585 // Init os specific system properties values
584 586 static void init_system_properties_values();
585 587
586 588 // IO operations, non-JVM_ version.
587 589 static int stat(const char* path, struct stat* sbuf);
588 590 static bool dir_is_empty(const char* path);
589 591
590 592 // IO operations on binary files
591 593 static int create_binary_file(const char* path, bool rewrite_existing);
592 594 static jlong current_file_offset(int fd);
593 595 static jlong seek_to_file_offset(int fd, jlong offset);
594 596
595 597 // Thread Local Storage
596 598 static int allocate_thread_local_storage();
597 599 static void thread_local_storage_at_put(int index, void* value);
598 600 static void* thread_local_storage_at(int index);
599 601 static void free_thread_local_storage(int index);
600 602
601 603 // Stack walk
602 604 static address get_caller_pc(int n = 0);
603 605
604 606 // General allocation (must be MT-safe)
605 607 static void* malloc (size_t size, MEMFLAGS flags, address caller_pc = 0);
606 608 static void* realloc (void *memblock, size_t size, MEMFLAGS flags, address caller_pc = 0);
607 609 static void free (void *memblock, MEMFLAGS flags = mtNone);
608 610 static bool check_heap(bool force = false); // verify C heap integrity
609 611 static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup
610 612
611 613 #ifndef PRODUCT
612 614 static julong num_mallocs; // # of calls to malloc/realloc
613 615 static julong alloc_bytes; // # of bytes allocated
614 616 static julong num_frees; // # of calls to free
615 617 static julong free_bytes; // # of bytes freed
616 618 #endif
617 619
618 620 // SocketInterface (ex HPI SocketInterface )
619 621 static int socket(int domain, int type, int protocol);
620 622 static int socket_close(int fd);
621 623 static int socket_shutdown(int fd, int howto);
622 624 static int recv(int fd, char* buf, size_t nBytes, uint flags);
623 625 static int send(int fd, char* buf, size_t nBytes, uint flags);
624 626 static int raw_send(int fd, char* buf, size_t nBytes, uint flags);
625 627 static int timeout(int fd, long timeout);
626 628 static int listen(int fd, int count);
627 629 static int connect(int fd, struct sockaddr* him, socklen_t len);
628 630 static int bind(int fd, struct sockaddr* him, socklen_t len);
629 631 static int accept(int fd, struct sockaddr* him, socklen_t* len);
630 632 static int recvfrom(int fd, char* buf, size_t nbytes, uint flags,
631 633 struct sockaddr* from, socklen_t* fromlen);
632 634 static int get_sock_name(int fd, struct sockaddr* him, socklen_t* len);
633 635 static int sendto(int fd, char* buf, size_t len, uint flags,
634 636 struct sockaddr* to, socklen_t tolen);
635 637 static int socket_available(int fd, jint* pbytes);
636 638
637 639 static int get_sock_opt(int fd, int level, int optname,
638 640 char* optval, socklen_t* optlen);
639 641 static int set_sock_opt(int fd, int level, int optname,
640 642 const char* optval, socklen_t optlen);
641 643 static int get_host_name(char* name, int namelen);
642 644
643 645 static struct hostent* get_host_by_name(char* name);
644 646
645 647 // Printing 64 bit integers
646 648 static const char* jlong_format_specifier();
647 649 static const char* julong_format_specifier();
648 650
649 651 // Support for signals (see JVM_RaiseSignal, JVM_RegisterSignal)
650 652 static void signal_init();
651 653 static void signal_init_pd();
652 654 static void signal_notify(int signal_number);
653 655 static void* signal(int signal_number, void* handler);
654 656 static void signal_raise(int signal_number);
655 657 static int signal_wait();
656 658 static int signal_lookup();
657 659 static void* user_handler();
658 660 static void terminate_signal_thread();
659 661 static int sigexitnum_pd();
660 662
661 663 // random number generation
662 664 static long random(); // return 32bit pseudorandom number
663 665 static void init_random(long initval); // initialize random sequence
664 666
665 667 // Structured OS Exception support
666 668 static void os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
667 669
668 670 // On Windows this will create an actual minidump, on Linux/Solaris it will simply check core dump limits
669 671 static void check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize);
670 672
671 673 // Get the default path to the core file
672 674 // Returns the length of the string
673 675 static int get_core_path(char* buffer, size_t bufferSize);
674 676
675 677 // JVMTI & JVM monitoring and management support
676 678 // The thread_cpu_time() and current_thread_cpu_time() are only
677 679 // supported if is_thread_cpu_time_supported() returns true.
678 680 // They are not supported on Solaris T1.
679 681
680 682 // Thread CPU Time - return the fast estimate on a platform
681 683 // On Solaris - call gethrvtime (fast) - user time only
682 684 // On Linux - fast clock_gettime where available - user+sys
683 685 // - otherwise: very slow /proc fs - user+sys
684 686 // On Windows - GetThreadTimes - user+sys
685 687 static jlong current_thread_cpu_time();
686 688 static jlong thread_cpu_time(Thread* t);
687 689
688 690 // Thread CPU Time with user_sys_cpu_time parameter.
689 691 //
690 692 // If user_sys_cpu_time is true, user+sys time is returned.
691 693 // Otherwise, only user time is returned
692 694 static jlong current_thread_cpu_time(bool user_sys_cpu_time);
693 695 static jlong thread_cpu_time(Thread* t, bool user_sys_cpu_time);
694 696
695 697 // Return a bunch of info about the timers.
696 698 // Note that the returned info for these two functions may be different
697 699 // on some platforms
698 700 static void current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr);
699 701 static void thread_cpu_time_info(jvmtiTimerInfo *info_ptr);
700 702
701 703 static bool is_thread_cpu_time_supported();
702 704
703 705 // System loadavg support. Returns -1 if load average cannot be obtained.
704 706 static int loadavg(double loadavg[], int nelem);
705 707
706 708 // Hook for os specific jvm options that we don't want to abort on seeing
707 709 static bool obsolete_option(const JavaVMOption *option);
708 710
709 711 // Read file line by line. If line is longer than bsize,
710 712 // rest of line is skipped. Returns number of bytes read or -1 on EOF
711 713 static int get_line_chars(int fd, char *buf, const size_t bsize);
712 714
713 715 // Extensions
714 716 #include "runtime/os_ext.hpp"
715 717
716 718 public:
717 719
718 720 // Platform dependent stuff
719 721 #ifdef TARGET_OS_FAMILY_linux
720 722 # include "os_linux.hpp"
721 723 # include "os_posix.hpp"
722 724 #endif
723 725 #ifdef TARGET_OS_FAMILY_solaris
724 726 # include "os_solaris.hpp"
725 727 # include "os_posix.hpp"
726 728 #endif
727 729 #ifdef TARGET_OS_FAMILY_windows
728 730 # include "os_windows.hpp"
729 731 #endif
730 732 #ifdef TARGET_OS_FAMILY_bsd
731 733 # include "os_posix.hpp"
732 734 # include "os_bsd.hpp"
733 735 #endif
734 736 #ifdef TARGET_OS_ARCH_linux_x86
735 737 # include "os_linux_x86.hpp"
736 738 #endif
737 739 #ifdef TARGET_OS_ARCH_linux_sparc
738 740 # include "os_linux_sparc.hpp"
739 741 #endif
740 742 #ifdef TARGET_OS_ARCH_linux_zero
741 743 # include "os_linux_zero.hpp"
742 744 #endif
743 745 #ifdef TARGET_OS_ARCH_solaris_x86
744 746 # include "os_solaris_x86.hpp"
745 747 #endif
746 748 #ifdef TARGET_OS_ARCH_solaris_sparc
747 749 # include "os_solaris_sparc.hpp"
748 750 #endif
749 751 #ifdef TARGET_OS_ARCH_windows_x86
750 752 # include "os_windows_x86.hpp"
751 753 #endif
752 754 #ifdef TARGET_OS_ARCH_linux_arm
753 755 # include "os_linux_arm.hpp"
754 756 #endif
755 757 #ifdef TARGET_OS_ARCH_linux_ppc
756 758 # include "os_linux_ppc.hpp"
757 759 #endif
758 760 #ifdef TARGET_OS_ARCH_bsd_x86
759 761 # include "os_bsd_x86.hpp"
760 762 #endif
761 763 #ifdef TARGET_OS_ARCH_bsd_zero
762 764 # include "os_bsd_zero.hpp"
763 765 #endif
764 766
765 767 public:
766 768 // debugging support (mostly used by debug.cpp but also fatal error handler)
767 769 static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address
768 770
769 771 static bool dont_yield(); // when true, JVM_Yield() is nop
770 772 static void print_statistics();
771 773
772 774 // Thread priority helpers (implemented in OS-specific part)
773 775 static OSReturn set_native_priority(Thread* thread, int native_prio);
774 776 static OSReturn get_native_priority(const Thread* const thread, int* priority_ptr);
775 777 static int java_to_os_priority[CriticalPriority + 1];
776 778 // Hint to the underlying OS that a task switch would not be good.
777 779 // Void return because it's a hint and can fail.
778 780 static void hint_no_preempt();
779 781
780 782 // Used at creation if requested by the diagnostic flag PauseAtStartup.
781 783 // Causes the VM to wait until an external stimulus has been applied
782 784 // (for Unix, that stimulus is a signal, for Windows, an external
783 785 // ResumeThread call)
784 786 static void pause();
785 787
786 788 class SuspendedThreadTaskContext {
787 789 public:
788 790 SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {}
789 791 Thread* thread() const { return _thread; }
790 792 void* ucontext() const { return _ucontext; }
791 793 private:
792 794 Thread* _thread;
793 795 void* _ucontext;
794 796 };
795 797
796 798 class SuspendedThreadTask {
797 799 public:
798 800 SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {}
799 801 virtual ~SuspendedThreadTask() {}
800 802 void run();
801 803 bool is_done() { return _done; }
802 804 virtual void do_task(const SuspendedThreadTaskContext& context) = 0;
803 805 protected:
804 806 private:
805 807 void internal_do_task();
806 808 Thread* _thread;
807 809 bool _done;
808 810 };
809 811
810 812 #ifndef TARGET_OS_FAMILY_windows
811 813 // Suspend/resume support
812 814 // Protocol:
813 815 //
814 816 // a thread starts in SR_RUNNING
815 817 //
816 818 // SR_RUNNING can go to
817 819 // * SR_SUSPEND_REQUEST when the WatcherThread wants to suspend it
818 820 // SR_SUSPEND_REQUEST can go to
819 821 // * SR_RUNNING if WatcherThread decides it waited for SR_SUSPENDED too long (timeout)
820 822 // * SR_SUSPENDED if the stopped thread receives the signal and switches state
821 823 // SR_SUSPENDED can go to
822 824 // * SR_WAKEUP_REQUEST when the WatcherThread has done the work and wants to resume
823 825 // SR_WAKEUP_REQUEST can go to
824 826 // * SR_RUNNING when the stopped thread receives the signal
825 827 // * SR_WAKEUP_REQUEST on timeout (resend the signal and try again)
826 828 class SuspendResume {
827 829 public:
828 830 enum State {
829 831 SR_RUNNING,
830 832 SR_SUSPEND_REQUEST,
831 833 SR_SUSPENDED,
832 834 SR_WAKEUP_REQUEST
833 835 };
834 836
835 837 private:
836 838 volatile State _state;
837 839
838 840 private:
839 841 /* try to switch state from state "from" to state "to"
840 842 * returns the state set after the method is complete
841 843 */
842 844 State switch_state(State from, State to) {
843 845 State result = (State) Atomic::cmpxchg((jint) to, (jint *) &_state, (jint) from);
844 846 if (result == from) {
845 847 // success
846 848 return to;
847 849 }
848 850 return result;
849 851 }
850 852
851 853 public:
852 854 SuspendResume() : _state(SR_RUNNING) { }
853 855
854 856 State state() const { return _state; }
855 857
856 858 State request_suspend() {
857 859 return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST);
858 860 }
859 861
860 862 State cancel_suspend() {
861 863 return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING);
862 864 }
863 865
864 866 State suspended() {
865 867 return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED);
866 868 }
867 869
868 870 State request_wakeup() {
869 871 return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST);
870 872 }
871 873
872 874 State running() {
873 875 return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING);
874 876 }
875 877
876 878 bool is_running() const {
877 879 return _state == SR_RUNNING;
878 880 }
879 881
880 882 bool is_suspend_request() const {
881 883 return _state == SR_SUSPEND_REQUEST;
882 884 }
883 885
884 886 bool is_suspended() const {
885 887 return _state == SR_SUSPENDED;
886 888 }
887 889 };
888 890 #endif
889 891
890 892
891 893 protected:
892 894 static long _rand_seed; // seed for random number generator
893 895 static int _processor_count; // number of processors
894 896
895 897 static char* format_boot_path(const char* format_string,
896 898 const char* home,
897 899 int home_len,
898 900 char fileSep,
899 901 char pathSep);
900 902 static bool set_boot_path(char fileSep, char pathSep);
901 903 static char** split_path(const char* path, int* n);
902 904 };
903 905
904 906 // Note that "PAUSE" is almost always used with synchronization
905 907 // so arguably we should provide Atomic::SpinPause() instead
906 908 // of the global SpinPause() with C linkage.
907 909 // It'd also be eligible for inlining on many platforms.
908 910
909 911 extern "C" int SpinPause () ;
910 912 extern "C" int SafeFetch32 (int * adr, int errValue) ;
911 913 extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t errValue) ;
912 914
913 915 #endif // SHARE_VM_RUNTIME_OS_HPP
↓ open down ↓ |
635 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX