/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// os defines the interface to operating system; this includes traditional
// OS services (time, I/O) as well as other functionality with system-
// dependent code.

// Generic entry-point type for symbols looked up in dynamic libraries.
typedef void (*dll_func)(...);

// Forward declarations so this header need not include the full definitions.
class Thread;
class JavaThread;
class Event;
class DLL;
class FileHandle;
template<class E> class GrowableArray;

// %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp.
-- Rose 39 40 // Platform-independent error return values from OS functions 41 enum OSReturn { 42 OS_OK = 0, // Operation was successful 43 OS_ERR = -1, // Operation failed 44 OS_INTRPT = -2, // Operation was interrupted 45 OS_TIMEOUT = -3, // Operation timed out 46 OS_NOMEM = -5, // Operation failed for lack of memory 47 OS_NORESOURCE = -6 // Operation failed for lack of nonmemory resource 48 }; 49 50 enum ThreadPriority { // JLS 20.20.1-3 51 NoPriority = -1, // Initial non-priority value 52 MinPriority = 1, // Minimum priority 53 NormPriority = 5, // Normal (non-daemon) priority 54 NearMaxPriority = 9, // High priority, used for VMThread 55 MaxPriority = 10 // Highest priority, used for WatcherThread 56 // ensures that VMThread doesn't starve profiler 57 }; 58 59 // Typedef for structured exception handling support 60 typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread); 61 62 class os: AllStatic { 63 public: 64 enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel) 65 66 private: 67 static OSThread* _starting_thread; 68 static address _polling_page; 69 static volatile int32_t * _mem_serialize_page; 70 static uintptr_t _serialize_page_mask; 71 public: 72 static size_t _page_sizes[page_sizes_max]; 73 74 private: 75 static void init_page_sizes(size_t default_page_size) { 76 _page_sizes[0] = default_page_size; 77 _page_sizes[1] = 0; // sentinel 78 } 79 80 public: 81 82 static void init(void); // Called before command line parsing 83 static jint init_2(void); // Called after command line parsing 84 static void init_3(void); // Called at the end of vm init 85 86 // File names are case-insensitive on windows only 87 // Override me as needed 88 static int file_name_strcmp(const char* s1, const char* s2); 89 90 static bool getenv(const char* name, char* buffer, int len); 91 static bool have_special_privileges(); 92 93 static jlong javaTimeMillis(); 94 static jlong javaTimeNanos(); 95 static 
void javaTimeNanos_info(jvmtiTimerInfo *info_ptr); 96 static void run_periodic_checks(); 97 98 99 // Returns the elapsed time in seconds since the vm started. 100 static double elapsedTime(); 101 102 // Returns real time in seconds since an arbitrary point 103 // in the past. 104 static bool getTimesSecs(double* process_real_time, 105 double* process_user_time, 106 double* process_system_time); 107 108 // Interface to the performance counter 109 static jlong elapsed_counter(); 110 static jlong elapsed_frequency(); 111 112 // The "virtual time" of a thread is the amount of time a thread has 113 // actually run. The first function indicates whether the OS supports 114 // this functionality for the current thread, and if so: 115 // * the second enables vtime tracking (if that is required). 116 // * the third tells whether vtime is enabled. 117 // * the fourth returns the elapsed virtual time for the current 118 // thread. 119 static bool supports_vtime(); 120 static bool enable_vtime(); 121 static bool vtime_enabled(); 122 static double elapsedVTime(); 123 124 // Return current local time in a string (YYYY-MM-DD HH:MM:SS). 125 // It is MT safe, but not async-safe, as reading time zone 126 // information may require a lock on some platforms. 127 static char* local_time_string(char *buf, size_t buflen); 128 static struct tm* localtime_pd (const time_t* clock, struct tm* res); 129 // Fill in buffer with current local time as an ISO-8601 string. 130 // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz. 131 // Returns buffer, or NULL if it failed. 
132 static char* iso8601_time(char* buffer, size_t buffer_length); 133 134 // Interface for detecting multiprocessor system 135 static inline bool is_MP() { 136 assert(_processor_count > 0, "invalid processor count"); 137 return _processor_count > 1; 138 } 139 static julong available_memory(); 140 static julong physical_memory(); 141 static julong allocatable_physical_memory(julong size); 142 static bool is_server_class_machine(); 143 144 // number of CPUs 145 static int processor_count() { 146 return _processor_count; 147 } 148 static void set_processor_count(int count) { _processor_count = count; } 149 150 // Returns the number of CPUs this process is currently allowed to run on. 151 // Note that on some OSes this can change dynamically. 152 static int active_processor_count(); 153 154 // Bind processes to processors. 155 // This is a two step procedure: 156 // first you generate a distribution of processes to processors, 157 // then you bind processes according to that distribution. 158 // Compute a distribution for number of processes to processors. 159 // Stores the processor id's into the distribution array argument. 160 // Returns true if it worked, false if it didn't. 161 static bool distribute_processes(uint length, uint* distribution); 162 // Binds the current process to a processor. 163 // Returns true if it worked, false if it didn't. 164 static bool bind_to_processor(uint processor_id); 165 166 // Interface for stack banging (predetect possible stack overflow for 167 // exception processing) There are guard pages, and above that shadow 168 // pages for stack overflow checking. 169 static bool uses_stack_guard_pages(); 170 static bool allocate_stack_guard_pages(); 171 static void bang_stack_shadow_pages(); 172 static bool stack_shadow_pages_available(Thread *thread, methodHandle method); 173 174 // OS interface to Virtual Memory 175 176 // Return the default page size. 
177 static int vm_page_size(); 178 179 // Return the page size to use for a region of memory. The min_pages argument 180 // is a hint intended to limit fragmentation; it says the returned page size 181 // should be <= region_max_size / min_pages. Because min_pages is a hint, 182 // this routine may return a size larger than region_max_size / min_pages. 183 // 184 // The current implementation ignores min_pages if a larger page size is an 185 // exact multiple of both region_min_size and region_max_size. This allows 186 // larger pages to be used when doing so would not cause fragmentation; in 187 // particular, a single page can be used when region_min_size == 188 // region_max_size == a supported page size. 189 static size_t page_size_for_region(size_t region_min_size, 190 size_t region_max_size, 191 uint min_pages); 192 193 // Method for tracing page sizes returned by the above method; enabled by 194 // TracePageSizes. The region_{min,max}_size parameters should be the values 195 // passed to page_size_for_region() and page_size should be the result of that 196 // call. The (optional) base and size parameters should come from the 197 // ReservedSpace base() and size() methods. 
198 static void trace_page_sizes(const char* str, const size_t region_min_size, 199 const size_t region_max_size, 200 const size_t page_size, 201 const char* base = NULL, 202 const size_t size = 0) PRODUCT_RETURN; 203 204 static int vm_allocation_granularity(); 205 static char* reserve_memory(size_t bytes, char* addr = 0, 206 size_t alignment_hint = 0); 207 static char* attempt_reserve_memory_at(size_t bytes, char* addr); 208 static void split_reserved_memory(char *base, size_t size, 209 size_t split, bool realloc); 210 static bool commit_memory(char* addr, size_t bytes, 211 bool executable = false); 212 static bool commit_memory(char* addr, size_t size, size_t alignment_hint, 213 bool executable = false); 214 static bool uncommit_memory(char* addr, size_t bytes); 215 static bool release_memory(char* addr, size_t bytes); 216 217 enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX }; 218 static bool protect_memory(char* addr, size_t bytes, ProtType prot, 219 bool is_committed = true); 220 221 static bool guard_memory(char* addr, size_t bytes); 222 static bool unguard_memory(char* addr, size_t bytes); 223 static bool create_stack_guard_pages(char* addr, size_t bytes); 224 static bool remove_stack_guard_pages(char* addr, size_t bytes); 225 226 static char* map_memory(int fd, const char* file_name, size_t file_offset, 227 char *addr, size_t bytes, bool read_only = false, 228 bool allow_exec = false); 229 static char* remap_memory(int fd, const char* file_name, size_t file_offset, 230 char *addr, size_t bytes, bool read_only, 231 bool allow_exec); 232 static bool unmap_memory(char *addr, size_t bytes); 233 static void free_memory(char *addr, size_t bytes); 234 static void realign_memory(char *addr, size_t bytes, size_t alignment_hint); 235 236 // NUMA-specific interface 237 static bool numa_has_static_binding(); 238 static bool numa_has_group_homing(); 239 static void numa_make_local(char *addr, size_t bytes, int lgrp_hint); 240 static void 
numa_make_global(char *addr, size_t bytes); 241 static size_t numa_get_groups_num(); 242 static size_t numa_get_leaf_groups(int *ids, size_t size); 243 static bool numa_topology_changed(); 244 static int numa_get_group_id(); 245 246 // Page manipulation 247 struct page_info { 248 size_t size; 249 int lgrp_id; 250 }; 251 static bool get_page_info(char *start, page_info* info); 252 static char* scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found); 253 254 static char* non_memory_address_word(); 255 // reserve, commit and pin the entire memory region 256 static char* reserve_memory_special(size_t size, char* addr = NULL, 257 bool executable = false); 258 static bool release_memory_special(char* addr, size_t bytes); 259 static bool large_page_init(); 260 static size_t large_page_size(); 261 static bool can_commit_large_page_memory(); 262 static bool can_execute_large_page_memory(); 263 264 // OS interface to polling page 265 static address get_polling_page() { return _polling_page; } 266 static void set_polling_page(address page) { _polling_page = page; } 267 static bool is_poll_address(address addr) { return addr >= _polling_page && addr < (_polling_page + os::vm_page_size()); } 268 static void make_polling_page_unreadable(); 269 static void make_polling_page_readable(); 270 271 // Routines used to serialize the thread state without using membars 272 static void serialize_thread_states(); 273 274 // Since we write to the serialize page from every thread, we 275 // want stores to be on unique cache lines whenever possible 276 // in order to minimize CPU cross talk. We pre-compute the 277 // amount to shift the thread* to make this offset unique to 278 // each thread. 
279 static int get_serialize_page_shift_count() { 280 return SerializePageShiftCount; 281 } 282 283 static void set_serialize_page_mask(uintptr_t mask) { 284 _serialize_page_mask = mask; 285 } 286 287 static unsigned int get_serialize_page_mask() { 288 return _serialize_page_mask; 289 } 290 291 static void set_memory_serialize_page(address page); 292 293 static address get_memory_serialize_page() { 294 return (address)_mem_serialize_page; 295 } 296 297 static inline void write_memory_serialize_page(JavaThread *thread) { 298 uintptr_t page_offset = ((uintptr_t)thread >> 299 get_serialize_page_shift_count()) & 300 get_serialize_page_mask(); 301 *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1; 302 } 303 304 static bool is_memory_serialize_page(JavaThread *thread, address addr) { 305 if (UseMembar) return false; 306 // Previously this function calculated the exact address of this 307 // thread's serialize page, and checked if the faulting address 308 // was equal. However, some platforms mask off faulting addresses 309 // to the page size, so now we just check that the address is 310 // within the page. This makes the thread argument unnecessary, 311 // but we retain the NULL check to preserve existing behaviour. 
312 if (thread == NULL) return false; 313 address page = (address) _mem_serialize_page; 314 return addr >= page && addr < (page + os::vm_page_size()); 315 } 316 317 static void block_on_serialize_page_trap(); 318 319 // threads 320 321 enum ThreadType { 322 vm_thread, 323 cgc_thread, // Concurrent GC thread 324 pgc_thread, // Parallel GC thread 325 java_thread, 326 compiler_thread, 327 watcher_thread, 328 os_thread 329 }; 330 331 static bool create_thread(Thread* thread, 332 ThreadType thr_type, 333 size_t stack_size = 0); 334 static bool create_main_thread(JavaThread* thread); 335 static bool create_attached_thread(JavaThread* thread); 336 static void pd_start_thread(Thread* thread); 337 static void start_thread(Thread* thread); 338 339 static void initialize_thread(); 340 static void free_thread(OSThread* osthread); 341 342 // thread id on Linux/64bit is 64bit, on Windows and Solaris, it's 32bit 343 static intx current_thread_id(); 344 static int current_process_id(); 345 // hpi::read for calls from non native state 346 // For performance, hpi::read is only callable from _thread_in_native 347 static size_t read(int fd, void *buf, unsigned int nBytes); 348 static int sleep(Thread* thread, jlong ms, bool interruptable); 349 static int naked_sleep(); 350 static void infinite_sleep(); // never returns, use with CAUTION 351 static void yield(); // Yields to all threads with same priority 352 enum YieldResult { 353 YIELD_SWITCHED = 1, // caller descheduled, other ready threads exist & ran 354 YIELD_NONEREADY = 0, // No other runnable/ready threads. 355 // platform-specific yield return immediately 356 YIELD_UNKNOWN = -1 // Unknown: platform doesn't support _SWITCHED or _NONEREADY 357 // YIELD_SWITCHED and YIELD_NONREADY imply the platform supports a "strong" 358 // yield that can be used in lieu of blocking. 
359 } ; 360 static YieldResult NakedYield () ; 361 static void yield_all(int attempts = 0); // Yields to all other threads including lower priority 362 static void loop_breaker(int attempts); // called from within tight loops to possibly influence time-sharing 363 static OSReturn set_priority(Thread* thread, ThreadPriority priority); 364 static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority); 365 366 static void interrupt(Thread* thread); 367 static bool is_interrupted(Thread* thread, bool clear_interrupted); 368 369 static int pd_self_suspend_thread(Thread* thread); 370 371 static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp); 372 static frame fetch_frame_from_context(void* ucVoid); 373 374 static ExtendedPC get_thread_pc(Thread *thread); 375 static void breakpoint(); 376 377 static address current_stack_pointer(); 378 static address current_stack_base(); 379 static size_t current_stack_size(); 380 381 static int message_box(const char* title, const char* message); 382 static char* do_you_want_to_debug(const char* message); 383 384 // run cmd in a separate process and return its exit code; or -1 on failures 385 static int fork_and_exec(char *cmd); 386 387 // Set file to send error reports. 388 static void set_error_file(const char *logfile); 389 390 // os::exit() is merged with vm_exit() 391 // static void exit(int num); 392 393 // Terminate the VM, but don't exit the process 394 static void shutdown(); 395 396 // Terminate with an error. Default is to generate a core file on platforms 397 // that support such things. This calls shutdown() and then aborts. 398 static void abort(bool dump_core = true); 399 400 // Die immediately, no exit hook, no abort hook, no cleanup. 401 static void die(); 402 403 // Reading directories. 
404 static DIR* opendir(const char* dirname); 405 static int readdir_buf_size(const char *path); 406 static struct dirent* readdir(DIR* dirp, dirent* dbuf); 407 static int closedir(DIR* dirp); 408 409 // Dynamic library extension 410 static const char* dll_file_extension(); 411 412 static const char* get_temp_directory(); 413 static const char* get_current_directory(char *buf, int buflen); 414 415 // Builds a platform-specific full library path given a ld path and lib name 416 static void dll_build_name(char* buffer, size_t size, 417 const char* pathname, const char* fname); 418 419 // Symbol lookup, find nearest function name; basically it implements 420 // dladdr() for all platforms. Name of the nearest function is copied 421 // to buf. Distance from its base address is returned as offset. 422 // If function name is not found, buf[0] is set to '\0' and offset is 423 // set to -1. 424 static bool dll_address_to_function_name(address addr, char* buf, 425 int buflen, int* offset); 426 427 // Locate DLL/DSO. On success, full path of the library is copied to 428 // buf, and offset is set to be the distance between addr and the 429 // library's base address. On failure, buf[0] is set to '\0' and 430 // offset is set to -1. 431 static bool dll_address_to_library_name(address addr, char* buf, 432 int buflen, int* offset); 433 434 // Find out whether the pc is in the static code for jvm.dll/libjvm.so. 435 static bool address_is_in_vm(address addr); 436 437 // Loads .dll/.so and 438 // in case of error it checks if .dll/.so was built for the 439 // same architecture as Hotspot is running on 440 static void* dll_load(const char *name, char *ebuf, int ebuflen); 441 442 // lookup symbol in a shared library 443 static void* dll_lookup(void* handle, const char* name); 444 445 // Print out system information; they are called by fatal error handler. 446 // Output format may be different on different platforms. 
447 static void print_os_info(outputStream* st); 448 static void print_cpu_info(outputStream* st); 449 static void print_memory_info(outputStream* st); 450 static void print_dll_info(outputStream* st); 451 static void print_environment_variables(outputStream* st, const char** env_list, char* buffer, int len); 452 static void print_context(outputStream* st, void* context); 453 static void print_register_info(outputStream* st, void* context); 454 static void print_siginfo(outputStream* st, void* siginfo); 455 static void print_signal_handlers(outputStream* st, char* buf, size_t buflen); 456 static void print_date_and_time(outputStream* st); 457 458 static void print_location(outputStream* st, intptr_t x, bool verbose = false); 459 460 // The following two functions are used by fatal error handler to trace 461 // native (C) frames. They are not part of frame.hpp/frame.cpp because 462 // frame.hpp/cpp assume thread is JavaThread, and also because different 463 // OS/compiler may have different convention or provide different API to 464 // walk C frames. 465 // 466 // We don't attempt to become a debugger, so we only follow frames if that 467 // does not require a lookup in the unwind table, which is part of the binary 468 // file but may be unsafe to read after a fatal error. So on x86, we can 469 // only walk stack if %ebp is used as frame pointer; on ia64, it's not 470 // possible to walk C stack without having the unwind table. 471 static bool is_first_C_frame(frame *fr); 472 static frame get_sender_for_C_frame(frame *fr); 473 474 // return current frame. pc() and sp() are set to NULL on failure. 475 static frame current_frame(); 476 477 static void print_hex_dump(outputStream* st, address start, address end, int unitsize); 478 479 // returns a string to describe the exception/signal; 480 // returns NULL if exception_code is not an OS exception/signal. 
481 static const char* exception_name(int exception_code, char* buf, size_t buflen); 482 483 // Returns native Java library, loads if necessary 484 static void* native_java_library(); 485 486 // Fills in path to jvm.dll/libjvm.so (this info used to find hpi). 487 static void jvm_path(char *buf, jint buflen); 488 489 // Returns true if we are running in a headless jre. 490 static bool is_headless_jre(); 491 492 // JNI names 493 static void print_jni_name_prefix_on(outputStream* st, int args_size); 494 static void print_jni_name_suffix_on(outputStream* st, int args_size); 495 496 // File conventions 497 static const char* file_separator(); 498 static const char* line_separator(); 499 static const char* path_separator(); 500 501 // Init os specific system properties values 502 static void init_system_properties_values(); 503 504 // IO operations, non-JVM_ version. 505 static int stat(const char* path, struct stat* sbuf); 506 static bool dir_is_empty(const char* path); 507 508 // IO operations on binary files 509 static int create_binary_file(const char* path, bool rewrite_existing); 510 static jlong current_file_offset(int fd); 511 static jlong seek_to_file_offset(int fd, jlong offset); 512 513 // Thread Local Storage 514 static int allocate_thread_local_storage(); 515 static void thread_local_storage_at_put(int index, void* value); 516 static void* thread_local_storage_at(int index); 517 static void free_thread_local_storage(int index); 518 519 // General allocation (must be MT-safe) 520 static void* malloc (size_t size); 521 static void* realloc (void *memblock, size_t size); 522 static void free (void *memblock); 523 static bool check_heap(bool force = false); // verify C heap integrity 524 static char* strdup(const char *); // Like strdup 525 526 #ifndef PRODUCT 527 static int num_mallocs; // # of calls to malloc/realloc 528 static size_t alloc_bytes; // # of bytes allocated 529 static int num_frees; // # of calls to free 530 #endif 531 532 // Printing 64 bit 
integers 533 static const char* jlong_format_specifier(); 534 static const char* julong_format_specifier(); 535 536 // Support for signals (see JVM_RaiseSignal, JVM_RegisterSignal) 537 static void signal_init(); 538 static void signal_init_pd(); 539 static void signal_notify(int signal_number); 540 static void* signal(int signal_number, void* handler); 541 static void signal_raise(int signal_number); 542 static int signal_wait(); 543 static int signal_lookup(); 544 static void* user_handler(); 545 static void terminate_signal_thread(); 546 static int sigexitnum_pd(); 547 548 // random number generation 549 static long random(); // return 32bit pseudorandom number 550 static void init_random(long initval); // initialize random sequence 551 552 // Structured OS Exception support 553 static void os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread); 554 555 // JVMTI & JVM monitoring and management support 556 // The thread_cpu_time() and current_thread_cpu_time() are only 557 // supported if is_thread_cpu_time_supported() returns true. 558 // They are not supported on Solaris T1. 559 560 // Thread CPU Time - return the fast estimate on a platform 561 // On Solaris - call gethrvtime (fast) - user time only 562 // On Linux - fast clock_gettime where available - user+sys 563 // - otherwise: very slow /proc fs - user+sys 564 // On Windows - GetThreadTimes - user+sys 565 static jlong current_thread_cpu_time(); 566 static jlong thread_cpu_time(Thread* t); 567 568 // Thread CPU Time with user_sys_cpu_time parameter. 569 // 570 // If user_sys_cpu_time is true, user+sys time is returned. 571 // Otherwise, only user time is returned 572 static jlong current_thread_cpu_time(bool user_sys_cpu_time); 573 static jlong thread_cpu_time(Thread* t, bool user_sys_cpu_time); 574 575 // Return a bunch of info about the timers. 
576 // Note that the returned info for these two functions may be different 577 // on some platforms 578 static void current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr); 579 static void thread_cpu_time_info(jvmtiTimerInfo *info_ptr); 580 581 static bool is_thread_cpu_time_supported(); 582 583 // System loadavg support. Returns -1 if load average cannot be obtained. 584 static int loadavg(double loadavg[], int nelem); 585 586 // Hook for os specific jvm options that we don't want to abort on seeing 587 static bool obsolete_option(const JavaVMOption *option); 588 589 // Platform dependent stuff 590 #include "incls/_os_pd.hpp.incl" 591 592 // debugging support (mostly used by debug.cpp but also fatal error handler) 593 static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address 594 595 static bool dont_yield(); // when true, JVM_Yield() is nop 596 static void print_statistics(); 597 598 // Thread priority helpers (implemented in OS-specific part) 599 static OSReturn set_native_priority(Thread* thread, int native_prio); 600 static OSReturn get_native_priority(const Thread* const thread, int* priority_ptr); 601 static int java_to_os_priority[MaxPriority + 1]; 602 // Hint to the underlying OS that a task switch would not be good. 603 // Void return because it's a hint and can fail. 604 static void hint_no_preempt(); 605 606 // Used at creation if requested by the diagnostic flag PauseAtStartup. 
607 // Causes the VM to wait until an external stimulus has been applied 608 // (for Unix, that stimulus is a signal, for Windows, an external 609 // ResumeThread call) 610 static void pause(); 611 612 protected: 613 static long _rand_seed; // seed for random number generator 614 static int _processor_count; // number of processors 615 616 static char* format_boot_path(const char* format_string, 617 const char* home, 618 int home_len, 619 char fileSep, 620 char pathSep); 621 static bool set_boot_path(char fileSep, char pathSep); 622 static char** split_path(const char* path, int* n); 623 }; 624 625 // Note that "PAUSE" is almost always used with synchronization 626 // so arguably we should provide Atomic::SpinPause() instead 627 // of the global SpinPause() with C linkage. 628 // It'd also be eligible for inlining on many platforms. 629 630 extern "C" int SpinPause () ; 631 extern "C" int SafeFetch32 (int * adr, int errValue) ; 632 extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t errValue) ;