1 /* 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_RUNTIME_OS_HPP 26 #define SHARE_VM_RUNTIME_OS_HPP 27 28 #include "jvmtifiles/jvmti.h" 29 #include "runtime/atomic.hpp" 30 #include "runtime/extendedPC.hpp" 31 #include "runtime/handles.hpp" 32 #include "utilities/top.hpp" 33 #ifdef TARGET_OS_FAMILY_linux 34 # include "jvm_linux.h" 35 # include <setjmp.h> 36 #endif 37 #ifdef TARGET_OS_FAMILY_solaris 38 # include "jvm_solaris.h" 39 # include <setjmp.h> 40 #endif 41 #ifdef TARGET_OS_FAMILY_windows 42 # include "jvm_windows.h" 43 #endif 44 #ifdef TARGET_OS_FAMILY_bsd 45 # include "jvm_bsd.h" 46 # include <setjmp.h> 47 #endif 48 49 // os defines the interface to operating system; this includes traditional 50 // OS services (time, I/O) as well as other functionality with system- 51 // dependent code. 
// Generic function-pointer type for entry points resolved from dynamically
// loaded libraries (see os::dll_lookup()).
typedef void (*dll_func)(...);

// Forward declarations so this header does not depend on the full class
// definitions.
class Thread;
class JavaThread;
class Event;
class DLL;
class FileHandle;
template<class E> class GrowableArray;

// %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose

// Platform-independent error return values from OS functions
enum OSReturn {
  OS_OK         =  0,        // Operation was successful
  OS_ERR        = -1,        // Operation failed
  OS_INTRPT     = -2,        // Operation was interrupted
  OS_TIMEOUT    = -3,        // Operation timed out
  OS_NOMEM      = -5,        // Operation failed for lack of memory
  OS_NORESOURCE = -6         // Operation failed for lack of nonmemory resource
};

enum ThreadPriority {        // JLS 20.20.1-3
  NoPriority       = -1,     // Initial non-priority value
  MinPriority      =  1,     // Minimum priority
  NormPriority     =  5,     // Normal (non-daemon) priority
  NearMaxPriority  =  9,     // High priority, used for VMThread
  MaxPriority      = 10,     // Highest priority, used for WatcherThread
                             // ensures that VMThread doesn't starve profiler
  CriticalPriority = 11      // Critical thread priority
};

// Executable parameter flag for os::commit_memory() and
// os::commit_memory_or_exit().
const bool ExecMem = true;

// Typedef for structured exception handling support
typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);

// os is a static-only facade over operating-system services: virtual memory,
// threads, time, file and socket I/O, signals, and dynamic libraries.
// Platform-specific implementations live in the os_<family>[_<arch>] files
// that are textually included near the end of this class.
class os: AllStatic {
 public:
  enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)

 private:
  static OSThread*          _starting_thread;
  static address            _polling_page;       // page tested by is_poll_address(); see make_polling_page_*
  static volatile int32_t * _mem_serialize_page; // written by write_memory_serialize_page()
  static uintptr_t          _serialize_page_mask; // mask applied to (thread >> shift) to pick a page offset
 public:
  static size_t             _page_sizes[page_sizes_max];

 private:
  // Seed _page_sizes with the default page size; the array is kept
  // zero-terminated (max_page_size() relies on descending order).
  static void init_page_sizes(size_t default_page_size) {
    _page_sizes[0] = default_page_size;
    _page_sizes[1] = 0; // sentinel
  }

  // Platform-dependent ("pd_") primitives backing the public virtual-memory
  // interface further below.
  static char*  pd_reserve_memory(size_t bytes, char* addr = 0,
                                  size_t alignment_hint = 0);
  static char*  pd_attempt_reserve_memory_at(size_t bytes, char* addr);
  static void   pd_split_reserved_memory(char *base, size_t size,
                                         size_t split, bool realloc);
  static bool   pd_commit_memory(char* addr, size_t bytes, bool executable);
  static bool   pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                                 bool executable);
  // Same as pd_commit_memory() that either succeeds or calls
  // vm_exit_out_of_memory() with the specified mesg.
  static void   pd_commit_memory_or_exit(char* addr, size_t bytes,
                                         bool executable, const char* mesg);
  static void   pd_commit_memory_or_exit(char* addr, size_t size,
                                         size_t alignment_hint,
                                         bool executable, const char* mesg);
  static bool   pd_uncommit_memory(char* addr, size_t bytes);
  static bool   pd_release_memory(char* addr, size_t bytes);

  static char*  pd_map_memory(int fd, const char* file_name, size_t file_offset,
                              char *addr, size_t bytes, bool read_only = false,
                              bool allow_exec = false);
  static char*  pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                                char *addr, size_t bytes, bool read_only,
                                bool allow_exec);
  static bool   pd_unmap_memory(char *addr, size_t bytes);
  static void   pd_free_memory(char *addr, size_t bytes, size_t alignment_hint);
  static void   pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint);


 public:
  static void init(void);          // Called before command line parsing
  static void init_ergo(void);     // Called after command line parsing
                                   // before VM ergonomics processing.
  static jint init_2(void);        // Called after command line parsing
                                   // and VM ergonomics processing
  static void init_globals(void) { // Called from init_globals() in init.cpp
    init_globals_ext();
  }
  static void init_3(void);        // Called at the end of vm init

  // File names are case-insensitive on windows only
  // Override me as needed
  static int    file_name_strcmp(const char* s1, const char* s2);

  static bool getenv(const char* name, char* buffer, int len);
  static bool have_special_privileges();

  static jlong  javaTimeMillis();
  static jlong  javaTimeNanos();
  static void   javaTimeNanos_info(jvmtiTimerInfo *info_ptr);
  static void   run_periodic_checks();


  // Returns the elapsed time in seconds since the vm started.
  static double elapsedTime();

  // Returns real time in seconds since an arbitrary point
  // in the past.
  static bool getTimesSecs(double* process_real_time,
                           double* process_user_time,
                           double* process_system_time);

  // Interface to the performance counter
  static jlong elapsed_counter();
  static jlong elapsed_frequency();

  // The "virtual time" of a thread is the amount of time a thread has
  // actually run.  The first function indicates whether the OS supports
  // this functionality for the current thread, and if so:
  //   * the second enables vtime tracking (if that is required).
  //   * the third tells whether vtime is enabled.
  //   * the fourth returns the elapsed virtual time for the current thread.
  static bool supports_vtime();
  static bool enable_vtime();
  static bool vtime_enabled();
  static double elapsedVTime();

  // Return current local time in a string (YYYY-MM-DD HH:MM:SS).
  // It is MT safe, but not async-safe, as reading time zone
  // information may require a lock on some platforms.
  static char*      local_time_string(char *buf, size_t buflen);
  static struct tm* localtime_pd(const time_t* clock, struct tm* res);
  // Fill in buffer with current local time as an ISO-8601 string.
  // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz.
  // Returns buffer, or NULL if it failed.
  static char* iso8601_time(char* buffer, size_t buffer_length);

  // Interface for detecting multiprocessor system
  static inline bool is_MP() {
    assert(_processor_count > 0, "invalid processor count");
    // AssumeMP forces MP behaviour even on a single-CPU machine.
    return _processor_count > 1 || AssumeMP;
  }
  static julong available_memory();
  static julong physical_memory();
  static bool has_allocatable_memory_limit(julong* limit);
  static bool is_server_class_machine();

  // number of CPUs
  static int processor_count() {
    return _processor_count;
  }
  static void set_processor_count(int count) { _processor_count = count; }

  // Returns the number of CPUs this process is currently allowed to run on.
  // Note that on some OSes this can change dynamically.
  static int active_processor_count();

  // Bind processes to processors.
  //     This is a two step procedure:
  //     first you generate a distribution of processes to processors,
  //     then you bind processes according to that distribution.
  // Compute a distribution for number of processes to processors.
  //    Stores the processor id's into the distribution array argument.
  //    Returns true if it worked, false if it didn't.
  static bool distribute_processes(uint length, uint* distribution);
  // Binds the current process to a processor.
  //    Returns true if it worked, false if it didn't.
  static bool bind_to_processor(uint processor_id);

  // Give a name to the current thread.
  static void set_native_thread_name(const char *name);

  // Interface for stack banging (predetect possible stack overflow for
  // exception processing)  There are guard pages, and above that shadow
  // pages for stack overflow checking.
  static bool uses_stack_guard_pages();
  static bool allocate_stack_guard_pages();
  static void bang_stack_shadow_pages();
  static bool stack_shadow_pages_available(Thread *thread, methodHandle method);

  // OS interface to Virtual Memory

  // Return the default page size.
  static int    vm_page_size();

  // Return the page size to use for a region of memory.  The min_pages argument
  // is a hint intended to limit fragmentation; it says the returned page size
  // should be <= region_max_size / min_pages.  Because min_pages is a hint,
  // this routine may return a size larger than region_max_size / min_pages.
  //
  // The current implementation ignores min_pages if a larger page size is an
  // exact multiple of both region_min_size and region_max_size.  This allows
  // larger pages to be used when doing so would not cause fragmentation; in
  // particular, a single page can be used when region_min_size ==
  // region_max_size == a supported page size.
  static size_t page_size_for_region(size_t region_min_size,
                                     size_t region_max_size,
                                     uint min_pages);
  // return the largest page size that can be used
  static size_t max_page_size() {
    // The _page_sizes array is sorted in descending order.
    return _page_sizes[0];
  }

  // Methods for tracing page sizes returned by the above method; enabled by
  // TracePageSizes.  The region_{min,max}_size parameters should be the values
  // passed to page_size_for_region() and page_size should be the result of that
  // call.  The (optional) base and size parameters should come from the
  // ReservedSpace base() and size() methods.
  static void trace_page_sizes(const char* str, const size_t* page_sizes,
                               int count) PRODUCT_RETURN;
  static void trace_page_sizes(const char* str, const size_t region_min_size,
                               const size_t region_max_size,
                               const size_t page_size,
                               const char* base = NULL,
                               const size_t size = 0) PRODUCT_RETURN;

  static int    vm_allocation_granularity();
  static char*  reserve_memory(size_t bytes, char* addr = 0,
                               size_t alignment_hint = 0);
  static char*  reserve_memory(size_t bytes, char* addr,
                               size_t alignment_hint, MEMFLAGS flags);
  static char*  reserve_memory_aligned(size_t size, size_t alignment);
  static char*  attempt_reserve_memory_at(size_t bytes, char* addr);
  static void   split_reserved_memory(char *base, size_t size,
                                      size_t split, bool realloc);
  static bool   commit_memory(char* addr, size_t bytes, bool executable);
  static bool   commit_memory(char* addr, size_t size, size_t alignment_hint,
                              bool executable);
  // Same as commit_memory() that either succeeds or calls
  // vm_exit_out_of_memory() with the specified mesg.
  static void   commit_memory_or_exit(char* addr, size_t bytes,
                                      bool executable, const char* mesg);
  static void   commit_memory_or_exit(char* addr, size_t size,
                                      size_t alignment_hint,
                                      bool executable, const char* mesg);
  static bool   uncommit_memory(char* addr, size_t bytes);
  static bool   release_memory(char* addr, size_t bytes);

  enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
  static bool   protect_memory(char* addr, size_t bytes, ProtType prot,
                               bool is_committed = true);

  static bool   guard_memory(char* addr, size_t bytes);
  static bool   unguard_memory(char* addr, size_t bytes);
  static bool   create_stack_guard_pages(char* addr, size_t bytes);
  static bool   pd_create_stack_guard_pages(char* addr, size_t bytes);
  static bool   remove_stack_guard_pages(char* addr, size_t bytes);

  static char*  map_memory(int fd, const char* file_name, size_t file_offset,
                           char *addr, size_t bytes, bool read_only = false,
                           bool allow_exec = false);
  static char*  remap_memory(int fd, const char* file_name, size_t file_offset,
                             char *addr, size_t bytes, bool read_only,
                             bool allow_exec);
  static bool   unmap_memory(char *addr, size_t bytes);
  static void   free_memory(char *addr, size_t bytes, size_t alignment_hint);
  static void   realign_memory(char *addr, size_t bytes, size_t alignment_hint);

  // NUMA-specific interface
  static bool   numa_has_static_binding();
  static bool   numa_has_group_homing();
  static void   numa_make_local(char *addr, size_t bytes, int lgrp_hint);
  static void   numa_make_global(char *addr, size_t bytes);
  static size_t numa_get_groups_num();
  static size_t numa_get_leaf_groups(int *ids, size_t size);
  static bool   numa_topology_changed();
  static int    numa_get_group_id();

  // Page manipulation
  struct page_info {
    size_t size;
    int lgrp_id;
  };
  static bool   get_page_info(char *start, page_info* info);
  static char*  scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found);

  static char*  non_memory_address_word();
  // reserve, commit and pin the entire memory region
  static char*  reserve_memory_special(size_t size, size_t alignment,
                                       char* addr, bool executable);
  static bool   release_memory_special(char* addr, size_t bytes);
  static void   large_page_init();
  static size_t large_page_size();
  static bool   can_commit_large_page_memory();
  static bool   can_execute_large_page_memory();

  // OS interface to polling page
  static address get_polling_page()             { return _polling_page; }
  static void    set_polling_page(address page) { _polling_page = page; }
  // True iff addr falls anywhere inside the one-page polling region.
  static bool    is_poll_address(address addr)  { return addr >= _polling_page && addr < (_polling_page + os::vm_page_size()); }
  static void    make_polling_page_unreadable();
  static void    make_polling_page_readable();

  // Routines used to serialize the thread state without using membars
  static void    serialize_thread_states();

  // Since we write to the serialize page from every thread, we
  // want stores to be on unique cache lines whenever possible
  // in order to minimize CPU cross talk.  We pre-compute the
  // amount to shift the thread* to make this offset unique to
  // each thread.
362 static int get_serialize_page_shift_count() { 363 return SerializePageShiftCount; 364 } 365 366 static void set_serialize_page_mask(uintptr_t mask) { 367 _serialize_page_mask = mask; 368 } 369 370 static unsigned int get_serialize_page_mask() { 371 return _serialize_page_mask; 372 } 373 374 static void set_memory_serialize_page(address page); 375 376 static address get_memory_serialize_page() { 377 return (address)_mem_serialize_page; 378 } 379 380 static inline void write_memory_serialize_page(JavaThread *thread) { 381 uintptr_t page_offset = ((uintptr_t)thread >> 382 get_serialize_page_shift_count()) & 383 get_serialize_page_mask(); 384 *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1; 385 } 386 387 static bool is_memory_serialize_page(JavaThread *thread, address addr) { 388 if (UseMembar) return false; 389 // Previously this function calculated the exact address of this 390 // thread's serialize page, and checked if the faulting address 391 // was equal. However, some platforms mask off faulting addresses 392 // to the page size, so now we just check that the address is 393 // within the page. This makes the thread argument unnecessary, 394 // but we retain the NULL check to preserve existing behaviour. 
395 if (thread == NULL) return false; 396 address page = (address) _mem_serialize_page; 397 return addr >= page && addr < (page + os::vm_page_size()); 398 } 399 400 static void block_on_serialize_page_trap(); 401 402 // threads 403 404 enum ThreadType { 405 vm_thread, 406 cgc_thread, // Concurrent GC thread 407 pgc_thread, // Parallel GC thread 408 java_thread, 409 compiler_thread, 410 watcher_thread, 411 os_thread 412 }; 413 414 static bool create_thread(Thread* thread, 415 ThreadType thr_type, 416 size_t stack_size = 0); 417 static bool create_main_thread(JavaThread* thread); 418 static bool create_attached_thread(JavaThread* thread); 419 static void pd_start_thread(Thread* thread); 420 static void start_thread(Thread* thread); 421 422 static void initialize_thread(Thread* thr); 423 static void free_thread(OSThread* osthread); 424 425 // thread id on Linux/64bit is 64bit, on Windows and Solaris, it's 32bit 426 static intx current_thread_id(); 427 static int current_process_id(); 428 static int sleep(Thread* thread, jlong ms, bool interruptable); 429 static int naked_sleep(); 430 static void infinite_sleep(); // never returns, use with CAUTION 431 static void yield(); // Yields to all threads with same priority 432 enum YieldResult { 433 YIELD_SWITCHED = 1, // caller descheduled, other ready threads exist & ran 434 YIELD_NONEREADY = 0, // No other runnable/ready threads. 435 // platform-specific yield return immediately 436 YIELD_UNKNOWN = -1 // Unknown: platform doesn't support _SWITCHED or _NONEREADY 437 // YIELD_SWITCHED and YIELD_NONREADY imply the platform supports a "strong" 438 // yield that can be used in lieu of blocking. 
439 } ; 440 static YieldResult NakedYield () ; 441 static void yield_all(int attempts = 0); // Yields to all other threads including lower priority 442 static void loop_breaker(int attempts); // called from within tight loops to possibly influence time-sharing 443 static OSReturn set_priority(Thread* thread, ThreadPriority priority); 444 static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority); 445 446 static void interrupt(Thread* thread); 447 static bool is_interrupted(Thread* thread, bool clear_interrupted); 448 449 static int pd_self_suspend_thread(Thread* thread); 450 451 static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp); 452 static frame fetch_frame_from_context(void* ucVoid); 453 454 static ExtendedPC get_thread_pc(Thread *thread); 455 static void breakpoint(); 456 457 static address current_stack_pointer(); 458 static address current_stack_base(); 459 static size_t current_stack_size(); 460 461 static void verify_stack_alignment() PRODUCT_RETURN; 462 463 static int message_box(const char* title, const char* message); 464 static char* do_you_want_to_debug(const char* message); 465 466 // run cmd in a separate process and return its exit code; or -1 on failures 467 static int fork_and_exec(char *cmd); 468 469 // Set file to send error reports. 470 static void set_error_file(const char *logfile); 471 472 // os::exit() is merged with vm_exit() 473 // static void exit(int num); 474 475 // Terminate the VM, but don't exit the process 476 static void shutdown(); 477 478 // Terminate with an error. Default is to generate a core file on platforms 479 // that support such things. This calls shutdown() and then aborts. 480 static void abort(bool dump_core = true); 481 482 // Die immediately, no exit hook, no abort hook, no cleanup. 
  static void die();

  // File i/o operations
  static const int default_file_open_flags();
  static int open(const char *path, int oflag, int mode);
  static FILE* open(int fd, const char* mode);
  static int close(int fd);
  static jlong lseek(int fd, jlong offset, int whence);
  static char* native_path(char *path);
  static int ftruncate(int fd, jlong length);
  static int fsync(int fd);
  static int available(int fd, jlong *bytes);

  // File i/o operations (unbuffered read/write variants)

  static size_t read(int fd, void *buf, unsigned int nBytes);
  static size_t restartable_read(int fd, void *buf, unsigned int nBytes);
  static size_t write(int fd, const void *buf, unsigned int nBytes);

  // Reading directories.
  static DIR*           opendir(const char* dirname);
  static int            readdir_buf_size(const char *path);
  static struct dirent* readdir(DIR* dirp, dirent* dbuf);
  static int            closedir(DIR* dirp);

  // Dynamic library extension
  static const char*    dll_file_extension();

  static const char*    get_temp_directory();
  static const char*    get_current_directory(char *buf, size_t buflen);

  // Builds a platform-specific full library path given a ld path and lib name
  // Returns true if buffer contains full path to existing file, false otherwise
  static bool dll_build_name(char* buffer, size_t size,
                             const char* pathname, const char* fname);

  // Symbol lookup, find nearest function name; basically it implements
  // dladdr() for all platforms. Name of the nearest function is copied
  // to buf. Distance from its base address is optionally returned as offset.
  // If function name is not found, buf[0] is set to '\0' and offset is
  // set to -1 (if offset is non-NULL).
  static bool dll_address_to_function_name(address addr, char* buf,
                                           int buflen, int* offset);

  // Locate DLL/DSO. On success, full path of the library is copied to
  // buf, and offset is optionally set to be the distance between addr
  // and the library's base address. On failure, buf[0] is set to '\0'
  // and offset is set to -1 (if offset is non-NULL).
  static bool dll_address_to_library_name(address addr, char* buf,
                                          int buflen, int* offset);

  // Find out whether the pc is in the static code for jvm.dll/libjvm.so.
  static bool address_is_in_vm(address addr);

  // Loads .dll/.so and
  // in case of error it checks if .dll/.so was built for the
  // same architecture as Hotspot is running on
  static void* dll_load(const char *name, char *ebuf, int ebuflen);

  // lookup symbol in a shared library
  static void* dll_lookup(void* handle, const char* name);

  // Unload library
  static void  dll_unload(void *lib);

  // Print out system information; they are called by fatal error handler.
  // Output format may be different on different platforms.
  static void print_os_info(outputStream* st);
  static void print_os_info_brief(outputStream* st);
  static void print_cpu_info(outputStream* st);
  static void pd_print_cpu_info(outputStream* st);
  static void print_memory_info(outputStream* st);
  static void print_dll_info(outputStream* st);
  static void print_environment_variables(outputStream* st, const char** env_list, char* buffer, int len);
  static void print_context(outputStream* st, void* context);
  static void print_register_info(outputStream* st, void* context);
  static void print_siginfo(outputStream* st, void* siginfo);
  static void print_signal_handlers(outputStream* st, char* buf, size_t buflen);
  static void print_date_and_time(outputStream* st);

  static void print_location(outputStream* st, intptr_t x, bool verbose = false);
  static size_t lasterror(char *buf, size_t len);
  static int get_last_error();

  // Determines whether the calling process is being debugged by a user-mode debugger.
  static bool is_debugger_attached();

  // wait for a key press if PauseAtExit is set
  static void wait_for_keypress_at_exit(void);

  // The following two functions are used by fatal error handler to trace
  // native (C) frames. They are not part of frame.hpp/frame.cpp because
  // frame.hpp/cpp assume thread is JavaThread, and also because different
  // OS/compiler may have different convention or provide different API to
  // walk C frames.
  //
  // We don't attempt to become a debugger, so we only follow frames if that
  // does not require a lookup in the unwind table, which is part of the binary
  // file but may be unsafe to read after a fatal error. So on x86, we can
  // only walk stack if %ebp is used as frame pointer; on ia64, it's not
  // possible to walk C stack without having the unwind table.
  static bool is_first_C_frame(frame *fr);
  static frame get_sender_for_C_frame(frame *fr);

  // return current frame. pc() and sp() are set to NULL on failure.
  static frame current_frame();

  static void print_hex_dump(outputStream* st, address start, address end, int unitsize);

  // returns a string to describe the exception/signal;
  // returns NULL if exception_code is not an OS exception/signal.
  static const char* exception_name(int exception_code, char* buf, size_t buflen);

  // Returns native Java library, loads if necessary
  static void*    native_java_library();

  // Fills in path to jvm.dll/libjvm.so (used by the Disassembler)
  static void     jvm_path(char *buf, jint buflen);

  // Returns true if we are running in a headless jre.
  static bool     is_headless_jre();

  // JNI names
  static void     print_jni_name_prefix_on(outputStream* st, int args_size);
  static void     print_jni_name_suffix_on(outputStream* st, int args_size);

  // File conventions
  static const char* file_separator();
  static const char* line_separator();
  static const char* path_separator();

  // Init os specific system properties values
  static void init_system_properties_values();

  // IO operations, non-JVM_ version.
618 static int stat(const char* path, struct stat* sbuf); 619 static bool dir_is_empty(const char* path); 620 621 // IO operations on binary files 622 static int create_binary_file(const char* path, bool rewrite_existing); 623 static jlong current_file_offset(int fd); 624 static jlong seek_to_file_offset(int fd, jlong offset); 625 626 // Thread Local Storage 627 static int allocate_thread_local_storage(); 628 static void thread_local_storage_at_put(int index, void* value); 629 static void* thread_local_storage_at(int index); 630 static void free_thread_local_storage(int index); 631 632 // Stack walk 633 static address get_caller_pc(int n = 0); 634 635 // General allocation (must be MT-safe) 636 static void* malloc (size_t size, MEMFLAGS flags, address caller_pc = 0); 637 static void* realloc (void *memblock, size_t size, MEMFLAGS flags, address caller_pc = 0); 638 static void free (void *memblock, MEMFLAGS flags = mtNone); 639 static bool check_heap(bool force = false); // verify C heap integrity 640 static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup 641 642 #ifndef PRODUCT 643 static julong num_mallocs; // # of calls to malloc/realloc 644 static julong alloc_bytes; // # of bytes allocated 645 static julong num_frees; // # of calls to free 646 static julong free_bytes; // # of bytes freed 647 #endif 648 649 // SocketInterface (ex HPI SocketInterface ) 650 static int socket(int domain, int type, int protocol); 651 static int socket_close(int fd); 652 static int socket_shutdown(int fd, int howto); 653 static int recv(int fd, char* buf, size_t nBytes, uint flags); 654 static int send(int fd, char* buf, size_t nBytes, uint flags); 655 static int raw_send(int fd, char* buf, size_t nBytes, uint flags); 656 static int timeout(int fd, long timeout); 657 static int listen(int fd, int count); 658 static int connect(int fd, struct sockaddr* him, socklen_t len); 659 static int bind(int fd, struct sockaddr* him, socklen_t len); 660 static int 
accept(int fd, struct sockaddr* him, socklen_t* len); 661 static int recvfrom(int fd, char* buf, size_t nbytes, uint flags, 662 struct sockaddr* from, socklen_t* fromlen); 663 static int get_sock_name(int fd, struct sockaddr* him, socklen_t* len); 664 static int sendto(int fd, char* buf, size_t len, uint flags, 665 struct sockaddr* to, socklen_t tolen); 666 static int socket_available(int fd, jint* pbytes); 667 668 static int get_sock_opt(int fd, int level, int optname, 669 char* optval, socklen_t* optlen); 670 static int set_sock_opt(int fd, int level, int optname, 671 const char* optval, socklen_t optlen); 672 static int get_host_name(char* name, int namelen); 673 674 static struct hostent* get_host_by_name(char* name); 675 676 // Support for signals (see JVM_RaiseSignal, JVM_RegisterSignal) 677 static void signal_init(); 678 static void signal_init_pd(); 679 static void signal_notify(int signal_number); 680 static void* signal(int signal_number, void* handler); 681 static void signal_raise(int signal_number); 682 static int signal_wait(); 683 static int signal_lookup(); 684 static void* user_handler(); 685 static void terminate_signal_thread(); 686 static int sigexitnum_pd(); 687 688 // random number generation 689 static long random(); // return 32bit pseudorandom number 690 static void init_random(long initval); // initialize random sequence 691 692 // Structured OS Exception support 693 static void os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread); 694 695 // On Windows this will create an actual minidump, on Linux/Solaris it will simply check core dump limits 696 static void check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize); 697 698 // Get the default path to the core file 699 // Returns the length of the string 700 static int get_core_path(char* buffer, size_t bufferSize); 701 702 // JVMTI & JVM monitoring and management support 703 // The 
thread_cpu_time() and current_thread_cpu_time() are only 704 // supported if is_thread_cpu_time_supported() returns true. 705 // They are not supported on Solaris T1. 706 707 // Thread CPU Time - return the fast estimate on a platform 708 // On Solaris - call gethrvtime (fast) - user time only 709 // On Linux - fast clock_gettime where available - user+sys 710 // - otherwise: very slow /proc fs - user+sys 711 // On Windows - GetThreadTimes - user+sys 712 static jlong current_thread_cpu_time(); 713 static jlong thread_cpu_time(Thread* t); 714 715 // Thread CPU Time with user_sys_cpu_time parameter. 716 // 717 // If user_sys_cpu_time is true, user+sys time is returned. 718 // Otherwise, only user time is returned 719 static jlong current_thread_cpu_time(bool user_sys_cpu_time); 720 static jlong thread_cpu_time(Thread* t, bool user_sys_cpu_time); 721 722 // Return a bunch of info about the timers. 723 // Note that the returned info for these two functions may be different 724 // on some platforms 725 static void current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr); 726 static void thread_cpu_time_info(jvmtiTimerInfo *info_ptr); 727 728 static bool is_thread_cpu_time_supported(); 729 730 // System loadavg support. Returns -1 if load average cannot be obtained. 731 static int loadavg(double loadavg[], int nelem); 732 733 // Hook for os specific jvm options that we don't want to abort on seeing 734 static bool obsolete_option(const JavaVMOption *option); 735 736 // Read file line by line. If line is longer than bsize, 737 // rest of line is skipped. 
Returns number of bytes read or -1 on EOF 738 static int get_line_chars(int fd, char *buf, const size_t bsize); 739 740 // Extensions 741 #include "runtime/os_ext.hpp" 742 743 public: 744 class CrashProtectionCallback : public StackObj { 745 public: 746 virtual void call() = 0; 747 }; 748 749 // Platform dependent stuff 750 #ifdef TARGET_OS_FAMILY_linux 751 # include "os_linux.hpp" 752 # include "os_posix.hpp" 753 #endif 754 #ifdef TARGET_OS_FAMILY_solaris 755 # include "os_solaris.hpp" 756 # include "os_posix.hpp" 757 #endif 758 #ifdef TARGET_OS_FAMILY_windows 759 # include "os_windows.hpp" 760 #endif 761 #ifdef TARGET_OS_FAMILY_bsd 762 # include "os_posix.hpp" 763 # include "os_bsd.hpp" 764 #endif 765 #ifdef TARGET_OS_ARCH_linux_x86 766 # include "os_linux_x86.hpp" 767 #endif 768 #ifdef TARGET_OS_ARCH_linux_sparc 769 # include "os_linux_sparc.hpp" 770 #endif 771 #ifdef TARGET_OS_ARCH_linux_zero 772 # include "os_linux_zero.hpp" 773 #endif 774 #ifdef TARGET_OS_ARCH_solaris_x86 775 # include "os_solaris_x86.hpp" 776 #endif 777 #ifdef TARGET_OS_ARCH_solaris_sparc 778 # include "os_solaris_sparc.hpp" 779 #endif 780 #ifdef TARGET_OS_ARCH_windows_x86 781 # include "os_windows_x86.hpp" 782 #endif 783 #ifdef TARGET_OS_ARCH_linux_arm 784 # include "os_linux_arm.hpp" 785 #endif 786 #ifdef TARGET_OS_ARCH_linux_ppc 787 # include "os_linux_ppc.hpp" 788 #endif 789 #ifdef TARGET_OS_ARCH_bsd_x86 790 # include "os_bsd_x86.hpp" 791 #endif 792 #ifdef TARGET_OS_ARCH_bsd_zero 793 # include "os_bsd_zero.hpp" 794 #endif 795 796 public: 797 // debugging support (mostly used by debug.cpp but also fatal error handler) 798 static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address 799 800 static bool dont_yield(); // when true, JVM_Yield() is nop 801 static void print_statistics(); 802 803 // Thread priority helpers (implemented in OS-specific part) 804 static OSReturn set_native_priority(Thread* thread, int native_prio); 805 static 
OSReturn get_native_priority(const Thread* const thread, int* priority_ptr);
  // Mapping from Java thread priorities (indices 0..CriticalPriority) to
  // the corresponding OS-level priority values.
  static int java_to_os_priority[CriticalPriority + 1];
  // Hint to the underlying OS that a task switch would not be good.
  // Void return because it's a hint and can fail.
  static void hint_no_preempt();

  // Used at creation if requested by the diagnostic flag PauseAtStartup.
  // Causes the VM to wait until an external stimulus has been applied
  // (for Unix, that stimulus is a signal, for Windows, an external
  // ResumeThread call)
  static void pause();

  // Immutable pairing of a suspended thread with the ucontext captured at
  // the point it was suspended; handed to SuspendedThreadTask::do_task().
  class SuspendedThreadTaskContext {
  public:
    SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {}
    Thread* thread() const { return _thread; }
    void* ucontext() const { return _ucontext; }
  private:
    Thread* _thread;
    void* _ucontext;
  };

  // Template-method helper: run() suspends _thread, invokes the subclass's
  // do_task() with the captured context, then resumes the thread (see the
  // OS-specific implementation of internal_do_task()).  is_done() reports
  // whether the task has completed.
  class SuspendedThreadTask {
  public:
    SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {}
    virtual ~SuspendedThreadTask() {}
    void run();
    bool is_done() { return _done; }
    virtual void do_task(const SuspendedThreadTaskContext& context) = 0;
  protected:
  private:
    void internal_do_task();
    Thread* _thread;
    bool _done;
  };

#ifndef TARGET_OS_FAMILY_windows
  // Suspend/resume support (POSIX-signal based, hence excluded on Windows)
  // Protocol:
  //
  // a thread starts in SR_RUNNING
  //
  // SR_RUNNING can go to
  //   * SR_SUSPEND_REQUEST when the WatcherThread wants to suspend it
  // SR_SUSPEND_REQUEST can go to
  //   * SR_RUNNING if WatcherThread decides it waited for SR_SUSPENDED too long (timeout)
  //   * SR_SUSPENDED if the stopped thread receives the signal and switches state
  // SR_SUSPENDED can go to
  //   * SR_WAKEUP_REQUEST when the WatcherThread has done the work and wants to resume
  // SR_WAKEUP_REQUEST can go to
  //   * SR_RUNNING when the stopped thread receives the signal
  //   * SR_WAKEUP_REQUEST on timeout (resend the signal and
  //     try again)
  // State machine implementing the protocol documented above.  Each
  // transition method attempts one edge of the protocol via switch_state()
  // and returns the resulting state so the caller can detect a lost race.
  class SuspendResume {
  public:
    enum State {
      SR_RUNNING,
      SR_SUSPEND_REQUEST,
      SR_SUSPENDED,
      SR_WAKEUP_REQUEST
    };

  private:
    // Current protocol state; volatile because it is read/written from
    // multiple threads (WatcherThread and the target thread).
    volatile State _state;

  private:
    /* try to switch state from state "from" to state "to"
     * returns the state set after the method is complete
     */
    State switch_state(State from, State to);

  public:
    SuspendResume() : _state(SR_RUNNING) { }

    // Plain volatile read of the current state (no transition attempted).
    State state() const { return _state; }

    // SR_RUNNING -> SR_SUSPEND_REQUEST (WatcherThread asks for a suspend).
    State request_suspend() {
      return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST);
    }

    // SR_SUSPEND_REQUEST -> SR_RUNNING (WatcherThread gives up on timeout).
    State cancel_suspend() {
      return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING);
    }

    // SR_SUSPEND_REQUEST -> SR_SUSPENDED (target thread acknowledges).
    State suspended() {
      return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED);
    }

    // SR_SUSPENDED -> SR_WAKEUP_REQUEST (WatcherThread asks for a resume).
    State request_wakeup() {
      return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST);
    }

    // SR_WAKEUP_REQUEST -> SR_RUNNING (target thread resumes execution).
    State running() {
      return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING);
    }

    bool is_running() const {
      return _state == SR_RUNNING;
    }

    bool is_suspend_request() const {
      return _state == SR_SUSPEND_REQUEST;
    }

    bool is_suspended() const {
      return _state == SR_SUSPENDED;
    }
  };
#endif


 protected:
  static long _rand_seed;                     // seed for random number generator
  static int _processor_count;                // number of processors

  // Expand format_string (which uses home/fileSep/pathSep placeholders)
  // into a freshly built boot classpath string.
  static char* format_boot_path(const char* format_string,
                                const char* home,
                                int home_len,
                                char fileSep,
                                char pathSep);
  // Compute and install the default boot classpath; returns false on failure.
  static bool set_boot_path(char fileSep, char pathSep);
  // Split a pathSep-separated path list into an array of components,
  // storing the component count through n.
  static char** split_path(const char* path, int* n);

};

// Note that "PAUSE" is almost always used with synchronization
// so arguably we should provide Atomic::SpinPause() instead
// of the global SpinPause() with C linkage.
// It'd also be eligible for inlining on many platforms.

extern "C" int SpinPause();

#endif // SHARE_VM_RUNTIME_OS_HPP