1 /* 2 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_RUNTIME_OS_HPP 26 #define SHARE_VM_RUNTIME_OS_HPP 27 28 #include "jvm.h" 29 #include "jvmtifiles/jvmti.h" 30 #include "metaprogramming/isRegisteredEnum.hpp" 31 #include "metaprogramming/integralConstant.hpp" 32 #include "runtime/extendedPC.hpp" 33 #include "runtime/handles.hpp" 34 #include "utilities/macros.hpp" 35 #ifndef _WINDOWS 36 # include <setjmp.h> 37 #endif 38 #ifdef __APPLE__ 39 # include <mach/mach_time.h> 40 #endif 41 42 class AgentLibrary; 43 class frame; 44 45 // os defines the interface to operating system; this includes traditional 46 // OS services (time, I/O) as well as other functionality with system- 47 // dependent code. 
typedef void (*dll_func)(...);

class Thread;
class JavaThread;
class Event;
class DLL;
class FileHandle;
class NativeCallStack;

template<class E> class GrowableArray;

// %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose

// Platform-independent error return values from OS functions
enum OSReturn {
  OS_OK         =  0,        // Operation was successful
  OS_ERR        = -1,        // Operation failed
  OS_INTRPT     = -2,        // Operation was interrupted
  OS_TIMEOUT    = -3,        // Operation timed out
  OS_NOMEM      = -5,        // Operation failed for lack of memory
  OS_NORESOURCE = -6         // Operation failed for lack of nonmemory resource
};

enum ThreadPriority {        // JLS 20.20.1-3
  NoPriority       = -1,     // Initial non-priority value
  MinPriority      =  1,     // Minimum priority
  NormPriority     =  5,     // Normal (non-daemon) priority
  NearMaxPriority  =  9,     // High priority, used for VMThread
  MaxPriority      = 10,     // Highest priority, used for WatcherThread
                             // ensures that VMThread doesn't starve profiler
  CriticalPriority = 11      // Critical thread priority
};

// Executable parameter flag for os::commit_memory() and
// os::commit_memory_or_exit().
84 const bool ExecMem = true; 85 86 // Typedef for structured exception handling support 87 typedef void (*java_call_t)(JavaValue* value, const methodHandle& method, JavaCallArguments* args, Thread* thread); 88 89 class MallocTracker; 90 91 class os: AllStatic { 92 friend class VMStructs; 93 friend class JVMCIVMStructs; 94 friend class MallocTracker; 95 public: 96 enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel) 97 98 private: 99 static OSThread* _starting_thread; 100 static address _polling_page; 101 static volatile int32_t * _mem_serialize_page; 102 static uintptr_t _serialize_page_mask; 103 public: 104 static size_t _page_sizes[page_sizes_max]; 105 106 private: 107 static void init_page_sizes(size_t default_page_size) { 108 _page_sizes[0] = default_page_size; 109 _page_sizes[1] = 0; // sentinel 110 } 111 112 static char* pd_reserve_memory(size_t bytes, char* addr = 0, 113 size_t alignment_hint = 0); 114 static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr); 115 static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc); 116 static void pd_split_reserved_memory(char *base, size_t size, 117 size_t split, bool realloc); 118 static bool pd_commit_memory(char* addr, size_t bytes, bool executable); 119 static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint, 120 bool executable); 121 // Same as pd_commit_memory() that either succeeds or calls 122 // vm_exit_out_of_memory() with the specified mesg. 
123 static void pd_commit_memory_or_exit(char* addr, size_t bytes, 124 bool executable, const char* mesg); 125 static void pd_commit_memory_or_exit(char* addr, size_t size, 126 size_t alignment_hint, 127 bool executable, const char* mesg); 128 static bool pd_uncommit_memory(char* addr, size_t bytes); 129 static bool pd_release_memory(char* addr, size_t bytes); 130 131 static char* pd_map_memory(int fd, const char* file_name, size_t file_offset, 132 char *addr, size_t bytes, bool read_only = false, 133 bool allow_exec = false); 134 static char* pd_remap_memory(int fd, const char* file_name, size_t file_offset, 135 char *addr, size_t bytes, bool read_only, 136 bool allow_exec); 137 static bool pd_unmap_memory(char *addr, size_t bytes); 138 static void pd_free_memory(char *addr, size_t bytes, size_t alignment_hint); 139 static void pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint); 140 141 static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned); 142 143 // Get summary strings for system information in buffer provided 144 static void get_summary_cpu_info(char* buf, size_t buflen); 145 static void get_summary_os_info(char* buf, size_t buflen); 146 147 static void initialize_initial_active_processor_count(); 148 149 LINUX_ONLY(static void pd_init_container_support();) 150 151 public: 152 static void init(void); // Called before command line parsing 153 154 static void init_container_support() { // Called during command line parsing. 155 LINUX_ONLY(pd_init_container_support();) 156 } 157 158 static void init_before_ergo(void); // Called after command line parsing 159 // before VM ergonomics processing. 
160 static jint init_2(void); // Called after command line parsing 161 // and VM ergonomics processing 162 static void init_globals(void) { // Called from init_globals() in init.cpp 163 init_globals_ext(); 164 } 165 166 // File names are case-insensitive on windows only 167 // Override me as needed 168 static int file_name_strcmp(const char* s1, const char* s2); 169 170 // unset environment variable 171 static bool unsetenv(const char* name); 172 173 static bool have_special_privileges(); 174 175 static jlong javaTimeMillis(); 176 static jlong javaTimeNanos(); 177 static void javaTimeNanos_info(jvmtiTimerInfo *info_ptr); 178 static void javaTimeSystemUTC(jlong &seconds, jlong &nanos); 179 static void run_periodic_checks(); 180 static bool supports_monotonic_clock(); 181 182 // Returns the elapsed time in seconds since the vm started. 183 static double elapsedTime(); 184 185 // Returns real time in seconds since an arbitrary point 186 // in the past. 187 static bool getTimesSecs(double* process_real_time, 188 double* process_user_time, 189 double* process_system_time); 190 191 // Interface to the performance counter 192 static jlong elapsed_counter(); 193 static jlong elapsed_frequency(); 194 195 // The "virtual time" of a thread is the amount of time a thread has 196 // actually run. The first function indicates whether the OS supports 197 // this functionality for the current thread, and if so: 198 // * the second enables vtime tracking (if that is required). 199 // * the third tells whether vtime is enabled. 200 // * the fourth returns the elapsed virtual time for the current 201 // thread. 202 static bool supports_vtime(); 203 static bool enable_vtime(); 204 static bool vtime_enabled(); 205 static double elapsedVTime(); 206 207 // Return current local time in a string (YYYY-MM-DD HH:MM:SS). 208 // It is MT safe, but not async-safe, as reading time zone 209 // information may require a lock on some platforms. 
210 static char* local_time_string(char *buf, size_t buflen); 211 static struct tm* localtime_pd (const time_t* clock, struct tm* res); 212 static struct tm* gmtime_pd (const time_t* clock, struct tm* res); 213 // Fill in buffer with current local time as an ISO-8601 string. 214 // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz. 215 // Returns buffer, or NULL if it failed. 216 static char* iso8601_time(char* buffer, size_t buffer_length, bool utc = false); 217 218 // Interface for detecting multiprocessor system 219 static inline bool is_MP() { 220 // During bootstrap if _processor_count is not yet initialized 221 // we claim to be MP as that is safest. If any platform has a 222 // stub generator that might be triggered in this phase and for 223 // which being declared MP when in fact not, is a problem - then 224 // the bootstrap routine for the stub generator needs to check 225 // the processor count directly and leave the bootstrap routine 226 // in place until called after initialization has ocurred. 227 return AssumeMP || (_processor_count != 1); 228 } 229 static julong available_memory(); 230 static julong physical_memory(); 231 static bool has_allocatable_memory_limit(julong* limit); 232 static bool is_server_class_machine(); 233 234 // number of CPUs 235 static int processor_count() { 236 return _processor_count; 237 } 238 static void set_processor_count(int count) { _processor_count = count; } 239 240 // Returns the number of CPUs this process is currently allowed to run on. 241 // Note that on some OSes this can change dynamically. 242 static int active_processor_count(); 243 244 // At startup the number of active CPUs this process is allowed to run on. 245 // This value does not change dynamically. May be different from active_processor_count(). 
246 static int initial_active_processor_count() { 247 assert(_initial_active_processor_count > 0, "Initial active processor count not set yet."); 248 return _initial_active_processor_count; 249 } 250 251 // Bind processes to processors. 252 // This is a two step procedure: 253 // first you generate a distribution of processes to processors, 254 // then you bind processes according to that distribution. 255 // Compute a distribution for number of processes to processors. 256 // Stores the processor id's into the distribution array argument. 257 // Returns true if it worked, false if it didn't. 258 static bool distribute_processes(uint length, uint* distribution); 259 // Binds the current process to a processor. 260 // Returns true if it worked, false if it didn't. 261 static bool bind_to_processor(uint processor_id); 262 263 // Give a name to the current thread. 264 static void set_native_thread_name(const char *name); 265 266 // Interface for stack banging (predetect possible stack overflow for 267 // exception processing) There are guard pages, and above that shadow 268 // pages for stack overflow checking. 269 static bool uses_stack_guard_pages(); 270 static bool must_commit_stack_guard_pages(); 271 static void map_stack_shadow_pages(address sp); 272 static bool stack_shadow_pages_available(Thread *thread, const methodHandle& method, address sp); 273 274 // Return size of stack that is actually committed. For Java thread, the bottom should be above 275 // guard pages (stack grows downward) 276 static size_t committed_stack_size(address bottom, size_t size); 277 278 // OS interface to Virtual Memory 279 280 // Return the default page size. 281 static int vm_page_size(); 282 283 // Returns the page size to use for a region of memory. 284 // region_size / min_pages will always be greater than or equal to the 285 // returned value. The returned value will divide region_size. 
286 static size_t page_size_for_region_aligned(size_t region_size, size_t min_pages); 287 288 // Returns the page size to use for a region of memory. 289 // region_size / min_pages will always be greater than or equal to the 290 // returned value. The returned value might not divide region_size. 291 static size_t page_size_for_region_unaligned(size_t region_size, size_t min_pages); 292 293 // Return the largest page size that can be used 294 static size_t max_page_size() { 295 // The _page_sizes array is sorted in descending order. 296 return _page_sizes[0]; 297 } 298 299 // Methods for tracing page sizes returned by the above method. 300 // The region_{min,max}_size parameters should be the values 301 // passed to page_size_for_region() and page_size should be the result of that 302 // call. The (optional) base and size parameters should come from the 303 // ReservedSpace base() and size() methods. 304 static void trace_page_sizes(const char* str, const size_t* page_sizes, int count); 305 static void trace_page_sizes(const char* str, 306 const size_t region_min_size, 307 const size_t region_max_size, 308 const size_t page_size, 309 const char* base, 310 const size_t size); 311 static void trace_page_sizes_for_requested_size(const char* str, 312 const size_t requested_size, 313 const size_t page_size, 314 const size_t alignment, 315 const char* base, 316 const size_t size); 317 318 static int vm_allocation_granularity(); 319 static char* reserve_memory(size_t bytes, char* addr = 0, 320 size_t alignment_hint = 0, int file_desc = -1); 321 static char* reserve_memory(size_t bytes, char* addr, 322 size_t alignment_hint, MEMFLAGS flags); 323 static char* reserve_memory_aligned(size_t size, size_t alignment, int file_desc = -1); 324 static char* attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc = -1); 325 static void split_reserved_memory(char *base, size_t size, 326 size_t split, bool realloc); 327 static bool commit_memory(char* addr, size_t bytes, bool 
executable); 328 static bool commit_memory(char* addr, size_t size, size_t alignment_hint, 329 bool executable); 330 // Same as commit_memory() that either succeeds or calls 331 // vm_exit_out_of_memory() with the specified mesg. 332 static void commit_memory_or_exit(char* addr, size_t bytes, 333 bool executable, const char* mesg); 334 static void commit_memory_or_exit(char* addr, size_t size, 335 size_t alignment_hint, 336 bool executable, const char* mesg); 337 static bool uncommit_memory(char* addr, size_t bytes); 338 static bool release_memory(char* addr, size_t bytes); 339 340 // Touch memory pages that cover the memory range from start to end (exclusive) 341 // to make the OS back the memory range with actual memory. 342 // Current implementation may not touch the last page if unaligned addresses 343 // are passed. 344 static void pretouch_memory(void* start, void* end, size_t page_size = vm_page_size()); 345 346 enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX }; 347 static bool protect_memory(char* addr, size_t bytes, ProtType prot, 348 bool is_committed = true); 349 350 static bool guard_memory(char* addr, size_t bytes); 351 static bool unguard_memory(char* addr, size_t bytes); 352 static bool create_stack_guard_pages(char* addr, size_t bytes); 353 static bool pd_create_stack_guard_pages(char* addr, size_t bytes); 354 static bool remove_stack_guard_pages(char* addr, size_t bytes); 355 // Helper function to create a new file with template jvmheap.XXXXXX. 356 // Returns a valid fd on success or else returns -1 357 static int create_file_for_heap(const char* dir); 358 // Map memory to the file referred by fd. 
This function is slightly different from map_memory() 359 // and is added to be used for implementation of -XX:AllocateHeapAt 360 static char* map_memory_to_file(char* base, size_t size, int fd); 361 // Replace existing reserved memory with file mapping 362 static char* replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd); 363 364 static char* map_memory(int fd, const char* file_name, size_t file_offset, 365 char *addr, size_t bytes, bool read_only = false, 366 bool allow_exec = false); 367 static char* remap_memory(int fd, const char* file_name, size_t file_offset, 368 char *addr, size_t bytes, bool read_only, 369 bool allow_exec); 370 static bool unmap_memory(char *addr, size_t bytes); 371 static void free_memory(char *addr, size_t bytes, size_t alignment_hint); 372 static void realign_memory(char *addr, size_t bytes, size_t alignment_hint); 373 374 // NUMA-specific interface 375 static bool numa_has_static_binding(); 376 static bool numa_has_group_homing(); 377 static void numa_make_local(char *addr, size_t bytes, int lgrp_hint); 378 static void numa_make_global(char *addr, size_t bytes); 379 static size_t numa_get_groups_num(); 380 static size_t numa_get_leaf_groups(int *ids, size_t size); 381 static bool numa_topology_changed(); 382 static int numa_get_group_id(); 383 384 // Page manipulation 385 struct page_info { 386 size_t size; 387 int lgrp_id; 388 }; 389 static bool get_page_info(char *start, page_info* info); 390 static char* scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found); 391 392 static char* non_memory_address_word(); 393 // reserve, commit and pin the entire memory region 394 static char* reserve_memory_special(size_t size, size_t alignment, 395 char* addr, bool executable); 396 static bool release_memory_special(char* addr, size_t bytes); 397 static void large_page_init(); 398 static size_t large_page_size(); 399 static bool can_commit_large_page_memory(); 400 static bool 
can_execute_large_page_memory(); 401 402 // OS interface to polling page 403 static address get_polling_page() { return _polling_page; } 404 static void set_polling_page(address page) { _polling_page = page; } 405 static bool is_poll_address(address addr) { return addr >= _polling_page && addr < (_polling_page + os::vm_page_size()); } 406 static void make_polling_page_unreadable(); 407 static void make_polling_page_readable(); 408 409 // Routines used to serialize the thread state without using membars 410 static void serialize_thread_states(); 411 412 // Since we write to the serialize page from every thread, we 413 // want stores to be on unique cache lines whenever possible 414 // in order to minimize CPU cross talk. We pre-compute the 415 // amount to shift the thread* to make this offset unique to 416 // each thread. 417 static int get_serialize_page_shift_count() { 418 return SerializePageShiftCount; 419 } 420 421 static void set_serialize_page_mask(uintptr_t mask) { 422 _serialize_page_mask = mask; 423 } 424 425 static unsigned int get_serialize_page_mask() { 426 return _serialize_page_mask; 427 } 428 429 static void set_memory_serialize_page(address page); 430 431 static address get_memory_serialize_page() { 432 return (address)_mem_serialize_page; 433 } 434 435 static inline void write_memory_serialize_page(JavaThread *thread) { 436 uintptr_t page_offset = ((uintptr_t)thread >> 437 get_serialize_page_shift_count()) & 438 get_serialize_page_mask(); 439 *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1; 440 } 441 442 static bool is_memory_serialize_page(JavaThread *thread, address addr) { 443 if (UseMembar) return false; 444 // Previously this function calculated the exact address of this 445 // thread's serialize page, and checked if the faulting address 446 // was equal. However, some platforms mask off faulting addresses 447 // to the page size, so now we just check that the address is 448 // within the page. 
This makes the thread argument unnecessary, 449 // but we retain the NULL check to preserve existing behavior. 450 if (thread == NULL) return false; 451 address page = (address) _mem_serialize_page; 452 return addr >= page && addr < (page + os::vm_page_size()); 453 } 454 455 static void block_on_serialize_page_trap(); 456 457 // threads 458 459 enum ThreadType { 460 vm_thread, 461 cgc_thread, // Concurrent GC thread 462 pgc_thread, // Parallel GC thread 463 java_thread, // Java, CodeCacheSweeper, JVMTIAgent and Service threads. 464 compiler_thread, 465 watcher_thread, 466 os_thread 467 }; 468 469 static bool create_thread(Thread* thread, 470 ThreadType thr_type, 471 size_t req_stack_size = 0); 472 473 // The "main thread", also known as "starting thread", is the thread 474 // that loads/creates the JVM via JNI_CreateJavaVM. 475 static bool create_main_thread(JavaThread* thread); 476 477 // The primordial thread is the initial process thread. The java 478 // launcher never uses the primordial thread as the main thread, but 479 // applications that host the JVM directly may do so. Some platforms 480 // need special-case handling of the primordial thread if it attaches 481 // to the VM. 482 static bool is_primordial_thread(void) 483 #if defined(_WINDOWS) || defined(BSD) 484 // No way to identify the primordial thread. 485 { return false; } 486 #else 487 ; 488 #endif 489 490 static bool create_attached_thread(JavaThread* thread); 491 static void pd_start_thread(Thread* thread); 492 static void start_thread(Thread* thread); 493 494 static void initialize_thread(Thread* thr); 495 static void free_thread(OSThread* osthread); 496 497 // thread id on Linux/64bit is 64bit, on Windows and Solaris, it's 32bit 498 static intx current_thread_id(); 499 static int current_process_id(); 500 static int sleep(Thread* thread, jlong ms, bool interruptable); 501 // Short standalone OS sleep suitable for slow path spin loop. 502 // Ignores Thread.interrupt() (so keep it short). 
503 // ms = 0, will sleep for the least amount of time allowed by the OS. 504 static void naked_short_sleep(jlong ms); 505 static void infinite_sleep(); // never returns, use with CAUTION 506 static void naked_yield () ; 507 static OSReturn set_priority(Thread* thread, ThreadPriority priority); 508 static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority); 509 510 static void interrupt(Thread* thread); 511 static bool is_interrupted(Thread* thread, bool clear_interrupted); 512 513 static int pd_self_suspend_thread(Thread* thread); 514 515 static ExtendedPC fetch_frame_from_context(const void* ucVoid, intptr_t** sp, intptr_t** fp); 516 static frame fetch_frame_from_context(const void* ucVoid); 517 static frame fetch_frame_from_ucontext(Thread* thread, void* ucVoid); 518 519 static void breakpoint(); 520 static bool start_debugging(char *buf, int buflen); 521 522 static address current_stack_pointer(); 523 static address current_stack_base(); 524 static size_t current_stack_size(); 525 526 static void verify_stack_alignment() PRODUCT_RETURN; 527 528 static bool message_box(const char* title, const char* message); 529 static char* do_you_want_to_debug(const char* message); 530 531 // run cmd in a separate process and return its exit code; or -1 on failures 532 static int fork_and_exec(char *cmd); 533 534 // Call ::exit() on all platforms but Windows 535 static void exit(int num); 536 537 // Terminate the VM, but don't exit the process 538 static void shutdown(); 539 540 // Terminate with an error. Default is to generate a core file on platforms 541 // that support such things. This calls shutdown() and then aborts. 542 static void abort(bool dump_core, void *siginfo, const void *context); 543 static void abort(bool dump_core = true); 544 545 // Die immediately, no exit hook, no abort hook, no cleanup. 
546 static void die(); 547 548 // File i/o operations 549 static const int default_file_open_flags(); 550 static int open(const char *path, int oflag, int mode); 551 static FILE* open(int fd, const char* mode); 552 static int close(int fd); 553 static jlong lseek(int fd, jlong offset, int whence); 554 static char* native_path(char *path); 555 static int ftruncate(int fd, jlong length); 556 static int fsync(int fd); 557 static int available(int fd, jlong *bytes); 558 static int get_fileno(FILE* fp); 559 static void flockfile(FILE* fp); 560 static void funlockfile(FILE* fp); 561 562 static int compare_file_modified_times(const char* file1, const char* file2); 563 564 //File i/o operations 565 566 static size_t read(int fd, void *buf, unsigned int nBytes); 567 static size_t read_at(int fd, void *buf, unsigned int nBytes, jlong offset); 568 static size_t restartable_read(int fd, void *buf, unsigned int nBytes); 569 static size_t write(int fd, const void *buf, unsigned int nBytes); 570 571 // Reading directories. 572 static DIR* opendir(const char* dirname); 573 static int readdir_buf_size(const char *path); 574 static struct dirent* readdir(DIR* dirp, dirent* dbuf); 575 static int closedir(DIR* dirp); 576 577 // Dynamic library extension 578 static const char* dll_file_extension(); 579 580 static const char* get_temp_directory(); 581 static const char* get_current_directory(char *buf, size_t buflen); 582 583 // Builds the platform-specific name of a library. 584 // Returns false if the buffer is too small. 585 static bool dll_build_name(char* buffer, size_t size, 586 const char* fname); 587 588 // Builds a platform-specific full library path given an ld path and 589 // unadorned library name. Returns true if the buffer contains a full 590 // path to an existing file, false otherwise. If pathname is empty, 591 // uses the path to the current directory. 
592 static bool dll_locate_lib(char* buffer, size_t size, 593 const char* pathname, const char* fname); 594 595 // Symbol lookup, find nearest function name; basically it implements 596 // dladdr() for all platforms. Name of the nearest function is copied 597 // to buf. Distance from its base address is optionally returned as offset. 598 // If function name is not found, buf[0] is set to '\0' and offset is 599 // set to -1 (if offset is non-NULL). 600 static bool dll_address_to_function_name(address addr, char* buf, 601 int buflen, int* offset, 602 bool demangle = true); 603 604 // Locate DLL/DSO. On success, full path of the library is copied to 605 // buf, and offset is optionally set to be the distance between addr 606 // and the library's base address. On failure, buf[0] is set to '\0' 607 // and offset is set to -1 (if offset is non-NULL). 608 static bool dll_address_to_library_name(address addr, char* buf, 609 int buflen, int* offset); 610 611 // Find out whether the pc is in the static code for jvm.dll/libjvm.so. 
612 static bool address_is_in_vm(address addr); 613 614 // Loads .dll/.so and 615 // in case of error it checks if .dll/.so was built for the 616 // same architecture as HotSpot is running on 617 static void* dll_load(const char *name, char *ebuf, int ebuflen); 618 619 // lookup symbol in a shared library 620 static void* dll_lookup(void* handle, const char* name); 621 622 // Unload library 623 static void dll_unload(void *lib); 624 625 // Callback for loaded module information 626 // Input parameters: 627 // char* module_file_name, 628 // address module_base_addr, 629 // address module_top_addr, 630 // void* param 631 typedef int (*LoadedModulesCallbackFunc)(const char *, address, address, void *); 632 633 static int get_loaded_modules_info(LoadedModulesCallbackFunc callback, void *param); 634 635 // Return the handle of this process 636 static void* get_default_process_handle(); 637 638 // Check for static linked agent library 639 static bool find_builtin_agent(AgentLibrary *agent_lib, const char *syms[], 640 size_t syms_len); 641 642 // Find agent entry point 643 static void *find_agent_function(AgentLibrary *agent_lib, bool check_lib, 644 const char *syms[], size_t syms_len); 645 646 // Write to stream 647 static int log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) ATTRIBUTE_PRINTF(3, 0); 648 649 // Get host name in buffer provided 650 static bool get_host_name(char* buf, size_t buflen); 651 652 // Print out system information; they are called by fatal error handler. 653 // Output format may be different on different platforms. 
654 static void print_os_info(outputStream* st); 655 static void print_os_info_brief(outputStream* st); 656 static void print_cpu_info(outputStream* st, char* buf, size_t buflen); 657 static void pd_print_cpu_info(outputStream* st, char* buf, size_t buflen); 658 static void print_summary_info(outputStream* st, char* buf, size_t buflen); 659 static void print_memory_info(outputStream* st); 660 static void print_dll_info(outputStream* st); 661 static void print_environment_variables(outputStream* st, const char** env_list); 662 static void print_context(outputStream* st, const void* context); 663 static void print_register_info(outputStream* st, const void* context); 664 static void print_siginfo(outputStream* st, const void* siginfo); 665 static void print_signal_handlers(outputStream* st, char* buf, size_t buflen); 666 static void print_date_and_time(outputStream* st, char* buf, size_t buflen); 667 668 static void print_location(outputStream* st, intptr_t x, bool verbose = false); 669 static size_t lasterror(char *buf, size_t len); 670 static int get_last_error(); 671 672 // Replacement for strerror(). 673 // Will return the english description of the error (e.g. "File not found", as 674 // suggested in the POSIX standard. 675 // Will return "Unknown error" for an unknown errno value. 676 // Will not attempt to localize the returned string. 677 // Will always return a valid string which is a static constant. 678 // Will not change the value of errno. 679 static const char* strerror(int e); 680 681 // Will return the literalized version of the given errno (e.g. "EINVAL" 682 // for EINVAL). 683 // Will return "Unknown error" for an unknown errno value. 684 // Will always return a valid string which is a static constant. 685 // Will not change the value of errno. 686 static const char* errno_name(int e); 687 688 // Determines whether the calling process is being debugged by a user-mode debugger. 
689 static bool is_debugger_attached(); 690 691 // wait for a key press if PauseAtExit is set 692 static void wait_for_keypress_at_exit(void); 693 694 // The following two functions are used by fatal error handler to trace 695 // native (C) frames. They are not part of frame.hpp/frame.cpp because 696 // frame.hpp/cpp assume thread is JavaThread, and also because different 697 // OS/compiler may have different convention or provide different API to 698 // walk C frames. 699 // 700 // We don't attempt to become a debugger, so we only follow frames if that 701 // does not require a lookup in the unwind table, which is part of the binary 702 // file but may be unsafe to read after a fatal error. So on x86, we can 703 // only walk stack if %ebp is used as frame pointer; on ia64, it's not 704 // possible to walk C stack without having the unwind table. 705 static bool is_first_C_frame(frame *fr); 706 static frame get_sender_for_C_frame(frame *fr); 707 708 // return current frame. pc() and sp() are set to NULL on failure. 709 static frame current_frame(); 710 711 static void print_hex_dump(outputStream* st, address start, address end, int unitsize); 712 713 // returns a string to describe the exception/signal; 714 // returns NULL if exception_code is not an OS exception/signal. 715 static const char* exception_name(int exception_code, char* buf, size_t buflen); 716 717 // Returns the signal number (e.g. 11) for a given signal name (SIGSEGV). 
718 static int get_signal_number(const char* signal_name); 719 720 // Returns native Java library, loads if necessary 721 static void* native_java_library(); 722 723 // Fills in path to jvm.dll/libjvm.so (used by the Disassembler) 724 static void jvm_path(char *buf, jint buflen); 725 726 // JNI names 727 static void print_jni_name_prefix_on(outputStream* st, int args_size); 728 static void print_jni_name_suffix_on(outputStream* st, int args_size); 729 730 // Init os specific system properties values 731 static void init_system_properties_values(); 732 733 // IO operations, non-JVM_ version. 734 static int stat(const char* path, struct stat* sbuf); 735 static bool dir_is_empty(const char* path); 736 737 // IO operations on binary files 738 static int create_binary_file(const char* path, bool rewrite_existing); 739 static jlong current_file_offset(int fd); 740 static jlong seek_to_file_offset(int fd, jlong offset); 741 742 // Retrieve native stack frames. 743 // Parameter: 744 // stack: an array to storage stack pointers. 745 // frames: size of above array. 746 // toSkip: number of stack frames to skip at the beginning. 747 // Return: number of stack frames captured. 
  static int get_native_stack(address* stack, int size, int toSkip = 0);

  // General allocation (must be MT-safe).
  // MEMFLAGS tags the allocation category; the NativeCallStack overloads
  // additionally record the allocation site (used by native memory tracking).
  static void* malloc  (size_t size, MEMFLAGS flags, const NativeCallStack& stack);
  static void* malloc  (size_t size, MEMFLAGS flags);
  static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack);
  static void* realloc (void *memblock, size_t size, MEMFLAGS flag);

  static void  free    (void *memblock);
  static char* strdup(const char *, MEMFLAGS flags = mtInternal);  // Like strdup
  // Like strdup, but exit VM when strdup() returns NULL
  static char* strdup_check_oom(const char*, MEMFLAGS flags = mtInternal);

#ifndef PRODUCT
  // Debug-build allocation statistics.
  static julong num_mallocs;         // # of calls to malloc/realloc
  static julong alloc_bytes;         // # of bytes allocated
  static julong num_frees;           // # of calls to free
  static julong free_bytes;          // # of bytes freed
#endif

  // SocketInterface (ex HPI SocketInterface )
  static int socket(int domain, int type, int protocol);
  static int socket_close(int fd);
  static int recv(int fd, char* buf, size_t nBytes, uint flags);
  static int send(int fd, char* buf, size_t nBytes, uint flags);
  static int raw_send(int fd, char* buf, size_t nBytes, uint flags);
  static int connect(int fd, struct sockaddr* him, socklen_t len);
  static struct hostent* get_host_by_name(char* name);

  // Support for signals (see JVM_RaiseSignal, JVM_RegisterSignal)
  static void  signal_init(TRAPS);
  static void  signal_init_pd();
  static void  signal_notify(int signal_number);
  static void* signal(int signal_number, void* handler);
  static void  signal_raise(int signal_number);
  static int   signal_wait();
  static void* user_handler();
  static void  terminate_signal_thread();
  static int   sigexitnum_pd();

  // random number generation
  static int random();                     // return 32bit pseudorandom number
  static void init_random(unsigned int initval); // initialize random sequence

  // Structured OS Exception support
  static void os_exception_wrapper(java_call_t f, JavaValue* value,
                                   const methodHandle& method, JavaCallArguments* args,
                                   Thread* thread);

  // On Posix compatible OS it will simply check core dump limits while on Windows
  // it will check if dump file can be created. Check or prepare a core dump to be
  // taken at a later point in the same thread in os::abort(). Use the caller
  // provided buffer as a scratch buffer. The status message which will be written
  // into the error log either is file location or a short error message, depending
  // on the checking result.
  static void check_dump_limit(char* buffer, size_t bufferSize);

  // Get the default path to the core file
  // Returns the length of the string
  static int get_core_path(char* buffer, size_t bufferSize);

  // JVMTI & JVM monitoring and management support
  // The thread_cpu_time() and current_thread_cpu_time() are only
  // supported if is_thread_cpu_time_supported() returns true.
  // They are not supported on Solaris T1.

  // Thread CPU Time - return the fast estimate on a platform
  // On Solaris - call gethrvtime (fast) - user time only
  // On Linux   - fast clock_gettime where available - user+sys
  //            - otherwise: very slow /proc fs - user+sys
  // On Windows - GetThreadTimes - user+sys
  static jlong current_thread_cpu_time();
  static jlong thread_cpu_time(Thread* t);

  // Thread CPU Time with user_sys_cpu_time parameter.
  //
  // If user_sys_cpu_time is true, user+sys time is returned.
  // Otherwise, only user time is returned
  static jlong current_thread_cpu_time(bool user_sys_cpu_time);
  static jlong thread_cpu_time(Thread* t, bool user_sys_cpu_time);

  // Return a bunch of info about the timers.
  // Note that the returned info for these two functions may be different
  // on some platforms
  static void current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr);
  static void thread_cpu_time_info(jvmtiTimerInfo *info_ptr);

  static bool is_thread_cpu_time_supported();

  // System loadavg support. Returns -1 if load average cannot be obtained.
  static int loadavg(double loadavg[], int nelem);

  // Hook for os specific jvm options that we don't want to abort on seeing
  static bool obsolete_option(const JavaVMOption *option);

  // Amount beyond the callee frame size that we bang the stack.
  static int extra_bang_size_in_bytes();

  // Splits a path into its elements; the result array and count are
  // returned via 'n' (caller owns the returned storage).
  static char** split_path(const char* path, int* n);

  // Extensions
#include "runtime/os_ext.hpp"

 public:
  // Callback invoked under crash protection (see platform os_*.hpp for the
  // ThreadCrashProtection machinery that uses it).
  class CrashProtectionCallback : public StackObj {
  public:
    virtual void call() = 0;
  };

  // Platform dependent stuff
#ifndef _WINDOWS
# include "os_posix.hpp"
#endif
#include OS_CPU_HEADER(os)
#include OS_HEADER(os)

#ifndef OS_NATIVE_THREAD_CREATION_FAILED_MSG
#define OS_NATIVE_THREAD_CREATION_FAILED_MSG "unable to create native thread: possibly out of memory or process/resource limits reached"
#endif

 public:
#ifndef PLATFORM_PRINT_NATIVE_STACK
  // No platform-specific code for printing the native stack.
  static bool platform_print_native_stack(outputStream* st, const void* context,
                                          char *buf, int buf_size) {
    return false;
  }
#endif

  // debugging support (mostly used by debug.cpp but also fatal error handler)
  static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address

  static bool dont_yield();                     // when true, JVM_Yield() is nop
  static void print_statistics();

  // Thread priority helpers (implemented in OS-specific part)
  static OSReturn set_native_priority(Thread* thread, int native_prio);
  static OSReturn get_native_priority(const Thread* const thread, int* priority_ptr);
  // Maps ThreadPriority (JLS values, see enum above) to OS-level priorities.
  static int java_to_os_priority[CriticalPriority + 1];
  // Hint to the underlying OS that a task switch would not be good.
  // Void return because it's a hint and can fail.
  static void hint_no_preempt();
  static const char* native_thread_creation_failed_msg() {
    return OS_NATIVE_THREAD_CREATION_FAILED_MSG;
  }

  // Used at creation if requested by the diagnostic flag PauseAtStartup.
  // Causes the VM to wait until an external stimulus has been applied
  // (for Unix, that stimulus is a signal, for Windows, an external
  // ResumeThread call)
  static void pause();

  // Builds a platform dependent Agent_OnLoad_<libname> function name
  // which is used to find statically linked in agents.
  static char* build_agent_function_name(const char *sym, const char *cname,
                                         bool is_absolute_path);

  // Immutable bundle of a thread and its saved ucontext, handed to
  // SuspendedThreadTask::do_task() while the target thread is stopped.
  class SuspendedThreadTaskContext {
  public:
    SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {}
    Thread* thread() const { return _thread; }
    void* ucontext() const { return _ucontext; }
  private:
    Thread* _thread;
    void* _ucontext;
  };

  // Runs do_task() against a thread that the platform layer has suspended;
  // subclass and override do_task(), then call run().
  class SuspendedThreadTask {
  public:
    SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {}
    void run();
    bool is_done() { return _done; }
    virtual void do_task(const SuspendedThreadTaskContext& context) = 0;
  protected:
    // Non-virtual protected dtor: instances are only destroyed through the
    // concrete subclass, never through a SuspendedThreadTask*.
    ~SuspendedThreadTask() {}
  private:
    void internal_do_task();
    Thread* _thread;
    bool _done;
  };

#ifndef _WINDOWS
  // Suspend/resume support
  // Protocol:
  //
  // a thread starts in SR_RUNNING
  //
  // SR_RUNNING can go to
  //   * SR_SUSPEND_REQUEST when the WatcherThread wants to suspend it
  // SR_SUSPEND_REQUEST can go to
  //   * SR_RUNNING if WatcherThread decides it waited for SR_SUSPENDED too long (timeout)
  //   * SR_SUSPENDED if the stopped thread receives the signal and switches state
  // SR_SUSPENDED can go to
  //   * SR_WAKEUP_REQUEST when the WatcherThread has done the work and wants to resume
  // SR_WAKEUP_REQUEST can go to
  //   * SR_RUNNING when the stopped thread receives the signal
  //   * SR_WAKEUP_REQUEST on timeout (resend the signal and try again)
  class SuspendResume {
  public:
    enum State {
      SR_RUNNING,
      SR_SUSPEND_REQUEST,
      SR_SUSPENDED,
      SR_WAKEUP_REQUEST
    };

  private:
    volatile State _state;

  private:
    /* try to switch state from state "from" to state "to"
     * returns the state set after the method is complete
     */
    State switch_state(State from, State to);

  public:
    SuspendResume() : _state(SR_RUNNING) { }

    State state() const { return _state; }

    State request_suspend() {
      return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST);
    }

    State cancel_suspend() {
      return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING);
    }

    State suspended() {
      return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED);
    }

    State request_wakeup() {
      return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST);
    }

    State running() {
      return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING);
    }

    bool is_running() const {
      return _state == SR_RUNNING;
    }

    bool is_suspend_request() const {
      return _state == SR_SUSPEND_REQUEST;
    }

    bool is_suspended() const {
      return _state == SR_SUSPENDED;
    }
  };
#endif // !_WINDOWS


 protected:
  static volatile unsigned int _rand_seed;    // seed for random number generator
  static int _processor_count;                // number of processors
  static int _initial_active_processor_count; // number of active processors during initialization.

  static char* format_boot_path(const char* format_string,
                                const char* home,
                                int home_len,
                                char fileSep,
                                char pathSep);
  static bool set_boot_path(char fileSep, char pathSep);

};

#ifndef _WINDOWS
template<> struct IsRegisteredEnum<os::SuspendResume::State> : public TrueType {};
#endif // !_WINDOWS

// Note that "PAUSE" is almost always used with synchronization
// so arguably we should provide Atomic::SpinPause() instead
// of the global SpinPause() with C linkage.
// It'd also be eligible for inlining on many platforms.

extern "C" int SpinPause();

#endif // SHARE_VM_RUNTIME_OS_HPP