/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_OS_HPP
#define SHARE_VM_RUNTIME_OS_HPP

#include "jvmtifiles/jvmti.h"
#include "runtime/atomic.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/handles.hpp"
#include "utilities/top.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "jvm_linux.h"
# include <setjmp.h>
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "jvm_solaris.h"
# include <setjmp.h>
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "jvm_windows.h"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "jvm_aix.h"
# include <setjmp.h>
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "jvm_bsd.h"
# include <setjmp.h>
# ifdef __APPLE__
#  include <mach/mach_time.h>
# endif
#endif

class AgentLibrary;

// os defines the interface to the operating system; this includes traditional
// OS services (time, I/O) as well as other functionality implemented in
// system-dependent code.

typedef void (*dll_func)(...);

class Thread;
class JavaThread;
class Event;
class DLL;
class FileHandle;
class NativeCallStack;

template<class E> class GrowableArray;

// %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose

// Platform-independent error return values from OS functions
enum OSReturn {
  OS_OK         =  0,        // Operation was successful
  OS_ERR        = -1,        // Operation failed
  OS_INTRPT     = -2,        // Operation was interrupted
  OS_TIMEOUT    = -3,        // Operation timed out
  OS_NOMEM      = -5,        // Operation failed for lack of memory
  OS_NORESOURCE = -6         // Operation failed for lack of nonmemory resource
};

enum ThreadPriority {        // JLS 20.20.1-3
  NoPriority       = -1,     // Initial non-priority value
  MinPriority      =  1,     // Minimum priority
  NormPriority     =  5,     // Normal (non-daemon) priority
  NearMaxPriority  =  9,     // High priority, used for VMThread
  MaxPriority      = 10,     // Highest priority, used for WatcherThread
                             // ensures that VMThread doesn't starve profiler
  CriticalPriority = 11      // Critical thread priority
};

// Executable parameter flag for os::commit_memory() and
// os::commit_memory_or_exit().
const bool ExecMem = true;

// Typedef for structured exception handling support
typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);

class MallocTracker;

class os: AllStatic {
  friend class VMStructs;
  friend class MallocTracker;
 public:
  enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)

 private:
  static OSThread*          _starting_thread;
  static address            _polling_page;
  static volatile int32_t * _mem_serialize_page;
  static uintptr_t          _serialize_page_mask;
 public:
  static size_t             _page_sizes[page_sizes_max];

 private:
  static void init_page_sizes(size_t default_page_size) {
    _page_sizes[0] = default_page_size;
    _page_sizes[1] = 0; // sentinel
  }

  static char*  pd_reserve_memory(size_t bytes, char* addr = 0,
                               size_t alignment_hint = 0);
  static char*  pd_attempt_reserve_memory_at(size_t bytes, char* addr);
  static void   pd_split_reserved_memory(char *base, size_t size,
                                      size_t split, bool realloc);
  static bool   pd_commit_memory(char* addr, size_t bytes, bool executable);
  static bool   pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                                 bool executable);
  // Same as pd_commit_memory(), except that it either succeeds or calls
  // vm_exit_out_of_memory() with the specified mesg.
  static void   pd_commit_memory_or_exit(char* addr, size_t bytes,
                                         bool executable, const char* mesg);
  static void   pd_commit_memory_or_exit(char* addr, size_t size,
                                         size_t alignment_hint,
                                         bool executable, const char* mesg);
  static bool   pd_uncommit_memory(char* addr, size_t bytes);
  static bool   pd_release_memory(char* addr, size_t bytes);

  static char*  pd_map_memory(int fd, const char* file_name, size_t file_offset,
                           char *addr, size_t bytes, bool read_only = false,
                           bool allow_exec = false);
  static char*  pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                             char *addr, size_t bytes, bool read_only,
                             bool allow_exec);
  static bool   pd_unmap_memory(char *addr, size_t bytes);
  static void   pd_free_memory(char *addr, size_t bytes, size_t alignment_hint);
  static void   pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint);

  static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned);

  static void initialize_initial_active_processor_count();
 public:
  static void init(void);                      // Called before command line parsing
  static void init_before_ergo(void);          // Called after command line parsing
                                               // before VM ergonomics processing.
  static jint init_2(void);                    // Called after command line parsing
                                               // and VM ergonomics processing
  static void init_globals(void) {             // Called from init_globals() in init.cpp
    init_globals_ext();
  }

  // File names are case-insensitive on Windows only.
  // Override as needed.
  static int    file_name_strcmp(const char* s1, const char* s2);

  // get/unset environment variable
  static bool getenv(const char* name, char* buffer, int len);
  static bool unsetenv(const char* name);

  static bool have_special_privileges();

  static jlong  javaTimeMillis();
  static jlong  javaTimeNanos();
  static void   javaTimeNanos_info(jvmtiTimerInfo *info_ptr);
  static void   run_periodic_checks();


  // Returns the elapsed time in seconds since the vm started.
  static double elapsedTime();

  // Returns real time in seconds since an arbitrary point
  // in the past.
  static bool getTimesSecs(double* process_real_time,
                           double* process_user_time,
                           double* process_system_time);

  // Interface to the performance counter
  static jlong elapsed_counter();
  static jlong elapsed_frequency();

  // The "virtual time" of a thread is the amount of time a thread has
  // actually run.  The first function indicates whether the OS supports
  // this functionality for the current thread, and if so:
  //   * the second enables vtime tracking (if that is required).
  //   * the third tells whether vtime is enabled.
  //   * the fourth returns the elapsed virtual time for the current
  //     thread.
  static bool supports_vtime();
  static bool enable_vtime();
  static bool vtime_enabled();
  static double elapsedVTime();

  // Return current local time in a string (YYYY-MM-DD HH:MM:SS).
  // It is MT safe, but not async-safe, as reading time zone
  // information may require a lock on some platforms.
  static char*      local_time_string(char *buf, size_t buflen);
  static struct tm* localtime_pd     (const time_t* clock, struct tm*  res);
  // Fill in buffer with current local time as an ISO-8601 string.
  // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz.
  // Returns buffer, or NULL if it failed.
  static char* iso8601_time(char* buffer, size_t buffer_length);

  // Interface for detecting a multiprocessor system
  static inline bool is_MP() {
    // During bootstrap, if _processor_count is not yet initialized, we
    // claim to be MP as that is the safest assumption. If any platform has
    // a stub generator that might be triggered in this phase, and for which
    // being declared MP when in fact not is a problem, then the bootstrap
    // routine for the stub generator needs to check the processor count
    // directly and leave the bootstrap routine in place until called after
    // initialization has occurred.
    return (_processor_count != 1) || AssumeMP;
  }
  static julong available_memory();
  static julong physical_memory();
  static bool has_allocatable_memory_limit(julong* limit);
  static bool is_server_class_machine();

  // number of CPUs
  static int processor_count() {
    return _processor_count;
  }
  static void set_processor_count(int count) { _processor_count = count; }

  // Returns the number of CPUs this process is currently allowed to run on.
  // Note that on some OSes this can change dynamically.
  static int active_processor_count();

  // At startup the number of active CPUs this process is allowed to run on.
  // This value does not change dynamically. May be different from active_processor_count().
  static int initial_active_processor_count() {
    assert(_initial_active_processor_count > 0, "Initial active processor count not set yet.");
    return _initial_active_processor_count;
  }

  // Bind processes to processors.
  //     This is a two-step procedure:
  //     first you generate a distribution of processes to processors,
  //     then you bind processes according to that distribution.
  // Compute a distribution of processes to processors.
  //    Stores the processor ids into the distribution array argument.
  //    Returns true if it worked, false if it didn't.
  static bool distribute_processes(uint length, uint* distribution);
  // Binds the current process to a processor.
  //    Returns true if it worked, false if it didn't.
  static bool bind_to_processor(uint processor_id);

  // Give a name to the current thread.
  static void set_native_thread_name(const char *name);

  // Interface for stack banging (predetecting possible stack overflow for
  // exception processing). There are guard pages, and above those, shadow
  // pages for stack overflow checking.
  static bool uses_stack_guard_pages();
  static bool allocate_stack_guard_pages();
  static void bang_stack_shadow_pages();
  static bool stack_shadow_pages_available(Thread *thread, methodHandle method);

  // OS interface to Virtual Memory

  // Return the default page size.
  static int    vm_page_size();

  // Returns the page size to use for a region of memory.
  // region_size / min_pages will always be greater than or equal to the
  // returned value. The returned value will divide region_size.
  static size_t page_size_for_region_aligned(size_t region_size, size_t min_pages);

  // Returns the page size to use for a region of memory.
  // region_size / min_pages will always be greater than or equal to the
  // returned value. The returned value might not divide region_size.
  static size_t page_size_for_region_unaligned(size_t region_size, size_t min_pages);
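
  // Worked example (values are illustrative, assuming the corresponding page
  // sizes are supported): for region_size = 16M and min_pages = 4, both
  // variants return a page size of at most 4M; the _aligned variant
  // additionally guarantees the result divides 16M evenly, while the
  // _unaligned variant may return a supported page size that does not.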

  // Return the largest page size that can be used
  static size_t max_page_size() {
    // The _page_sizes array is sorted in descending order.
    return _page_sizes[0];
  }

  // Methods for tracing page sizes returned by the above method; enabled by
  // TracePageSizes.  The region_{min,max}_size parameters should be the values
  // passed to page_size_for_region() and page_size should be the result of that
  // call.  The (optional) base and size parameters should come from the
  // ReservedSpace base() and size() methods.
  static void trace_page_sizes(const char* str, const size_t* page_sizes,
                               int count) PRODUCT_RETURN;
  static void trace_page_sizes(const char* str, const size_t region_min_size,
                               const size_t region_max_size,
                               const size_t page_size,
                               const char* base = NULL,
                               const size_t size = 0) PRODUCT_RETURN;

  static int    vm_allocation_granularity();
  static char*  reserve_memory(size_t bytes, char* addr = 0,
                               size_t alignment_hint = 0);
  static char*  reserve_memory(size_t bytes, char* addr,
                               size_t alignment_hint, MEMFLAGS flags);
  static char*  reserve_memory_aligned(size_t size, size_t alignment);
  static char*  attempt_reserve_memory_at(size_t bytes, char* addr);
  static void   split_reserved_memory(char *base, size_t size,
                                      size_t split, bool realloc);
  static bool   commit_memory(char* addr, size_t bytes, bool executable);
  static bool   commit_memory(char* addr, size_t size, size_t alignment_hint,
                              bool executable);
  // Same as commit_memory(), except that it either succeeds or calls
  // vm_exit_out_of_memory() with the specified mesg.
  static void   commit_memory_or_exit(char* addr, size_t bytes,
                                      bool executable, const char* mesg);
  static void   commit_memory_or_exit(char* addr, size_t size,
                                      size_t alignment_hint,
                                      bool executable, const char* mesg);
  static bool   uncommit_memory(char* addr, size_t bytes);
  static bool   release_memory(char* addr, size_t bytes);
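
  // Minimal usage sketch (hypothetical caller, error handling mostly elided):
  //   char* base = os::reserve_memory(len);
  //   if (base != NULL) {
  //     os::commit_memory_or_exit(base, len, !ExecMem, "sketch");
  //     // ... use [base, base + len) ...
  //     os::uncommit_memory(base, len);
  //     os::release_memory(base, len);
  //   }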

  // Touch memory pages that cover the memory range from start to end (exclusive)
  // to make the OS back the memory range with actual memory.
  // Current implementation may not touch the last page if unaligned addresses
  // are passed.
  static void   pretouch_memory(char* start, char* end);

  enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
  static bool   protect_memory(char* addr, size_t bytes, ProtType prot,
                               bool is_committed = true);

  static bool   guard_memory(char* addr, size_t bytes);
  static bool   unguard_memory(char* addr, size_t bytes);
  static bool   create_stack_guard_pages(char* addr, size_t bytes);
  static bool   pd_create_stack_guard_pages(char* addr, size_t bytes);
  static bool   remove_stack_guard_pages(char* addr, size_t bytes);

  static char*  map_memory(int fd, const char* file_name, size_t file_offset,
                           char *addr, size_t bytes, bool read_only = false,
                           bool allow_exec = false);
  static char*  remap_memory(int fd, const char* file_name, size_t file_offset,
                             char *addr, size_t bytes, bool read_only,
                             bool allow_exec);
  static bool   unmap_memory(char *addr, size_t bytes);
  static void   free_memory(char *addr, size_t bytes, size_t alignment_hint);
  static void   realign_memory(char *addr, size_t bytes, size_t alignment_hint);

  // NUMA-specific interface
  static bool   numa_has_static_binding();
  static bool   numa_has_group_homing();
  static void   numa_make_local(char *addr, size_t bytes, int lgrp_hint);
  static void   numa_make_global(char *addr, size_t bytes);
  static size_t numa_get_groups_num();
  static size_t numa_get_leaf_groups(int *ids, size_t size);
  static bool   numa_topology_changed();
  static int    numa_get_group_id();

  // Page manipulation
  struct page_info {
    size_t size;
    int lgrp_id;
  };
  static bool   get_page_info(char *start, page_info* info);
  static char*  scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found);

  static char*  non_memory_address_word();
  // reserve, commit and pin the entire memory region
  static char*  reserve_memory_special(size_t size, size_t alignment,
                                       char* addr, bool executable);
  static bool   release_memory_special(char* addr, size_t bytes);
  static void   large_page_init();
  static size_t large_page_size();
  static bool   can_commit_large_page_memory();
  static bool   can_execute_large_page_memory();

  // OS interface to polling page
  static address get_polling_page()             { return _polling_page; }
  static void    set_polling_page(address page) { _polling_page = page; }
  static bool    is_poll_address(address addr)  { return addr >= _polling_page && addr < (_polling_page + os::vm_page_size()); }
  static void    make_polling_page_unreadable();
  static void    make_polling_page_readable();

  // Routines used to serialize the thread state without using membars
  static void    serialize_thread_states();

  // Since we write to the serialize page from every thread, we
  // want stores to be on unique cache lines whenever possible
  // in order to minimize CPU cross talk.  We pre-compute the
  // amount to shift the thread* to make this offset unique to
  // each thread.
  static int     get_serialize_page_shift_count() {
    return SerializePageShiftCount;
  }

  static void     set_serialize_page_mask(uintptr_t mask) {
    _serialize_page_mask = mask;
  }

  static uintptr_t get_serialize_page_mask() {
    return _serialize_page_mask;
  }

  static void    set_memory_serialize_page(address page);

  static address get_memory_serialize_page() {
    return (address)_mem_serialize_page;
  }

  static inline void write_memory_serialize_page(JavaThread *thread) {
    uintptr_t page_offset = ((uintptr_t)thread >>
                            get_serialize_page_shift_count()) &
                            get_serialize_page_mask();
    *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1;
  }
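
  // Illustrative example (values are hypothetical): if the shift count is 3
  // and the mask is 0xffc, a thread whose address is T writes to the int32_t
  // at offset ((T >> 3) & 0xffc) within the serialize page, so threads whose
  // addresses differ in those bits touch distinct 4-byte-aligned slots.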

  static bool    is_memory_serialize_page(JavaThread *thread, address addr) {
    if (UseMembar) return false;
    // Previously this function calculated the exact address of this
    // thread's serialize page, and checked if the faulting address
    // was equal.  However, some platforms mask off faulting addresses
    // to the page size, so now we just check that the address is
    // within the page.  This makes the thread argument unnecessary,
    // but we retain the NULL check to preserve existing behaviour.
    if (thread == NULL) return false;
    address page = (address) _mem_serialize_page;
    return addr >= page && addr < (page + os::vm_page_size());
  }

  static void block_on_serialize_page_trap();

  // threads

  enum ThreadType {
    vm_thread,
    cgc_thread,        // Concurrent GC thread
    pgc_thread,        // Parallel GC thread
    java_thread,
    compiler_thread,
    watcher_thread,
    os_thread
  };

  static bool create_thread(Thread* thread,
                            ThreadType thr_type,
                            size_t stack_size = 0);

  // The "main thread", also known as "starting thread", is the thread
  // that loads/creates the JVM via JNI_CreateJavaVM.
  static bool create_main_thread(JavaThread* thread);

  // The primordial thread is the initial process thread. The java
  // launcher never uses the primordial thread as the main thread, but
  // applications that host the JVM directly may do so. Some platforms
  // need special-case handling of the primordial thread if it attaches
  // to the VM.
  static bool is_primordial_thread(void)
#if defined(_WINDOWS) || defined(BSD)
    // No way to identify the primordial thread.
    { return false; }
#else
  ;
#endif

  static bool create_attached_thread(JavaThread* thread);
  static void pd_start_thread(Thread* thread);
  static void start_thread(Thread* thread);

  static void initialize_thread(Thread* thr);
  static void free_thread(OSThread* osthread);

  // The thread id is 64-bit on 64-bit Linux; on Windows and Solaris it is 32-bit.
  static intx current_thread_id();
  static int current_process_id();
  static int sleep(Thread* thread, jlong ms, bool interruptable);
  // Short standalone OS sleep suitable for slow-path spin loops.
  // Ignores Thread.interrupt() (so keep it short).
  // ms = 0 will sleep for the least amount of time allowed by the OS.
  static void naked_short_sleep(jlong ms);
  static void infinite_sleep(); // never returns, use with CAUTION
  static void yield();        // Yields to all threads with same priority
  enum YieldResult {
    YIELD_SWITCHED = 1,         // caller descheduled; other ready threads exist & ran
    YIELD_NONEREADY = 0,        // No other runnable/ready threads;
                                // the platform-specific yield returned immediately
    YIELD_UNKNOWN = -1          // Unknown: platform doesn't support _SWITCHED or _NONEREADY
    // YIELD_SWITCHED and YIELD_NONEREADY imply the platform supports a "strong"
    // yield that can be used in lieu of blocking.
  };
  static YieldResult NakedYield();
  static void yield_all(int attempts = 0); // Yields to all other threads, including lower priority
  static void loop_breaker(int attempts);  // called from within tight loops to possibly influence time-sharing
  static OSReturn set_priority(Thread* thread, ThreadPriority priority);
  static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority);

  static void interrupt(Thread* thread);
  static bool is_interrupted(Thread* thread, bool clear_interrupted);

  static int pd_self_suspend_thread(Thread* thread);

  static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp);
  static frame      fetch_frame_from_context(void* ucVoid);

  static ExtendedPC get_thread_pc(Thread *thread);
  static void breakpoint();

  static address current_stack_pointer();
  static address current_stack_base();
  static size_t current_stack_size();

  static void verify_stack_alignment() PRODUCT_RETURN;

  static int message_box(const char* title, const char* message);
  static char* do_you_want_to_debug(const char* message);

  // run cmd in a separate process and return its exit code, or -1 on failure
  static int fork_and_exec(char *cmd);

  // os::exit() is merged with vm_exit()
  // static void exit(int num);

  // Terminate the VM, but don't exit the process
  static void shutdown();

  // Terminate with an error.  Default is to generate a core file on platforms
  // that support such things.  This calls shutdown() and then aborts.
  static void abort(bool dump_core = true);

  // Die immediately, no exit hook, no abort hook, no cleanup.
  static void die();

  // File i/o operations
  static const int default_file_open_flags();
  static int open(const char *path, int oflag, int mode);
  static FILE* open(int fd, const char* mode);
  static int close(int fd);
  static jlong lseek(int fd, jlong offset, int whence);
  static char* native_path(char *path);
  static int ftruncate(int fd, jlong length);
  static int fsync(int fd);
  static int available(int fd, jlong *bytes);

  // File i/o operations

  static size_t read(int fd, void *buf, unsigned int nBytes);
  static size_t restartable_read(int fd, void *buf, unsigned int nBytes);
  static size_t write(int fd, const void *buf, unsigned int nBytes);

  // Reading directories.
  static DIR*           opendir(const char* dirname);
  static int            readdir_buf_size(const char *path);
  static struct dirent* readdir(DIR* dirp, dirent* dbuf);
  static int            closedir(DIR* dirp);

  // Dynamic library extension
  static const char*    dll_file_extension();

  static const char*    get_temp_directory();
  static const char*    get_current_directory(char *buf, size_t buflen);

  // Builds a platform-specific full library path given an ld path and lib name.
  // Returns true if the buffer contains the full path to an existing file, false otherwise.
  static bool           dll_build_name(char* buffer, size_t size,
                                       const char* pathname, const char* fname);

  // Symbol lookup, find nearest function name; basically it implements
  // dladdr() for all platforms. Name of the nearest function is copied
  // to buf. Distance from its base address is optionally returned as offset.
  // If function name is not found, buf[0] is set to '\0' and offset is
  // set to -1 (if offset is non-NULL).
  static bool dll_address_to_function_name(address addr, char* buf,
                                           int buflen, int* offset);
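
  // Illustrative use (hypothetical caller, e.g. an error reporter):
  //   char name[256]; int offset;
  //   if (os::dll_address_to_function_name(pc, name, (int) sizeof(name), &offset)) {
  //     st->print("%s+%d", name, offset);
  //   }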

  // Locate DLL/DSO. On success, full path of the library is copied to
  // buf, and offset is optionally set to be the distance between addr
  // and the library's base address. On failure, buf[0] is set to '\0'
  // and offset is set to -1 (if offset is non-NULL).
  static bool dll_address_to_library_name(address addr, char* buf,
                                          int buflen, int* offset);

  // Find out whether the pc is in the static code for jvm.dll/libjvm.so.
  static bool address_is_in_vm(address addr);

  // Loads a .dll/.so and, in case of error, checks whether the .dll/.so was
  // built for the same architecture as HotSpot is running on.
  static void* dll_load(const char *name, char *ebuf, int ebuflen);

  // lookup symbol in a shared library
  static void* dll_lookup(void* handle, const char* name);

  // Unload library
  static void  dll_unload(void *lib);

  // Return the handle of this process
  static void* get_default_process_handle();

  // Check for static linked agent library
  static bool find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
                                 size_t syms_len);

  // Find agent entry point
  static void *find_agent_function(AgentLibrary *agent_lib, bool check_lib,
                                   const char *syms[], size_t syms_len);

  // Print out system information; these functions are called by the fatal error handler.
  // Output format may be different on different platforms.
  static void print_os_info(outputStream* st);
  static void print_os_info_brief(outputStream* st);
  static void print_cpu_info(outputStream* st);
  static void pd_print_cpu_info(outputStream* st);
  static void print_memory_info(outputStream* st);
  static void print_dll_info(outputStream* st);
  static void print_environment_variables(outputStream* st, const char** env_list, char* buffer, int len);
  static void print_context(outputStream* st, void* context);
  static void print_register_info(outputStream* st, void* context);
  static void print_siginfo(outputStream* st, void* siginfo);
  static void print_signal_handlers(outputStream* st, char* buf, size_t buflen);
  static void print_date_and_time(outputStream* st);

  static void print_location(outputStream* st, intptr_t x, bool verbose = false);
  static size_t lasterror(char *buf, size_t len);
  static int get_last_error();

  // Determines whether the calling process is being debugged by a user-mode debugger.
  static bool is_debugger_attached();

  // wait for a key press if PauseAtExit is set
  static void wait_for_keypress_at_exit(void);

  // The following two functions are used by fatal error handler to trace
  // native (C) frames. They are not part of frame.hpp/frame.cpp because
  // frame.hpp/cpp assume thread is JavaThread, and also because different
  // OS/compiler may have different convention or provide different API to
  // walk C frames.
  //
  // We don't attempt to become a debugger, so we only follow frames if that
  // does not require a lookup in the unwind table, which is part of the binary
  // file but may be unsafe to read after a fatal error. So on x86, we can
  // only walk stack if %ebp is used as frame pointer; on ia64, it's not
  // possible to walk C stack without having the unwind table.
  static bool is_first_C_frame(frame *fr);
  static frame get_sender_for_C_frame(frame *fr);

  // return current frame. pc() and sp() are set to NULL on failure.
  static frame      current_frame();

  static void print_hex_dump(outputStream* st, address start, address end, int unitsize);

  // returns a string to describe the exception/signal;
  // returns NULL if exception_code is not an OS exception/signal.
  static const char* exception_name(int exception_code, char* buf, size_t buflen);

  // Returns native Java library, loads if necessary
  static void*    native_java_library();

  // Fills in path to jvm.dll/libjvm.so (used by the Disassembler)
  static void     jvm_path(char *buf, jint buflen);

  // Returns true if we are running in a headless JRE.
  static bool     is_headless_jre();

  // JNI names
  static void     print_jni_name_prefix_on(outputStream* st, int args_size);
  static void     print_jni_name_suffix_on(outputStream* st, int args_size);

  // File conventions
  static const char* file_separator();
  static const char* line_separator();
  static const char* path_separator();

  // Init os specific system properties values
  static void init_system_properties_values();

  // IO operations, non-JVM_ version.
  static int stat(const char* path, struct stat* sbuf);
  static bool dir_is_empty(const char* path);

  // IO operations on binary files
  static int create_binary_file(const char* path, bool rewrite_existing);
  static jlong current_file_offset(int fd);
  static jlong seek_to_file_offset(int fd, jlong offset);

  // Thread Local Storage
  static int   allocate_thread_local_storage();
  static void  thread_local_storage_at_put(int index, void* value);
  static void* thread_local_storage_at(int index);
  static void  free_thread_local_storage(int index);

  // Retrieve native stack frames.
  // Parameters:
  //   stack:  an array in which to store the stack pointers.
  //   size:   the size of the above array.
  //   toSkip: number of stack frames to skip at the beginning.
  // Return: number of stack frames captured.
  static int get_native_stack(address* stack, int size, int toSkip = 0);
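
  // Illustrative sketch (hypothetical caller): capture up to 32 frames,
  // skipping this helper's own frame:
  //   address frames[32];
  //   int captured = os::get_native_stack(frames, 32, 1 /* toSkip */);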

  // General allocation (must be MT-safe)
  static void* malloc  (size_t size, MEMFLAGS flags, const NativeCallStack& stack);
  static void* malloc  (size_t size, MEMFLAGS flags);
  static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack);
  static void* realloc (void *memblock, size_t size, MEMFLAGS flag);

  static void  free    (void *memblock, MEMFLAGS flags = mtNone);
  static bool  check_heap(bool force = false);      // verify C heap integrity
  static char* strdup(const char *, MEMFLAGS flags = mtInternal);  // Like strdup
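
  // Usage sketch (hypothetical caller; the flags value is illustrative):
  //   char* p = (char*) os::malloc(bufsize, mtInternal);
  //   if (p != NULL) {
  //     // ... fill the buffer ...
  //     os::free(p);
  //   }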

#ifndef PRODUCT
  static julong num_mallocs;         // # of calls to malloc/realloc
  static julong alloc_bytes;         // # of bytes allocated
  static julong num_frees;           // # of calls to free
  static julong free_bytes;          // # of bytes freed
#endif

  // SocketInterface (ex HPI SocketInterface)
  static int socket(int domain, int type, int protocol);
  static int socket_close(int fd);
  static int socket_shutdown(int fd, int howto);
  static int recv(int fd, char* buf, size_t nBytes, uint flags);
  static int send(int fd, char* buf, size_t nBytes, uint flags);
  static int raw_send(int fd, char* buf, size_t nBytes, uint flags);
  static int timeout(int fd, long timeout);
  static int listen(int fd, int count);
  static int connect(int fd, struct sockaddr* him, socklen_t len);
  static int bind(int fd, struct sockaddr* him, socklen_t len);
  static int accept(int fd, struct sockaddr* him, socklen_t* len);
  static int recvfrom(int fd, char* buf, size_t nbytes, uint flags,
                      struct sockaddr* from, socklen_t* fromlen);
  static int get_sock_name(int fd, struct sockaddr* him, socklen_t* len);
  static int sendto(int fd, char* buf, size_t len, uint flags,
                    struct sockaddr* to, socklen_t tolen);
  static int socket_available(int fd, jint* pbytes);

  static int get_sock_opt(int fd, int level, int optname,
                          char* optval, socklen_t* optlen);
  static int set_sock_opt(int fd, int level, int optname,
                          const char* optval, socklen_t optlen);
  static int get_host_name(char* name, int namelen);

  static struct hostent* get_host_by_name(char* name);

  // Support for signals (see JVM_RaiseSignal, JVM_RegisterSignal)
  static void  signal_init();
  static void  signal_init_pd();
  static void  signal_notify(int signal_number);
  static void* signal(int signal_number, void* handler);
  static void  signal_raise(int signal_number);
  static int   signal_wait();
  static int   signal_lookup();
  static void* user_handler();
  static void  terminate_signal_thread();
  static int   sigexitnum_pd();

  // random number generation
  static long random();                    // return a 32-bit pseudorandom number
  static void init_random(long initval);   // initialize the random sequence

  // Structured OS Exception support
  static void os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);

  // On Windows this will create an actual minidump; on Linux/Solaris it will simply check core dump limits
  static void check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize);

  // Get the default path to the core file
  // Returns the length of the string
  static int get_core_path(char* buffer, size_t bufferSize);

  // JVMTI & JVM monitoring and management support
  // The thread_cpu_time() and current_thread_cpu_time() are only
  // supported if is_thread_cpu_time_supported() returns true.
  // They are not supported on Solaris T1.

  // Thread CPU Time - return the fast estimate on a platform
  // On Solaris - call gethrvtime (fast) - user time only
  // On Linux   - fast clock_gettime where available - user+sys
  //            - otherwise: very slow /proc fs - user+sys
  // On Windows - GetThreadTimes - user+sys
  static jlong current_thread_cpu_time();
  static jlong thread_cpu_time(Thread* t);

  // Thread CPU Time with user_sys_cpu_time parameter.
  //
  // If user_sys_cpu_time is true, user+sys time is returned.
  // Otherwise, only user time is returned
  static jlong current_thread_cpu_time(bool user_sys_cpu_time);
  static jlong thread_cpu_time(Thread* t, bool user_sys_cpu_time);

  // Return a bunch of info about the timers.
  // Note that the returned info for these two functions may be different
  // on some platforms
  static void current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr);
  static void thread_cpu_time_info(jvmtiTimerInfo *info_ptr);

  static bool is_thread_cpu_time_supported();

  // System loadavg support.  Returns -1 if load average cannot be obtained.
  static int loadavg(double loadavg[], int nelem);

  // Hook for os specific jvm options that we don't want to abort on seeing
  static bool obsolete_option(const JavaVMOption *option);

  // Extensions
#include "runtime/os_ext.hpp"

 public:
  class CrashProtectionCallback : public StackObj {
  public:
    virtual void call() = 0;
  };

  // Platform dependent stuff
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.hpp"
# include "os_posix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.hpp"
# include "os_posix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.hpp"
# include "os_posix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_posix.hpp"
# include "os_bsd.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_x86
# include "os_linux_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "os_linux_sparc.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "os_linux_zero.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "os_solaris_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "os_solaris_sparc.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "os_windows_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "os_linux_arm.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "os_linux_ppc.hpp"
#endif
#ifdef TARGET_OS_ARCH_aix_ppc
# include "os_aix_ppc.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "os_bsd_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "os_bsd_zero.hpp"
#endif

 public:
#ifndef PLATFORM_PRINT_NATIVE_STACK
  // No platform-specific code for printing the native stack.
  static bool platform_print_native_stack(outputStream* st, void* context,
                                          char *buf, int buf_size) {
    return false;
  }
#endif

  // debugging support (mostly used by debug.cpp but also fatal error handler)
  static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address

  static bool dont_yield();                     // when true, JVM_Yield() is nop
  static void print_statistics();

  // Thread priority helpers (implemented in OS-specific part)
  static OSReturn set_native_priority(Thread* thread, int native_prio);
  static OSReturn get_native_priority(const Thread* const thread, int* priority_ptr);
  static int java_to_os_priority[CriticalPriority + 1];
  // Hint to the underlying OS that a task switch would not be good.
  // Void return because it's a hint and can fail.
  static void hint_no_preempt();

  // Used at creation if requested by the diagnostic flag PauseAtStartup.
  // Causes the VM to wait until an external stimulus has been applied
  // (for Unix, that stimulus is a signal, for Windows, an external
  // ResumeThread call)
  static void pause();

  // Builds a platform-dependent Agent_OnLoad_<libname> function name,
  // which is used to find statically linked-in agents.
  static char*  build_agent_function_name(const char *sym, const char *cname,
                                          bool is_absolute_path);

  class SuspendedThreadTaskContext {
  public:
    SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {}
    Thread* thread() const { return _thread; }
    void* ucontext() const { return _ucontext; }
  private:
    Thread* _thread;
    void* _ucontext;
  };

  class SuspendedThreadTask {
  public:
    SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {}
    virtual ~SuspendedThreadTask() {}
    void run();
    bool is_done() { return _done; }
    virtual void do_task(const SuspendedThreadTaskContext& context) = 0;
  protected:
  private:
    void internal_do_task();
    Thread* _thread;
    bool _done;
  };

#ifndef TARGET_OS_FAMILY_windows
  // Suspend/resume support
  // Protocol:
  //
  // a thread starts in SR_RUNNING
  //
  // SR_RUNNING can go to
  //   * SR_SUSPEND_REQUEST when the WatcherThread wants to suspend it
  // SR_SUSPEND_REQUEST can go to
  //   * SR_RUNNING if WatcherThread decides it waited for SR_SUSPENDED too long (timeout)
  //   * SR_SUSPENDED if the stopped thread receives the signal and switches state
  // SR_SUSPENDED can go to
  //   * SR_WAKEUP_REQUEST when the WatcherThread has done the work and wants to resume
  // SR_WAKEUP_REQUEST can go to
  //   * SR_RUNNING when the stopped thread receives the signal
  //   * SR_WAKEUP_REQUEST on timeout (resend the signal and try again)
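  //
  // A typical round trip, expressed with the methods below (illustrative):
  //   requester: sr.request_suspend();   // SR_RUNNING -> SR_SUSPEND_REQUEST
  //   target:    sr.suspended();         // SR_SUSPEND_REQUEST -> SR_SUSPENDED
  //   requester: sr.request_wakeup();    // SR_SUSPENDED -> SR_WAKEUP_REQUEST
  //   target:    sr.running();           // SR_WAKEUP_REQUEST -> SR_RUNNING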
  class SuspendResume {
   public:
    enum State {
      SR_RUNNING,
      SR_SUSPEND_REQUEST,
      SR_SUSPENDED,
      SR_WAKEUP_REQUEST
    };

  private:
    volatile State _state;

  private:
    /* try to switch state from state "from" to state "to"
     * returns the state set after the method is complete
     */
    State switch_state(State from, State to);

  public:
    SuspendResume() : _state(SR_RUNNING) { }

    State state() const { return _state; }

    State request_suspend() {
      return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST);
    }

    State cancel_suspend() {
      return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING);
    }

    State suspended() {
      return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED);
    }

    State request_wakeup() {
      return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST);
    }

    State running() {
      return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING);
    }

    bool is_running() const {
      return _state == SR_RUNNING;
    }

    bool is_suspend_request() const {
      return _state == SR_SUSPEND_REQUEST;
    }

    bool is_suspended() const {
      return _state == SR_SUSPENDED;
    }
  };
#endif


 protected:
  static long _rand_seed;                     // seed for random number generator
  static int _processor_count;                // number of processors
  static int _initial_active_processor_count; // number of active processors during initialization.

  static char* format_boot_path(const char* format_string,
                                const char* home,
                                int home_len,
                                char fileSep,
                                char pathSep);
  static bool set_boot_path(char fileSep, char pathSep);
  static char** split_path(const char* path, int* n);

};

// Note that "PAUSE" is almost always used with synchronization
// so arguably we should provide Atomic::SpinPause() instead
// of the global SpinPause() with C linkage.
// It'd also be eligible for inlining on many platforms.

extern "C" int SpinPause();
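
// Typical (illustrative) spin-then-yield pattern built on SpinPause();
// the spin_limit value and the termination condition are hypothetical:
//   for (int i = 0; !done; i++) {
//     if (i < spin_limit) { SpinPause(); } else { os::yield(); }
//   }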

#endif // SHARE_VM_RUNTIME_OS_HPP