1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "jvm_solaris.h"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/filemap.hpp"
  37 #include "mutex_solaris.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "os_share_solaris.hpp"
  40 #include "os_solaris.inline.hpp"
  41 #include "prims/jniFastGetField.hpp"
  42 #include "prims/jvm.h"
  43 #include "prims/jvm_misc.hpp"
  44 #include "runtime/arguments.hpp"
  45 #include "runtime/atomic.inline.hpp"
  46 #include "runtime/extendedPC.hpp"
  47 #include "runtime/globals.hpp"
  48 #include "runtime/interfaceSupport.hpp"
  49 #include "runtime/java.hpp"
  50 #include "runtime/javaCalls.hpp"
  51 #include "runtime/mutexLocker.hpp"
  52 #include "runtime/objectMonitor.hpp"
  53 #include "runtime/orderAccess.inline.hpp"
  54 #include "runtime/osThread.hpp"
  55 #include "runtime/perfMemory.hpp"
  56 #include "runtime/sharedRuntime.hpp"
  57 #include "runtime/statSampler.hpp"
  58 #include "runtime/stubRoutines.hpp"
  59 #include "runtime/thread.inline.hpp"
  60 #include "runtime/threadCritical.hpp"
  61 #include "runtime/timer.hpp"
  62 #include "runtime/vm_version.hpp"
  63 #include "services/attachListener.hpp"
  64 #include "services/memTracker.hpp"
  65 #include "services/runtimeService.hpp"
  66 #include "utilities/decoder.hpp"
  67 #include "utilities/defaultStream.hpp"
  68 #include "utilities/events.hpp"
  69 #include "utilities/growableArray.hpp"
  70 #include "utilities/vmError.hpp"
  71 
  72 // put OS-includes here
  73 # include <dlfcn.h>
  74 # include <errno.h>
  75 # include <exception>
  76 # include <link.h>
  77 # include <poll.h>
  78 # include <pthread.h>
  79 # include <pwd.h>
  80 # include <schedctl.h>
  81 # include <setjmp.h>
  82 # include <signal.h>
  83 # include <stdio.h>
  84 # include <alloca.h>
  85 # include <sys/filio.h>
  86 # include <sys/ipc.h>
  87 # include <sys/lwp.h>
  88 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
  89 # include <sys/mman.h>
  90 # include <sys/processor.h>
  91 # include <sys/procset.h>
  92 # include <sys/pset.h>
  93 # include <sys/resource.h>
  94 # include <sys/shm.h>
  95 # include <sys/socket.h>
  96 # include <sys/stat.h>
  97 # include <sys/systeminfo.h>
  98 # include <sys/time.h>
  99 # include <sys/times.h>
 100 # include <sys/types.h>
 101 # include <sys/wait.h>
 102 # include <sys/utsname.h>
 103 # include <thread.h>
 104 # include <unistd.h>
 105 # include <sys/priocntl.h>
 106 # include <sys/rtpriocntl.h>
 107 # include <sys/tspriocntl.h>
 108 # include <sys/iapriocntl.h>
 109 # include <sys/fxpriocntl.h>
 110 # include <sys/loadavg.h>
 111 # include <string.h>
 112 # include <stdio.h>
 113 
 114 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
 115 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
 116 
 117 #define MAX_PATH (2 * K)
 118 
 119 // for timer info max values which include all bits
 120 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 121 
 122 
// Here are some madvise() advice values and liblgrp resource types, normally
// provided by the system headers, defined here so we can still compile on
// older systems whose headers do not have them.
 125 
 126 #ifndef MADV_ACCESS_LWP
 127   #define  MADV_ACCESS_LWP   7       /* next LWP to access heavily */
 128 #endif
 129 #ifndef MADV_ACCESS_MANY
 130   #define  MADV_ACCESS_MANY  8       /* many processes to access heavily */
 131 #endif
 132 
 133 #ifndef LGRP_RSRC_CPU
 134   #define LGRP_RSRC_CPU      0       /* CPU resources */
 135 #endif
 136 #ifndef LGRP_RSRC_MEM
 137   #define LGRP_RSRC_MEM      1       /* memory resources */
 138 #endif
 139 
 140 // see thr_setprio(3T) for the basis of these numbers
 141 #define MinimumPriority 0
 142 #define NormalPriority  64
 143 #define MaximumPriority 127
 144 
 145 // Values for ThreadPriorityPolicy == 1
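// Entry 0 is a filler for the invalid Java priority 0; entries 1..10 map the
// Java thread priorities, and the last entry is used for the critical priority.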
 146 int prio_policy1[CriticalPriority+1] = {
 147   -99999,  0, 16,  32,  48,  64,
 148           80, 96, 112, 124, 127, 127 };
 149 
 150 // System parameters used internally
 151 static clock_t clock_tics_per_sec = 100;
 152 
 153 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
 154 static bool enabled_extended_FILE_stdio = false;
 155 
 156 // For diagnostics to print a message once. see run_periodic_checks
 157 static bool check_addr0_done = false;
 158 static sigset_t check_signal_done;
 159 static bool check_signals = true;
 160 
 161 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
 162 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
 163 
 164 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
 165 
 166 
 167 // "default" initializers for missing libc APIs
 168 extern "C" {
 169   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 170   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
 171 
 172   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 173   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
 174 }
 175 
 176 // "default" initializers for pthread-based synchronization
 177 extern "C" {
 178   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 179   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 180 }
 181 
 182 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 183 
 184 // Thread Local Storage
 185 // This is common to all Solaris platforms so it is defined here,
 186 // in this common file.
 187 // The declarations are in the os_cpu threadLS*.hpp files.
 188 //
 189 // Static member initialization for TLS
 190 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
 191 
 192 #ifndef PRODUCT
 193   #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
 194 
 195 int ThreadLocalStorage::_tcacheHit = 0;
 196 int ThreadLocalStorage::_tcacheMiss = 0;
 197 
 198 void ThreadLocalStorage::print_statistics() {
 199   int total = _tcacheMiss+_tcacheHit;
 200   tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
 201                 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
 202 }
 203   #undef _PCT
 204 #endif // PRODUCT
 205 
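// Slow path for the thread cache: look up the current thread the slow way,
// sanity-check that the current stack pointer lies within that thread's stack,
// then install the thread in the cache slot computed for this raw id.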
 206 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
 207                                                         int index) {
 208   Thread *thread = get_thread_slow();
 209   if (thread != NULL) {
 210     address sp = os::current_stack_pointer();
 211     guarantee(thread->_stack_base == NULL ||
 212               (sp <= thread->_stack_base &&
 213               sp >= thread->_stack_base - thread->_stack_size) ||
 214               is_error_reported(),
 215               "sp must be inside of selected thread stack");
 216 
 217     thread->set_self_raw_id(raw_id);  // mark for quick retrieval
 218     _get_thread_cache[index] = thread;
 219   }
 220   return thread;
 221 }
 222 
 223 
 224 static const double all_zero[sizeof(Thread) / sizeof(double) + 1] = {0};
 225 #define NO_CACHED_THREAD ((Thread*)all_zero)
 226 
 227 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
 228 
 229   // Store the new value before updating the cache to prevent a race
 230   // between get_thread_via_cache_slowly() and this store operation.
 231   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
 232 
 233   // Update thread cache with new thread if setting on thread create,
 234   // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
 235   uintptr_t raw = pd_raw_thread_id();
 236   int ix = pd_cache_index(raw);
 237   _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
 238 }
 239 
 240 void ThreadLocalStorage::pd_init() {
 241   for (int i = 0; i < _pd_cache_size; i++) {
 242     _get_thread_cache[i] = NO_CACHED_THREAD;
 243   }
 244 }
 245 
 246 // Invalidate all the caches (happens to be the same as pd_init).
 247 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
 248 
 249 #undef NO_CACHED_THREAD
 250 
 251 // END Thread Local Storage
 252 
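// Clamp a reported stack size so that it is not negative and cannot wrap
// below address zero (see the bug ids referenced in the body).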
 253 static inline size_t adjust_stack_size(address base, size_t size) {
 254   if ((ssize_t)size < 0) {
 255     // 4759953: Compensate for ridiculous stack size.
 256     size = max_intx;
 257   }
 258   if (size > (size_t)base) {
 259     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
 260     size = (size_t)base;
 261   }
 262   return size;
 263 }
 264 
 265 static inline stack_t get_stack_info() {
 266   stack_t st;
 267   int retval = thr_stksegment(&st);
 268   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
 269   assert(retval == 0, "incorrect return value from thr_stksegment");
 270   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
 271   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
 272   return st;
 273 }
 274 
 275 address os::current_stack_base() {
 276   int r = thr_main();
 277   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
 278   bool is_primordial_thread = r;
 279 
  // Workaround 4352906: avoid repeated calls to thr_stksegment from the
  // primordial thread (after the first call it looks like we trash some
  // data, causing the value for ss_sp to be incorrect).
 283   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
 284     stack_t st = get_stack_info();
 285     if (is_primordial_thread) {
 286       // cache initial value of stack base
 287       os::Solaris::_main_stack_base = (address)st.ss_sp;
 288     }
 289     return (address)st.ss_sp;
 290   } else {
 291     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
 292     return os::Solaris::_main_stack_base;
 293   }
 294 }
 295 
 296 size_t os::current_stack_size() {
 297   size_t size;
 298 
 299   int r = thr_main();
 300   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
 301   if (!r) {
 302     size = get_stack_info().ss_size;
 303   } else {
 304     struct rlimit limits;
 305     getrlimit(RLIMIT_STACK, &limits);
 306     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
 307   }
 308   // base may not be page aligned
 309   address base = current_stack_base();
  address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
 311   return (size_t)(base - bottom);
 312 }
 313 
 314 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
 315   return localtime_r(clock, res);
 316 }
 317 
 318 void os::Solaris::try_enable_extended_io() {
 319   typedef int (*enable_extended_FILE_stdio_t)(int, int);
 320 
 321   if (!UseExtendedFileIO) {
 322     return;
 323   }
 324 
 325   enable_extended_FILE_stdio_t enabler =
 326     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
 327                                          "enable_extended_FILE_stdio");
 328   if (enabler) {
 329     enabler(-1, -1);
 330   }
 331 }
 332 
 333 static int _processors_online = 0;
 334 
 335 jint os::Solaris::_os_thread_limit = 0;
 336 volatile jint os::Solaris::_os_thread_count = 0;
 337 
 338 julong os::available_memory() {
 339   return Solaris::available_memory();
 340 }
 341 
 342 julong os::Solaris::available_memory() {
 343   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
 344 }
 345 
 346 julong os::Solaris::_physical_memory = 0;
 347 
 348 julong os::physical_memory() {
 349   return Solaris::physical_memory();
 350 }
 351 
 352 static hrtime_t first_hrtime = 0;
 353 static const hrtime_t hrtime_hz = 1000*1000*1000;
 354 static volatile hrtime_t max_hrtime = 0;
 355 
 356 
 357 void os::Solaris::initialize_system_info() {
 358   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
 359   _processors_online = sysconf(_SC_NPROCESSORS_ONLN);
 360   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) *
 361                                      (julong)sysconf(_SC_PAGESIZE);
 362 }
 363 
 364 int os::active_processor_count() {
 365   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 366   pid_t pid = getpid();
 367   psetid_t pset = PS_NONE;
 368   // Are we running in a processor set or is there any processor set around?
 369   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
 370     uint_t pset_cpus;
 371     // Query the number of cpus available to us.
 372     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
 373       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
 374       _processors_online = pset_cpus;
 375       return pset_cpus;
 376     }
 377   }
 378   // Otherwise return number of online cpus
 379   return online_cpus;
 380 }
 381 
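// Return the ids of all processors in the given processor set.  On success,
// *id_array is a NEW_C_HEAP_ARRAY of *id_length entries which the caller is
// responsible for freeing with FREE_C_HEAP_ARRAY.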
 382 static bool find_processors_in_pset(psetid_t        pset,
 383                                     processorid_t** id_array,
 384                                     uint_t*         id_length) {
 385   bool result = false;
 386   // Find the number of processors in the processor set.
 387   if (pset_info(pset, NULL, id_length, NULL) == 0) {
 388     // Make up an array to hold their ids.
 389     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 390     // Fill in the array with their processor ids.
 391     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
 392       result = true;
 393     }
 394   }
 395   return result;
 396 }
 397 
 398 // Callers of find_processors_online() must tolerate imprecise results --
 399 // the system configuration can change asynchronously because of DR
 400 // or explicit psradm operations.
 401 //
 402 // We also need to take care that the loop (below) terminates as the
 403 // number of processors online can change between the _SC_NPROCESSORS_ONLN
 404 // request and the loop that builds the list of processor ids.   Unfortunately
 405 // there's no reliable way to determine the maximum valid processor id,
 406 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
 407 // man pages, which claim the processor id set is "sparse, but
 408 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
 409 // exit the loop.
 410 //
 411 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
 412 // not available on S8.0.
 413 
 414 static bool find_processors_online(processorid_t** id_array,
 415                                    uint*           id_length) {
 416   const processorid_t MAX_PROCESSOR_ID = 100000;
 417   // Find the number of processors online.
 418   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
 419   // Make up an array to hold their ids.
 420   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 421   // Processors need not be numbered consecutively.
 422   long found = 0;
 423   processorid_t next = 0;
 424   while (found < *id_length && next < MAX_PROCESSOR_ID) {
 425     processor_info_t info;
 426     if (processor_info(next, &info) == 0) {
 427       // NB, PI_NOINTR processors are effectively online ...
 428       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
 429         (*id_array)[found] = next;
 430         found += 1;
 431       }
 432     }
 433     next += 1;
 434   }
 435   if (found < *id_length) {
 436     // The loop above didn't identify the expected number of processors.
 437     // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
 438     // and re-running the loop, above, but there's no guarantee of progress
 439     // if the system configuration is in flux.  Instead, we just return what
 440     // we've got.  Note that in the worst case find_processors_online() could
 441     // return an empty set.  (As a fall-back in the case of the empty set we
 442     // could just return the ID of the current processor).
 443     *id_length = found;
 444   }
 445 
 446   return true;
 447 }
 448 
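// Spread 'distribution_length' assignments over the available processor ids,
// stepping across "boards" of ProcessDistributionStride processors so that
// consecutive assignments tend to land on different boards.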
 449 static bool assign_distribution(processorid_t* id_array,
 450                                 uint           id_length,
 451                                 uint*          distribution,
 452                                 uint           distribution_length) {
 453   // We assume we can assign processorid_t's to uint's.
 454   assert(sizeof(processorid_t) == sizeof(uint),
 455          "can't convert processorid_t to uint");
 456   // Quick check to see if we won't succeed.
 457   if (id_length < distribution_length) {
 458     return false;
 459   }
 460   // Assign processor ids to the distribution.
 461   // Try to shuffle processors to distribute work across boards,
 462   // assuming 4 processors per board.
 463   const uint processors_per_board = ProcessDistributionStride;
 464   // Find the maximum processor id.
 465   processorid_t max_id = 0;
 466   for (uint m = 0; m < id_length; m += 1) {
 467     max_id = MAX2(max_id, id_array[m]);
 468   }
 469   // The next id, to limit loops.
 470   const processorid_t limit_id = max_id + 1;
 471   // Make up markers for available processors.
 472   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
 473   for (uint c = 0; c < limit_id; c += 1) {
 474     available_id[c] = false;
 475   }
 476   for (uint a = 0; a < id_length; a += 1) {
 477     available_id[id_array[a]] = true;
 478   }
 479   // Step by "boards", then by "slot", copying to "assigned".
 480   // NEEDS_CLEANUP: The assignment of processors should be stateful,
 481   //                remembering which processors have been assigned by
 482   //                previous calls, etc., so as to distribute several
  //                independent calls of this method.  It would be nice
  //                to have an API that let us ask how many processes
  //                are bound to a processor, but we don't have that,
  //                either.
 487   //                In the short term, "board" is static so that
 488   //                subsequent distributions don't all start at board 0.
 489   static uint board = 0;
 490   uint assigned = 0;
 491   // Until we've found enough processors ....
 492   while (assigned < distribution_length) {
 493     // ... find the next available processor in the board.
 494     for (uint slot = 0; slot < processors_per_board; slot += 1) {
 495       uint try_id = board * processors_per_board + slot;
 496       if ((try_id < limit_id) && (available_id[try_id] == true)) {
 497         distribution[assigned] = try_id;
 498         available_id[try_id] = false;
 499         assigned += 1;
 500         break;
 501       }
 502     }
 503     board += 1;
 504     if (board * processors_per_board + 0 >= limit_id) {
 505       board = 0;
 506     }
 507   }
 508   if (available_id != NULL) {
 509     FREE_C_HEAP_ARRAY(bool, available_id);
 510   }
 511   return true;
 512 }
 513 
 514 void os::set_native_thread_name(const char *name) {
 515   // Not yet implemented.
 516   return;
 517 }
 518 
 519 bool os::distribute_processes(uint length, uint* distribution) {
 520   bool result = false;
 521   // Find the processor id's of all the available CPUs.
 522   processorid_t* id_array  = NULL;
 523   uint           id_length = 0;
 524   // There are some races between querying information and using it,
 525   // since processor sets can change dynamically.
 526   psetid_t pset = PS_NONE;
 527   // Are we running in a processor set?
 528   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
 529     result = find_processors_in_pset(pset, &id_array, &id_length);
 530   } else {
 531     result = find_processors_online(&id_array, &id_length);
 532   }
 533   if (result == true) {
 534     if (id_length >= length) {
 535       result = assign_distribution(id_array, id_length, distribution, length);
 536     } else {
 537       result = false;
 538     }
 539   }
 540   if (id_array != NULL) {
 541     FREE_C_HEAP_ARRAY(processorid_t, id_array);
 542   }
 543   return result;
 544 }
 545 
 546 bool os::bind_to_processor(uint processor_id) {
 547   // We assume that a processorid_t can be stored in a uint.
 548   assert(sizeof(uint) == sizeof(processorid_t),
 549          "can't convert uint to processorid_t");
 550   int bind_result =
 551     processor_bind(P_LWPID,                       // bind LWP.
 552                    P_MYID,                        // bind current LWP.
 553                    (processorid_t) processor_id,  // id.
 554                    NULL);                         // don't return old binding.
 555   return (bind_result == 0);
 556 }
 557 
 558 bool os::getenv(const char* name, char* buffer, int len) {
 559   char* val = ::getenv(name);
 560   if (val == NULL || strlen(val) + 1 > len) {
 561     if (len > 0) buffer[0] = 0; // return a null string
 562     return false;
 563   }
 564   strcpy(buffer, val);
 565   return true;
 566 }
 567 
 568 
// Return true if the process is running with elevated privileges, i.e. the
// effective user/group ids differ from the real ones (e.g. setuid/setgid).
 570 
 571 bool os::have_special_privileges() {
 572   static bool init = false;
 573   static bool privileges = false;
 574   if (!init) {
 575     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 576     init = true;
 577   }
 578   return privileges;
 579 }
 580 
 581 
 582 void os::init_system_properties_values() {
 583   // The next steps are taken in the product version:
 584   //
 585   // Obtain the JAVA_HOME value from the location of libjvm.so.
 586   // This library should be located at:
 587   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
 588   //
 589   // If "/jre/lib/" appears at the right place in the path, then we
 590   // assume libjvm.so is installed in a JDK and we use this path.
 591   //
 592   // Otherwise exit with message: "Could not create the Java virtual machine."
 593   //
 594   // The following extra steps are taken in the debugging version:
 595   //
 596   // If "/jre/lib/" does NOT appear at the right place in the path
 597   // instead of exit check for $JAVA_HOME environment variable.
 598   //
 599   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
 600   // then we append a fake suffix "hotspot/libjvm.so" to this path so
 601   // it looks like libjvm.so is installed there
 602   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
 603   //
 604   // Otherwise exit.
 605   //
 606   // Important note: if the location of libjvm.so changes this
 607   // code needs to be changed accordingly.
 608 
 609 // Base path of extensions installed on the system.
 610 #define SYS_EXT_DIR     "/usr/jdk/packages"
 611 #define EXTENSIONS_DIR  "/lib/ext"
 612 
 613   char cpu_arch[12];
 614   // Buffer that fits several sprintfs.
  // Note that the space for the colon and the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
         sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(cpu_arch), // invariant ld_library_path (cpu_arch is not filled in yet, so use its full size)
 620          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir
 621   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 622 
 623   // sysclasspath, java_home, dll_dir
 624   {
 625     char *pslash;
 626     os::jvm_path(buf, bufsize);
 627 
 628     // Found the full path to libjvm.so.
 629     // Now cut the path to <java_home>/jre if we can.
 630     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 631     pslash = strrchr(buf, '/');
 632     if (pslash != NULL) {
 633       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 634     }
 635     Arguments::set_dll_dir(buf);
 636 
 637     if (pslash != NULL) {
 638       pslash = strrchr(buf, '/');
 639       if (pslash != NULL) {
 640         *pslash = '\0';          // Get rid of /<arch>.
 641         pslash = strrchr(buf, '/');
 642         if (pslash != NULL) {
 643           *pslash = '\0';        // Get rid of /lib.
 644         }
 645       }
 646     }
 647     Arguments::set_java_home(buf);
 648     set_boot_path('/', ':');
 649   }
 650 
 651   // Where to look for native libraries.
 652   {
 653     // Use dlinfo() to determine the correct java.library.path.
 654     //
 655     // If we're launched by the Java launcher, and the user
 656     // does not set java.library.path explicitly on the commandline,
 657     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
 658     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
 659     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
 660     // /usr/lib), which is exactly what we want.
 661     //
 662     // If the user does set java.library.path, it completely
 663     // overwrites this setting, and always has.
 664     //
 665     // If we're not launched by the Java launcher, we may
 666     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
 667     // settings.  Again, dlinfo does exactly what we want.
 668 
 669     Dl_serinfo     info_sz, *info = &info_sz;
 670     Dl_serpath     *path;
 671     char           *library_path;
 672     char           *common_path = buf;
 673 
 674     // Determine search path count and required buffer size.
 675     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
 676       FREE_C_HEAP_ARRAY(char, buf);
 677       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
 678     }
 679 
 680     // Allocate new buffer and initialize.
 681     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
 682     info->dls_size = info_sz.dls_size;
 683     info->dls_cnt = info_sz.dls_cnt;
 684 
 685     // Obtain search path information.
 686     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
 687       FREE_C_HEAP_ARRAY(char, buf);
 688       FREE_C_HEAP_ARRAY(char, info);
 689       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
 690     }
 691 
 692     path = &info->dls_serpath[0];
 693 
 694     // Note: Due to a legacy implementation, most of the library path
    // is set in the launcher. This was to accommodate linking restrictions
 696     // on legacy Solaris implementations (which are no longer supported).
 697     // Eventually, all the library path setting will be done here.
 698     //
 699     // However, to prevent the proliferation of improperly built native
 700     // libraries, the new path component /usr/jdk/packages is added here.
 701 
 702     // Determine the actual CPU architecture.
 703     sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
 704 #ifdef _LP64
 705     // If we are a 64-bit vm, perform the following translations:
 706     //   sparc   -> sparcv9
 707     //   i386    -> amd64
 708     if (strcmp(cpu_arch, "sparc") == 0) {
 709       strcat(cpu_arch, "v9");
 710     } else if (strcmp(cpu_arch, "i386") == 0) {
 711       strcpy(cpu_arch, "amd64");
 712     }
 713 #endif
 714 
 715     // Construct the invariant part of ld_library_path.
 716     sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
 717 
 718     // Struct size is more than sufficient for the path components obtained
 719     // through the dlinfo() call, so only add additional space for the path
 720     // components explicitly added here.
 721     size_t library_path_size = info->dls_size + strlen(common_path);
 722     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
 723     library_path[0] = '\0';
 724 
 725     // Construct the desired Java library path from the linker's library
 726     // search path.
 727     //
 728     // For compatibility, it is optimal that we insert the additional path
 729     // components specific to the Java VM after those components specified
 730     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
 731     // infrastructure.
 732     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
 733       strcpy(library_path, common_path);
 734     } else {
 735       int inserted = 0;
 736       int i;
 737       for (i = 0; i < info->dls_cnt; i++, path++) {
 738         uint_t flags = path->dls_flags & LA_SER_MASK;
 739         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
 740           strcat(library_path, common_path);
 741           strcat(library_path, os::path_separator());
 742           inserted = 1;
 743         }
 744         strcat(library_path, path->dls_name);
 745         strcat(library_path, os::path_separator());
 746       }
 747       // Eliminate trailing path separator.
 748       library_path[strlen(library_path)-1] = '\0';
 749     }
 750 
 751     // happens before argument parsing - can't use a trace flag
 752     // tty->print_raw("init_system_properties_values: native lib path: ");
 753     // tty->print_raw_cr(library_path);
 754 
 755     // Callee copies into its own buffer.
 756     Arguments::set_library_path(library_path);
 757 
 758     FREE_C_HEAP_ARRAY(char, library_path);
 759     FREE_C_HEAP_ARRAY(char, info);
 760   }
 761 
 762   // Extensions directories.
 763   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
 764   Arguments::set_ext_dirs(buf);
 765 
 766   FREE_C_HEAP_ARRAY(char, buf);
 767 
 768 #undef SYS_EXT_DIR
 769 #undef EXTENSIONS_DIR
 770 }
 771 
 772 void os::breakpoint() {
 773   BREAKPOINT;
 774 }
 775 
 776 bool os::obsolete_option(const JavaVMOption *option) {
 777   if (!strncmp(option->optionString, "-Xt", 3)) {
 778     return true;
 779   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
 780     return true;
 781   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
 782     return true;
 783   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
 784     return true;
 785   }
 786   return false;
 787 }
 788 
 789 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
 790   address  stackStart  = (address)thread->stack_base();
 791   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
 792   if (sp < stackStart && sp >= stackEnd) return true;
 793   return false;
 794 }
 795 
 796 extern "C" void breakpoint() {
 797   // use debugger to set breakpoint here
 798 }
 799 
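// Thread id of the thread the VM was started on; used by create_main_thread().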
 800 static thread_t main_thread;
 801 
 802 // Thread start routine for all new Java threads
 803 extern "C" void* java_start(void* thread_addr) {
 804   // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
 806   // cache lines. The threads can be either from the same JVM instance, or
 807   // from different JVM instances. The benefit is especially true for
 808   // processors with hyperthreading technology.
 809   static int counter = 0;
 810   int pid = os::current_process_id();
 811   alloca(((pid ^ counter++) & 7) * 128);
 812 
 814   Thread* thread = (Thread*)thread_addr;
 815   OSThread* osthr = thread->osthread();
 816 
 817   osthr->set_lwp_id(_lwp_self());  // Store lwp in case we are bound
 818   thread->_schedctl = (void *) schedctl_init();
 819 
 820   if (UseNUMA) {
 821     int lgrp_id = os::numa_get_group_id();
 822     if (lgrp_id != -1) {
 823       thread->set_lgrp_id(lgrp_id);
 824     }
 825   }
 826 
 827   // If the creator called set priority before we started,
 828   // we need to call set_native_priority now that we have an lwp.
 829   // We used to get the priority from thr_getprio (we called
 830   // thr_setprio way back in create_thread) and pass it to
 831   // set_native_priority, but Solaris scales the priority
 832   // in java_to_os_priority, so when we read it back here,
 833   // we pass trash to set_native_priority instead of what's
 834   // in java_to_os_priority. So we save the native priority
 835   // in the osThread and recall it here.
 836 
 837   if (osthr->thread_id() != -1) {
 838     if (UseThreadPriorities) {
 839       int prio = osthr->native_priority();
 840       if (ThreadPriorityVerbose) {
 841         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
 842                       INTPTR_FORMAT ", setting priority: %d\n",
 843                       osthr->thread_id(), osthr->lwp_id(), prio);
 844       }
 845       os::set_native_priority(thread, prio);
 846     }
 847   } else if (ThreadPriorityVerbose) {
 848     warning("Can't set priority in _start routine, thread id hasn't been set\n");
 849   }
 850 
 851   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 852 
 853   // initialize signal mask for this thread
 854   os::Solaris::hotspot_sigmask(thread);
 855 
 856   thread->run();
 857 
 858   // One less thread is executing
 859   // When the VMThread gets here, the main thread may have already exited
 860   // which frees the CodeHeap containing the Atomic::dec code
 861   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 862     Atomic::dec(&os::Solaris::_os_thread_count);
 863   }
 864 
 865   if (UseDetachedThreads) {
 866     thr_exit(NULL);
 867     ShouldNotReachHere();
 868   }
 869   return NULL;
 870 }
 871 
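// Wrap an already-running Solaris thread in an OSThread.  Used below for
// threads not created by the VM itself (the attached and main thread cases).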
 872 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
 873   // Allocate the OSThread object
 874   OSThread* osthread = new OSThread(NULL, NULL);
 875   if (osthread == NULL) return NULL;
 876 
 877   // Store info on the Solaris thread into the OSThread
 878   osthread->set_thread_id(thread_id);
 879   osthread->set_lwp_id(_lwp_self());
 880   thread->_schedctl = (void *) schedctl_init();
 881 
 882   if (UseNUMA) {
 883     int lgrp_id = os::numa_get_group_id();
 884     if (lgrp_id != -1) {
 885       thread->set_lgrp_id(lgrp_id);
 886     }
 887   }
 888 
 889   if (ThreadPriorityVerbose) {
 890     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
 891                   osthread->thread_id(), osthread->lwp_id());
 892   }
 893 
 894   // Initial thread state is INITIALIZED, not SUSPENDED
 895   osthread->set_state(INITIALIZED);
 896 
 897   return osthread;
 898 }
 899 
 900 void os::Solaris::hotspot_sigmask(Thread* thread) {
 901   //Save caller's signal mask
 902   sigset_t sigmask;
 903   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
 904   OSThread *osthread = thread->osthread();
 905   osthread->set_caller_sigmask(sigmask);
 906 
 907   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
 908   if (!ReduceSignalUsage) {
 909     if (thread->is_VM_thread()) {
 910       // Only the VM thread handles BREAK_SIGNAL ...
 911       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
 912     } else {
 913       // ... all other threads block BREAK_SIGNAL
 914       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
 915       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
 916     }
 917   }
 918 }
 919 
 920 bool os::create_attached_thread(JavaThread* thread) {
 921 #ifdef ASSERT
 922   thread->verify_not_published();
 923 #endif
 924   OSThread* osthread = create_os_thread(thread, thr_self());
 925   if (osthread == NULL) {
 926     return false;
 927   }
 928 
 929   // Initial thread state is RUNNABLE
 930   osthread->set_state(RUNNABLE);
 931   thread->set_osthread(osthread);
 932 
 933   // initialize signal mask for this thread
 934   // and save the caller's signal mask
 935   os::Solaris::hotspot_sigmask(thread);
 936 
 937   return true;
 938 }
 939 
 940 bool os::create_main_thread(JavaThread* thread) {
 941 #ifdef ASSERT
 942   thread->verify_not_published();
 943 #endif
 944   if (_starting_thread == NULL) {
 945     _starting_thread = create_os_thread(thread, main_thread);
 946     if (_starting_thread == NULL) {
 947       return false;
 948     }
 949   }
 950 
  // The primordial thread is runnable from the start
 952   _starting_thread->set_state(RUNNABLE);
 953 
 954   thread->set_osthread(_starting_thread);
 955 
 956   // initialize signal mask for this thread
 957   // and save the caller's signal mask
 958   os::Solaris::hotspot_sigmask(thread);
 959 
 960   return true;
 961 }
 962 
 963 
 964 bool os::create_thread(Thread* thread, ThreadType thr_type,
 965                        size_t stack_size) {
 966   // Allocate the OSThread object
 967   OSThread* osthread = new OSThread(NULL, NULL);
 968   if (osthread == NULL) {
 969     return false;
 970   }
 971 
 972   if (ThreadPriorityVerbose) {
 973     char *thrtyp;
 974     switch (thr_type) {
 975     case vm_thread:
 976       thrtyp = (char *)"vm";
 977       break;
 978     case cgc_thread:
 979       thrtyp = (char *)"cgc";
 980       break;
 981     case pgc_thread:
 982       thrtyp = (char *)"pgc";
 983       break;
 984     case java_thread:
 985       thrtyp = (char *)"java";
 986       break;
 987     case compiler_thread:
 988       thrtyp = (char *)"compiler";
 989       break;
 990     case watcher_thread:
 991       thrtyp = (char *)"watcher";
 992       break;
 993     default:
 994       thrtyp = (char *)"unknown";
 995       break;
 996     }
 997     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
 998   }
 999 
1000   // Calculate stack size if it's not specified by caller.
1001   if (stack_size == 0) {
    // The default stack size is 1M (2M for LP64).
1003     stack_size = (BytesPerWord >> 2) * K * K;
1004 
1005     switch (thr_type) {
1006     case os::java_thread:
1007       // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
1008       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
1009       break;
1010     case os::compiler_thread:
1011       if (CompilerThreadStackSize > 0) {
1012         stack_size = (size_t)(CompilerThreadStackSize * K);
1013         break;
1014       } // else fall through:
1015         // use VMThreadStackSize if CompilerThreadStackSize is not defined
1016     case os::vm_thread:
1017     case os::pgc_thread:
1018     case os::cgc_thread:
1019     case os::watcher_thread:
1020       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1021       break;
1022     }
1023   }
1024   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1025 
1026   // Initial state is ALLOCATED but not INITIALIZED
1027   osthread->set_state(ALLOCATED);
1028 
1029   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
1030     // We got lots of threads. Check if we still have some address space left.
    // There needs to be some unreserved address space left; we check by
    // trying to reserve the VirtualMemoryBangSize below.
1033     const size_t VirtualMemoryBangSize = 20*K*K;
1034     char* mem = os::reserve_memory(VirtualMemoryBangSize);
1035     if (mem == NULL) {
1036       delete osthread;
1037       return false;
1038     } else {
1039       // Release the memory again
1040       os::release_memory(mem, VirtualMemoryBangSize);
1041     }
1042   }
1043 
1044   // Setup osthread because the child thread may need it.
1045   thread->set_osthread(osthread);
1046 
1047   // Create the Solaris thread
1048   thread_t tid = 0;
1049   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
1050   int      status;
1051 
1052   // Mark that we don't have an lwp or thread id yet.
1053   // In case we attempt to set the priority before the thread starts.
1054   osthread->set_lwp_id(-1);
1055   osthread->set_thread_id(-1);
1056 
1057   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1058   if (status != 0) {
1059     if (PrintMiscellaneous && (Verbose || WizardMode)) {
1060       perror("os::create_thread");
1061     }
1062     thread->set_osthread(NULL);
1063     // Need to clean up stuff we've allocated so far
1064     delete osthread;
1065     return false;
1066   }
1067 
1068   Atomic::inc(&os::Solaris::_os_thread_count);
1069 
1070   // Store info on the Solaris thread into the OSThread
1071   osthread->set_thread_id(tid);
1072 
1073   // Remember that we created this thread so we can set priority on it
1074   osthread->set_vm_created();
1075 
1076   // Initial thread state is INITIALIZED, not SUSPENDED
1077   osthread->set_state(INITIALIZED);
1078 
1079   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1080   return true;
1081 }
1082 
// SIGJVM1 and SIGJVM2 are defined on Solaris 10 and later.  Defining them here
// allows builds on earlier versions of Solaris to take advantage of the newly
// reserved Solaris JVM signals.  With SIGJVM1/SIGJVM2 available, INTERRUPT_SIGNAL
// becomes SIGJVM1, ASYNC_SIGNAL becomes SIGJVM2, and -XX:+UseAltSigs does
// nothing since these signals should have no conflicts.
//
1088 #if !defined(SIGJVM1)
1089   #define SIGJVM1 39
1090   #define SIGJVM2 40
1091 #endif
1092 
1093 debug_only(static bool signal_sets_initialized = false);
1094 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1095 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1096 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1097 
1098 bool os::Solaris::is_sig_ignored(int sig) {
1099   struct sigaction oact;
1100   sigaction(sig, (struct sigaction*)NULL, &oact);
1101   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
1102                                  : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
1103   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
1104     return true;
1105   } else {
1106     return false;
1107   }
1108 }
1109 
1110 // Note: SIGRTMIN is a macro that calls sysconf() so it will
1111 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime
1112 static bool isJVM1available() {
1113   return SIGJVM1 < SIGRTMIN;
1114 }
1115 
1116 void os::Solaris::signal_sets_init() {
1117   // Should also have an assertion stating we are still single-threaded.
1118   assert(!signal_sets_initialized, "Already initialized");
1119   // Fill in signals that are necessarily unblocked for all threads in
1120   // the VM. Currently, we unblock the following signals:
1121   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1122   //                         by -Xrs (=ReduceSignalUsage));
1123   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1124   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1125   // the dispositions or masks wrt these signals.
1126   // Programs embedding the VM that want to use the above signals for their
1127   // own purposes must, at this time, use the "-Xrs" option to prevent
1128   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1129   // (See bug 4345157, and other related bugs).
1130   // In reality, though, unblocking these signals is really a nop, since
1131   // these signals are not blocked by default.
1132   sigemptyset(&unblocked_sigs);
1133   sigemptyset(&allowdebug_blocked_sigs);
1134   sigaddset(&unblocked_sigs, SIGILL);
1135   sigaddset(&unblocked_sigs, SIGSEGV);
1136   sigaddset(&unblocked_sigs, SIGBUS);
1137   sigaddset(&unblocked_sigs, SIGFPE);
1138 
  if (isJVM1available()) {
1140     os::Solaris::set_SIGinterrupt(SIGJVM1);
1141     os::Solaris::set_SIGasync(SIGJVM2);
1142   } else if (UseAltSigs) {
1143     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1144     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1145   } else {
1146     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1147     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1148   }
1149 
1150   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1151   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1152 
1153   if (!ReduceSignalUsage) {
1154     if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1155       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1156       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1157     }
1158     if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1159       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1160       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1161     }
1162     if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1163       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1164       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1165     }
1166   }
1167   // Fill in signals that are blocked by all but the VM thread.
1168   sigemptyset(&vm_sigs);
1169   if (!ReduceSignalUsage) {
1170     sigaddset(&vm_sigs, BREAK_SIGNAL);
1171   }
1172   debug_only(signal_sets_initialized = true);
1173 
1174   // For diagnostics only used in run_periodic_checks
1175   sigemptyset(&check_signal_done);
1176 }
1177 
1178 // These are signals that are unblocked while a thread is running Java.
1179 // (For some reason, they get blocked by default.)
1180 sigset_t* os::Solaris::unblocked_signals() {
1181   assert(signal_sets_initialized, "Not initialized");
1182   return &unblocked_sigs;
1183 }
1184 
1185 // These are the signals that are blocked while a (non-VM) thread is
1186 // running Java. Only the VM thread handles these signals.
1187 sigset_t* os::Solaris::vm_signals() {
1188   assert(signal_sets_initialized, "Not initialized");
1189   return &vm_sigs;
1190 }
1191 
1192 // These are signals that are blocked during cond_wait to allow debugger in
1193 sigset_t* os::Solaris::allowdebug_blocked_signals() {
1194   assert(signal_sets_initialized, "Not initialized");
1195   return &allowdebug_blocked_sigs;
1196 }
1197 
1198 
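// Installed via std::set_terminate() in os::initialize_thread() below; routes
// uncaught C++ exceptions into the normal VM error reporting path.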
1199 void _handle_uncaught_cxx_exception() {
1200   VMError err("An uncaught C++ exception");
1201   err.report_and_die();
1202 }
1203 
1204 
1205 // First crack at OS-specific initialization, from inside the new thread.
1206 void os::initialize_thread(Thread* thr) {
1207   int r = thr_main();
1208   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
1209   if (r) {
1210     JavaThread* jt = (JavaThread *)thr;
1211     assert(jt != NULL, "Sanity check");
1212     size_t stack_size;
1213     address base = jt->stack_base();
1214     if (Arguments::created_by_java_launcher()) {
1215       // Use 2MB to allow for Solaris 7 64 bit mode.
1216       stack_size = JavaThread::stack_size_at_create() == 0
1217         ? 2048*K : JavaThread::stack_size_at_create();
1218 
1219       // There are rare cases when we may have already used more than
1220       // the basic stack size allotment before this method is invoked.
1221       // Attempt to allow for a normally sized java_stack.
1222       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1223       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1224     } else {
1225       // 6269555: If we were not created by a Java launcher, i.e. if we are
1226       // running embedded in a native application, treat the primordial thread
1227       // as much like a native attached thread as possible.  This means using
1228       // the current stack size from thr_stksegment(), unless it is too large
1229       // to reliably setup guard pages.  A reasonable max size is 8MB.
1230       size_t current_size = current_stack_size();
1231       // This should never happen, but just in case....
1232       if (current_size == 0) current_size = 2 * K * K;
1233       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1234     }
    address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
1236     stack_size = (size_t)(base - bottom);
1237 
1238     assert(stack_size > 0, "Stack size calculation problem");
1239 
1240     if (stack_size > jt->stack_size()) {
1241 #ifndef PRODUCT
1242       struct rlimit limits;
1243       getrlimit(RLIMIT_STACK, &limits);
1244       size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1245       assert(size >= jt->stack_size(), "Stack size problem in main thread");
1246 #endif
      tty->print_cr("Stack size of " SIZE_FORMAT " Kb exceeds current limit of " SIZE_FORMAT " Kb.\n"
1248                     "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1249                     "See limit(1) to increase the stack size limit.",
1250                     stack_size / K, jt->stack_size() / K);
1251       vm_exit(1);
1252     }
1253     assert(jt->stack_size() >= stack_size,
1254            "Attempt to map more stack than was allocated");
1255     jt->set_stack_size(stack_size);
1256   }
1257 
1258   // With the T2 libthread (T1 is no longer supported) threads are always bound
1259   // and we use stackbanging in all cases.
1260 
1261   os::Solaris::init_thread_fpu_state();
1262   std::set_terminate(_handle_uncaught_cxx_exception);
1263 }
1264 
1265 
1266 
1267 // Free Solaris resources related to the OSThread
1268 void os::free_thread(OSThread* osthread) {
1269   assert(osthread != NULL, "os::free_thread but osthread not set");
1270 
1271 
1272   // We are told to free resources of the argument thread,
1273   // but we can only really operate on the current thread.
1274   // The main thread must take the VMThread down synchronously
1275   // before the main thread exits and frees up CodeHeap
1276   guarantee((Thread::current()->osthread() == osthread
1277              || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1278   if (Thread::current()->osthread() == osthread) {
1279     // Restore caller's signal mask
1280     sigset_t sigmask = osthread->caller_sigmask();
1281     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1282   }
1283   delete osthread;
1284 }
1285 
1286 void os::pd_start_thread(Thread* thread) {
1287   int status = thr_continue(thread->osthread()->thread_id());
1288   assert_status(status == 0, status, "thr_continue failed");
1289 }
1290 
1291 
1292 intx os::current_thread_id() {
1293   return (intx)thr_self();
1294 }
1295 
1296 static pid_t _initial_pid = 0;
1297 
1298 int os::current_process_id() {
1299   return (int)(_initial_pid ? _initial_pid : getpid());
1300 }
1301 
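// Create the Solaris TSD key that backs the VM's thread-local storage slot;
// the returned index is what thread_local_storage_at_put()/at() operate on.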
1302 int os::allocate_thread_local_storage() {
1303   // %%%       in Win32 this allocates a memory segment pointed to by a
1304   //           register.  Dan Stein can implement a similar feature in
1305   //           Solaris.  Alternatively, the VM can do the same thing
1306   //           explicitly: malloc some storage and keep the pointer in a
1307   //           register (which is part of the thread's context) (or keep it
1308   //           in TLS).
1309   // %%%       In current versions of Solaris, thr_self and TSD can
1310   //           be accessed via short sequences of displaced indirections.
1311   //           The value of thr_self is available as %g7(36).
1312   //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
1313   //           assuming that the current thread already has a value bound to k.
1314   //           It may be worth experimenting with such access patterns,
1315   //           and later having the parameters formally exported from a Solaris
1316   //           interface.  I think, however, that it will be faster to
1317   //           maintain the invariant that %g2 always contains the
1318   //           JavaThread in Java code, and have stubs simply
1319   //           treat %g2 as a caller-save register, preserving it in a %lN.
1320   thread_key_t tk;
1321   if (thr_keycreate(&tk, NULL)) {
1322     fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
1323                   "(%s)", strerror(errno)));
1324   }
1325   return int(tk);
1326 }
1327 
1328 void os::free_thread_local_storage(int index) {
1329   // %%% don't think we need anything here
1330   // if (pthread_key_delete((pthread_key_t) tk)) {
1331   //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
1332   // }
1333 }
1334 
1335 // libthread allocate for tsd_common is a version specific
1336 // small number - point is NO swap space available
1337 #define SMALLINT 32
1338 void os::thread_local_storage_at_put(int index, void* value) {
1339   // %%% this is used only in threadLocalStorage.cpp
1340   if (thr_setspecific((thread_key_t)index, value)) {
1341     if (errno == ENOMEM) {
1342       vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
1343                             "thr_setspecific: out of swap space");
1344     } else {
1345       fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1346                     "(%s)", strerror(errno)));
1347     }
1348   } else {
1349     ThreadLocalStorage::set_thread_in_slot((Thread *) value);
1350   }
1351 }
1352 
1353 // This function could be called before TLS is initialized, for example, when
1354 // VM receives an async signal or when VM causes a fatal error during
1355 // initialization. Return NULL if thr_getspecific() fails.
1356 void* os::thread_local_storage_at(int index) {
1357   // %%% this is used only in threadLocalStorage.cpp
1358   void* r = NULL;
1359   return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1360 }
1361 
1362 
1363 // gethrtime() should be monotonic according to the documentation,
1364 // but some virtualized platforms are known to break this guarantee.
1365 // getTimeNanos() must be guaranteed not to move backwards, so we
1366 // are forced to add a check here.
1367 inline hrtime_t getTimeNanos() {
1368   const hrtime_t now = gethrtime();
1369   const hrtime_t prev = max_hrtime;
1370   if (now <= prev) {
1371     return prev;   // same or retrograde time;
1372   }
1373   const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1374   assert(obsv >= prev, "invariant");   // Monotonicity
1375   // If the CAS succeeded then we're done and return "now".
1376   // If the CAS failed and the observed value "obsv" is >= now then
  // we should return "obsv".  If the CAS failed and now > obsv > prev then
1378   // some other thread raced this thread and installed a new value, in which case
1379   // we could either (a) retry the entire operation, (b) retry trying to install now
1380   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1381   // we might discard a higher "now" value in deference to a slightly lower but freshly
1382   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1383   // to (a) or (b) -- and greatly reduces coherence traffic.
1384   // We might also condition (c) on the magnitude of the delta between obsv and now.
1385   // Avoiding excessive CAS operations to hot RW locations is critical.
1386   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1387   return (prev == obsv) ? now : obsv;
1388 }
1389 
1390 // Time since start-up in seconds to a fine granularity.
1391 // Used by VMSelfDestructTimer and the MemProfiler.
1392 double os::elapsedTime() {
1393   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1394 }
1395 
1396 jlong os::elapsed_counter() {
1397   return (jlong)(getTimeNanos() - first_hrtime);
1398 }
1399 
1400 jlong os::elapsed_frequency() {
1401   return hrtime_hz;
1402 }
1403 
1404 // Return the real, user, and system times in seconds from an
1405 // arbitrary fixed point in the past.
1406 bool os::getTimesSecs(double* process_real_time,
1407                       double* process_user_time,
1408                       double* process_system_time) {
1409   struct tms ticks;
1410   clock_t real_ticks = times(&ticks);
1411 
1412   if (real_ticks == (clock_t) (-1)) {
1413     return false;
1414   } else {
1415     double ticks_per_second = (double) clock_tics_per_sec;
1416     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1417     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1418     // For consistency return the real time from getTimeNanos()
1419     // converted to seconds.
1420     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1421 
1422     return true;
1423   }
1424 }
1425 
1426 bool os::supports_vtime() { return true; }
1427 
1428 bool os::enable_vtime() {
1429   int fd = ::open("/proc/self/ctl", O_WRONLY);
1430   if (fd == -1) {
1431     return false;
1432   }
1433 
1434   long cmd[] = { PCSET, PR_MSACCT };
1435   int res = ::write(fd, cmd, sizeof(long) * 2);
1436   ::close(fd);
1437   if (res != sizeof(long) * 2) {
1438     return false;
1439   }
1440   return true;
1441 }
1442 
1443 bool os::vtime_enabled() {
1444   int fd = ::open("/proc/self/status", O_RDONLY);
1445   if (fd == -1) {
1446     return false;
1447   }
1448 
1449   pstatus_t status;
1450   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1451   ::close(fd);
1452   if (res != sizeof(pstatus_t)) {
1453     return false;
1454   }
1455   return status.pr_flags & PR_MSACCT;
1456 }
1457 
1458 double os::elapsedVTime() {
1459   return (double)gethrvtime() / (double)hrtime_hz;
1460 }
1461 
1462 // Used internally for comparisons only
1463 // getTimeMillis is guaranteed not to move backwards on Solaris
1464 jlong getTimeMillis() {
1465   jlong nanotime = getTimeNanos();
1466   return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1467 }
1468 
1469 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1470 jlong os::javaTimeMillis() {
1471   timeval t;
1472   if (gettimeofday(&t, NULL) == -1) {
1473     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1474   }
1475   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1476 }
1477 
1478 jlong os::javaTimeNanos() {
1479   return (jlong)getTimeNanos();
1480 }
1481 
1482 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1483   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1484   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1485   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1486   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1487 }
1488 
1489 char * os::local_time_string(char *buf, size_t buflen) {
1490   struct tm t;
1491   time_t long_time;
1492   time(&long_time);
1493   localtime_r(&long_time, &t);
1494   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1495                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1496                t.tm_hour, t.tm_min, t.tm_sec);
1497   return buf;
1498 }
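
// Usage sketch (illustrative only, not part of the VM logic):
//
//   char buf[32];
//   tty->print_cr("%s", os::local_time_string(buf, sizeof(buf)));
//   // prints e.g. "2014-07-01 13:05:09"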
1499 
1500 // Note: os::shutdown() might be called very early during initialization, or
1501 // called from signal handler. Before adding something to os::shutdown(), make
1502 // sure it is async-safe and can handle partially initialized VM.
1503 void os::shutdown() {
1504 
1505   // allow PerfMemory to attempt cleanup of any persistent resources
1506   perfMemory_exit();
1507 
1508   // needs to remove object in file system
1509   AttachListener::abort();
1510 
1511   // flush buffered output, finish log files
1512   ostream_abort();
1513 
1514   // Check for abort hook
1515   abort_hook_t abort_hook = Arguments::abort_hook();
1516   if (abort_hook != NULL) {
1517     abort_hook();
1518   }
1519 }
1520 
1521 // Note: os::abort() might be called very early during initialization, or
1522 // called from signal handler. Before adding something to os::abort(), make
1523 // sure it is async-safe and can handle partially initialized VM.
1524 void os::abort(bool dump_core) {
1525   os::shutdown();
1526   if (dump_core) {
1527 #ifndef PRODUCT
1528     fdStream out(defaultStream::output_fd());
1529     out.print_raw("Current thread is ");
1530     char buf[16];
1531     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1532     out.print_raw_cr(buf);
1533     out.print_raw_cr("Dumping core ...");
1534 #endif
1535     ::abort(); // dump core (for debugging)
1536   }
1537 
1538   ::exit(1);
1539 }
1540 
1541 // Die immediately, no exit hook, no abort hook, no cleanup.
1542 void os::die() {
1543   ::abort(); // dump core (for debugging)
1544 }
1545 
1546 // DLL functions
1547 
1548 const char* os::dll_file_extension() { return ".so"; }
1549 
1550 // This must be hardcoded because it's the system's temporary
1551 // directory, not the java application's temp directory (java.io.tmpdir).
1552 const char* os::get_temp_directory() { return "/tmp"; }
1553 
1554 static bool file_exists(const char* filename) {
1555   struct stat statbuf;
1556   if (filename == NULL || strlen(filename) == 0) {
1557     return false;
1558   }
1559   return os::stat(filename, &statbuf) == 0;
1560 }
1561 
1562 bool os::dll_build_name(char* buffer, size_t buflen,
1563                         const char* pname, const char* fname) {
1564   bool retval = false;
1565   const size_t pnamelen = pname ? strlen(pname) : 0;
1566 
1567   // Return error on buffer overflow.
1568   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1569     return retval;
1570   }
1571 
1572   if (pnamelen == 0) {
1573     snprintf(buffer, buflen, "lib%s.so", fname);
1574     retval = true;
1575   } else if (strchr(pname, *os::path_separator()) != NULL) {
1576     int n;
1577     char** pelements = split_path(pname, &n);
1578     if (pelements == NULL) {
1579       return false;
1580     }
1581     for (int i = 0; i < n; i++) {
1582       // really shouldn't be NULL but what the heck, check can't hurt
1583       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1584         continue; // skip the empty path values
1585       }
1586       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1587       if (file_exists(buffer)) {
1588         retval = true;
1589         break;
1590       }
1591     }
1592     // release the storage
1593     for (int i = 0; i < n; i++) {
1594       if (pelements[i] != NULL) {
1595         FREE_C_HEAP_ARRAY(char, pelements[i]);
1596       }
1597     }
1598     if (pelements != NULL) {
1599       FREE_C_HEAP_ARRAY(char*, pelements);
1600     }
1601   } else {
1602     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1603     retval = true;
1604   }
1605   return retval;
1606 }
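
// Illustrative sketch of the naming behavior above (not called by the VM; the
// paths and library names below are hypothetical):
//
//   char buf[MAXPATHLEN];
//   // empty pname: buffer becomes "libverify.so"
//   os::dll_build_name(buf, sizeof(buf), NULL, "verify");
//   // pname is a search path: each element is probed for "<dir>/libjvm.so"
//   // and the first existing file wins
//   os::dll_build_name(buf, sizeof(buf), "/usr/lib:/opt/lib", "jvm");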
1607 
1608 // check if addr is inside libjvm.so
1609 bool os::address_is_in_vm(address addr) {
1610   static address libjvm_base_addr;
1611   Dl_info dlinfo;
1612 
1613   if (libjvm_base_addr == NULL) {
1614     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1615       libjvm_base_addr = (address)dlinfo.dli_fbase;
1616     }
1617     assert(libjvm_base_addr != NULL, "Cannot obtain base address for libjvm");
1618   }
1619 
1620   if (dladdr((void *)addr, &dlinfo) != 0) {
1621     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1622   }
1623 
1624   return false;
1625 }
1626 
1627 typedef int (*dladdr1_func_type)(void *, Dl_info *, void **, int);
1628 static dladdr1_func_type dladdr1_func = NULL;
1629 
1630 bool os::dll_address_to_function_name(address addr, char *buf,
1631                                       int buflen, int * offset) {
1632   // buf is not optional, but offset is optional
1633   assert(buf != NULL, "sanity check");
1634 
1635   Dl_info dlinfo;
1636 
1637   // dladdr1_func was initialized in os::init()
1638   if (dladdr1_func != NULL) {
1639     // yes, we have dladdr1
1640 
1641     // Support for dladdr1 is checked at runtime; it may be
1642     // available even if the vm is built on a machine that does
1643     // not have dladdr1 support.  Make sure there is a value for
1644     // RTLD_DL_SYMENT.
1645 #ifndef RTLD_DL_SYMENT
1646   #define RTLD_DL_SYMENT 1
1647 #endif
1648 #ifdef _LP64
1649     Elf64_Sym * info;
1650 #else
1651     Elf32_Sym * info;
1652 #endif
1653     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1654                      RTLD_DL_SYMENT) != 0) {
1655       // see if we have a matching symbol that covers our address
1656       if (dlinfo.dli_saddr != NULL &&
1657           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1658         if (dlinfo.dli_sname != NULL) {
1659           if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1660             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1661           }
1662           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1663           return true;
1664         }
1665       }
1666       // no matching symbol so try for just file info
1667       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1668         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1669                             buf, buflen, offset, dlinfo.dli_fname)) {
1670           return true;
1671         }
1672       }
1673     }
1674     buf[0] = '\0';
1675     if (offset != NULL) *offset  = -1;
1676     return false;
1677   }
1678 
1679   // no, only dladdr is available
1680   if (dladdr((void *)addr, &dlinfo) != 0) {
1681     // see if we have a matching symbol
1682     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1683       if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1684         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1685       }
1686       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1687       return true;
1688     }
1689     // no matching symbol so try for just file info
1690     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1691       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1692                           buf, buflen, offset, dlinfo.dli_fname)) {
1693         return true;
1694       }
1695     }
1696   }
1697   buf[0] = '\0';
1698   if (offset != NULL) *offset  = -1;
1699   return false;
1700 }
1701 
1702 bool os::dll_address_to_library_name(address addr, char* buf,
1703                                      int buflen, int* offset) {
1704   // buf is not optional, but offset is optional
1705   assert(buf != NULL, "sanity check");
1706 
1707   Dl_info dlinfo;
1708 
1709   if (dladdr((void*)addr, &dlinfo) != 0) {
1710     if (dlinfo.dli_fname != NULL) {
1711       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1712     }
1713     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1714       *offset = addr - (address)dlinfo.dli_fbase;
1715     }
1716     return true;
1717   }
1718 
1719   buf[0] = '\0';
1720   if (offset) *offset = -1;
1721   return false;
1722 }
1723 
1724 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1725   Dl_info dli;
1726   // Sanity check?
1727   if (dladdr(CAST_FROM_FN_PTR(void *, os::get_loaded_modules_info), &dli) == 0 ||
1728       dli.dli_fname == NULL) {
1729     return 1;
1730   }
1731 
1732   void * handle = dlopen(dli.dli_fname, RTLD_LAZY);
1733   if (handle == NULL) {
1734     return 1;
1735   }
1736 
1737   Link_map *map;
1738   dlinfo(handle, RTLD_DI_LINKMAP, &map);
1739   if (map == NULL) {
1740     dlclose(handle);
1741     return 1;
1742   }
1743 
1744   while (map->l_prev != NULL) {
1745     map = map->l_prev;
1746   }
1747 
1748   while (map != NULL) {
1749     // Iterate through all map entries and call callback with fields of interest
1750     if (callback(map->l_name, (address)map->l_addr, (address)0, param)) {
1751       dlclose(handle);
1752       return 1;
1753     }
1754     map = map->l_next;
1755   }
1756 
1757   dlclose(handle);
1758   return 0;
1759 }
1760 
1761 int _print_dll_info_cb(const char * name, address base_address, address top_address, void * param) {
1762   outputStream * out = (outputStream *) param;
1763   out->print_cr(PTR_FORMAT " \t%s", base_address, name);
1764   return 0;
1765 }
1766 
1767 void os::print_dll_info(outputStream * st) {
1768   st->print_cr("Dynamic libraries:"); st->flush();
1769   if (get_loaded_modules_info(_print_dll_info_cb, (void *)st)) {
1770     st->print_cr("Error: Cannot print dynamic libraries.");
1771   }
1772 }
1773 
1774 // Loads a .dll/.so and, in case of error, checks whether
1775 // the .dll/.so was built for the same architecture
1776 // that HotSpot is running on.
1777 
1778 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1779   void * result= ::dlopen(filename, RTLD_LAZY);
1780   if (result != NULL) {
1781     // Successful loading
1782     return result;
1783   }
1784 
1785   Elf32_Ehdr elf_head;
1786 
1787   // Read system error message into ebuf
1788   // It may or may not be overwritten below
1789   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1790   ebuf[ebuflen-1]='\0';
1791   int diag_msg_max_length=ebuflen-strlen(ebuf);
1792   char* diag_msg_buf=ebuf+strlen(ebuf);
1793 
1794   if (diag_msg_max_length==0) {
1795     // No more space in ebuf for additional diagnostics message
1796     return NULL;
1797   }
1798 
1799 
1800   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1801 
1802   if (file_descriptor < 0) {
1803     // Can't open library, report dlerror() message
1804     return NULL;
1805   }
1806 
1807   bool failed_to_read_elf_head =
1808     (sizeof(elf_head) !=
1809      (::read(file_descriptor, &elf_head, sizeof(elf_head))));
1810 
1811   ::close(file_descriptor);
1812   if (failed_to_read_elf_head) {
1813     // file i/o error - report dlerror() msg
1814     return NULL;
1815   }
1816 
1817   typedef struct {
1818     Elf32_Half  code;         // Actual value as defined in elf.h
1819     Elf32_Half  compat_class; // Compatibility of archs from the VM's point of view
1820     char        elf_class;    // 32 or 64 bit
1821     char        endianess;    // MSB or LSB
1822     char*       name;         // String representation
1823   } arch_t;
1824 
1825   static const arch_t arch_array[]={
1826     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1827     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1828     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1829     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1830     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1831     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1832     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1833     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1834     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1835     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1836   };
1837 
1838 #if  (defined IA32)
1839   static  Elf32_Half running_arch_code=EM_386;
1840 #elif   (defined AMD64)
1841   static  Elf32_Half running_arch_code=EM_X86_64;
1842 #elif  (defined IA64)
1843   static  Elf32_Half running_arch_code=EM_IA_64;
1844 #elif  (defined __sparc) && (defined _LP64)
1845   static  Elf32_Half running_arch_code=EM_SPARCV9;
1846 #elif  (defined __sparc) && (!defined _LP64)
1847   static  Elf32_Half running_arch_code=EM_SPARC;
1848 #elif  (defined __powerpc64__)
1849   static  Elf32_Half running_arch_code=EM_PPC64;
1850 #elif  (defined __powerpc__)
1851   static  Elf32_Half running_arch_code=EM_PPC;
1852 #elif (defined ARM)
1853   static  Elf32_Half running_arch_code=EM_ARM;
1854 #else
1855   #error Method os::dll_load requires that one of the following is defined:\
1856        IA32, AMD64, IA64, __sparc, __powerpc__, __powerpc64__, ARM
1857 #endif
1858 
1859   // Identify the compatibility class for the VM's architecture and the library's architecture
1860   // Obtain string descriptions for architectures
1861 
1862   arch_t lib_arch = {elf_head.e_machine, 0, elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1863   int running_arch_index=-1;
1864 
1865   for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) {
1866     if (running_arch_code == arch_array[i].code) {
1867       running_arch_index    = i;
1868     }
1869     if (lib_arch.code == arch_array[i].code) {
1870       lib_arch.compat_class = arch_array[i].compat_class;
1871       lib_arch.name         = arch_array[i].name;
1872     }
1873   }
1874 
1875   assert(running_arch_index != -1,
1876          "Didn't find running architecture code (running_arch_code) in arch_array");
1877   if (running_arch_index == -1) {
1878     // Even though running architecture detection failed
1879     // we may still continue with reporting dlerror() message
1880     return NULL;
1881   }
1882 
1883   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1884     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1885     return NULL;
1886   }
1887 
1888   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1889     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1890     return NULL;
1891   }
1892 
1893   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1894     if (lib_arch.name!=NULL) {
1895       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1896                  " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1897                  lib_arch.name, arch_array[running_arch_index].name);
1898     } else {
1899       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1900                  " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1901                  lib_arch.code,
1902                  arch_array[running_arch_index].name);
1903     }
1904   }
1905 
1906   return NULL;
1907 }
1908 
1909 void* os::dll_lookup(void* handle, const char* name) {
1910   return dlsym(handle, name);
1911 }
1912 
1913 void* os::get_default_process_handle() {
1914   return (void*)::dlopen(NULL, RTLD_LAZY);
1915 }
1916 
1917 int os::stat(const char *path, struct stat *sbuf) {
1918   char pathbuf[MAX_PATH];
1919   if (strlen(path) > MAX_PATH - 1) {
1920     errno = ENAMETOOLONG;
1921     return -1;
1922   }
1923   os::native_path(strcpy(pathbuf, path));
1924   return ::stat(pathbuf, sbuf);
1925 }
1926 
1927 static bool _print_ascii_file(const char* filename, outputStream* st) {
1928   int fd = ::open(filename, O_RDONLY);
1929   if (fd == -1) {
1930     return false;
1931   }
1932 
1933   char buf[32];
1934   int bytes;
1935   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
1936     st->print_raw(buf, bytes);
1937   }
1938 
1939   ::close(fd);
1940 
1941   return true;
1942 }
1943 
1944 void os::print_os_info_brief(outputStream* st) {
1945   os::Solaris::print_distro_info(st);
1946 
1947   os::Posix::print_uname_info(st);
1948 
1949   os::Solaris::print_libversion_info(st);
1950 }
1951 
1952 void os::print_os_info(outputStream* st) {
1953   st->print("OS:");
1954 
1955   os::Solaris::print_distro_info(st);
1956 
1957   os::Posix::print_uname_info(st);
1958 
1959   os::Solaris::print_libversion_info(st);
1960 
1961   os::Posix::print_rlimit_info(st);
1962 
1963   os::Posix::print_load_average(st);
1964 }
1965 
1966 void os::Solaris::print_distro_info(outputStream* st) {
1967   if (!_print_ascii_file("/etc/release", st)) {
1968     st->print("Solaris");
1969   }
1970   st->cr();
1971 }
1972 
1973 void os::Solaris::print_libversion_info(outputStream* st) {
1974   st->print("  (T2 libthread)");
1975   st->cr();
1976 }
1977 
1978 static bool check_addr0(outputStream* st) {
1979   jboolean status = false;
1980   int fd = ::open("/proc/self/map",O_RDONLY);
1981   if (fd >= 0) {
1982     prmap_t p;
1983     while (::read(fd, &p, sizeof(p)) > 0) {
1984       if (p.pr_vaddr == 0x0) {
1985         st->print("Warning: Address: 0x%x, Size: %dK, ", p.pr_vaddr, p.pr_size/1024);
1986         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
1987         st->print("Access:");
1988         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
1989         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
1990         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
1991         st->cr();
1992         status = true;
1993       }
1994     }
1995     ::close(fd);
1996   }
1997   return status;
1998 }
1999 
2000 void os::pd_print_cpu_info(outputStream* st) {
2001   // Nothing to do for now.
2002 }
2003 
2004 void os::print_memory_info(outputStream* st) {
2005   st->print("Memory:");
2006   st->print(" %dk page", os::vm_page_size()>>10);
2007   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
2008   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
2009   st->cr();
2010   (void) check_addr0(st);
2011 }
2012 
2013 void os::print_siginfo(outputStream* st, void* siginfo) {
2014   const siginfo_t* si = (const siginfo_t*)siginfo;
2015 
2016   os::Posix::print_siginfo_brief(st, si);
2017 
2018   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2019       UseSharedSpaces) {
2020     FileMapInfo* mapinfo = FileMapInfo::current_info();
2021     if (mapinfo->is_in_shared_space(si->si_addr)) {
2022       st->print("\n\nError accessing class data sharing archive."   \
2023                 " Mapped file inaccessible during execution, "      \
2024                 "possible disk/network problem.");
2025     }
2026   }
2027   st->cr();
2028 }
2029 
2030 // Moved out of the signal handling group, because we need them here for
2031 // diagnostic prints.
2032 #define OLDMAXSIGNUM 32
2033 static int Maxsignum = 0;
2034 static int *ourSigFlags = NULL;
2035 
2036 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2037 
2038 int os::Solaris::get_our_sigflags(int sig) {
2039   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2040   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2041   return ourSigFlags[sig];
2042 }
2043 
2044 void os::Solaris::set_our_sigflags(int sig, int flags) {
2045   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2046   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2047   ourSigFlags[sig] = flags;
2048 }
2049 
2050 
2051 static const char* get_signal_handler_name(address handler,
2052                                            char* buf, int buflen) {
2053   int offset;
2054   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2055   if (found) {
2056     // skip directory names
2057     const char *p1, *p2;
2058     p1 = buf;
2059     size_t len = strlen(os::file_separator());
2060     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2061     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2062   } else {
2063     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2064   }
2065   return buf;
2066 }
2067 
2068 static void print_signal_handler(outputStream* st, int sig,
2069                                  char* buf, size_t buflen) {
2070   struct sigaction sa;
2071 
2072   sigaction(sig, NULL, &sa);
2073 
2074   st->print("%s: ", os::exception_name(sig, buf, buflen));
2075 
2076   address handler = (sa.sa_flags & SA_SIGINFO)
2077                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2078                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
2079 
2080   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2081     st->print("SIG_DFL");
2082   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2083     st->print("SIG_IGN");
2084   } else {
2085     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2086   }
2087 
2088   st->print(", sa_mask[0]=");
2089   os::Posix::print_signal_set_short(st, &sa.sa_mask);
2090 
2091   address rh = VMError::get_resetted_sighandler(sig);
2092   // Maybe the handler was reset by VMError?
2093   if (rh != NULL) {
2094     handler = rh;
2095     sa.sa_flags = VMError::get_resetted_sigflags(sig);
2096   }
2097 
2098   st->print(", sa_flags=");
2099   os::Posix::print_sa_flags(st, sa.sa_flags);
2100 
2101   // Check: is it our handler?
2102   if (handler == CAST_FROM_FN_PTR(address, signalHandler) ||
2103       handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
2104     // It is our signal handler
2105     // check for flags
2106     if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
2107       st->print(
2108                 ", flags were changed from " PTR32_FORMAT ", consider using jsig library",
2109                 os::Solaris::get_our_sigflags(sig));
2110     }
2111   }
2112   st->cr();
2113 }
2114 
2115 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2116   st->print_cr("Signal Handlers:");
2117   print_signal_handler(st, SIGSEGV, buf, buflen);
2118   print_signal_handler(st, SIGBUS , buf, buflen);
2119   print_signal_handler(st, SIGFPE , buf, buflen);
2120   print_signal_handler(st, SIGPIPE, buf, buflen);
2121   print_signal_handler(st, SIGXFSZ, buf, buflen);
2122   print_signal_handler(st, SIGILL , buf, buflen);
2123   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2124   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2125   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2126   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2127   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2128   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2129   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2130   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2131 }
2132 
2133 static char saved_jvm_path[MAXPATHLEN] = { 0 };
2134 
2135 // Find the full path to the current module, libjvm.so
2136 void os::jvm_path(char *buf, jint buflen) {
2137   // Error checking.
2138   if (buflen < MAXPATHLEN) {
2139     assert(false, "must use a large-enough buffer");
2140     buf[0] = '\0';
2141     return;
2142   }
2143   // Lazy resolve the path to current module.
2144   if (saved_jvm_path[0] != 0) {
2145     strcpy(buf, saved_jvm_path);
2146     return;
2147   }
2148 
2149   Dl_info dlinfo;
2150   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2151   assert(ret != 0, "cannot locate libjvm");
2152   if (ret != 0 && dlinfo.dli_fname != NULL) {
2153     realpath((char *)dlinfo.dli_fname, buf);
2154   } else {
2155     buf[0] = '\0';
2156     return;
2157   }
2158 
2159   if (Arguments::sun_java_launcher_is_altjvm()) {
2160     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
2161     // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
2162     // If "/jre/lib/" appears at the right place in the string, then
2163     // assume we are installed in a JDK and we're done.  Otherwise, check
2164     // for a JAVA_HOME environment variable and fix up the path so it
2165     // looks like libjvm.so is installed there (append a fake suffix
2166     // hotspot/libjvm.so).
2167     const char *p = buf + strlen(buf) - 1;
2168     for (int count = 0; p > buf && count < 5; ++count) {
2169       for (--p; p > buf && *p != '/'; --p)
2170         /* empty */ ;
2171     }
2172 
2173     if (strncmp(p, "/jre/lib/", 9) != 0) {
2174       // Look for JAVA_HOME in the environment.
2175       char* java_home_var = ::getenv("JAVA_HOME");
2176       if (java_home_var != NULL && java_home_var[0] != 0) {
2177         char cpu_arch[12];
2178         char* jrelib_p;
2179         int   len;
2180         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2181 #ifdef _LP64
2182         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2183         if (strcmp(cpu_arch, "sparc") == 0) {
2184           strcat(cpu_arch, "v9");
2185         } else if (strcmp(cpu_arch, "i386") == 0) {
2186           strcpy(cpu_arch, "amd64");
2187         }
2188 #endif
2189         // Check the current module name "libjvm.so".
2190         p = strrchr(buf, '/');
2191         assert(strstr(p, "/libjvm") == p, "invalid library name");
2192 
2193         realpath(java_home_var, buf);
2194         // determine if this is a legacy image or modules image
2195         // modules image doesn't have "jre" subdirectory
2196         len = strlen(buf);
2197         assert(len < buflen, "Ran out of buffer space");
2198         jrelib_p = buf + len;
2199         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2200         if (0 != access(buf, F_OK)) {
2201           snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2202         }
2203 
2204         if (0 == access(buf, F_OK)) {
2205           // Use current module name "libjvm.so"
2206           len = strlen(buf);
2207           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2208         } else {
2209           // Go back to path of .so
2210           realpath((char *)dlinfo.dli_fname, buf);
2211         }
2212       }
2213     }
2214   }
2215 
2216   strncpy(saved_jvm_path, buf, MAXPATHLEN);
2217   saved_jvm_path[MAXPATHLEN - 1] = '\0';
2218 }
2219 
2220 
2221 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2222   // no prefix required, not even "_"
2223 }
2224 
2225 
2226 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2227   // no suffix required
2228 }
2229 
2230 // This method is a copy of JDK's sysGetLastErrorString
2231 // from src/solaris/hpi/src/system_md.c
2232 
2233 size_t os::lasterror(char *buf, size_t len) {
2234   if (errno == 0)  return 0;
2235 
2236   const char *s = ::strerror(errno);
2237   size_t n = ::strlen(s);
2238   if (n >= len) {
2239     n = len - 1;
2240   }
2241   ::strncpy(buf, s, n);
2242   buf[n] = '\0';
2243   return n;
2244 }
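
// Minimal usage sketch (illustrative only), assuming errno was just set by a
// failed libc call:
//
//   char msg[256];
//   if (os::lasterror(msg, sizeof(msg)) > 0) {
//     warning("operation failed: %s", msg);
//   }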
2245 
2246 
2247 // sun.misc.Signal
2248 
2249 extern "C" {
2250   static void UserHandler(int sig, void *siginfo, void *context) {
2251     // Ctrl-C is pressed during error reporting, likely because the error
2252     // handler fails to abort. Let VM die immediately.
2253     if (sig == SIGINT && is_error_reported()) {
2254       os::die();
2255     }
2256 
2257     os::signal_notify(sig);
2258     // We do not need to reinstate the signal handler each time...
2259   }
2260 }
2261 
2262 void* os::user_handler() {
2263   return CAST_FROM_FN_PTR(void*, UserHandler);
2264 }
2265 
2266 class Semaphore : public StackObj {
2267  public:
2268   Semaphore();
2269   ~Semaphore();
2270   void signal();
2271   void wait();
2272   bool trywait();
2273   bool timedwait(unsigned int sec, int nsec);
2274  private:
2275   sema_t _semaphore;
2276 };
2277 
2278 
2279 Semaphore::Semaphore() {
2280   sema_init(&_semaphore, 0, NULL, NULL);
2281 }
2282 
2283 Semaphore::~Semaphore() {
2284   sema_destroy(&_semaphore);
2285 }
2286 
2287 void Semaphore::signal() {
2288   sema_post(&_semaphore);
2289 }
2290 
2291 void Semaphore::wait() {
2292   sema_wait(&_semaphore);
2293 }
2294 
2295 bool Semaphore::trywait() {
2296   return sema_trywait(&_semaphore) == 0;
2297 }
2298 
2299 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2300   struct timespec ts;
2301   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2302 
2303   while (1) {
2304     int result = sema_timedwait(&_semaphore, &ts);
2305     if (result == 0) {
2306       return true;
2307     } else if (errno == EINTR) {
2308       continue;
2309     } else if (errno == ETIME) {
2310       return false;
2311     } else {
2312       return false;
2313     }
2314   }
2315 }
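
// Usage sketch for the wrapper above (illustrative only, not VM logic):
//
//   Semaphore sem;                      // initial count is 0
//   sem.signal();                       // post once
//   sem.wait();                         // consume a post, blocking if none
//   bool got_it = sem.trywait();        // non-blocking attempt
//   got_it = sem.timedwait(1, 0);       // bounded wait; false on timeout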
2316 
2317 extern "C" {
2318   typedef void (*sa_handler_t)(int);
2319   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2320 }
2321 
2322 void* os::signal(int signal_number, void* handler) {
2323   struct sigaction sigAct, oldSigAct;
2324   sigfillset(&(sigAct.sa_mask));
2325   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2326   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2327 
2328   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
2329     // -1 means registration failed
2330     return (void *)-1;
2331   }
2332 
2333   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2334 }
2335 
2336 void os::signal_raise(int signal_number) {
2337   raise(signal_number);
2338 }
2339 
2340 // The following code was moved from os.cpp to make it platform specific,
2341 // which it is by its very nature.
2342 
2343 // a counter for each possible signal value
2344 static int Sigexit = 0;
2345 static int Maxlibjsigsigs;
2346 static jint *pending_signals = NULL;
2347 static int *preinstalled_sigs = NULL;
2348 static struct sigaction *chainedsigactions = NULL;
2349 static sema_t sig_sem;
2350 typedef int (*version_getting_t)();
2351 version_getting_t os::Solaris::get_libjsig_version = NULL;
2352 static int libjsigversion = 0;
2353 
2354 int os::sigexitnum_pd() {
2355   assert(Sigexit > 0, "signal memory not yet initialized");
2356   return Sigexit;
2357 }
2358 
2359 void os::Solaris::init_signal_mem() {
2360   // Initialize signal structures
2361   Maxsignum = SIGRTMAX;
2362   Sigexit = Maxsignum+1;
2363   assert(Maxsignum >0, "Unable to obtain max signal number");
2364 
2365   Maxlibjsigsigs = Maxsignum;
2366 
2367   // pending_signals has one int per signal
2368   // The additional signal is for SIGEXIT - exit signal to signal_thread
2369   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2370   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2371 
2372   if (UseSignalChaining) {
2373     chainedsigactions = (struct sigaction *)os::malloc(sizeof(struct sigaction)
2374                                                         * (Maxsignum + 1), mtInternal);
2375     memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2376     preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2377     memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2378   }
2379   ourSigFlags = (int*)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2380   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2381 }
2382 
2383 void os::signal_init_pd() {
2384   int ret;
2385 
2386   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2387   assert(ret == 0, "sema_init() failed");
2388 }
2389 
2390 void os::signal_notify(int signal_number) {
2391   int ret;
2392 
2393   Atomic::inc(&pending_signals[signal_number]);
2394   ret = ::sema_post(&sig_sem);
2395   assert(ret == 0, "sema_post() failed");
2396 }
2397 
2398 static int check_pending_signals(bool wait_for_signal) {
2399   int ret;
2400   while (true) {
2401     for (int i = 0; i < Sigexit + 1; i++) {
2402       jint n = pending_signals[i];
2403       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2404         return i;
2405       }
2406     }
2407     if (!wait_for_signal) {
2408       return -1;
2409     }
2410     JavaThread *thread = JavaThread::current();
2411     ThreadBlockInVM tbivm(thread);
2412 
2413     bool threadIsSuspended;
2414     do {
2415       thread->set_suspend_equivalent();
2416       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2417       while ((ret = ::sema_wait(&sig_sem)) == EINTR)
2418         ;
2419       assert(ret == 0, "sema_wait() failed");
2420 
2421       // were we externally suspended while we were waiting?
2422       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2423       if (threadIsSuspended) {
2424         // The semaphore has been incremented, but while we were waiting
2425         // another thread suspended us. We don't want to continue running
2426         // while suspended because that would surprise the thread that
2427         // suspended us.
2428         ret = ::sema_post(&sig_sem);
2429         assert(ret == 0, "sema_post() failed");
2430 
2431         thread->java_suspend_self();
2432       }
2433     } while (threadIsSuspended);
2434   }
2435 }
2436 
2437 int os::signal_lookup() {
2438   return check_pending_signals(false);
2439 }
2440 
2441 int os::signal_wait() {
2442   return check_pending_signals(true);
2443 }
2444 
2445 ////////////////////////////////////////////////////////////////////////////////
2446 // Virtual Memory
2447 
2448 static int page_size = -1;
2449 
2450 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2451 // clear this var if support is not available.
2452 static bool has_map_align = true;
2453 
2454 int os::vm_page_size() {
2455   assert(page_size != -1, "must call os::init");
2456   return page_size;
2457 }
2458 
2459 // Solaris allocates memory by pages.
2460 int os::vm_allocation_granularity() {
2461   assert(page_size != -1, "must call os::init");
2462   return page_size;
2463 }
2464 
2465 static bool recoverable_mmap_error(int err) {
2466   // See if the error is one we can let the caller handle. This
2467   // list of errno values comes from the Solaris mmap(2) man page.
2468   switch (err) {
2469   case EBADF:
2470   case EINVAL:
2471   case ENOTSUP:
2472     // let the caller deal with these errors
2473     return true;
2474 
2475   default:
2476     // Any remaining errors on this OS can cause our reserved mapping
2477     // to be lost. That can cause confusion where different data
2478     // structures think they have the same memory mapped. The worst
2479     // scenario is if both the VM and a library think they have the
2480     // same memory mapped.
2481     return false;
2482   }
2483 }
2484 
2485 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2486                                     int err) {
2487   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2488           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2489           strerror(err), err);
2490 }
2491 
2492 static void warn_fail_commit_memory(char* addr, size_t bytes,
2493                                     size_t alignment_hint, bool exec,
2494                                     int err) {
2495   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2496           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2497           alignment_hint, exec, strerror(err), err);
2498 }
2499 
2500 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2501   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2502   size_t size = bytes;
2503   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2504   if (res != NULL) {
2505     if (UseNUMAInterleaving) {
2506       numa_make_global(addr, bytes);
2507     }
2508     return 0;
2509   }
2510 
2511   int err = errno;  // save errno from mmap() call in mmap_chunk()
2512 
2513   if (!recoverable_mmap_error(err)) {
2514     warn_fail_commit_memory(addr, bytes, exec, err);
2515     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2516   }
2517 
2518   return err;
2519 }
2520 
2521 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2522   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2523 }
2524 
2525 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2526                                   const char* mesg) {
2527   assert(mesg != NULL, "mesg must be specified");
2528   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2529   if (err != 0) {
2530     // the caller wants all commit errors to exit with the specified mesg:
2531     warn_fail_commit_memory(addr, bytes, exec, err);
2532     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2533   }
2534 }
2535 
2536 size_t os::Solaris::page_size_for_alignment(size_t alignment) {
2537   assert(is_size_aligned(alignment, (size_t) vm_page_size()),
2538          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
2539                  alignment, (size_t) vm_page_size()));
2540 
2541   for (int i = 0; _page_sizes[i] != 0; i++) {
2542     if (is_size_aligned(alignment, _page_sizes[i])) {
2543       return _page_sizes[i];
2544     }
2545   }
2546 
2547   return (size_t) vm_page_size();
2548 }
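
// Worked example for the lookup above (assuming _page_sizes holds the
// descending, zero-terminated list {4M, 64K, 8K, 0}): an alignment of 256K is
// not a multiple of 4M but is a multiple of 64K, so 64K is returned; an
// alignment of 16M is a multiple of 4M, so 4M is returned.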
2549 
2550 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2551                                     size_t alignment_hint, bool exec) {
2552   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2553   if (err == 0 && UseLargePages && alignment_hint > 0) {
2554     assert(is_size_aligned(bytes, alignment_hint),
2555            err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint));
2556 
2557     // The syscall memcntl requires an exact page size (see man memcntl for details).
2558     size_t page_size = page_size_for_alignment(alignment_hint);
2559     if (page_size > (size_t) vm_page_size()) {
2560       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2561     }
2562   }
2563   return err;
2564 }
2565 
2566 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2567                           bool exec) {
2568   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2569 }
2570 
2571 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2572                                   size_t alignment_hint, bool exec,
2573                                   const char* mesg) {
2574   assert(mesg != NULL, "mesg must be specified");
2575   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2576   if (err != 0) {
2577     // the caller wants all commit errors to exit with the specified mesg:
2578     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2579     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2580   }
2581 }
2582 
2583 // Uncommit the pages in a specified region.
2584 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2585   if (madvise(addr, bytes, MADV_FREE) < 0) {
2586     debug_only(warning("MADV_FREE failed."));
2587     return;
2588   }
2589 }
2590 
2591 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2592   return os::commit_memory(addr, size, !ExecMem);
2593 }
2594 
2595 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2596   return os::uncommit_memory(addr, size);
2597 }
2598 
2599 // Change the page size in a given range.
2600 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2601   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2602   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2603   if (UseLargePages) {
2604     size_t page_size = Solaris::page_size_for_alignment(alignment_hint);
2605     if (page_size > (size_t) vm_page_size()) {
2606       Solaris::setup_large_pages(addr, bytes, page_size);
2607     }
2608   }
2609 }
2610 
2611 // Tell the OS to make the range local to the first-touching LWP
2612 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2613   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2614   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2615     debug_only(warning("MADV_ACCESS_LWP failed."));
2616   }
2617 }
2618 
2619 // Tell the OS that this range would be accessed from different LWPs.
2620 void os::numa_make_global(char *addr, size_t bytes) {
2621   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2622   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2623     debug_only(warning("MADV_ACCESS_MANY failed."));
2624   }
2625 }
2626 
2627 // Get the number of locality groups.
2628 size_t os::numa_get_groups_num() {
2629   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2630   return n != -1 ? n : 1;
2631 }
2632 
2633 // Get a list of leaf locality groups. A leaf lgroup is a group that
2634 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
2635 // board. An LWP is assigned to one of these groups upon creation.
2636 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2637   if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2638     ids[0] = 0;
2639     return 1;
2640   }
2641   int result_size = 0, top = 1, bottom = 0, cur = 0;
2642   for (int k = 0; k < size; k++) {
2643     int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2644                                    (Solaris::lgrp_id_t*)&ids[top], size - top);
2645     if (r == -1) {
2646       ids[0] = 0;
2647       return 1;
2648     }
2649     if (!r) {
2650       // That's a leaf node.
2651       assert(bottom <= cur, "Sanity check");
2652       // Check if the node has memory
2653       if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2654                                   NULL, 0, LGRP_RSRC_MEM) > 0) {
2655         ids[bottom++] = ids[cur];
2656       }
2657     }
2658     top += r;
2659     cur++;
2660   }
2661   if (bottom == 0) {
2662     // Handle the situation when the OS reports no memory available.
2663     // Assume UMA architecture.
2664     ids[0] = 0;
2665     return 1;
2666   }
2667   return bottom;
2668 }
2669 
2670 // Detect the topology change. Typically happens during CPU plugging-unplugging.
2671 bool os::numa_topology_changed() {
2672   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2673   if (is_stale != -1 && is_stale) {
2674     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2675     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2676     assert(c != 0, "Failure to initialize LGRP API");
2677     Solaris::set_lgrp_cookie(c);
2678     return true;
2679   }
2680   return false;
2681 }
2682 
2683 // Get the group id of the current LWP.
2684 int os::numa_get_group_id() {
2685   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2686   if (lgrp_id == -1) {
2687     return 0;
2688   }
2689   const int size = os::numa_get_groups_num();
2690   int *ids = (int*)alloca(size * sizeof(int));
2691 
2692   // Get the ids of all lgroups with memory; r is the count.
2693   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2694                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2695   if (r <= 0) {
2696     return 0;
2697   }
2698   return ids[os::random() % r];
2699 }
2700 
2701 // Request information about the page.
2702 bool os::get_page_info(char *start, page_info* info) {
2703   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2704   uint64_t addr = (uintptr_t)start;
2705   uint64_t outdata[2];
2706   uint_t validity = 0;
2707 
2708   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2709     return false;
2710   }
2711 
2712   info->size = 0;
2713   info->lgrp_id = -1;
2714 
2715   if ((validity & 1) != 0) {
2716     if ((validity & 2) != 0) {
2717       info->lgrp_id = outdata[0];
2718     }
2719     if ((validity & 4) != 0) {
2720       info->size = outdata[1];
2721     }
2722     return true;
2723   }
2724   return false;
2725 }
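
// Sketch of how the validity bits are interpreted above (as used here): bit 0
// set means the queried address was resolved, bit 1 set means outdata[0] (the
// lgroup id) is valid, bit 2 set means outdata[1] (the page size) is valid.
// E.g. validity == 0x7 fills in both fields, while validity == 0x1 leaves
// size == 0 and lgrp_id == -1 but still returns true.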
2726 
2727 // Scan the pages from start to end until a page different than
2728 // the one described in the info parameter is encountered.
2729 char *os::scan_pages(char *start, char* end, page_info* page_expected,
2730                      page_info* page_found) {
2731   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2732   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2733   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2734   uint_t validity[MAX_MEMINFO_CNT];
2735 
2736   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2737   uint64_t p = (uint64_t)start;
2738   while (p < (uint64_t)end) {
2739     addrs[0] = p;
2740     size_t addrs_count = 1;
2741     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2742       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2743       addrs_count++;
2744     }
2745 
2746     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2747       return NULL;
2748     }
2749 
2750     size_t i = 0;
2751     for (; i < addrs_count; i++) {
2752       if ((validity[i] & 1) != 0) {
2753         if ((validity[i] & 4) != 0) {
2754           if (outdata[types * i + 1] != page_expected->size) {
2755             break;
2756           }
2757         } else if (page_expected->size != 0) {
2758           break;
2759         }
2760 
2761         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2762           if (outdata[types * i] != page_expected->lgrp_id) {
2763             break;
2764           }
2765         }
2766       } else {
2767         return NULL;
2768       }
2769     }
2770 
2771     if (i < addrs_count) {
2772       if ((validity[i] & 2) != 0) {
2773         page_found->lgrp_id = outdata[types * i];
2774       } else {
2775         page_found->lgrp_id = -1;
2776       }
2777       if ((validity[i] & 4) != 0) {
2778         page_found->size = outdata[types * i + 1];
2779       } else {
2780         page_found->size = 0;
2781       }
2782       return (char*)addrs[i];
2783     }
2784 
2785     p = addrs[addrs_count - 1] + page_size;
2786   }
2787   return end;
2788 }
2789 
2790 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2791   size_t size = bytes;
2792   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2793   // uncommitted page. Otherwise, the read/write might succeed if we
2794   // have enough swap space to back the physical page.
2795   return
2796     NULL != Solaris::mmap_chunk(addr, size,
2797                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2798                                 PROT_NONE);
2799 }
2800 
2801 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2802   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2803 
2804   if (b == MAP_FAILED) {
2805     return NULL;
2806   }
2807   return b;
2808 }
2809 
2810 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes,
2811                              size_t alignment_hint, bool fixed) {
2812   char* addr = requested_addr;
2813   int flags = MAP_PRIVATE | MAP_NORESERVE;
2814 
2815   assert(!(fixed && (alignment_hint > 0)),
2816          "alignment hint meaningless with fixed mmap");
2817 
2818   if (fixed) {
2819     flags |= MAP_FIXED;
2820   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
2821     flags |= MAP_ALIGN;
2822     addr = (char*) alignment_hint;
2823   }
2824 
2825   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2826   // uncommitted page. Otherwise, the read/write might succeed if we
2827   // have enough swap space to back the physical page.
2828   return mmap_chunk(addr, bytes, flags, PROT_NONE);
2829 }
2830 
2831 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
2832                             size_t alignment_hint) {
2833   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint,
2834                                   (requested_addr != NULL));
2835 
2836   guarantee(requested_addr == NULL || requested_addr == addr,
2837             "OS failed to return requested mmap address.");
2838   return addr;
2839 }
2840 
2841 // Reserve memory at an arbitrary address, only if that area is
2842 // available (and not reserved for something else).
2843 
2844 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2845   const int max_tries = 10;
2846   char* base[max_tries];
2847   size_t size[max_tries];
2848 
2849   // Solaris adds a gap between mmap'ed regions.  The size of the gap
2850   // is dependent on the requested size and the MMU.  Our initial gap
2851   // value here is just a guess and will be corrected later.
2852   bool had_top_overlap = false;
2853   bool have_adjusted_gap = false;
2854   size_t gap = 0x400000;
2855 
2856   // Assert only that the size is a multiple of the page size, since
2857   // that's all that mmap requires, and since that's all we really know
2858   // about at this low abstraction level.  If we need higher alignment,
2859   // we can either pass an alignment to this method or verify alignment
2860   // in one of the methods further up the call chain.  See bug 5044738.
2861   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2862 
2863   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2864   // Give it a try, if the kernel honors the hint we can return immediately.
2865   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2866 
2867   volatile int err = errno;
2868   if (addr == requested_addr) {
2869     return addr;
2870   } else if (addr != NULL) {
2871     pd_unmap_memory(addr, bytes);
2872   }
2873 
2874   if (PrintMiscellaneous && Verbose) {
2875     char buf[256];
2876     buf[0] = '\0';
2877     if (addr == NULL) {
2878       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
2879     }
2880     warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2881             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2882             "%s", bytes, requested_addr, addr, buf);
2883   }
2884 
2885   // Address hint method didn't work.  Fall back to the old method.
2886   // In theory, once SNV becomes our oldest supported platform, this
2887   // code will no longer be needed.
2888   //
2889   // Repeatedly allocate blocks until the block is allocated at the
2890   // right spot. Give up after max_tries.
2891   int i;
2892   for (i = 0; i < max_tries; ++i) {
2893     base[i] = reserve_memory(bytes);
2894 
2895     if (base[i] != NULL) {
2896       // Is this the block we wanted?
2897       if (base[i] == requested_addr) {
2898         size[i] = bytes;
2899         break;
2900       }
2901 
2902       // check that the gap value is right
2903       if (had_top_overlap && !have_adjusted_gap) {
2904         size_t actual_gap = base[i-1] - base[i] - bytes;
2905         if (gap != actual_gap) {
2906           // adjust the gap value and retry the last 2 allocations
2907           assert(i > 0, "gap adjustment code problem");
2908           have_adjusted_gap = true;  // adjust the gap only once, just in case
2909           gap = actual_gap;
2910           if (PrintMiscellaneous && Verbose) {
2911             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2912           }
2913           unmap_memory(base[i], bytes);
2914           unmap_memory(base[i-1], size[i-1]);
2915           i-=2;
2916           continue;
2917         }
2918       }
2919 
2920       // Does this overlap the block we wanted? Give back the overlapped
2921       // parts and try again.
2922       //
2923       // There is still a bug in this code: if top_overlap == bytes,
2924       // the overlap is offset from requested region by the value of gap.
2925       // In this case giving back the overlapped part will not work,
2926       // because we'll give back the entire block at base[i] and
2927       // therefore the subsequent allocation will not generate a new gap.
2928       // This could be fixed with a new algorithm that used larger
2929       // or variable size chunks to find the requested region -
2930       // but such a change would introduce additional complications.
2931       // It's rare enough that the planets align for this bug,
2932       // so we'll just wait for a fix for 6204603/5003415 which
2933       // will provide a mmap flag to allow us to avoid this business.
2934 
2935       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2936       if (top_overlap >= 0 && top_overlap < bytes) {
2937         had_top_overlap = true;
2938         unmap_memory(base[i], top_overlap);
2939         base[i] += top_overlap;
2940         size[i] = bytes - top_overlap;
2941       } else {
2942         size_t bottom_overlap = base[i] + bytes - requested_addr;
2943         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2944           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
2945             warning("attempt_reserve_memory_at: possible alignment bug");
2946           }
2947           unmap_memory(requested_addr, bottom_overlap);
2948           size[i] = bytes - bottom_overlap;
2949         } else {
2950           size[i] = bytes;
2951         }
2952       }
2953     }
2954   }
2955 
2956   // Give back the unused reserved pieces.
2957 
2958   for (int j = 0; j < i; ++j) {
2959     if (base[j] != NULL) {
2960       unmap_memory(base[j], size[j]);
2961     }
2962   }
2963 
2964   return (i < max_tries) ? requested_addr : NULL;
2965 }
2966 
2967 bool os::pd_release_memory(char* addr, size_t bytes) {
2968   size_t size = bytes;
2969   return munmap(addr, size) == 0;
2970 }
2971 
2972 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
2973   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
2974          "addr must be page aligned");
2975   int retVal = mprotect(addr, bytes, prot);
2976   return retVal == 0;
2977 }
2978 
2979 // Protect memory (Used to pass readonly pages through
2980 // JNI GetArray<type>Elements with empty arrays.)
2981 // Also, used for serialization page and for compressed oops null pointer
2982 // checking.
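     //
     // Illustrative sketch (not an actual call site): to make a page-aligned
     // address temporarily read-only and then writable again, a caller could do
     //   os::protect_memory(page_addr, os::vm_page_size(), os::MEM_PROT_READ, true);
     //   ... read-only window ...
     //   os::protect_memory(page_addr, os::vm_page_size(), os::MEM_PROT_RW, true);
     // where page_addr is assumed to be page aligned (solaris_mprotect asserts it).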
2983 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2984                         bool is_committed) {
2985   unsigned int p = 0;
2986   switch (prot) {
2987   case MEM_PROT_NONE: p = PROT_NONE; break;
2988   case MEM_PROT_READ: p = PROT_READ; break;
2989   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2990   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2991   default:
2992     ShouldNotReachHere();
2993   }
2994   // is_committed is unused.
2995   return solaris_mprotect(addr, bytes, p);
2996 }
2997 
2998 // guard_memory and unguard_memory only happens within stack guard pages.
2999 // Since ISM pertains only to the heap, guard and unguard memory should not
3000 // happen with an ISM region.
3001 bool os::guard_memory(char* addr, size_t bytes) {
3002   return solaris_mprotect(addr, bytes, PROT_NONE);
3003 }
3004 
3005 bool os::unguard_memory(char* addr, size_t bytes) {
3006   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
3007 }
3008 
3009 // Large page support
3010 static size_t _large_page_size = 0;
3011 
3012 // Insertion sort for small arrays (descending order).
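     // For example, {8K, 4M, 64K} sorts to {4M, 64K, 8K}.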
3013 static void insertion_sort_descending(size_t* array, int len) {
3014   for (int i = 0; i < len; i++) {
3015     size_t val = array[i];
3016     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
3017       size_t tmp = array[key];
3018       array[key] = array[key - 1];
3019       array[key - 1] = tmp;
3020     }
3021   }
3022 }
3023 
3024 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
3025   const unsigned int usable_count = VM_Version::page_size_count();
3026   if (usable_count == 1) {
3027     return false;
3028   }
3029 
3030   // Find the right getpagesizes interface.  When Solaris 11 is the minimum
3031   // build platform, getpagesizes() (without the '2') can be called directly.
3032   typedef int (*gps_t)(size_t[], int);
3033   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3034   if (gps_func == NULL) {
3035     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3036     if (gps_func == NULL) {
3037       if (warn) {
3038         warning("MPSS is not supported by the operating system.");
3039       }
3040       return false;
3041     }
3042   }
3043 
3044   // Fill the array of page sizes.
3045   int n = (*gps_func)(_page_sizes, page_sizes_max);
3046   assert(n > 0, "Solaris bug?");
3047 
3048   if (n == page_sizes_max) {
3049     // Add a sentinel value (necessary only if the array was completely filled;
3050     // otherwise the static array is zero-initialized and already ends in 0).
3051     _page_sizes[--n] = 0;
3052     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3053   }
3054   assert(_page_sizes[n] == 0, "missing sentinel");
3055   trace_page_sizes("available page sizes", _page_sizes, n);
3056 
3057   if (n == 1) return false;     // Only one page size available.
3058 
3059   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3060   // select up to usable_count elements.  First sort the array, find the first
3061   // acceptable value, then copy the usable sizes to the top of the array and
3062   // trim the rest.  Make sure to include the default page size :-).
3063   //
3064   // A better policy could get rid of the 4M limit by taking the sizes of the
3065   // important VM memory regions (java heap and possibly the code cache) into
3066   // account.
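       //
       // Worked example (illustrative): if getpagesizes() reported
       // {8K, 64K, 512K, 4M}, usable_count were 3 and the default page size
       // were 8K, the trimmed array would become {4M, 512K, 8K, 0} and
       // *page_size would be set to 4M; the 64K size is dropped because only
       // usable_count entries are kept.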
3067   insertion_sort_descending(_page_sizes, n);
3068   const size_t size_limit =
3069     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3070   int beg;
3071   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */;
3072   const int end = MIN2((int)usable_count, n) - 1;
3073   for (int cur = 0; cur < end; ++cur, ++beg) {
3074     _page_sizes[cur] = _page_sizes[beg];
3075   }
3076   _page_sizes[end] = vm_page_size();
3077   _page_sizes[end + 1] = 0;
3078 
3079   if (_page_sizes[end] > _page_sizes[end - 1]) {
3080     // Default page size is not the smallest; sort again.
3081     insertion_sort_descending(_page_sizes, end + 1);
3082   }
3083   *page_size = _page_sizes[0];
3084 
3085   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3086   return true;
3087 }
3088 
3089 void os::large_page_init() {
3090   if (UseLargePages) {
3091     // print a warning if any large page related flag is specified on command line
3092     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
3093                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3094 
3095     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3096   }
3097 }
3098 
3099 bool os::Solaris::is_valid_page_size(size_t bytes) {
3100   for (int i = 0; _page_sizes[i] != 0; i++) {
3101     if (_page_sizes[i] == bytes) {
3102       return true;
3103     }
3104   }
3105   return false;
3106 }
3107 
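     // Illustrative usage sketch (assumes the generic os::reserve_memory()
     // entry point and an address/size already aligned to the large page size):
     //   size_t lps = os::large_page_size();
     //   char* base = os::reserve_memory(bytes, NULL, lps);
     //   if (base != NULL && os::Solaris::is_valid_page_size(lps)) {
     //     os::Solaris::setup_large_pages((caddr_t)base, bytes, lps);
     //   }
     // MC_HAT_ADVISE is only advice: if memcntl() fails, the mapping stays valid
     // and simply remains backed by small pages.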
3108 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3109   assert(is_valid_page_size(align), err_msg(SIZE_FORMAT " is not a valid page size", align));
3110   assert(is_ptr_aligned((void*) start, align),
3111          err_msg(PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align));
3112   assert(is_size_aligned(bytes, align),
3113          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align));
3114 
3115   // Signal to OS that we want large pages for addresses
3116   // from addr, addr + bytes
3117   struct memcntl_mha mpss_struct;
3118   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3119   mpss_struct.mha_pagesize = align;
3120   mpss_struct.mha_flags = 0;
3121   // Upon successful completion, memcntl() returns 0
3122   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3123     debug_only(warning("Attempt to use MPSS failed."));
3124     return false;
3125   }
3126   return true;
3127 }
3128 
3129 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3130   fatal("os::reserve_memory_special should not be called on Solaris.");
3131   return NULL;
3132 }
3133 
3134 bool os::release_memory_special(char* base, size_t bytes) {
3135   fatal("os::release_memory_special should not be called on Solaris.");
3136   return false;
3137 }
3138 
3139 size_t os::large_page_size() {
3140   return _large_page_size;
3141 }
3142 
3143 // MPSS allows the application to commit large page memory on demand; with ISM
3144 // the entire memory region must be allocated as shared memory.
3145 bool os::can_commit_large_page_memory() {
3146   return true;
3147 }
3148 
3149 bool os::can_execute_large_page_memory() {
3150   return true;
3151 }
3152 
3153 // Read calls from inside the vm need to perform state transitions
3154 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3155   size_t res;
3156   JavaThread* thread = (JavaThread*)Thread::current();
3157   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
3158   ThreadBlockInVM tbiv(thread);
3159   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3160   return res;
3161 }
3162 
3163 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
3164   size_t res;
3165   JavaThread* thread = (JavaThread*)Thread::current();
3166   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
3167   ThreadBlockInVM tbiv(thread);
3168   RESTARTABLE(::pread(fd, buf, (size_t) nBytes, offset), res);
3169   return res;
3170 }
3171 
3172 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3173   size_t res;
3174   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
3175          "Assumed _thread_in_native");
3176   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3177   return res;
3178 }
3179 
3180 void os::naked_short_sleep(jlong ms) {
3181   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3182 
3183   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3184   // Solaris requires -lrt for this.
3185   usleep((ms * 1000));
3186 
3187   return;
3188 }
3189 
3190 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3191 void os::infinite_sleep() {
3192   while (true) {    // sleep forever ...
3193     ::sleep(100);   // ... 100 seconds at a time
3194   }
3195 }
3196 
3197 // Used to convert frequent JVM_Yield() to nops
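     // For example, if DontYieldALotInterval is 10 (ms), a yield requested within
     // 10ms of the last yield that was actually allowed returns true here and is
     // turned into a nop by the caller.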
3198 bool os::dont_yield() {
3199   if (DontYieldALot) {
3200     static hrtime_t last_time = 0;
3201     hrtime_t diff = getTimeNanos() - last_time;
3202 
3203     if (diff < DontYieldALotInterval * 1000000) {
3204       return true;
3205     }
3206 
3207     last_time += diff;
3208 
3209     return false;
3210   } else {
3211     return false;
3212   }
3213 }
3214 
3215 // Note that yield semantics are defined by the scheduling class to which
3216 // the thread currently belongs.  Typically, yield will _not_ yield to
3217 // other equal or higher priority threads that reside on the dispatch queues
3218 // of other CPUs.
3219 
3220 void os::naked_yield() {
3221   thr_yield();
3222 }
3223 
3224 // Interface for setting lwp priorities.  If we are using T2 libthread,
3225 // which forces the use of BoundThreads or we manually set UseBoundThreads,
3226 // all of our threads will be assigned to real lwp's.  Using the thr_setprio
3227 // function is meaningless in this mode, so we must adjust the real lwp's priority.
3228 // The routines below implement the getting and setting of lwp priorities.
3229 //
3230 // Note: T2 is now the only supported libthread. The UseBoundThreads flag is
3231 //       being deprecated and all threads are now bound threads.
3232 //
3233 // Note: There are three priority scales used on Solaris.  Java priorities,
3234 //       which range from 1 to 10, the libthread "thr_setprio" scale, which ranges
3235 //       from 0 to 127, and the current scheduling class of the process we
3236 //       are running in.  This is typically from -60 to +60.
3237 //       The setting of the lwp priorities is done after a call to thr_setprio,
3238 //       so Java priorities are mapped to libthread priorities and we map from
3239 //       the latter to lwp priorities.  We don't keep priorities stored in
3240 //       Java priorities since some of our worker threads want to set priorities
3241 //       higher than all Java threads.
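     //
     //       For example (illustrative), with the default ThreadPriorityPolicy=0
     //       table defined below, Java NormPriority (5) maps to 127 on the
     //       libthread scale, and scale_to_lwp_priority() in turn maps 127 to
     //       the top of the current class's range (e.g. +60 for TS).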
3242 //
3243 // For related information:
3244 // (1)  man -s 2 priocntl
3245 // (2)  man -s 4 priocntl
3246 // (3)  man dispadmin
3247 // =    librt.so
3248 // =    libthread/common/rtsched.c - thrp_setlwpprio().
3249 // =    ps -cL <pid> ... to validate priority.
3250 // =    sched_get_priority_min and _max
3251 //              pthread_create
3252 //              sched_setparam
3253 //              pthread_setschedparam
3254 //
3255 // Assumptions:
3256 // +    We assume that all threads in the process belong to the same
3257 //              scheduling class, i.e., a homogeneous process.
3258 // +    Must be root or in IA group to change the "interactive" attribute.
3259 //              Priocntl() will fail silently.  The only indication of failure is when
3260 //              we read-back the value and notice that it hasn't changed.
3261 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3262 // +    For RT, change timeslice as well.  Invariant:
3263 //              constant "priority integral"
3264 //              Konst == TimeSlice * (60-Priority)
3265 //              Given a priority, compute appropriate timeslice.
3266 // +    Higher numerical values have higher priority.
3267 
3268 // sched class attributes
3269 typedef struct {
3270   int   schedPolicy;              // classID
3271   int   maxPrio;
3272   int   minPrio;
3273 } SchedInfo;
3274 
3275 
3276 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3277 
3278 #ifdef ASSERT
3279 static int  ReadBackValidate = 1;
3280 #endif
3281 static int  myClass     = 0;
3282 static int  myMin       = 0;
3283 static int  myMax       = 0;
3284 static int  myCur       = 0;
3285 static bool priocntl_enable = false;
3286 
3287 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
3288 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3289 
3290 
3291 // lwp_priocntl_init
3292 //
3293 // Try to determine the priority scale for our process.
3294 //
3295 // Return errno or 0 if OK.
3296 //
3297 static int lwp_priocntl_init() {
3298   int rslt;
3299   pcinfo_t ClassInfo;
3300   pcparms_t ParmInfo;
3301   int i;
3302 
3303   if (!UseThreadPriorities) return 0;
3304 
3305   // If ThreadPriorityPolicy is 1, switch tables
3306   if (ThreadPriorityPolicy == 1) {
3307     for (i = 0; i < CriticalPriority+1; i++)
3308       os::java_to_os_priority[i] = prio_policy1[i];
3309   }
3310   if (UseCriticalJavaThreadPriority) {
3311     // MaxPriority always maps to the FX scheduling class and criticalPrio.
3312     // See set_native_priority() and set_lwp_class_and_priority().
3313     // Save original MaxPriority mapping in case attempt to
3314     // use critical priority fails.
3315     java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3316     // Set negative to distinguish from other priorities
3317     os::java_to_os_priority[MaxPriority] = -criticalPrio;
3318   }
3319 
3320   // Get IDs for a set of well-known scheduling classes.
3321   // TODO-FIXME: GETCLINFO returns the current # of classes in
3322   // the system.  We should have a loop that iterates over the
3323   // classID values, which are known to be "small" integers.
3324 
3325   strcpy(ClassInfo.pc_clname, "TS");
3326   ClassInfo.pc_cid = -1;
3327   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3328   if (rslt < 0) return errno;
3329   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3330   tsLimits.schedPolicy = ClassInfo.pc_cid;
3331   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3332   tsLimits.minPrio = -tsLimits.maxPrio;
3333 
3334   strcpy(ClassInfo.pc_clname, "IA");
3335   ClassInfo.pc_cid = -1;
3336   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3337   if (rslt < 0) return errno;
3338   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3339   iaLimits.schedPolicy = ClassInfo.pc_cid;
3340   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3341   iaLimits.minPrio = -iaLimits.maxPrio;
3342 
3343   strcpy(ClassInfo.pc_clname, "RT");
3344   ClassInfo.pc_cid = -1;
3345   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3346   if (rslt < 0) return errno;
3347   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3348   rtLimits.schedPolicy = ClassInfo.pc_cid;
3349   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3350   rtLimits.minPrio = 0;
3351 
3352   strcpy(ClassInfo.pc_clname, "FX");
3353   ClassInfo.pc_cid = -1;
3354   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3355   if (rslt < 0) return errno;
3356   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3357   fxLimits.schedPolicy = ClassInfo.pc_cid;
3358   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3359   fxLimits.minPrio = 0;
3360 
3361   // Query our "current" scheduling class.
3362   // This will normally be IA, TS or, rarely, FX or RT.
3363   memset(&ParmInfo, 0, sizeof(ParmInfo));
3364   ParmInfo.pc_cid = PC_CLNULL;
3365   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3366   if (rslt < 0) return errno;
3367   myClass = ParmInfo.pc_cid;
3368 
3369   // We now know our scheduling classId, get specific information
3370   // about the class.
3371   ClassInfo.pc_cid = myClass;
3372   ClassInfo.pc_clname[0] = 0;
3373   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3374   if (rslt < 0) return errno;
3375 
3376   if (ThreadPriorityVerbose) {
3377     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3378   }
3379 
3380   memset(&ParmInfo, 0, sizeof(pcparms_t));
3381   ParmInfo.pc_cid = PC_CLNULL;
3382   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3383   if (rslt < 0) return errno;
3384 
3385   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3386     myMin = rtLimits.minPrio;
3387     myMax = rtLimits.maxPrio;
3388   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3389     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3390     myMin = iaLimits.minPrio;
3391     myMax = iaLimits.maxPrio;
3392     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3393   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3394     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3395     myMin = tsLimits.minPrio;
3396     myMax = tsLimits.maxPrio;
3397     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3398   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3399     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3400     myMin = fxLimits.minPrio;
3401     myMax = fxLimits.maxPrio;
3402     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3403   } else {
3404     // No clue - punt
3405     if (ThreadPriorityVerbose) {
3406       tty->print_cr("Unknown scheduling class: %s ... \n",
3407                     ClassInfo.pc_clname);
3408     }
3409     return EINVAL;      // no clue, punt
3410   }
3411 
3412   if (ThreadPriorityVerbose) {
3413     tty->print_cr("Thread priority Range: [%d..%d]\n", myMin, myMax);
3414   }
3415 
3416   priocntl_enable = true;  // Enable changing priorities
3417   return 0;
3418 }
3419 
3420 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3421 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3422 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3423 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3424 
3425 
3426 // scale_to_lwp_priority
3427 //
3428 // Convert from the libthread "thr_setprio" scale to our current
3429 // lwp scheduling class scale.
3430 //
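     // For example (illustrative), if the current class is TS with a user
     // priority range of [-60..60], an input of 64 maps to
     // ((64 * 120) / 128) - 60 = 0, while an input of 127 is special-cased
     // to return the maximum (+60).
     //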
3431 static int scale_to_lwp_priority(int rMin, int rMax, int x) {
3432   int v;
3433 
3434   if (x == 127) return rMax;            // avoid round-down
3435   v = (((x*(rMax-rMin)))/128)+rMin;
3436   return v;
3437 }
3438 
3439 
3440 // set_lwp_class_and_priority
3441 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3442                                int newPrio, int new_class, bool scale) {
3443   int rslt;
3444   int Actual, Expected, prv;
3445   pcparms_t ParmInfo;                   // for GET-SET
3446 #ifdef ASSERT
3447   pcparms_t ReadBack;                   // for readback
3448 #endif
3449 
3450   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3451   // Query current values.
3452   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3453   // Cache "pcparms_t" in global ParmCache.
3454   // TODO: elide set-to-same-value
3455 
3456   // If something went wrong on init, don't change priorities.
3457   if (!priocntl_enable) {
3458     if (ThreadPriorityVerbose) {
3459       tty->print_cr("Trying to set priority but init failed, ignoring");
3460     }
3461     return EINVAL;
3462   }
3463 
3464   // If the lwp hasn't started yet, just return;
3465   // the _start routine will call us again.
3466   if (lwpid <= 0) {
3467     if (ThreadPriorityVerbose) {
3468       tty->print_cr("deferring the set_lwp_class_and_priority of thread "
3469                     INTPTR_FORMAT " to %d, lwpid not set",
3470                     ThreadID, newPrio);
3471     }
3472     return 0;
3473   }
3474 
3475   if (ThreadPriorityVerbose) {
3476     tty->print_cr ("set_lwp_class_and_priority("
3477                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3478                    ThreadID, lwpid, newPrio);
3479   }
3480 
3481   memset(&ParmInfo, 0, sizeof(pcparms_t));
3482   ParmInfo.pc_cid = PC_CLNULL;
3483   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3484   if (rslt < 0) return errno;
3485 
3486   int cur_class = ParmInfo.pc_cid;
3487   ParmInfo.pc_cid = (id_t)new_class;
3488 
3489   if (new_class == rtLimits.schedPolicy) {
3490     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3491     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3492                                                        rtLimits.maxPrio, newPrio)
3493                                : newPrio;
3494     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3495     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3496     if (ThreadPriorityVerbose) {
3497       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3498     }
3499   } else if (new_class == iaLimits.schedPolicy) {
3500     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3501     int maxClamped     = MIN2(iaLimits.maxPrio,
3502                               cur_class == new_class
3503                               ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3504     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3505                                                        maxClamped, newPrio)
3506                                : newPrio;
3507     iaInfo->ia_uprilim = cur_class == new_class
3508                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3509     iaInfo->ia_mode    = IA_NOCHANGE;
3510     if (ThreadPriorityVerbose) {
3511       tty->print_cr("IA: [%d...%d] %d->%d\n",
3512                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3513     }
3514   } else if (new_class == tsLimits.schedPolicy) {
3515     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3516     int maxClamped     = MIN2(tsLimits.maxPrio,
3517                               cur_class == new_class
3518                               ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3519     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3520                                                        maxClamped, newPrio)
3521                                : newPrio;
3522     tsInfo->ts_uprilim = cur_class == new_class
3523                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3524     if (ThreadPriorityVerbose) {
3525       tty->print_cr("TS: [%d...%d] %d->%d\n",
3526                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3527     }
3528   } else if (new_class == fxLimits.schedPolicy) {
3529     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3530     int maxClamped     = MIN2(fxLimits.maxPrio,
3531                               cur_class == new_class
3532                               ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3533     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3534                                                        maxClamped, newPrio)
3535                                : newPrio;
3536     fxInfo->fx_uprilim = cur_class == new_class
3537                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3538     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3539     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3540     if (ThreadPriorityVerbose) {
3541       tty->print_cr("FX: [%d...%d] %d->%d\n",
3542                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3543     }
3544   } else {
3545     if (ThreadPriorityVerbose) {
3546       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3547     }
3548     return EINVAL;    // no clue, punt
3549   }
3550 
3551   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3552   if (ThreadPriorityVerbose && rslt) {
3553     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3554   }
3555   if (rslt < 0) return errno;
3556 
3557 #ifdef ASSERT
3558   // Sanity check: read back what we just attempted to set.
3559   // In theory it could have changed in the interim ...
3560   //
3561   // The priocntl system call is tricky.
3562   // Sometimes it'll validate the priority value argument and
3563   // return EINVAL if unhappy.  At other times it fails silently.
3564   // Readbacks are prudent.
3565 
3566   if (!ReadBackValidate) return 0;
3567 
3568   memset(&ReadBack, 0, sizeof(pcparms_t));
3569   ReadBack.pc_cid = PC_CLNULL;
3570   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3571   assert(rslt >= 0, "priocntl failed");
3572   Actual = Expected = 0xBAD;
3573   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3574   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3575     Actual   = RTPRI(ReadBack)->rt_pri;
3576     Expected = RTPRI(ParmInfo)->rt_pri;
3577   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3578     Actual   = IAPRI(ReadBack)->ia_upri;
3579     Expected = IAPRI(ParmInfo)->ia_upri;
3580   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3581     Actual   = TSPRI(ReadBack)->ts_upri;
3582     Expected = TSPRI(ParmInfo)->ts_upri;
3583   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3584     Actual   = FXPRI(ReadBack)->fx_upri;
3585     Expected = FXPRI(ParmInfo)->fx_upri;
3586   } else {
3587     if (ThreadPriorityVerbose) {
3588       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3589                     ParmInfo.pc_cid);
3590     }
3591   }
3592 
3593   if (Actual != Expected) {
3594     if (ThreadPriorityVerbose) {
3595       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3596                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3597     }
3598   }
3599 #endif
3600 
3601   return 0;
3602 }
3603 
3604 // Solaris only gives access to 128 real priorities at a time,
3605 // so we expand Java's ten to fill this range.  This would be better
3606 // if we dynamically adjusted relative priorities.
3607 //
3608 // The ThreadPriorityPolicy option allows us to select 2 different
3609 // priority scales.
3610 //
3611 // ThreadPriorityPolicy=0
3612 // Since Solaris' default priority is MaximumPriority, we do not
3613 // set a priority lower than Max unless a priority lower than
3614 // NormPriority is requested.
3615 //
3616 // ThreadPriorityPolicy=1
3617 // This mode causes the priority table to get filled with
3618 // linear values.  NormPriority gets mapped to 50% of the
3619 // maximum priority and so on.  This will cause VM threads
3620 // to get unfair treatment against other Solaris processes
3621 // which do not explicitly alter their thread priorities.
3622 
3623 int os::java_to_os_priority[CriticalPriority + 1] = {
3624   -99999,         // 0 Entry should never be used
3625 
3626   0,              // 1 MinPriority
3627   32,             // 2
3628   64,             // 3
3629 
3630   96,             // 4
3631   127,            // 5 NormPriority
3632   127,            // 6
3633 
3634   127,            // 7
3635   127,            // 8
3636   127,            // 9 NearMaxPriority
3637 
3638   127,            // 10 MaxPriority
3639 
3640   -criticalPrio   // 11 CriticalPriority
3641 };
3642 
3643 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3644   OSThread* osthread = thread->osthread();
3645 
3646   // Save requested priority in case the thread hasn't been started
3647   osthread->set_native_priority(newpri);
3648 
3649   // Check for critical priority request
3650   bool fxcritical = false;
3651   if (newpri == -criticalPrio) {
3652     fxcritical = true;
3653     newpri = criticalPrio;
3654   }
3655 
3656   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3657   if (!UseThreadPriorities) return OS_OK;
3658 
3659   int status = 0;
3660 
3661   if (!fxcritical) {
3662     // Use thr_setprio only if we have a priority that thr_setprio understands
3663     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3664   }
3665 
3666   int lwp_status =
3667           set_lwp_class_and_priority(osthread->thread_id(),
3668                                      osthread->lwp_id(),
3669                                      newpri,
3670                                      fxcritical ? fxLimits.schedPolicy : myClass,
3671                                      !fxcritical);
3672   if (lwp_status != 0 && fxcritical) {
3673     // Try again, this time without changing the scheduling class
3674     newpri = java_MaxPriority_to_os_priority;
3675     lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3676                                             osthread->lwp_id(),
3677                                             newpri, myClass, false);
3678   }
3679   status |= lwp_status;
3680   return (status == 0) ? OS_OK : OS_ERR;
3681 }
3682 
3683 
3684 OSReturn os::get_native_priority(const Thread* const thread,
3685                                  int *priority_ptr) {
3686   int p;
3687   if (!UseThreadPriorities) {
3688     *priority_ptr = NormalPriority;
3689     return OS_OK;
3690   }
3691   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3692   if (status != 0) {
3693     return OS_ERR;
3694   }
3695   *priority_ptr = p;
3696   return OS_OK;
3697 }
3698 
3699 
3700 // Hint to the underlying OS that a task switch would not be good.
3701 // Void return because it's a hint and can fail.
3702 void os::hint_no_preempt() {
3703   schedctl_start(schedctl_init());
3704 }
3705 
3706 static void resume_clear_context(OSThread *osthread) {
3707   osthread->set_ucontext(NULL);
3708 }
3709 
3710 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3711   osthread->set_ucontext(context);
3712 }
3713 
3714 static Semaphore sr_semaphore;
3715 
3716 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
3717   // Save and restore errno to avoid confusing native code with EINTR
3718   // after sigsuspend.
3719   int old_errno = errno;
3720 
3721   OSThread* osthread = thread->osthread();
3722   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3723 
3724   os::SuspendResume::State current = osthread->sr.state();
3725   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3726     suspend_save_context(osthread, uc);
3727 
3728     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3729     os::SuspendResume::State state = osthread->sr.suspended();
3730     if (state == os::SuspendResume::SR_SUSPENDED) {
3731       sigset_t suspend_set;  // signals for sigsuspend()
3732 
3733       // get current set of blocked signals and unblock resume signal
3734       thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
3735       sigdelset(&suspend_set, os::Solaris::SIGasync());
3736 
3737       sr_semaphore.signal();
3738       // wait here until we are resumed
3739       while (1) {
3740         sigsuspend(&suspend_set);
3741 
3742         os::SuspendResume::State result = osthread->sr.running();
3743         if (result == os::SuspendResume::SR_RUNNING) {
3744           sr_semaphore.signal();
3745           break;
3746         }
3747       }
3748 
3749     } else if (state == os::SuspendResume::SR_RUNNING) {
3750       // request was cancelled, continue
3751     } else {
3752       ShouldNotReachHere();
3753     }
3754 
3755     resume_clear_context(osthread);
3756   } else if (current == os::SuspendResume::SR_RUNNING) {
3757     // request was cancelled, continue
3758   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3759     // ignore
3760   } else {
3761     // ignore
3762   }
3763 
3764   errno = old_errno;
3765 }
3766 
3767 void os::print_statistics() {
3768 }
3769 
3770 int os::message_box(const char* title, const char* message) {
3771   int i;
3772   fdStream err(defaultStream::error_fd());
3773   for (i = 0; i < 78; i++) err.print_raw("=");
3774   err.cr();
3775   err.print_raw_cr(title);
3776   for (i = 0; i < 78; i++) err.print_raw("-");
3777   err.cr();
3778   err.print_raw_cr(message);
3779   for (i = 0; i < 78; i++) err.print_raw("=");
3780   err.cr();
3781 
3782   char buf[16];
3783   // Prevent process from exiting upon "read error" without consuming all CPU
3784   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3785 
3786   return buf[0] == 'y' || buf[0] == 'Y';
3787 }
3788 
3789 static int sr_notify(OSThread* osthread) {
3790   int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
3791   assert_status(status == 0, status, "thr_kill");
3792   return status;
3793 }
3794 
3795 // "Randomly" selected value for how long we want to spin
3796 // before bailing out on suspending a thread, also how often
3797 // we send a signal to a thread we want to resume
3798 static const int RANDOMLY_LARGE_INTEGER = 1000000;
3799 static const int RANDOMLY_LARGE_INTEGER2 = 100;
3800 
3801 static bool do_suspend(OSThread* osthread) {
3802   assert(osthread->sr.is_running(), "thread should be running");
3803   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
3804 
3805   // mark as suspended and send signal
3806   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3807     // failed to switch, state wasn't running?
3808     ShouldNotReachHere();
3809     return false;
3810   }
3811 
3812   if (sr_notify(osthread) != 0) {
3813     ShouldNotReachHere();
3814   }
3815 
3816   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3817   while (true) {
3818     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
3819       break;
3820     } else {
3821       // timeout
3822       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3823       if (cancelled == os::SuspendResume::SR_RUNNING) {
3824         return false;
3825       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3826         // make sure that we consume the signal on the semaphore as well
3827         sr_semaphore.wait();
3828         break;
3829       } else {
3830         ShouldNotReachHere();
3831         return false;
3832       }
3833     }
3834   }
3835 
3836   guarantee(osthread->sr.is_suspended(), "Must be suspended");
3837   return true;
3838 }
3839 
3840 static void do_resume(OSThread* osthread) {
3841   assert(osthread->sr.is_suspended(), "thread should be suspended");
3842   assert(!sr_semaphore.trywait(), "invalid semaphore state");
3843 
3844   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3845     // failed to switch to WAKEUP_REQUEST
3846     ShouldNotReachHere();
3847     return;
3848   }
3849 
3850   while (true) {
3851     if (sr_notify(osthread) == 0) {
3852       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
3853         if (osthread->sr.is_running()) {
3854           return;
3855         }
3856       }
3857     } else {
3858       ShouldNotReachHere();
3859     }
3860   }
3861 
3862   guarantee(osthread->sr.is_running(), "Must be running!");
3863 }
3864 
3865 void os::SuspendedThreadTask::internal_do_task() {
3866   if (do_suspend(_thread->osthread())) {
3867     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3868     do_task(context);
3869     do_resume(_thread->osthread());
3870   }
3871 }
3872 
3873 class PcFetcher : public os::SuspendedThreadTask {
3874  public:
3875   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3876   ExtendedPC result();
3877  protected:
3878   void do_task(const os::SuspendedThreadTaskContext& context);
3879  private:
3880   ExtendedPC _epc;
3881 };
3882 
3883 ExtendedPC PcFetcher::result() {
3884   guarantee(is_done(), "task is not done yet.");
3885   return _epc;
3886 }
3887 
3888 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3889   Thread* thread = context.thread();
3890   OSThread* osthread = thread->osthread();
3891   if (osthread->ucontext() != NULL) {
3892     _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
3893   } else {
3894     // NULL context is unexpected, double-check this is the VMThread
3895     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3896   }
3897 }
3898 
3899 // An implementation that briefly suspends the target thread to sample its pc;
3900 // the result is only a hint. Used for profiling only!
3901 ExtendedPC os::get_thread_pc(Thread* thread) {
3902   // Make sure that it is called by the watcher and the Threads lock is owned.
3903   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
3904   // For now, it is only used to profile the VM Thread
3905   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3906   PcFetcher fetcher(thread);
3907   fetcher.run();
3908   return fetcher.result();
3909 }
3910 
3911 
3912 // This does not do anything on Solaris. This is basically a hook for being
3913 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
3914 void os::os_exception_wrapper(java_call_t f, JavaValue* value,
3915                               methodHandle* method, JavaCallArguments* args,
3916                               Thread* thread) {
3917   f(value, method, args, thread);
3918 }
3919 
3920 // This routine may be used by user applications as a "hook" to catch signals.
3921 // The user-defined signal handler must pass unrecognized signals to this
3922 // routine, and if it returns true (non-zero), then the signal handler must
3923 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
3924 // routine will never return false (zero), but instead will execute a VM panic
3925 // routine to kill the process.
3926 //
3927 // If this routine returns false, it is OK to call it again.  This allows
3928 // the user-defined signal handler to perform checks either before or after
3929 // the VM performs its own checks.  Naturally, the user code would be making
3930 // a serious error if it tried to handle an exception (such as a null check
3931 // or breakpoint) that the VM was generating for its own correct operation.
3932 //
3933 // This routine may recognize any of the following kinds of signals:
3934 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
3935 // os::Solaris::SIGasync
3936 // It should be consulted by handlers for any of those signals.
3937 // It explicitly does not recognize os::Solaris::SIGinterrupt
3938 //
3939 // The caller of this routine must pass in the three arguments supplied
3940 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
3941 // field of the structure passed to sigaction().  This routine assumes that
3942 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3943 //
3944 // Note that the VM will print warnings if it detects conflicting signal
3945 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3946 //
3947 extern "C" JNIEXPORT int JVM_handle_solaris_signal(int signo,
3948                                                    siginfo_t* siginfo,
3949                                                    void* ucontext,
3950                                                    int abort_if_unrecognized);
3951 
3952 
3953 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
3954   int orig_errno = errno;  // Preserve errno value over signal handler.
3955   JVM_handle_solaris_signal(sig, info, ucVoid, true);
3956   errno = orig_errno;
3957 }
3958 
3959 // Do not delete - if the guarantee is ever removed, a signal handler (even an
3960 // empty one) is needed to provoke threads blocked on IO to return with EINTR.
3961 // Note: this explicitly does NOT call JVM_handle_solaris_signal and
3962 // does NOT participate in signal chaining due to requirement for
3963 // NOT setting SA_RESTART to make EINTR work.
3964 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
3965   if (UseSignalChaining) {
3966     struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
3967     if (actp && actp->sa_handler) {
3968       vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
3969     }
3970   }
3971 }
3972 
3973 // This boolean allows users to forward their own non-matching signals
3974 // to JVM_handle_solaris_signal, harmlessly.
3975 bool os::Solaris::signal_handlers_are_installed = false;
3976 
3977 // For signal-chaining
3978 bool os::Solaris::libjsig_is_loaded = false;
3979 typedef struct sigaction *(*get_signal_t)(int);
3980 get_signal_t os::Solaris::get_signal_action = NULL;
3981 
3982 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
3983   struct sigaction *actp = NULL;
3984 
3985   if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
3986     // Retrieve the old signal handler from libjsig
3987     actp = (*get_signal_action)(sig);
3988   }
3989   if (actp == NULL) {
3990     // Retrieve the preinstalled signal handler from jvm
3991     actp = get_preinstalled_handler(sig);
3992   }
3993 
3994   return actp;
3995 }
3996 
3997 static bool call_chained_handler(struct sigaction *actp, int sig,
3998                                  siginfo_t *siginfo, void *context) {
3999   // Call the old signal handler
4000   if (actp->sa_handler == SIG_DFL) {
4001     // It's more reasonable to let jvm treat it as an unexpected exception
4002     // instead of taking the default action.
4003     return false;
4004   } else if (actp->sa_handler != SIG_IGN) {
4005     if ((actp->sa_flags & SA_NODEFER) == 0) {
4006       // automatically block the signal
4007       sigaddset(&(actp->sa_mask), sig);
4008     }
4009 
4010     sa_handler_t hand;
4011     sa_sigaction_t sa;
4012     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
4013     // retrieve the chained handler
4014     if (siginfo_flag_set) {
4015       sa = actp->sa_sigaction;
4016     } else {
4017       hand = actp->sa_handler;
4018     }
4019 
4020     if ((actp->sa_flags & SA_RESETHAND) != 0) {
4021       actp->sa_handler = SIG_DFL;
4022     }
4023 
4024     // try to honor the signal mask
4025     sigset_t oset;
4026     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
4027 
4028     // call into the chained handler
4029     if (siginfo_flag_set) {
4030       (*sa)(sig, siginfo, context);
4031     } else {
4032       (*hand)(sig);
4033     }
4034 
4035     // restore the signal mask
4036     thr_sigsetmask(SIG_SETMASK, &oset, 0);
4037   }
4038   // Tell jvm's signal handler the signal is taken care of.
4039   return true;
4040 }
4041 
4042 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4043   bool chained = false;
4044   // signal-chaining
4045   if (UseSignalChaining) {
4046     struct sigaction *actp = get_chained_signal_action(sig);
4047     if (actp != NULL) {
4048       chained = call_chained_handler(actp, sig, siginfo, context);
4049     }
4050   }
4051   return chained;
4052 }
4053 
4054 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4055   assert((chainedsigactions != (struct sigaction *)NULL) &&
4056          (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
4057   if (preinstalled_sigs[sig] != 0) {
4058     return &chainedsigactions[sig];
4059   }
4060   return NULL;
4061 }
4062 
4063 void os::Solaris::save_preinstalled_handler(int sig,
4064                                             struct sigaction& oldAct) {
4065   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4066   assert((chainedsigactions != (struct sigaction *)NULL) &&
4067          (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
4068   chainedsigactions[sig] = oldAct;
4069   preinstalled_sigs[sig] = 1;
4070 }
4071 
4072 void os::Solaris::set_signal_handler(int sig, bool set_installed,
4073                                      bool oktochain) {
4074   // Check for overwrite.
4075   struct sigaction oldAct;
4076   sigaction(sig, (struct sigaction*)NULL, &oldAct);
4077   void* oldhand =
4078       oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
4079                           : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
4080   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4081       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4082       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4083     if (AllowUserSignalHandlers || !set_installed) {
4084       // Do not overwrite; user takes responsibility to forward to us.
4085       return;
4086     } else if (UseSignalChaining) {
4087       if (oktochain) {
4088         // save the old handler in jvm
4089         save_preinstalled_handler(sig, oldAct);
4090       } else {
4091         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4092       }
4093       // libjsig also interposes the sigaction() call below and saves the
4094       // old sigaction on its own.
4095     } else {
4096       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4097                     "%#lx for signal %d.", (long)oldhand, sig));
4098     }
4099   }
4100 
4101   struct sigaction sigAct;
4102   sigfillset(&(sigAct.sa_mask));
4103   sigAct.sa_handler = SIG_DFL;
4104 
4105   sigAct.sa_sigaction = signalHandler;
4106   // Handle SIGSEGV on alternate signal stack if
4107   // not using stack banging
4108   if (!UseStackBanging && sig == SIGSEGV) {
4109     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4110   } else if (sig == os::Solaris::SIGinterrupt()) {
4111     // Interruptible i/o requires SA_RESTART cleared so EINTR
4112     // is returned instead of restarting system calls
4113     sigemptyset(&sigAct.sa_mask);
4114     sigAct.sa_handler = NULL;
4115     sigAct.sa_flags = SA_SIGINFO;
4116     sigAct.sa_sigaction = sigINTRHandler;
4117   } else {
4118     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4119   }
4120   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4121 
4122   sigaction(sig, &sigAct, &oldAct);
4123 
4124   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4125                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4126   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4127 }
4128 
4129 
4130 #define DO_SIGNAL_CHECK(sig)                      \
4131   do {                                            \
4132     if (!sigismember(&check_signal_done, sig)) {  \
4133       os::Solaris::check_signal_handler(sig);     \
4134     }                                             \
4135   } while (0)
4136 
4137 // This method is a periodic task to check for misbehaving JNI applications
4138 // under CheckJNI; we can add any other periodic checks here.
4139 
4140 void os::run_periodic_checks() {
4141   // A big source of grief is hijacking virt. addr 0x0 on Solaris,
4142   // thereby preventing NULL pointer checks from working.
4143   if (!check_addr0_done) check_addr0_done = check_addr0(tty);
4144 
4145   if (check_signals == false) return;
4146 
4147   // If the SEGV or BUS handlers are overridden, generation of hs*.log in the
4148   // event of a crash could be prevented; debugging such a case can be very
4149   // challenging, so we absolutely check the following signals for good
4150   // measure:
4151   DO_SIGNAL_CHECK(SIGSEGV);
4152   DO_SIGNAL_CHECK(SIGILL);
4153   DO_SIGNAL_CHECK(SIGFPE);
4154   DO_SIGNAL_CHECK(SIGBUS);
4155   DO_SIGNAL_CHECK(SIGPIPE);
4156   DO_SIGNAL_CHECK(SIGXFSZ);
4157 
4158   // ReduceSignalUsage allows the user to override these handlers
4159   // see comments at the very top and jvm_solaris.h
4160   if (!ReduceSignalUsage) {
4161     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4162     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4163     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4164     DO_SIGNAL_CHECK(BREAK_SIGNAL);
4165   }
4166 
4167   // See comments above for using JVM1/JVM2 and UseAltSigs
4168   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4169   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4170 
4171 }
4172 
4173 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4174 
4175 static os_sigaction_t os_sigaction = NULL;
4176 
4177 void os::Solaris::check_signal_handler(int sig) {
4178   char buf[O_BUFLEN];
4179   address jvmHandler = NULL;
4180 
4181   struct sigaction act;
4182   if (os_sigaction == NULL) {
4183     // only trust the default sigaction, in case it has been interposed
4184     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4185     if (os_sigaction == NULL) return;
4186   }
4187 
4188   os_sigaction(sig, (struct sigaction*)NULL, &act);
4189 
4190   address thisHandler = (act.sa_flags & SA_SIGINFO)
4191     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4192     : CAST_FROM_FN_PTR(address, act.sa_handler);
4193 
4194 
4195   switch (sig) {
4196   case SIGSEGV:
4197   case SIGBUS:
4198   case SIGFPE:
4199   case SIGPIPE:
4200   case SIGXFSZ:
4201   case SIGILL:
4202     jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4203     break;
4204 
4205   case SHUTDOWN1_SIGNAL:
4206   case SHUTDOWN2_SIGNAL:
4207   case SHUTDOWN3_SIGNAL:
4208   case BREAK_SIGNAL:
4209     jvmHandler = (address)user_handler();
4210     break;
4211 
4212   default:
4213     int intrsig = os::Solaris::SIGinterrupt();
4214     int asynsig = os::Solaris::SIGasync();
4215 
4216     if (sig == intrsig) {
4217       jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4218     } else if (sig == asynsig) {
4219       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4220     } else {
4221       return;
4222     }
4223     break;
4224   }
4225 
4226 
4227   if (thisHandler != jvmHandler) {
4228     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4229     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4230     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4231     // No need to check this sig any longer
4232     sigaddset(&check_signal_done, sig);
4233     // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
4234     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
4235       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
4236                     exception_name(sig, buf, O_BUFLEN));
4237     }
4238   } else if (os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4239     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4240     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4241     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
4242     // No need to check this sig any longer
4243     sigaddset(&check_signal_done, sig);
4244   }
4245 
4246   // Print all the signal handler state
4247   if (sigismember(&check_signal_done, sig)) {
4248     print_signal_handlers(tty, buf, O_BUFLEN);
4249   }
4250 
4251 }
4252 
4253 void os::Solaris::install_signal_handlers() {
4254   bool libjsigdone = false;
4255   signal_handlers_are_installed = true;
4256 
4257   // signal-chaining
4258   typedef void (*signal_setting_t)();
4259   signal_setting_t begin_signal_setting = NULL;
4260   signal_setting_t end_signal_setting = NULL;
4261   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4262                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4263   if (begin_signal_setting != NULL) {
4264     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4265                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4266     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4267                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4268     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4269                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4270     libjsig_is_loaded = true;
4271     if (os::Solaris::get_libjsig_version != NULL) {
4272       libjsigversion =  (*os::Solaris::get_libjsig_version)();
4273     }
4274     assert(UseSignalChaining, "should enable signal-chaining");
4275   }
4276   if (libjsig_is_loaded) {
4277     // Tell libjsig jvm is setting signal handlers
4278     (*begin_signal_setting)();
4279   }
4280 
4281   set_signal_handler(SIGSEGV, true, true);
4282   set_signal_handler(SIGPIPE, true, true);
4283   set_signal_handler(SIGXFSZ, true, true);
4284   set_signal_handler(SIGBUS, true, true);
4285   set_signal_handler(SIGILL, true, true);
4286   set_signal_handler(SIGFPE, true, true);
4287 
4288 
4289   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4290 
4291     // Pre-1.4.1 libjsig is limited to chaining signals <= 32, so it
4292     // cannot register overridable signals which might be > 32
4293     if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
4294       // Tell libjsig jvm has finished setting signal handlers
4295       (*end_signal_setting)();
4296       libjsigdone = true;
4297     }
4298   }
4299 
4300   // Never ok to chain our SIGinterrupt
4301   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4302   set_signal_handler(os::Solaris::SIGasync(), true, true);
4303 
4304   if (libjsig_is_loaded && !libjsigdone) {
4305     // Tell libjsig the jvm has finished setting signal handlers
4306     (*end_signal_setting)();
4307   }
4308 
4309   // We don't activate the signal checker if libjsig is in place; we trust ourselves.
4310   // And if a user signal handler is installed, all bets are off.
4311   // Log that signal checking is off only if -verbose:jni is specified.
4312   if (CheckJNICalls) {
4313     if (libjsig_is_loaded) {
4314       if (PrintJNIResolving) {
4315         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4316       }
4317       check_signals = false;
4318     }
4319     if (AllowUserSignalHandlers) {
4320       if (PrintJNIResolving) {
4321         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4322       }
4323       check_signals = false;
4324     }
4325   }
4326 }
4327 
4328 
4329 void report_error(const char* file_name, int line_no, const char* title,
4330                   const char* format, ...);
4331 
4332 const char * signames[] = {
4333   "SIG0",
4334   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4335   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4336   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4337   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4338   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4339   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4340   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4341   "SIGCANCEL", "SIGLOST"
4342 };
4343 
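     // Maps a signal number to its name; for example, exception_name(SIGSEGV, buf, size)
     // yields "SIGSEGV" (signal 11 on Solaris), while signal numbers beyond the
     // table are formatted as "SIG<n>".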
4344 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4345   if (0 < exception_code && exception_code <= SIGRTMAX) {
4346     // signal
4347     if (exception_code < sizeof(signames)/sizeof(const char*)) {
4348       jio_snprintf(buf, size, "%s", signames[exception_code]);
4349     } else {
4350       jio_snprintf(buf, size, "SIG%d", exception_code);
4351     }
4352     return buf;
4353   } else {
4354     return NULL;
4355   }
4356 }
4357 
4358 // (Static) wrapper for getisax(2) call.
4359 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4360 
4361 // (Static) wrappers for the liblgrp API
4362 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4363 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4364 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4365 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4366 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4367 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4368 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4369 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4370 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4371 
4372 // (Static) wrapper for meminfo() call.
4373 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4374 
4375 static address resolve_symbol_lazy(const char* name) {
4376   address addr = (address) dlsym(RTLD_DEFAULT, name);
4377   if (addr == NULL) {
4378     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4379     addr = (address) dlsym(RTLD_NEXT, name);
4380   }
4381   return addr;
4382 }
4383 
4384 static address resolve_symbol(const char* name) {
4385   address addr = resolve_symbol_lazy(name);
4386   if (addr == NULL) {
4387     fatal(dlerror());
4388   }
4389   return addr;
4390 }
4391 
4392 void os::Solaris::libthread_init() {
4393   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4394 
4395   lwp_priocntl_init();
4396 
4397   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4398   if (func == NULL) {
4399     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
4400     // Guarantee that this VM is running on a new enough OS (5.6 or
4401     // later) that it will have a new enough libthread.so.
4402     guarantee(func != NULL, "libthread.so is too old.");
4403   }
4404 
4405   int size;
4406   void (*handler_info_func)(address *, int *);
4407   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4408   handler_info_func(&handler_start, &size);
4409   handler_end = handler_start + size;
4410 }
4411 
4412 
4413 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4414 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4415 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4416 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4417 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4418 int os::Solaris::_mutex_scope = USYNC_THREAD;
4419 
4420 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4421 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4422 int_fnP_cond_tP os::Solaris::_cond_signal;
4423 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4424 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4425 int_fnP_cond_tP os::Solaris::_cond_destroy;
4426 int os::Solaris::_cond_scope = USYNC_THREAD;
4427 
4428 void os::Solaris::synchronization_init() {
4429   if (UseLWPSynchronization) {
4430     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4431     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4432     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4433     os::Solaris::set_mutex_init(lwp_mutex_init);
4434     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4435     os::Solaris::set_mutex_scope(USYNC_THREAD);
4436 
4437     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4438     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4439     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4440     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4441     os::Solaris::set_cond_init(lwp_cond_init);
4442     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4443     os::Solaris::set_cond_scope(USYNC_THREAD);
4444   } else {
4445     os::Solaris::set_mutex_scope(USYNC_THREAD);
4446     os::Solaris::set_cond_scope(USYNC_THREAD);
4447 
4448     if (UsePthreads) {
4449       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4450       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4451       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4452       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4453       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4454 
4455       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4456       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4457       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4458       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4459       os::Solaris::set_cond_init(pthread_cond_default_init);
4460       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4461     } else {
4462       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4463       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4464       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4465       os::Solaris::set_mutex_init(::mutex_init);
4466       os::Solaris::set_mutex_destroy(::mutex_destroy);
4467 
4468       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4469       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4470       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4471       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4472       os::Solaris::set_cond_init(::cond_init);
4473       os::Solaris::set_cond_destroy(::cond_destroy);
4474     }
4475   }
4476 }
4477 
4478 bool os::Solaris::liblgrp_init() {
4479   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4480   if (handle != NULL) {
4481     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4482     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4483     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4484     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4485     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4486     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4487     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4488     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4489                                                       dlsym(handle, "lgrp_cookie_stale")));
4490 
4491     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4492     set_lgrp_cookie(c);
4493     return true;
4494   }
4495   return false;
4496 }
4497 
4498 void os::Solaris::misc_sym_init() {
4499   address func;
4500 
4501   // getisax
4502   func = resolve_symbol_lazy("getisax");
4503   if (func != NULL) {
4504     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4505   }
4506 
4507   // meminfo
4508   func = resolve_symbol_lazy("meminfo");
4509   if (func != NULL) {
4510     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4511   }
4512 }
4513 
4514 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4515   assert(_getisax != NULL, "_getisax not set");
4516   return _getisax(array, n);
4517 }
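
// Minimal usage sketch (illustrative only): querying instruction-set extensions
// once misc_sym_init() has resolved getisax(2). The returned entries hold AV_*
// bits from <sys/auxv.h>, e.g. AV_SPARC_VIS on SPARC.
//   uint32_t av[2];
//   uint_t valid = os::Solaris::getisax(av, 2);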
4518 
4519 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4520 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4521 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4522 
4523 void init_pset_getloadavg_ptr(void) {
4524   pset_getloadavg_ptr =
4525     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4526   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4527     warning("pset_getloadavg function not found");
4528   }
4529 }
4530 
4531 int os::Solaris::_dev_zero_fd = -1;
4532 
4533 // this is called _before_ the global arguments have been parsed
4534 void os::init(void) {
4535   _initial_pid = getpid();
4536 
4537   max_hrtime = first_hrtime = gethrtime();
4538 
4539   init_random(1234567);
4540 
4541   page_size = sysconf(_SC_PAGESIZE);
4542   if (page_size == -1) {
4543     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4544                   strerror(errno)));
4545   }
4546   init_page_sizes((size_t) page_size);
4547 
4548   Solaris::initialize_system_info();
4549 
4550   // Initialize misc. symbols as soon as possible, so we can use them
4551   // if we need them.
4552   Solaris::misc_sym_init();
4553 
4554   int fd = ::open("/dev/zero", O_RDWR);
4555   if (fd < 0) {
4556     fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4557   } else {
4558     Solaris::set_dev_zero_fd(fd);
4559 
4560     // Close on exec, child won't inherit.
4561     fcntl(fd, F_SETFD, FD_CLOEXEC);
4562   }
4563 
4564   clock_tics_per_sec = CLK_TCK;
4565 
4566   // check if dladdr1() exists; dladdr1 can provide more information than
4567   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4568   // and is available on linker patches for 5.7 and 5.8.
4569   // libdl.so must already have been loaded; this call is just an entry lookup
4570   void * hdl = dlopen("libdl.so", RTLD_NOW);
4571   if (hdl) {
4572     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4573   }
4574 
4575   // (Solaris only) this switches to calls that actually do locking.
4576   ThreadCritical::initialize();
4577 
4578   main_thread = thr_self();
4579 
4580   // Constant minimum stack size allowed. It must be at least
4581   // the minimum of what the OS supports (thr_min_stack()), and
4582   // enough to allow the thread to get to user bytecode execution.
4583   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4584   // If the page size of the VM is greater than 8K, determine the appropriate
4585   // number of initial guard pages.  The user can change this with the
4586   // command line arguments, if needed.
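  // Worked example (illustrative, assuming a 64K page size and a default
  // StackShadowPages of 20): round_to(20*8K, 64K)/64K = 192K/64K = 3 pages.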
4587   if (vm_page_size() > 8*K) {
4588     StackYellowPages = 1;
4589     StackRedPages = 1;
4590     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4591   }
4592 }
4593 
4594 // To install functions for atexit system call
4595 extern "C" {
4596   static void perfMemory_exit_helper() {
4597     perfMemory_exit();
4598   }
4599 }
4600 
4601 // this is called _after_ the global arguments have been parsed
4602 jint os::init_2(void) {
4603   // try to enable extended file IO ASAP, see 6431278
4604   os::Solaris::try_enable_extended_io();
4605 
4606   // Allocate a single page and mark it as readable for safepoint polling.  Also
4607   // use this first mmap call to check support for MAP_ALIGN.
4608   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4609                                                       page_size,
4610                                                       MAP_PRIVATE | MAP_ALIGN,
4611                                                       PROT_READ);
4612   if (polling_page == NULL) {
4613     has_map_align = false;
4614     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
4615                                                 PROT_READ);
4616   }
4617 
4618   os::set_polling_page(polling_page);
4619 
4620 #ifndef PRODUCT
4621   if (Verbose && PrintMiscellaneous) {
4622     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n",
4623                (intptr_t)polling_page);
4624   }
4625 #endif
4626 
4627   if (!UseMembar) {
4628     address mem_serialize_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE);
4629     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
4630     os::set_memory_serialize_page(mem_serialize_page);
4631 
4632 #ifndef PRODUCT
4633     if (Verbose && PrintMiscellaneous) {
4634       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n",
4635                  (intptr_t)mem_serialize_page);
4636     }
4637 #endif
4638   }
4639 
4640   // Check the minimum allowable stack size for thread creation and for
4641   // initializing the java system classes, including StackOverflowError;
4642   // this depends on the page size.  Add a page for compiler2 recursion in the
4643   // main thread, and add 2*BytesPerWord times the page size to account for VM
4644   // stack usage during class initialization, depending on a 32- or 64-bit VM.
4645   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
4646                                         (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
4647                                         2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
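  // Worked example (illustrative, with assumed defaults of 8K pages,
  // StackYellowPages=2, StackRedPages=1 and StackShadowPages=20 on a 64-bit
  // COMPILER2 build): (2 + 1 + 20 + 2*8 + 1) * 8K = 40 * 8K = 320K.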
4648 
4649   size_t threadStackSizeInBytes = ThreadStackSize * K;
4650   if (threadStackSizeInBytes != 0 &&
4651       threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
4652     tty->print_cr("\nThe stack size specified is too small; specify at least %dk",
4653                   os::Solaris::min_stack_allowed/K);
4654     return JNI_ERR;
4655   }
4656 
4657   // With a 64kb page size the usable default stack size is quite a
4658   // bit smaller.  Increase the stack for 64kb (or any larger than
4659   // 8kb) pages; this increases virtual memory fragmentation (since
4660   // we're not creating the stack on a power of 2 boundary).  The
4661   // real fix for this should be to fix the guard page
4662   // mechanism.
4663 
4664   if (vm_page_size() > 8*K) {
4665     threadStackSizeInBytes = (threadStackSizeInBytes != 0)
4666        ? threadStackSizeInBytes +
4667          ((StackYellowPages + StackRedPages) * vm_page_size())
4668        : 0;
4669     ThreadStackSize = threadStackSizeInBytes/K;
4670   }
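  // Worked example (illustrative): with 64K pages, StackYellowPages=2 and
  // StackRedPages=1, a requested -Xss512k becomes 512K + 3*64K = 704K.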
4671 
4672   // Make the stack size a multiple of the page size so that
4673   // the yellow/red zones can be guarded.
4674   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
4675                                                 vm_page_size()));
4676 
4677   Solaris::libthread_init();
4678 
4679   if (UseNUMA) {
4680     if (!Solaris::liblgrp_init()) {
4681       UseNUMA = false;
4682     } else {
4683       size_t lgrp_limit = os::numa_get_groups_num();
4684       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
4685       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
4686       FREE_C_HEAP_ARRAY(int, lgrp_ids);
4687       if (lgrp_num < 2) {
4688         // There's only one locality group, disable NUMA.
4689         UseNUMA = false;
4690       }
4691     }
4692     if (!UseNUMA && ForceNUMA) {
4693       UseNUMA = true;
4694     }
4695   }
4696 
4697   Solaris::signal_sets_init();
4698   Solaris::init_signal_mem();
4699   Solaris::install_signal_handlers();
4700 
4701   if (libjsigversion < JSIG_VERSION_1_4_1) {
4702     Maxlibjsigsigs = OLDMAXSIGNUM;
4703   }
4704 
4705   // initialize synchronization primitives to use either thread or
4706   // lwp synchronization (controlled by UseLWPSynchronization)
4707   Solaris::synchronization_init();
4708 
4709   if (MaxFDLimit) {
4710     // set the number of file descriptors to max. print out error
4711     // if getrlimit/setrlimit fails but continue regardless.
4712     struct rlimit nbr_files;
4713     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4714     if (status != 0) {
4715       if (PrintMiscellaneous && (Verbose || WizardMode)) {
4716         perror("os::init_2 getrlimit failed");
4717       }
4718     } else {
4719       nbr_files.rlim_cur = nbr_files.rlim_max;
4720       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4721       if (status != 0) {
4722         if (PrintMiscellaneous && (Verbose || WizardMode)) {
4723           perror("os::init_2 setrlimit failed");
4724         }
4725       }
4726     }
4727   }
4728 
4729   // Calculate the theoretical maximum number of threads to guard against
4730   // artificial out-of-memory situations, where all available address
4731   // space has been reserved by thread stacks. Default stack size is 1Mb.
4732   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
4733     JavaThread::stack_size_at_create() : (1*K*K);
4734   assert(pre_thread_stack_size != 0, "Must have a stack");
4735   // Solaris allows a maximum of 4Gb of address space for user programs. Calculate
4736   // the thread limit at which we should start doing virtual memory banging;
4737   // currently that is when the threads have used all but 200Mb of the space.
4738   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
4739   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
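  // Worked example (illustrative): max_address_space = 4096M - 200M = 3896M,
  // so with the default 1M stacks _os_thread_limit is about 3896 threads.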
4740 
4741   // at-exit methods are called in the reverse order of their registration.
4742   // In Solaris 7 and earlier, atexit functions are called on return from
4743   // main or as a result of a call to exit(3C). There can be only 32 of
4744   // these functions registered and atexit() does not set errno. In Solaris
4745   // 8 and later, there is no limit to the number of functions registered
4746   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
4747   // functions are called upon dlclose(3DL) in addition to return from main
4748   // and exit(3C).
4749 
4750   if (PerfAllowAtExitRegistration) {
4751     // only register atexit functions if PerfAllowAtExitRegistration is set.
4752     // atexit functions can be delayed until process exit time, which
4753     // can be problematic for embedded VM situations. Embedded VMs should
4754     // call DestroyJavaVM() to assure that VM resources are released.
4755 
4756     // note: perfMemory_exit_helper atexit function may be removed in
4757     // the future if the appropriate cleanup code can be added to the
4758     // VM_Exit VMOperation's doit method.
4759     if (atexit(perfMemory_exit_helper) != 0) {
4760       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4761     }
4762   }
4763 
4764   // Init pset_loadavg function pointer
4765   init_pset_getloadavg_ptr();
4766 
4767   return JNI_OK;
4768 }
4769 
4770 // Mark the polling page as unreadable
4771 void os::make_polling_page_unreadable(void) {
4772   if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0) {
4773     fatal("Could not disable polling page");
4774   }
4775 }
4776 
4777 // Mark the polling page as readable
4778 void os::make_polling_page_readable(void) {
4779   if (mprotect((char *)_polling_page, page_size, PROT_READ) != 0) {
4780     fatal("Could not enable polling page");
4781   }
4782 }
4783 
4784 // OS interface.
4785 
4786 bool os::check_heap(bool force) { return true; }
4787 
4788 // Is a (classpath) directory empty?
4789 bool os::dir_is_empty(const char* path) {
4790   DIR *dir = NULL;
4791   struct dirent *ptr;
4792 
4793   dir = opendir(path);
4794   if (dir == NULL) return true;
4795 
4796   // Scan the directory
4797   bool result = true;
4798   char buf[sizeof(struct dirent) + MAX_PATH];
4799   struct dirent *dbuf = (struct dirent *) buf;
4800   while (result && (ptr = readdir(dir, dbuf)) != NULL) {
4801     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4802       result = false;
4803     }
4804   }
4805   closedir(dir);
4806   return result;
4807 }
4808 
4809 // This code originates from JDK's sysOpen and open64_w
4810 // from src/solaris/hpi/src/system_md.c
4811 
4812 int os::open(const char *path, int oflag, int mode) {
4813   if (strlen(path) > MAX_PATH - 1) {
4814     errno = ENAMETOOLONG;
4815     return -1;
4816   }
4817   int fd;
4818 
4819   fd = ::open64(path, oflag, mode);
4820   if (fd == -1) return -1;
4821 
4822   // If the open succeeded, the file might still be a directory
4823   {
4824     struct stat64 buf64;
4825     int ret = ::fstat64(fd, &buf64);
4826     int st_mode = buf64.st_mode;
4827 
4828     if (ret != -1) {
4829       if ((st_mode & S_IFMT) == S_IFDIR) {
4830         errno = EISDIR;
4831         ::close(fd);
4832         return -1;
4833       }
4834     } else {
4835       ::close(fd);
4836       return -1;
4837     }
4838   }
4839 
4840   // 32-bit Solaris systems suffer from:
4841   //
4842   // - an historical default soft limit of 256 per-process file
4843   //   descriptors that is too low for many Java programs.
4844   //
4845   // - a design flaw where file descriptors created using stdio
4846   //   fopen must be less than 256, _even_ when the first limit above
4847   //   has been raised.  This can cause calls to fopen (but not calls to
4848   //   open, for example) to fail mysteriously, perhaps in 3rd party
4849   //   native code (although the JDK itself uses fopen).  One can hardly
4850   //   criticize them for using this most standard of all functions.
4851   //
4852   // We attempt to make everything work anyway by:
4853   //
4854   // - raising the soft limit on per-process file descriptors beyond
4855   //   256
4856   //
4857   // - As of Solaris 10u4, we can request that Solaris raise the 256
4858   //   stdio fopen limit by calling function enable_extended_FILE_stdio.
4859   //   This is done in init_2 and recorded in enabled_extended_FILE_stdio
4860   //
4861   // - If we are stuck on an old (pre 10u4) Solaris system, we can
4862   //   work around the bug by remapping non-stdio file descriptors below
4863   //   256 to ones beyond 256, which is done below.
4864   //
4865   // See:
4866   // 1085341: 32-bit stdio routines should support file descriptors >255
4867   // 6533291: Work around 32-bit Solaris stdio limit of 256 open files
4868   // 6431278: Netbeans crash on 32 bit Solaris: need to call
4869   //          enable_extended_FILE_stdio() in VM initialisation
4870   // Giri Mandalika's blog
4871   // http://technopark02.blogspot.com/2005_05_01_archive.html
4872   //
4873 #ifndef  _LP64
4874   if ((!enabled_extended_FILE_stdio) && fd < 256) {
4875     int newfd = ::fcntl(fd, F_DUPFD, 256);
4876     if (newfd != -1) {
4877       ::close(fd);
4878       fd = newfd;
4879     }
4880   }
4881 #endif // 32-bit Solaris
4882 
4883   // All file descriptors that are opened in the JVM and not
4884   // specifically destined for a subprocess should have the
4885   // close-on-exec flag set.  If we don't set it, then careless 3rd
4886   // party native code might fork and exec without closing all
4887   // appropriate file descriptors (e.g. as we do in closeDescriptors in
4888   // UNIXProcess.c), and this in turn might:
4889   //
4890   // - cause end-of-file to fail to be detected on some file
4891   //   descriptors, resulting in mysterious hangs, or
4892   //
4893   // - might cause an fopen in the subprocess to fail on a system
4894   //   suffering from bug 1085341.
4895   //
4896   // (Yes, the default setting of the close-on-exec flag is a Unix
4897   // design flaw)
4898   //
4899   // See:
4900   // 1085341: 32-bit stdio routines should support file descriptors >255
4901   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4902   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4903   //
4904 #ifdef FD_CLOEXEC
4905   {
4906     int flags = ::fcntl(fd, F_GETFD);
4907     if (flags != -1) {
4908       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4909     }
4910   }
4911 #endif
4912 
4913   return fd;
4914 }
4915 
4916 // create binary file, rewriting existing file if required
4917 int os::create_binary_file(const char* path, bool rewrite_existing) {
4918   int oflags = O_WRONLY | O_CREAT;
4919   if (!rewrite_existing) {
4920     oflags |= O_EXCL;
4921   }
4922   return ::open64(path, oflags, S_IREAD | S_IWRITE);
4923 }
4924 
4925 // return current position of file pointer
4926 jlong os::current_file_offset(int fd) {
4927   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4928 }
4929 
4930 // move file pointer to the specified offset
4931 jlong os::seek_to_file_offset(int fd, jlong offset) {
4932   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4933 }
4934 
4935 jlong os::lseek(int fd, jlong offset, int whence) {
4936   return (jlong) ::lseek64(fd, offset, whence);
4937 }
4938 
4939 char * os::native_path(char *path) {
4940   return path;
4941 }
4942 
4943 int os::ftruncate(int fd, jlong length) {
4944   return ::ftruncate64(fd, length);
4945 }
4946 
4947 int os::fsync(int fd)  {
4948   RESTARTABLE_RETURN_INT(::fsync(fd));
4949 }
4950 
4951 int os::available(int fd, jlong *bytes) {
4952   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
4953          "Assumed _thread_in_native");
4954   jlong cur, end;
4955   int mode;
4956   struct stat64 buf64;
4957 
4958   if (::fstat64(fd, &buf64) >= 0) {
4959     mode = buf64.st_mode;
4960     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4961       int n, ioctl_return;
4962 
4963       RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
4964       if (ioctl_return >= 0) {
4965         *bytes = n;
4966         return 1;
4967       }
4968     }
4969   }
4970   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4971     return 0;
4972   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4973     return 0;
4974   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4975     return 0;
4976   }
4977   *bytes = end - cur;
4978   return 1;
4979 }
4980 
4981 // Map a block of memory.
4982 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4983                         char *addr, size_t bytes, bool read_only,
4984                         bool allow_exec) {
4985   int prot;
4986   int flags;
4987 
4988   if (read_only) {
4989     prot = PROT_READ;
4990     flags = MAP_SHARED;
4991   } else {
4992     prot = PROT_READ | PROT_WRITE;
4993     flags = MAP_PRIVATE;
4994   }
4995 
4996   if (allow_exec) {
4997     prot |= PROT_EXEC;
4998   }
4999 
5000   if (addr != NULL) {
5001     flags |= MAP_FIXED;
5002   }
5003 
5004   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5005                                      fd, file_offset);
5006   if (mapped_address == MAP_FAILED) {
5007     return NULL;
5008   }
5009   return mapped_address;
5010 }
5011 
5012 
5013 // Remap a block of memory.
5014 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5015                           char *addr, size_t bytes, bool read_only,
5016                           bool allow_exec) {
5017   // same as map_memory() on this OS
5018   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5019                         allow_exec);
5020 }
5021 
5022 
5023 // Unmap a block of memory.
5024 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5025   return munmap(addr, bytes) == 0;
5026 }
5027 
5028 void os::pause() {
5029   char filename[MAX_PATH];
5030   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5031     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5032   } else {
5033     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5034   }
5035 
5036   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5037   if (fd != -1) {
5038     struct stat buf;
5039     ::close(fd);
5040     while (::stat(filename, &buf) == 0) {
5041       (void)::poll(NULL, 0, 100);
5042     }
5043   } else {
5044     jio_fprintf(stderr,
5045                 "Could not open pause file '%s', continuing immediately.\n", filename);
5046   }
5047 }
5048 
5049 #ifndef PRODUCT
5050 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5051 // Turn this on if you need to trace synch operations.
5052 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5053 // and call record_synch_enable and record_synch_disable
5054 // around the computation of interest.
5055 
5056 void record_synch(char* name, bool returning);  // defined below
5057 
5058 class RecordSynch {
5059   char* _name;
5060  public:
5061   RecordSynch(char* name) :_name(name) { record_synch(_name, false); }
5062   ~RecordSynch()                       { record_synch(_name, true); }
5063 };
5064 
5065 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
5066 extern "C" ret name params {                                    \
5067   typedef ret name##_t params;                                  \
5068   static name##_t* implem = NULL;                               \
5069   static int callcount = 0;                                     \
5070   if (implem == NULL) {                                         \
5071     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
5072     if (implem == NULL)  fatal(dlerror());                      \
5073   }                                                             \
5074   ++callcount;                                                  \
5075   RecordSynch _rs(#name);                                       \
5076   inner;                                                        \
5077   return implem args;                                           \
5078 }
5079 // in dbx, examine callcounts this way:
5080 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
5081 
5082 #define CHECK_POINTER_OK(p) \
5083   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
5084 #define CHECK_MU \
5085   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5086 #define CHECK_CV \
5087   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5088 #define CHECK_P(p) \
5089   if (!CHECK_POINTER_OK(p))  fatal(false,  "Pointer must be in C heap only.");
5090 
5091 #define CHECK_MUTEX(mutex_op) \
5092   CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5093 
5094 CHECK_MUTEX(   mutex_lock)
5095 CHECK_MUTEX(  _mutex_lock)
5096 CHECK_MUTEX( mutex_unlock)
5097 CHECK_MUTEX(_mutex_unlock)
5098 CHECK_MUTEX( mutex_trylock)
5099 CHECK_MUTEX(_mutex_trylock)
5100 
5101 #define CHECK_COND(cond_op) \
5102   CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU; CHECK_CV);
5103 
5104 CHECK_COND( cond_wait);
5105 CHECK_COND(_cond_wait);
5106 CHECK_COND(_cond_wait_cancel);
5107 
5108 #define CHECK_COND2(cond_op) \
5109   CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU; CHECK_CV);
5110 
5111 CHECK_COND2( cond_timedwait);
5112 CHECK_COND2(_cond_timedwait);
5113 CHECK_COND2(_cond_timedwait_cancel);
5114 
5115 // do the _lwp_* versions too
5116 #define mutex_t lwp_mutex_t
5117 #define cond_t  lwp_cond_t
5118 CHECK_MUTEX(  _lwp_mutex_lock)
5119 CHECK_MUTEX(  _lwp_mutex_unlock)
5120 CHECK_MUTEX(  _lwp_mutex_trylock)
5121 CHECK_MUTEX( __lwp_mutex_lock)
5122 CHECK_MUTEX( __lwp_mutex_unlock)
5123 CHECK_MUTEX( __lwp_mutex_trylock)
5124 CHECK_MUTEX(___lwp_mutex_lock)
5125 CHECK_MUTEX(___lwp_mutex_unlock)
5126 
5127 CHECK_COND(  _lwp_cond_wait);
5128 CHECK_COND( __lwp_cond_wait);
5129 CHECK_COND(___lwp_cond_wait);
5130 
5131 CHECK_COND2(  _lwp_cond_timedwait);
5132 CHECK_COND2( __lwp_cond_timedwait);
5133 #undef mutex_t
5134 #undef cond_t
5135 
5136 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5137 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5138 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5139 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5140 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5141 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5142 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5143 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5144 
5145 
5146 // recording machinery:
5147 
5148 enum { RECORD_SYNCH_LIMIT = 200 };
5149 char* record_synch_name[RECORD_SYNCH_LIMIT];
5150 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5151 bool record_synch_returning[RECORD_SYNCH_LIMIT];
5152 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5153 int record_synch_count = 0;
5154 bool record_synch_enabled = false;
5155 
5156 // in dbx, examine recorded data this way:
5157 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5158 
5159 void record_synch(char* name, bool returning) {
5160   if (record_synch_enabled) {
5161     if (record_synch_count < RECORD_SYNCH_LIMIT) {
5162       record_synch_name[record_synch_count] = name;
5163       record_synch_returning[record_synch_count] = returning;
5164       record_synch_thread[record_synch_count] = thr_self();
5165       record_synch_arg0ptr[record_synch_count] = &name;
5166       record_synch_count++;
5167     }
5168     // put more checking code here:
5169     // ...
5170   }
5171 }
5172 
5173 void record_synch_enable() {
5174   // start collecting trace data, if not already doing so
5175   if (!record_synch_enabled)  record_synch_count = 0;
5176   record_synch_enabled = true;
5177 }
5178 
5179 void record_synch_disable() {
5180   // stop collecting trace data
5181   record_synch_enabled = false;
5182 }
5183 
5184 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5185 #endif // PRODUCT
5186 
5187 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5188 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5189                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
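
// In effect these are offsetof-style constants: thr_time_off is the byte offset
// of pr_utime within prusage_t, and thr_time_size spans pr_utime and pr_stime
// (the fields preceding pr_ttime), which thread_cpu_time() below reads from
// /proc with a single pread().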
5190 
5191 
5192 // JVMTI & JVM monitoring and management support
5193 // The thread_cpu_time() and current_thread_cpu_time() are only
5194 // supported if is_thread_cpu_time_supported() returns true.
5195 // They are not supported on Solaris T1.
5196 
5197 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5198 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5199 // of a thread.
5200 //
5201 // current_thread_cpu_time() and thread_cpu_time(Thread *)
5202 // return the fast estimate available on the platform.
5203 
5204 // hrtime_t gethrvtime() return value includes
5205 // user time but does not include system time
5206 jlong os::current_thread_cpu_time() {
5207   return (jlong) gethrvtime();
5208 }
5209 
5210 jlong os::thread_cpu_time(Thread *thread) {
5211   // return user level CPU time only to be consistent with
5212   // what current_thread_cpu_time returns.
5213   // thread_cpu_time_info() must be changed if this changes
5214   return os::thread_cpu_time(thread, false /* user time only */);
5215 }
5216 
5217 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5218   if (user_sys_cpu_time) {
5219     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5220   } else {
5221     return os::current_thread_cpu_time();
5222   }
5223 }
5224 
5225 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5226   char proc_name[64];
5227   int count;
5228   prusage_t prusage;
5229   jlong lwp_time;
5230   int fd;
5231 
5232   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5233           getpid(),
5234           thread->osthread()->lwp_id());
5235   fd = ::open(proc_name, O_RDONLY);
5236   if (fd == -1) return -1;
5237 
5238   do {
5239     count = ::pread(fd,
5240                     (void *)&prusage.pr_utime,
5241                     thr_time_size,
5242                     thr_time_off);
5243   } while (count < 0 && errno == EINTR);
5244   ::close(fd);
5245   if (count < 0) return -1;
5246 
5247   if (user_sys_cpu_time) {
5248     // user + system CPU time
5249     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5250                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5251                  (jlong)prusage.pr_stime.tv_nsec +
5252                  (jlong)prusage.pr_utime.tv_nsec;
5253   } else {
5254     // user level CPU time only
5255     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5256                 (jlong)prusage.pr_utime.tv_nsec;
5257   }
5258 
5259   return (lwp_time);
5260 }
5261 
5262 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5263   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5264   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5265   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5266   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5267 }
5268 
5269 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5270   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5271   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5272   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5273   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5274 }
5275 
5276 bool os::is_thread_cpu_time_supported() {
5277   return true;
5278 }
5279 
5280 // System loadavg support.  Returns -1 if load average cannot be obtained.
5281 // Return the load average for our processor set if the primitive exists
5282 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
5283 int os::loadavg(double loadavg[], int nelem) {
5284   if (pset_getloadavg_ptr != NULL) {
5285     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5286   } else {
5287     return ::getloadavg(loadavg, nelem);
5288   }
5289 }
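
// Minimal usage sketch (illustrative only): the three entries are the 1-, 5- and
// 15-minute load averages for this processor set (or the whole system).
//   double la[3];
//   if (os::loadavg(la, 3) != -1) {
//     tty->print_cr("load: %.2f %.2f %.2f", la[0], la[1], la[2]);
//   }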
5290 
5291 //---------------------------------------------------------------------------------
5292 
5293 bool os::find(address addr, outputStream* st) {
5294   Dl_info dlinfo;
5295   memset(&dlinfo, 0, sizeof(dlinfo));
5296   if (dladdr(addr, &dlinfo) != 0) {
5297     st->print(PTR_FORMAT ": ", addr);
5298     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5299       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5300     } else if (dlinfo.dli_fbase != NULL) {
5301       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5302     } else {
5303       st->print("<absolute address>");
5304     }
5305     if (dlinfo.dli_fname != NULL) {
5306       st->print(" in %s", dlinfo.dli_fname);
5307     }
5308     if (dlinfo.dli_fbase != NULL) {
5309       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5310     }
5311     st->cr();
5312 
5313     if (Verbose) {
5314       // decode some bytes around the PC
5315       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5316       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5317       address       lowest = (address) dlinfo.dli_sname;
5318       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
5319       if (begin < lowest)  begin = lowest;
5320       Dl_info dlinfo2;
5321       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5322           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
5323         end = (address) dlinfo2.dli_saddr;
5324       }
5325       Disassembler::decode(begin, end, st);
5326     }
5327     return true;
5328   }
5329   return false;
5330 }
5331 
5332 // The following function has been added to support HotSparc's libjvm.so running
5333 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
5334 // src/solaris/hpi/native_threads in the EVM codebase.
5335 //
5336 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5337 // libraries and should thus be removed. We will leave it behind for a while
5338 // until we no longer want to be able to run on top of 1.3.0 Solaris production
5339 // JDK. See 4341971.
5340 
5341 #define STACK_SLACK 0x800
5342 
5343 extern "C" {
5344   intptr_t sysThreadAvailableStackWithSlack() {
5345     stack_t st;
5346     intptr_t retval, stack_top;
5347     retval = thr_stksegment(&st);
5348     assert(retval == 0, "incorrect return value from thr_stksegment");
5349     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5350     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
5351     stack_top = (intptr_t)st.ss_sp - st.ss_size;
5352     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5353   }
5354 }
5355 
5356 // ObjectMonitor park-unpark infrastructure ...
5357 //
5358 // We implement Solaris and Linux PlatformEvents with the
5359 // obvious condvar-mutex-flag triple.
5360 // Another alternative that works quite well is pipes:
5361 // Each PlatformEvent consists of a pipe-pair.
5362 // The thread associated with the PlatformEvent
5363 // calls park(), which reads from the input end of the pipe.
5364 // Unpark() writes into the other end of the pipe.
5365 // The write-side of the pipe must be set NDELAY.
5366 // Unfortunately pipes consume a large # of handles.
5367 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5368 // Using pipes for the 1st few threads might be workable, however.
5369 //
5370 // park() is permitted to return spuriously.
5371 // Callers of park() should wrap the call to park() in
5372 // an appropriate loop.  A litmus test for the correct
5373 // usage of park is the following: if park() were modified
5374 // to immediately return 0 your code should still work,
5375 // albeit degenerating to a spin loop.
5376 //
5377 // In a sense, park()-unpark() just provides more polite spinning
5378 // and polling with the key difference over naive spinning being
5379 // that a parked thread needs to be explicitly unparked() in order
5380 // to wake up and to poll the underlying condition.
5381 //
5382 // Assumption:
5383 //    Only one parker can exist on an event, which is why we allocate
5384 //    them per-thread. Multiple unparkers can coexist.
5385 //
5386 // _Event transitions in park()
5387 //   -1 => -1 : illegal
5388 //    1 =>  0 : pass - return immediately
5389 //    0 => -1 : block; then set _Event to 0 before returning
5390 //
5391 // _Event transitions in unpark()
5392 //    0 => 1 : just return
5393 //    1 => 1 : just return
5394 //   -1 => either 0 or 1; must signal target thread
5395 //         That is, we can safely transition _Event from -1 to either
5396 //         0 or 1.
5397 //
5398 // _Event serves as a restricted-range semaphore.
5399 //   -1 : thread is blocked, i.e. there is a waiter
5400 //    0 : neutral: thread is running or ready,
5401 //        could have been signaled after a wait started
5402 //    1 : signaled - thread is running or ready
5403 //
5404 // Another possible encoding of _Event would be with
5405 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5406 //
5407 // TODO-FIXME: add DTRACE probes for:
5408 // 1.   Tx parks
5409 // 2.   Ty unparks Tx
5410 // 3.   Tx resumes from park
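//
// A correct-usage sketch (illustrative only; 'condition' and 'ev' are hypothetical):
// because park() may return spuriously, callers always re-test the condition in a loop.
//   while (!condition) {
//     ev->park();   // may wake early; the loop re-checks the condition
//   }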
5411 
5412 
5413 // value determined through experimentation
5414 #define ROUNDINGFIX 11
5415 
5416 // utility to compute the abstime argument to timedwait.
5417 // TODO-FIXME: switch from compute_abstime() to unpackTime().
5418 
5419 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5420   // millis is the relative timeout time
5421   // abstime will be the absolute timeout time
5422   if (millis < 0)  millis = 0;
5423   struct timeval now;
5424   int status = gettimeofday(&now, NULL);
5425   assert(status == 0, "gettimeofday");
5426   jlong seconds = millis / 1000;
5427   jlong max_wait_period;
5428 
5429   if (UseLWPSynchronization) {
5430     // forward port of fix for 4275818 (not sleeping long enough)
5431     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5432     // _lwp_cond_timedwait() used a round_down algorithm rather
5433     // than a round_up. For millis less than our roundfactor
5434     // it rounded down to 0 which doesn't meet the spec.
5435     // For millis > roundfactor we may return a bit sooner, but
5436     // since we can not accurately identify the patch level and
5437     // this has already been fixed in Solaris 9 and 8 we will
5438     // leave it alone rather than always rounding down.
5439 
5440     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
5441     // It appears that when we go directly through Solaris _lwp_cond_timedwait()
5442     // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
5443     max_wait_period = 21000000;
5444   } else {
5445     max_wait_period = 50000000;
5446   }
5447   millis %= 1000;
5448   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5449     seconds = max_wait_period;
5450   }
5451   abstime->tv_sec = now.tv_sec  + seconds;
5452   long       usec = now.tv_usec + millis * 1000;
5453   if (usec >= 1000000) {
5454     abstime->tv_sec += 1;
5455     usec -= 1000000;
5456   }
5457   abstime->tv_nsec = usec * 1000;
5458   return abstime;
5459 }
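
// Worked example (illustrative): for millis == 2500 and now == {tv_sec = S,
// tv_usec = U}, seconds == 2 and millis becomes 500, so abstime ends up as
// {tv_sec = S + 2 (plus 1 if U + 500000 >= 1000000),
//  tv_nsec = ((U + 500000) % 1000000) * 1000}.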
5460 
5461 void os::PlatformEvent::park() {           // AKA: down()
5462   // Transitions for _Event:
5463   //   -1 => -1 : illegal
5464   //    1 =>  0 : pass - return immediately
5465   //    0 => -1 : block; then set _Event to 0 before returning
5466 
5467   // Invariant: Only the thread associated with the Event/PlatformEvent
5468   // may call park().
5469   assert(_nParked == 0, "invariant");
5470 
5471   int v;
5472   for (;;) {
5473     v = _Event;
5474     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5475   }
5476   guarantee(v >= 0, "invariant");
5477   if (v == 0) {
5478     // Do this the hard way by blocking ...
5479     // See http://monaco.sfbay/detail.jsf?cr=5094058.
5480     // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5481     // Only for SPARC >= V8PlusA
5482 #if defined(__sparc) && defined(COMPILER2)
5483     if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5484 #endif
5485     int status = os::Solaris::mutex_lock(_mutex);
5486     assert_status(status == 0, status, "mutex_lock");
5487     guarantee(_nParked == 0, "invariant");
5488     ++_nParked;
5489     while (_Event < 0) {
5490       // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5491       // Treat this the same as if the wait was interrupted
5492       // With usr/lib/lwp going to kernel, always handle ETIME
5493       status = os::Solaris::cond_wait(_cond, _mutex);
5494       if (status == ETIME) status = EINTR;
5495       assert_status(status == 0 || status == EINTR, status, "cond_wait");
5496     }
5497     --_nParked;
5498     _Event = 0;
5499     status = os::Solaris::mutex_unlock(_mutex);
5500     assert_status(status == 0, status, "mutex_unlock");
5501     // Paranoia to ensure our locked and lock-free paths interact
5502     // correctly with each other.
5503     OrderAccess::fence();
5504   }
5505 }
5506 
5507 int os::PlatformEvent::park(jlong millis) {
5508   // Transitions for _Event:
5509   //   -1 => -1 : illegal
5510   //    1 =>  0 : pass - return immediately
5511   //    0 => -1 : block; then set _Event to 0 before returning
5512 
5513   guarantee(_nParked == 0, "invariant");
5514   int v;
5515   for (;;) {
5516     v = _Event;
5517     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5518   }
5519   guarantee(v >= 0, "invariant");
5520   if (v != 0) return OS_OK;
5521 
5522   int ret = OS_TIMEOUT;
5523   timestruc_t abst;
5524   compute_abstime(&abst, millis);
5525 
5526   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5527   // For Solaris SPARC set fprs.FEF=0 prior to parking.
5528   // Only for SPARC >= V8PlusA
5529 #if defined(__sparc) && defined(COMPILER2)
5530   if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5531 #endif
5532   int status = os::Solaris::mutex_lock(_mutex);
5533   assert_status(status == 0, status, "mutex_lock");
5534   guarantee(_nParked == 0, "invariant");
5535   ++_nParked;
5536   while (_Event < 0) {
5537     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5538     assert_status(status == 0 || status == EINTR ||
5539                   status == ETIME || status == ETIMEDOUT,
5540                   status, "cond_timedwait");
5541     if (!FilterSpuriousWakeups) break;                // previous semantics
5542     if (status == ETIME || status == ETIMEDOUT) break;
5543     // We consume and ignore EINTR and spurious wakeups.
5544   }
5545   --_nParked;
5546   if (_Event >= 0) ret = OS_OK;
5547   _Event = 0;
5548   status = os::Solaris::mutex_unlock(_mutex);
5549   assert_status(status == 0, status, "mutex_unlock");
5550   // Paranoia to ensure our locked and lock-free paths interact
5551   // correctly with each other.
5552   OrderAccess::fence();
5553   return ret;
5554 }
5555 
5556 void os::PlatformEvent::unpark() {
5557   // Transitions for _Event:
5558   //    0 => 1 : just return
5559   //    1 => 1 : just return
5560   //   -1 => either 0 or 1; must signal target thread
5561   //         That is, we can safely transition _Event from -1 to either
5562   //         0 or 1.
5563   // See also: "Semaphores in Plan 9" by Mullender & Cox
5564   //
5565   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5566   // that it will take two back-to-back park() calls for the owning
5567   // thread to block. This has the benefit of forcing a spurious return
5568   // from the first park() call after an unpark() call which will help
5569   // shake out uses of park() and unpark() without condition variables.
5570 
5571   if (Atomic::xchg(1, &_Event) >= 0) return;
5572 
5573   // If the thread associated with the event was parked, wake it.
5574   // Wait for the thread assoc with the PlatformEvent to vacate.
5575   int status = os::Solaris::mutex_lock(_mutex);
5576   assert_status(status == 0, status, "mutex_lock");
5577   int AnyWaiters = _nParked;
5578   status = os::Solaris::mutex_unlock(_mutex);
5579   assert_status(status == 0, status, "mutex_unlock");
5580   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5581   if (AnyWaiters != 0) {
5582     // Note that we signal() *after* dropping the lock for "immortal" Events.
5583     // This is safe and avoids a common class of  futile wakeups.  In rare
5584     // circumstances this can cause a thread to return prematurely from
5585     // cond_{timed}wait() but the spurious wakeup is benign and the victim
5586     // will simply re-test the condition and re-park itself.
5587     // This provides particular benefit if the underlying platform does not
5588     // provide wait morphing.
5589     status = os::Solaris::cond_signal(_cond);
5590     assert_status(status == 0, status, "cond_signal");
5591   }
5592 }
5593 
5594 // JSR166
5595 // -------------------------------------------------------
5596 
5597 // The solaris and linux implementations of park/unpark are fairly
5598 // conservative for now, but can be improved. They currently use a
5599 // mutex/condvar pair, plus _counter.
5600 // Park decrements _counter if > 0, else does a condvar wait.  Unpark
5601 // sets count to 1 and signals condvar.  Only one thread ever waits
5602 // on the condvar. Contention seen when trying to park implies that someone
5603 // is unparking you, so don't wait. And spurious returns are fine, so there
5604 // is no need to track notifications.
5605 
5606 #define MAX_SECS 100000000
5607 
5608 // This code is common to linux and solaris and will be moved to a
5609 // common place in dolphin.
5610 //
5611 // The passed in time value is either a relative time in nanoseconds
5612 // or an absolute time in milliseconds. Either way it has to be unpacked
5613 // into suitable seconds and nanoseconds components and stored in the
5614 // given timespec structure.
5615 // Since the given time is a 64-bit value and the time_t used in the timespec
5616 // is only a signed 32-bit value (except on 64-bit Linux), we have to watch for
5617 // overflow if times far in the future are given. Further, on Solaris versions
5618 // prior to 10 there is a restriction (see cond_timedwait) that the specified
5619 // number of seconds, in abstime, is less than current_time + 100,000,000.
5620 // As it will be 28 years before "now + 100000000" overflows, we can
5621 // ignore overflow and just impose a hard limit on seconds using the value
5622 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
5623 // years from "now".
5624 //
5625 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5626   assert(time > 0, "convertTime");
5627 
5628   struct timeval now;
5629   int status = gettimeofday(&now, NULL);
5630   assert(status == 0, "gettimeofday");
5631 
5632   time_t max_secs = now.tv_sec + MAX_SECS;
5633 
5634   if (isAbsolute) {
5635     jlong secs = time / 1000;
5636     if (secs > max_secs) {
5637       absTime->tv_sec = max_secs;
5638     } else {
5639       absTime->tv_sec = secs;
5640     }
5641     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5642   } else {
5643     jlong secs = time / NANOSECS_PER_SEC;
5644     if (secs >= MAX_SECS) {
5645       absTime->tv_sec = max_secs;
5646       absTime->tv_nsec = 0;
5647     } else {
5648       absTime->tv_sec = now.tv_sec + secs;
5649       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5650       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5651         absTime->tv_nsec -= NANOSECS_PER_SEC;
5652         ++absTime->tv_sec; // note: this must be <= max_secs
5653       }
5654     }
5655   }
5656   assert(absTime->tv_sec >= 0, "tv_sec < 0");
5657   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5658   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5659   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5660 }
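
// Worked example (illustrative, relative case): time == 1500000000 ns gives
// secs == 1, so absTime->tv_sec = now.tv_sec + 1 and absTime->tv_nsec =
// 500000000 + now.tv_usec*1000 (wrapping into tv_sec if that reaches 10^9).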
5661 
5662 void Parker::park(bool isAbsolute, jlong time) {
5663   // Ideally we'd do something useful while spinning, such
5664   // as calling unpackTime().
5665 
5666   // Optional fast-path check:
5667   // Return immediately if a permit is available.
5668   // We depend on Atomic::xchg() having full barrier semantics
5669   // since we are doing a lock-free update to _counter.
5670   if (Atomic::xchg(0, &_counter) > 0) return;
5671 
5672   // Optional fast-exit: Check interrupt before trying to wait
5673   Thread* thread = Thread::current();
5674   assert(thread->is_Java_thread(), "Must be JavaThread");
5675   JavaThread *jt = (JavaThread *)thread;
5676   if (Thread::is_interrupted(thread, false)) {
5677     return;
5678   }
5679 
5680   // First, demultiplex/decode time arguments
5681   timespec absTime;
5682   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
5683     return;
5684   }
5685   if (time > 0) {
5686     // Warning: this code might be exposed to the old Solaris time
5687     // round-down bugs.  Grep "ROUNDINGFIX" for details.
5688     unpackTime(&absTime, isAbsolute, time);
5689   }
5690 
5691   // Enter safepoint region
5692   // Beware of deadlocks such as 6317397.
5693   // The per-thread Parker:: _mutex is a classic leaf-lock.
5694   // In particular, a thread must never block on the Threads_lock while
5695   // holding the Parker:: mutex.  If safepoints are pending, both the
5696   // ThreadBlockInVM() CTOR and DTOR may grab the Threads_lock.
5697   ThreadBlockInVM tbivm(jt);
5698 
5699   // Don't wait if we cannot get the lock, since interference arises from
5700   // unblocking.  Also, check for interrupt before trying to wait.
5701   if (Thread::is_interrupted(thread, false) ||
5702       os::Solaris::mutex_trylock(_mutex) != 0) {
5703     return;
5704   }
5705 
5706   int status;
5707 
5708   if (_counter > 0)  { // no wait needed
5709     _counter = 0;
5710     status = os::Solaris::mutex_unlock(_mutex);
5711     assert(status == 0, "invariant");
5712     // Paranoia to ensure our locked and lock-free paths interact
5713     // correctly with each other and Java-level accesses.
5714     OrderAccess::fence();
5715     return;
5716   }
5717 
5718 #ifdef ASSERT
5719   // Don't catch signals while blocked; let the running threads have the signals.
5720   // (This allows a debugger to break into the running thread.)
5721   sigset_t oldsigs;
5722   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
5723   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
5724 #endif
5725 
5726   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5727   jt->set_suspend_equivalent();
5728   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5729 
5730   // Do this the hard way by blocking ...
5731   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5732   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5733   // Only for SPARC >= V8PlusA
5734 #if defined(__sparc) && defined(COMPILER2)
5735   if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5736 #endif
5737 
5738   if (time == 0) {
5739     status = os::Solaris::cond_wait(_cond, _mutex);
5740   } else {
5741     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
5742   }
5743   // Note that an untimed cond_wait() can sometimes return ETIME on older
5744   // versions of Solaris.
5745   assert_status(status == 0 || status == EINTR ||
5746                 status == ETIME || status == ETIMEDOUT,
5747                 status, "cond_timedwait");
5748 
5749 #ifdef ASSERT
5750   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
5751 #endif
5752   _counter = 0;
5753   status = os::Solaris::mutex_unlock(_mutex);
5754   assert_status(status == 0, status, "mutex_unlock");
5755   // Paranoia to ensure our locked and lock-free paths interact
5756   // correctly with each other and Java-level accesses.
5757   OrderAccess::fence();
5758 
5759   // If externally suspended while waiting, re-suspend
5760   if (jt->handle_special_suspend_equivalent_condition()) {
5761     jt->java_suspend_self();
5762   }
5763 }
5764 
5765 void Parker::unpark() {
5766   int status = os::Solaris::mutex_lock(_mutex);
5767   assert(status == 0, "invariant");
5768   const int s = _counter;
5769   _counter = 1;
5770   status = os::Solaris::mutex_unlock(_mutex);
5771   assert(status == 0, "invariant");
5772 
5773   if (s < 1) {
5774     status = os::Solaris::cond_signal(_cond);
5775     assert(status == 0, "invariant");
5776   }
5777 }
5778 
5779 extern char** environ;
5780 
5781 // Run the specified command in a separate process. Return its exit value,
5782 // or -1 on failure (e.g. can't fork a new process).
5783 // Unlike system(), this function can be called from a signal handler. It
5784 // doesn't block SIGINT et al.
5785 int os::fork_and_exec(char* cmd) {
5786   char * argv[4];
5787   argv[0] = (char *)"sh";
5788   argv[1] = (char *)"-c";
5789   argv[2] = cmd;
5790   argv[3] = NULL;
5791 
5792   // fork() is async-signal-safe; fork1() is not, so it can't be used in a signal handler
5793   pid_t pid;
5794   Thread* t = ThreadLocalStorage::get_thread_slow();
5795   if (t != NULL && t->is_inside_signal_handler()) {
5796     pid = fork();
5797   } else {
5798     pid = fork1();
5799   }
5800 
5801   if (pid < 0) {
5802     // fork failed
5803     warning("fork failed: %s", strerror(errno));
5804     return -1;
5805 
5806   } else if (pid == 0) {
5807     // child process
5808 
5809     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
5810     execve("/usr/bin/sh", argv, environ);
5811 
5812     // execve failed
5813     _exit(-1);
5814 
5815   } else  {
5816     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
5817     // care about the actual exit code, for now.
5818 
5819     int status;
5820 
5821     // Wait for the child process to exit.  This returns immediately if
5822     // the child has already exited.
5823     while (waitpid(pid, &status, 0) < 0) {
5824       switch (errno) {
5825       case ECHILD: return 0;
5826       case EINTR: break;
5827       default: return -1;
5828       }
5829     }
5830 
5831     if (WIFEXITED(status)) {
5832       // The child exited normally; get its exit code.
5833       return WEXITSTATUS(status);
5834     } else if (WIFSIGNALED(status)) {
5835       // The child exited because of a signal
5836       // The best value to return is 0x80 + signal number,
5837       // because that is what all Unix shells do, and because
5838       // it allows callers to distinguish between process exit and
5839       // process death by signal.
5840       return 0x80 + WTERMSIG(status);
5841     } else {
5842       // Unknown exit code; pass it through
5843       return status;
5844     }
5845   }
5846 }
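// A minimal usage sketch (hypothetical caller; in the VM this function is
// typically reached from error handling, e.g. to run a -XX:OnError command):
//
//   char cmd[] = "ls /tmp";                  // mutable buffer, per the char* parameter
//   int exit_code = os::fork_and_exec(cmd);
//   if (exit_code == -1) {
//     warning("os::fork_and_exec failed");
//   }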
5847 
5848 // is_headless_jre()
5849 //
5850 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
5851 // in order to report if we are running in a headless jre
5852 //
5853 // Since JDK 8, xawt/libmawt.so has been moved into the same directory
5854 // as libawt.so and renamed libawt_xawt.so
5855 //
5856 bool os::is_headless_jre() {
5857   struct stat statbuf;
5858   char buf[MAXPATHLEN];
5859   char libmawtpath[MAXPATHLEN];
5860   const char *xawtstr  = "/xawt/libmawt.so";
5861   const char *new_xawtstr = "/libawt_xawt.so";
5862   char *p;
5863 
5864   // Get path to libjvm.so
5865   os::jvm_path(buf, sizeof(buf));
5866 
5867   // Get rid of libjvm.so
5868   p = strrchr(buf, '/');
5869   if (p == NULL) {
5870     return false;
5871   } else {
5872     *p = '\0';
5873   }
5874 
5875   // Get rid of client or server
5876   p = strrchr(buf, '/');
5877   if (p == NULL) {
5878     return false;
5879   } else {
5880     *p = '\0';
5881   }
5882 
5883   // check xawt/libmawt.so
5884   strcpy(libmawtpath, buf);
5885   strcat(libmawtpath, xawtstr);
5886   if (::stat(libmawtpath, &statbuf) == 0) return false;
5887 
5888   // check libawt_xawt.so
5889   strcpy(libmawtpath, buf);
5890   strcat(libmawtpath, new_xawtstr);
5891   if (::stat(libmawtpath, &statbuf) == 0) return false;
5892 
5893   return true;
5894 }
5895 
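// Simple I/O wrappers.  The RESTARTABLE macro retries the wrapped call
// while it fails with EINTR.  The asserts below check that the caller is
// in _thread_in_native, the state expected around potentially blocking
// native calls.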
5896 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
5897   size_t res;
5898   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5899          "Assumed _thread_in_native");
5900   RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
5901   return res;
5902 }
5903 
5904 int os::close(int fd) {
5905   return ::close(fd);
5906 }
5907 
5908 int os::socket_close(int fd) {
5909   return ::close(fd);
5910 }
5911 
5912 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5913   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5914          "Assumed _thread_in_native");
5915   RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
5916 }
5917 
5918 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5919   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5920          "Assumed _thread_in_native");
5921   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5922 }
5923 
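// "Raw" variant of send(): identical to os::send() above except that it
// does not assert the _thread_in_native state, so it can be used from
// contexts where that assumption may not hold.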
5924 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5925   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5926 }
5927 
5928 // As both poll and select can be interrupted by signals, we have to be
5929 // prepared to restart the system call after updating the timeout, unless
5930 // a poll() is done with timeout == -1, in which case we repeat with this
5931 // "wait forever" value.
5932 
5933 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
5934   int _result;
5935   _result = ::connect(fd, him, len);
5936 
5937   // On Solaris, when a connect() call is interrupted, the connection
5938   // can be established asynchronously (see 6343810). Subsequent calls
5939   // to connect() must check the errno value which has the semantic
5940   // described below (copied from the connect() man page). Handling
5941   // of asynchronously established connections is required for both
5942   // blocking and non-blocking sockets.
5943   //     EINTR            The  connection  attempt  was   interrupted
5944   //                      before  any data arrived by the delivery of
5945   //                      a signal. The connection, however, will  be
5946   //                      established asynchronously.
5947   //
5948   //     EINPROGRESS      The socket is non-blocking, and the connec-
5949   //                      tion  cannot  be completed immediately.
5950   //
5951   //     EALREADY         The socket is non-blocking,  and a previous
5952   //                      connection  attempt  has  not yet been com-
5953   //                      pleted.
5954   //
5955   //     EISCONN          The socket is already connected.
5956   if (_result == OS_ERR && errno == EINTR) {
5957     // restarting a connect() changes its errno semantics
5958     RESTARTABLE(::connect(fd, him, len), _result);
5959     // undo these changes
5960     if (_result == OS_ERR) {
5961       if (errno == EALREADY) {
5962         errno = EINPROGRESS; // fall through
5963       } else if (errno == EISCONN) {
5964         errno = 0;
5965         return OS_OK;
5966       }
5967     }
5968   }
5969   return _result;
5970 }
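// A hedged sketch of how a caller might complete the asynchronously
// established connection described above (hypothetical code, not part of
// this file): after EINPROGRESS, poll for writability, then read the final
// status with getsockopt(SO_ERROR).
//
//   struct pollfd pfd = { fd, POLLOUT, 0 };
//   if (::poll(&pfd, 1, timeout_ms) > 0) {
//     int err = 0;
//     socklen_t len = sizeof(err);
//     ::getsockopt(fd, SOL_SOCKET, SO_ERROR, (char*)&err, &len);
//     // err == 0 means the connection completed successfully
//   }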
5971 
5972 // Get the default path to the core file. On Solaris the core file is
5973 // normally written to the process's current working directory, so return
5974 // that. Returns the length of the resulting string.
5974 int os::get_core_path(char* buffer, size_t bufferSize) {
5975   const char* p = get_current_directory(buffer, bufferSize);
5976 
5977   if (p == NULL) {
5978     assert(p != NULL, "failed to get current directory");
5979     return 0;
5980   }
5981 
5982   return strlen(buffer);
5983 }
5984 
5985 #ifndef PRODUCT
5986 void TestReserveMemorySpecial_test() {
5987   // No tests available for this platform
5988 }
5989 #endif