1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "jvm_solaris.h"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/filemap.hpp"
  37 #include "mutex_solaris.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "os_share_solaris.hpp"
  40 #include "os_solaris.inline.hpp"
  41 #include "prims/jniFastGetField.hpp"
  42 #include "prims/jvm.h"
  43 #include "prims/jvm_misc.hpp"
  44 #include "runtime/arguments.hpp"
  45 #include "runtime/atomic.inline.hpp"
  46 #include "runtime/extendedPC.hpp"
  47 #include "runtime/globals.hpp"
  48 #include "runtime/interfaceSupport.hpp"
  49 #include "runtime/java.hpp"
  50 #include "runtime/javaCalls.hpp"
  51 #include "runtime/mutexLocker.hpp"
  52 #include "runtime/objectMonitor.hpp"
  53 #include "runtime/orderAccess.inline.hpp"
  54 #include "runtime/osThread.hpp"
  55 #include "runtime/perfMemory.hpp"
  56 #include "runtime/sharedRuntime.hpp"
  57 #include "runtime/statSampler.hpp"
  58 #include "runtime/stubRoutines.hpp"
  59 #include "runtime/thread.inline.hpp"
  60 #include "runtime/threadCritical.hpp"
  61 #include "runtime/timer.hpp"
  62 #include "runtime/vm_version.hpp"
  63 #include "services/attachListener.hpp"
  64 #include "services/memTracker.hpp"
  65 #include "services/runtimeService.hpp"
  66 #include "utilities/decoder.hpp"
  67 #include "utilities/defaultStream.hpp"
  68 #include "utilities/events.hpp"
  69 #include "utilities/growableArray.hpp"
  70 #include "utilities/vmError.hpp"
  71 
  72 // put OS-includes here
  73 # include <dlfcn.h>
  74 # include <errno.h>
  75 # include <exception>
  76 # include <link.h>
  77 # include <poll.h>
  78 # include <pthread.h>
  79 # include <pwd.h>
  80 # include <schedctl.h>
  81 # include <setjmp.h>
  82 # include <signal.h>
  83 # include <stdio.h>
  84 # include <alloca.h>
  85 # include <sys/filio.h>
  86 # include <sys/ipc.h>
  87 # include <sys/lwp.h>
  88 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
  89 # include <sys/mman.h>
  90 # include <sys/processor.h>
  91 # include <sys/procset.h>
  92 # include <sys/pset.h>
  93 # include <sys/resource.h>
  94 # include <sys/shm.h>
  95 # include <sys/socket.h>
  96 # include <sys/stat.h>
  97 # include <sys/systeminfo.h>
  98 # include <sys/time.h>
  99 # include <sys/times.h>
 100 # include <sys/types.h>
 101 # include <sys/wait.h>
 102 # include <sys/utsname.h>
 103 # include <thread.h>
 104 # include <unistd.h>
 105 # include <sys/priocntl.h>
 106 # include <sys/rtpriocntl.h>
 107 # include <sys/tspriocntl.h>
 108 # include <sys/iapriocntl.h>
 109 # include <sys/fxpriocntl.h>
 110 # include <sys/loadavg.h>
 111 # include <string.h>
 113 
 114 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
 115 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
 116 
 117 #define MAX_PATH (2 * K)
 118 
 119 // for timer info max values which include all bits
 120 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 121 
 122 
 123 // Here are some liblgrp types from sys/lgrp_user.h to be able to
 124 // compile on older systems without this header file.
 125 
 126 #ifndef MADV_ACCESS_LWP
 127   #define  MADV_ACCESS_LWP   7       /* next LWP to access heavily */
 128 #endif
 129 #ifndef MADV_ACCESS_MANY
 130   #define  MADV_ACCESS_MANY  8       /* many processes to access heavily */
 131 #endif
 132 
 133 #ifndef LGRP_RSRC_CPU
 134   #define LGRP_RSRC_CPU      0       /* CPU resources */
 135 #endif
 136 #ifndef LGRP_RSRC_MEM
 137   #define LGRP_RSRC_MEM      1       /* memory resources */
 138 #endif
 139 
 140 // see thr_setprio(3T) for the basis of these numbers
 141 #define MinimumPriority 0
 142 #define NormalPriority  64
 143 #define MaximumPriority 127
 144 
 145 // Values for ThreadPriorityPolicy == 1
 146 int prio_policy1[CriticalPriority+1] = {
 147   -99999,  0, 16,  32,  48,  64,
 148           80, 96, 112, 124, 127, 127 };
 149 
 150 // System parameters used internally
 151 static clock_t clock_tics_per_sec = 100;
 152 
 153 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
 154 static bool enabled_extended_FILE_stdio = false;
 155 
 156 // For diagnostics to print a message once. see run_periodic_checks
 157 static bool check_addr0_done = false;
 158 static sigset_t check_signal_done;
 159 static bool check_signals = true;
 160 
 161 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
 162 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
 163 
 164 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
 165 
 166 
 167 // "default" initializers for missing libc APIs
 168 extern "C" {
 169   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 170   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
 171 
 172   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 173   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
 174 }
 175 
 176 // "default" initializers for pthread-based synchronization
 177 extern "C" {
 178   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 179   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 180 }
 181 
 182 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 183 
 184 // Thread Local Storage
 185 // This is common to all Solaris platforms so it is defined here,
 186 // in this common file.
 187 // The declarations are in the os_cpu threadLS*.hpp files.
 188 //
 189 // Static member initialization for TLS
 190 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
 191 
 192 #ifndef PRODUCT
 193   #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
 194 
 195 int ThreadLocalStorage::_tcacheHit = 0;
 196 int ThreadLocalStorage::_tcacheMiss = 0;
 197 
 198 void ThreadLocalStorage::print_statistics() {
 199   int total = _tcacheMiss+_tcacheHit;
 200   tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
 201                 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
 202 }
 203   #undef _PCT
 204 #endif // PRODUCT
 205 
 206 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
 207                                                         int index) {
 208   Thread *thread = get_thread_slow();
 209   if (thread != NULL) {
 210     address sp = os::current_stack_pointer();
 211     guarantee(thread->_stack_base == NULL ||
 212               (sp <= thread->_stack_base &&
 213               sp >= thread->_stack_base - thread->_stack_size) ||
 214               is_error_reported(),
 215               "sp must be inside of selected thread stack");
 216 
 217     thread->set_self_raw_id(raw_id);  // mark for quick retrieval
 218     _get_thread_cache[index] = thread;
 219   }
 220   return thread;
 221 }
 222 
 223 
 224 static const double all_zero[sizeof(Thread) / sizeof(double) + 1] = {0};
 225 #define NO_CACHED_THREAD ((Thread*)all_zero)
 226 
 227 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
 228 
 229   // Store the new value before updating the cache to prevent a race
 230   // between get_thread_via_cache_slowly() and this store operation.
 231   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
 232 
 233   // Update thread cache with new thread if setting on thread create,
 234   // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
 235   uintptr_t raw = pd_raw_thread_id();
 236   int ix = pd_cache_index(raw);
 237   _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
 238 }
 239 
 240 void ThreadLocalStorage::pd_init() {
 241   for (int i = 0; i < _pd_cache_size; i++) {
 242     _get_thread_cache[i] = NO_CACHED_THREAD;
 243   }
 244 }
 245 
 246 // Invalidate all the caches (happens to be the same as pd_init).
 247 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
 248 
 249 #undef NO_CACHED_THREAD
 250 
 251 // END Thread Local Storage
 252 
 253 static inline size_t adjust_stack_size(address base, size_t size) {
 254   if ((ssize_t)size < 0) {
 255     // 4759953: Compensate for ridiculous stack size.
 256     size = max_intx;
 257   }
 258   if (size > (size_t)base) {
 259     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
 260     size = (size_t)base;
 261   }
 262   return size;
 263 }
 264 
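// Query the current thread's stack segment via thr_stksegment() and
// sanity-adjust the reported size (see adjust_stack_size above).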
 265 static inline stack_t get_stack_info() {
 266   stack_t st;
 267   int retval = thr_stksegment(&st);
 268   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
 269   assert(retval == 0, "incorrect return value from thr_stksegment");
 270   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
 271   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
 272   return st;
 273 }
 274 
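// Stack base (highest address) of the current thread's stack.  For the
// primordial thread the value is computed once and cached in
// os::Solaris::_main_stack_base (part of the 4352906 workaround below).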
 275 address os::current_stack_base() {
 276   int r = thr_main();
 277   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
 278   bool is_primordial_thread = r;
 279 
 280   // Workaround 4352906, avoid calls to thr_stksegment by
 281   // thr_main after the first one (it looks like we trash
 282   // some data, causing the value for ss_sp to be incorrect).
 283   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
 284     stack_t st = get_stack_info();
 285     if (is_primordial_thread) {
 286       // cache initial value of stack base
 287       os::Solaris::_main_stack_base = (address)st.ss_sp;
 288     }
 289     return (address)st.ss_sp;
 290   } else {
 291     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
 292     return os::Solaris::_main_stack_base;
 293   }
 294 }
 295 
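// Size of the current thread's stack.  For the primordial thread the size is
// derived from RLIMIT_STACK; for all other threads from thr_stksegment().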
 296 size_t os::current_stack_size() {
 297   size_t size;
 298 
 299   int r = thr_main();
 300   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
 301   if (!r) {
 302     size = get_stack_info().ss_size;
 303   } else {
 304     struct rlimit limits;
 305     getrlimit(RLIMIT_STACK, &limits);
 306     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
 307   }
 308   // base may not be page aligned
 309   address base = current_stack_base();
 310   address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
 311   return (size_t)(base - bottom);
 312 }
 313 
 314 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
 315   return localtime_r(clock, res);
 316 }
 317 
 318 void os::Solaris::try_enable_extended_io() {
 319   typedef int (*enable_extended_FILE_stdio_t)(int, int);
 320 
 321   if (!UseExtendedFileIO) {
 322     return;
 323   }
 324 
 325   enable_extended_FILE_stdio_t enabler =
 326     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
 327                                          "enable_extended_FILE_stdio");
 328   if (enabler) {
 329     enabler(-1, -1);
 330   }
 331 }
 332 
 333 static int _processors_online = 0;
 334 
 335 jint os::Solaris::_os_thread_limit = 0;
 336 volatile jint os::Solaris::_os_thread_count = 0;
 337 
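// Currently available (free) physical memory, in bytes: the number of
// available physical pages times the page size.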
 338 julong os::available_memory() {
 339   return Solaris::available_memory();
 340 }
 341 
 342 julong os::Solaris::available_memory() {
 343   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
 344 }
 345 
 346 julong os::Solaris::_physical_memory = 0;
 347 
 348 julong os::physical_memory() {
 349   return Solaris::physical_memory();
 350 }
 351 
 352 static hrtime_t first_hrtime = 0;
 353 static const hrtime_t hrtime_hz = 1000*1000*1000;
 354 static volatile hrtime_t max_hrtime = 0;
 355 
 356 
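// Cache static system information (configured and online processor counts,
// physical memory size) early during VM startup.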
 357 void os::Solaris::initialize_system_info() {
 358   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
 359   _processors_online = sysconf(_SC_NPROCESSORS_ONLN);
 360   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) *
 361                                      (julong)sysconf(_SC_PAGESIZE);
 362 }
 363 
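// Number of processors the VM can actually use: the size of the processor
// set this process is bound to, if any; otherwise the number of online CPUs.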
 364 int os::active_processor_count() {
 365   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 366   pid_t pid = getpid();
 367   psetid_t pset = PS_NONE;
 368   // Are we running in a processor set or is there any processor set around?
 369   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
 370     uint_t pset_cpus;
 371     // Query the number of cpus available to us.
 372     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
 373       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
 374       _processors_online = pset_cpus;
 375       return pset_cpus;
 376     }
 377   }
 378   // Otherwise return number of online cpus
 379   return online_cpus;
 380 }
 381 
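// Collect the ids of the processors in the given processor set.  The caller
// is responsible for freeing *id_array with FREE_C_HEAP_ARRAY.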
 382 static bool find_processors_in_pset(psetid_t        pset,
 383                                     processorid_t** id_array,
 384                                     uint_t*         id_length) {
 385   bool result = false;
 386   // Find the number of processors in the processor set.
 387   if (pset_info(pset, NULL, id_length, NULL) == 0) {
 388     // Make up an array to hold their ids.
 389     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 390     // Fill in the array with their processor ids.
 391     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
 392       result = true;
 393     }
 394   }
 395   return result;
 396 }
 397 
 398 // Callers of find_processors_online() must tolerate imprecise results --
 399 // the system configuration can change asynchronously because of DR
 400 // or explicit psradm operations.
 401 //
 402 // We also need to take care that the loop (below) terminates as the
 403 // number of processors online can change between the _SC_NPROCESSORS_ONLN
 404 // request and the loop that builds the list of processor ids.   Unfortunately
 405 // there's no reliable way to determine the maximum valid processor id,
 406 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
 407 // man pages, which claim the processor id set is "sparse, but
 408 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
 409 // exit the loop.
 410 //
 411 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
 412 // not available on S8.0.
 413 
 414 static bool find_processors_online(processorid_t** id_array,
 415                                    uint*           id_length) {
 416   const processorid_t MAX_PROCESSOR_ID = 100000;
 417   // Find the number of processors online.
 418   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
 419   // Make up an array to hold their ids.
 420   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 421   // Processors need not be numbered consecutively.
 422   long found = 0;
 423   processorid_t next = 0;
 424   while (found < *id_length && next < MAX_PROCESSOR_ID) {
 425     processor_info_t info;
 426     if (processor_info(next, &info) == 0) {
 427       // NB, PI_NOINTR processors are effectively online ...
 428       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
 429         (*id_array)[found] = next;
 430         found += 1;
 431       }
 432     }
 433     next += 1;
 434   }
 435   if (found < *id_length) {
 436     // The loop above didn't identify the expected number of processors.
 437     // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
 438     // and re-running the loop, above, but there's no guarantee of progress
 439     // if the system configuration is in flux.  Instead, we just return what
 440     // we've got.  Note that in the worst case find_processors_online() could
 441     // return an empty set.  (As a fall-back in the case of the empty set we
 442     // could just return the ID of the current processor).
 443     *id_length = found;
 444   }
 445 
 446   return true;
 447 }
 448 
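// Pick distribution_length distinct processor ids out of id_array, trying to
// spread them across "boards" of ProcessDistributionStride processors each.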
 449 static bool assign_distribution(processorid_t* id_array,
 450                                 uint           id_length,
 451                                 uint*          distribution,
 452                                 uint           distribution_length) {
 453   // We assume we can assign processorid_t's to uint's.
 454   assert(sizeof(processorid_t) == sizeof(uint),
 455          "can't convert processorid_t to uint");
 456   // Quick check to see if we won't succeed.
 457   if (id_length < distribution_length) {
 458     return false;
 459   }
 460   // Assign processor ids to the distribution.
 461   // Try to shuffle processors to distribute work across boards,
 462   // assuming 4 processors per board.
 463   const uint processors_per_board = ProcessDistributionStride;
 464   // Find the maximum processor id.
 465   processorid_t max_id = 0;
 466   for (uint m = 0; m < id_length; m += 1) {
 467     max_id = MAX2(max_id, id_array[m]);
 468   }
 469   // The next id, to limit loops.
 470   const processorid_t limit_id = max_id + 1;
 471   // Make up markers for available processors.
 472   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
 473   for (uint c = 0; c < limit_id; c += 1) {
 474     available_id[c] = false;
 475   }
 476   for (uint a = 0; a < id_length; a += 1) {
 477     available_id[id_array[a]] = true;
 478   }
 479   // Step by "boards", then by "slot", copying to "assigned".
 480   // NEEDS_CLEANUP: The assignment of processors should be stateful,
 481   //                remembering which processors have been assigned by
 482   //                previous calls, etc., so as to distribute several
 483   //                independent calls of this method.  It would be nice
 484   //                to have an API that lets us ask how many processes
 485   //                are bound to a processor, but we don't have that,
 486   //                either.
 487   //                In the short term, "board" is static so that
 488   //                subsequent distributions don't all start at board 0.
 489   static uint board = 0;
 490   uint assigned = 0;
 491   // Until we've found enough processors ....
 492   while (assigned < distribution_length) {
 493     // ... find the next available processor in the board.
 494     for (uint slot = 0; slot < processors_per_board; slot += 1) {
 495       uint try_id = board * processors_per_board + slot;
 496       if ((try_id < limit_id) && (available_id[try_id] == true)) {
 497         distribution[assigned] = try_id;
 498         available_id[try_id] = false;
 499         assigned += 1;
 500         break;
 501       }
 502     }
 503     board += 1;
 504     if (board * processors_per_board + 0 >= limit_id) {
 505       board = 0;
 506     }
 507   }
 508   if (available_id != NULL) {
 509     FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
 510   }
 511   return true;
 512 }
 513 
 514 void os::set_native_thread_name(const char *name) {
 515   // Not yet implemented.
 516   return;
 517 }
 518 
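// Fill "distribution" with "length" distinct processor ids chosen from the
// processors currently available to this process (processor-set aware).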
 519 bool os::distribute_processes(uint length, uint* distribution) {
 520   bool result = false;
 521   // Find the processor id's of all the available CPUs.
 522   processorid_t* id_array  = NULL;
 523   uint           id_length = 0;
 524   // There are some races between querying information and using it,
 525   // since processor sets can change dynamically.
 526   psetid_t pset = PS_NONE;
 527   // Are we running in a processor set?
 528   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
 529     result = find_processors_in_pset(pset, &id_array, &id_length);
 530   } else {
 531     result = find_processors_online(&id_array, &id_length);
 532   }
 533   if (result == true) {
 534     if (id_length >= length) {
 535       result = assign_distribution(id_array, id_length, distribution, length);
 536     } else {
 537       result = false;
 538     }
 539   }
 540   if (id_array != NULL) {
 541     FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
 542   }
 543   return result;
 544 }
 545 
 546 bool os::bind_to_processor(uint processor_id) {
 547   // We assume that a processorid_t can be stored in a uint.
 548   assert(sizeof(uint) == sizeof(processorid_t),
 549          "can't convert uint to processorid_t");
 550   int bind_result =
 551     processor_bind(P_LWPID,                       // bind LWP.
 552                    P_MYID,                        // bind current LWP.
 553                    (processorid_t) processor_id,  // id.
 554                    NULL);                         // don't return old binding.
 555   return (bind_result == 0);
 556 }
 557 
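// Copy the value of the environment variable "name" into "buffer".  Returns
// false (and stores an empty string) if the variable is unset or its value
// does not fit in "len" bytes.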
 558 bool os::getenv(const char* name, char* buffer, int len) {
 559   char* val = ::getenv(name);
 560   if (val == NULL || strlen(val) + 1 > len) {
 561     if (len > 0) buffer[0] = 0; // return a null string
 562     return false;
 563   }
 564   strcpy(buffer, val);
 565   return true;
 566 }
 567 
 568 
 569 // Return true if the process has special privileges, i.e. it is running
     // set-uid/set-gid (its effective uid/gid differs from its real uid/gid).
 570 
 571 bool os::have_special_privileges() {
 572   static bool init = false;
 573   static bool privileges = false;
 574   if (!init) {
 575     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 576     init = true;
 577   }
 578   return privileges;
 579 }
 580 
 581 
 582 void os::init_system_properties_values() {
 583   // The next steps are taken in the product version:
 584   //
 585   // Obtain the JAVA_HOME value from the location of libjvm.so.
 586   // This library should be located at:
 587   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
 588   //
 589   // If "/jre/lib/" appears at the right place in the path, then we
 590   // assume libjvm.so is installed in a JDK and we use this path.
 591   //
 592   // Otherwise exit with message: "Could not create the Java virtual machine."
 593   //
 594   // The following extra steps are taken in the debugging version:
 595   //
 596   // If "/jre/lib/" does NOT appear at the right place in the path,
 597   // then instead of exiting we check for the $JAVA_HOME environment variable.
 598   //
 599   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
 600   // then we append a fake suffix "hotspot/libjvm.so" to this path so
 601   // it looks like libjvm.so is installed there
 602   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
 603   //
 604   // Otherwise exit.
 605   //
 606   // Important note: if the location of libjvm.so changes this
 607   // code needs to be changed accordingly.
 608 
 609 // Base path of extensions installed on the system.
 610 #define SYS_EXT_DIR     "/usr/jdk/packages"
 611 #define EXTENSIONS_DIR  "/lib/ext"
 612 
 613   char cpu_arch[12];
 614   // Buffer that fits several sprintfs.
 615   // Note that the space for the colon and the trailing null are provided
 616   // by the nulls included by the sizeof operator.
 617   const size_t bufsize =
 618     MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
 619          sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch), // invariant ld_library_path
 620          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir
 621   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 622 
 623   // sysclasspath, java_home, dll_dir
 624   {
 625     char *pslash;
 626     os::jvm_path(buf, bufsize);
 627 
 628     // Found the full path to libjvm.so.
 629     // Now cut the path to <java_home>/jre if we can.
 630     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 631     pslash = strrchr(buf, '/');
 632     if (pslash != NULL) {
 633       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 634     }
 635     Arguments::set_dll_dir(buf);
 636 
 637     if (pslash != NULL) {
 638       pslash = strrchr(buf, '/');
 639       if (pslash != NULL) {
 640         *pslash = '\0';          // Get rid of /<arch>.
 641         pslash = strrchr(buf, '/');
 642         if (pslash != NULL) {
 643           *pslash = '\0';        // Get rid of /lib.
 644         }
 645       }
 646     }
 647     Arguments::set_java_home(buf);
 648     set_boot_path('/', ':');
 649   }
 650 
 651   // Where to look for native libraries.
 652   {
 653     // Use dlinfo() to determine the correct java.library.path.
 654     //
 655     // If we're launched by the Java launcher, and the user
 656     // does not set java.library.path explicitly on the commandline,
 657     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
 658     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
 659     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
 660     // /usr/lib), which is exactly what we want.
 661     //
 662     // If the user does set java.library.path, it completely
 663     // overwrites this setting, and always has.
 664     //
 665     // If we're not launched by the Java launcher, we may
 666     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
 667     // settings.  Again, dlinfo does exactly what we want.
 668 
 669     Dl_serinfo     info_sz, *info = &info_sz;
 670     Dl_serpath     *path;
 671     char           *library_path;
 672     char           *common_path = buf;
 673 
 674     // Determine search path count and required buffer size.
 675     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
 676       FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 677       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
 678     }
 679 
 680     // Allocate new buffer and initialize.
 681     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
 682     info->dls_size = info_sz.dls_size;
 683     info->dls_cnt = info_sz.dls_cnt;
 684 
 685     // Obtain search path information.
 686     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
 687       FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 688       FREE_C_HEAP_ARRAY(char, info, mtInternal);
 689       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
 690     }
 691 
 692     path = &info->dls_serpath[0];
 693 
 694     // Note: Due to a legacy implementation, most of the library path
 695     // is set in the launcher. This was to accommodate linking restrictions
 696     // on legacy Solaris implementations (which are no longer supported).
 697     // Eventually, all the library path setting will be done here.
 698     //
 699     // However, to prevent the proliferation of improperly built native
 700     // libraries, the new path component /usr/jdk/packages is added here.
 701 
 702     // Determine the actual CPU architecture.
 703     sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
 704 #ifdef _LP64
 705     // If we are a 64-bit vm, perform the following translations:
 706     //   sparc   -> sparcv9
 707     //   i386    -> amd64
 708     if (strcmp(cpu_arch, "sparc") == 0) {
 709       strcat(cpu_arch, "v9");
 710     } else if (strcmp(cpu_arch, "i386") == 0) {
 711       strcpy(cpu_arch, "amd64");
 712     }
 713 #endif
 714 
 715     // Construct the invariant part of ld_library_path.
 716     sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
 717 
 718     // Struct size is more than sufficient for the path components obtained
 719     // through the dlinfo() call, so only add additional space for the path
 720     // components explicitly added here.
 721     size_t library_path_size = info->dls_size + strlen(common_path);
 722     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
 723     library_path[0] = '\0';
 724 
 725     // Construct the desired Java library path from the linker's library
 726     // search path.
 727     //
 728     // For compatibility, it is optimal that we insert the additional path
 729     // components specific to the Java VM after those components specified
 730     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
 731     // infrastructure.
 732     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
 733       strcpy(library_path, common_path);
 734     } else {
 735       int inserted = 0;
 736       int i;
 737       for (i = 0; i < info->dls_cnt; i++, path++) {
 738         uint_t flags = path->dls_flags & LA_SER_MASK;
 739         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
 740           strcat(library_path, common_path);
 741           strcat(library_path, os::path_separator());
 742           inserted = 1;
 743         }
 744         strcat(library_path, path->dls_name);
 745         strcat(library_path, os::path_separator());
 746       }
 747       // Eliminate trailing path separator.
 748       library_path[strlen(library_path)-1] = '\0';
 749     }
 750 
 751     // happens before argument parsing - can't use a trace flag
 752     // tty->print_raw("init_system_properties_values: native lib path: ");
 753     // tty->print_raw_cr(library_path);
 754 
 755     // Callee copies into its own buffer.
 756     Arguments::set_library_path(library_path);
 757 
 758     FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
 759     FREE_C_HEAP_ARRAY(char, info, mtInternal);
 760   }
 761 
 762   // Extensions directories.
 763   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
 764   Arguments::set_ext_dirs(buf);
 765 
 766   FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 767 
 768 #undef SYS_EXT_DIR
 769 #undef EXTENSIONS_DIR
 770 }
 771 
 772 void os::breakpoint() {
 773   BREAKPOINT;
 774 }
 775 
 776 bool os::obsolete_option(const JavaVMOption *option) {
 777   if (!strncmp(option->optionString, "-Xt", 3)) {
 778     return true;
 779   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
 780     return true;
 781   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
 782     return true;
 783   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
 784     return true;
 785   }
 786   return false;
 787 }
 788 
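// Return true if "sp" lies within the stack bounds recorded for "thread".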
 789 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
 790   address  stackStart  = (address)thread->stack_base();
 791   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
 792   if (sp < stackStart && sp >= stackEnd) return true;
 793   return false;
 794 }
 795 
 796 extern "C" void breakpoint() {
 797   // use debugger to set breakpoint here
 798 }
 799 
 800 static thread_t main_thread;
 801 
 802 // Thread start routine for all new Java threads
 803 extern "C" void* java_start(void* thread_addr) {
 804   // Try to randomize the cache line index of hot stack frames.
 805   // This helps when threads of the same stack traces evict each other's
 806   // cache lines. The threads can be either from the same JVM instance, or
 807   // from different JVM instances. The benefit is especially true for
 808   // processors with hyperthreading technology.
 809   static int counter = 0;
 810   int pid = os::current_process_id();
 811   alloca(((pid ^ counter++) & 7) * 128);
 812 
 813   int prio;
 814   Thread* thread = (Thread*)thread_addr;
 815   OSThread* osthr = thread->osthread();
 816 
 817   osthr->set_lwp_id(_lwp_self());  // Store lwp in case we are bound
 818   thread->_schedctl = (void *) schedctl_init();
 819 
 820   if (UseNUMA) {
 821     int lgrp_id = os::numa_get_group_id();
 822     if (lgrp_id != -1) {
 823       thread->set_lgrp_id(lgrp_id);
 824     }
 825   }
 826 
 827   // If the creator called set priority before we started,
 828   // we need to call set_native_priority now that we have an lwp.
 829   // We used to get the priority from thr_getprio (we called
 830   // thr_setprio way back in create_thread) and pass it to
 831   // set_native_priority, but Solaris scales the priority
 832   // in java_to_os_priority, so when we read it back here,
 833   // we pass trash to set_native_priority instead of what's
 834   // in java_to_os_priority. So we save the native priority
 835   // in the osThread and recall it here.
 836 
 837   if (osthr->thread_id() != -1) {
 838     if (UseThreadPriorities) {
 839       int prio = osthr->native_priority();
 840       if (ThreadPriorityVerbose) {
 841         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
 842                       INTPTR_FORMAT ", setting priority: %d\n",
 843                       osthr->thread_id(), osthr->lwp_id(), prio);
 844       }
 845       os::set_native_priority(thread, prio);
 846     }
 847   } else if (ThreadPriorityVerbose) {
 848     warning("Can't set priority in _start routine, thread id hasn't been set\n");
 849   }
 850 
 851   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 852 
 853   // initialize signal mask for this thread
 854   os::Solaris::hotspot_sigmask(thread);
 855 
 856   thread->run();
 857 
 858   // One less thread is executing
 859   // When the VMThread gets here, the main thread may have already exited
 860   // which frees the CodeHeap containing the Atomic::dec code
 861   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 862     Atomic::dec(&os::Solaris::_os_thread_count);
 863   }
 864 
 865   if (UseDetachedThreads) {
 866     thr_exit(NULL);
 867     ShouldNotReachHere();
 868   }
 869   return NULL;
 870 }
 871 
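// Wrap an already-existing Solaris thread (identified by thread_id) in a new
// OSThread object; used for the main thread and for attached threads.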
 872 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
 873   // Allocate the OSThread object
 874   OSThread* osthread = new OSThread(NULL, NULL);
 875   if (osthread == NULL) return NULL;
 876 
 877   // Store info on the Solaris thread into the OSThread
 878   osthread->set_thread_id(thread_id);
 879   osthread->set_lwp_id(_lwp_self());
 880   thread->_schedctl = (void *) schedctl_init();
 881 
 882   if (UseNUMA) {
 883     int lgrp_id = os::numa_get_group_id();
 884     if (lgrp_id != -1) {
 885       thread->set_lgrp_id(lgrp_id);
 886     }
 887   }
 888 
 889   if (ThreadPriorityVerbose) {
 890     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
 891                   osthread->thread_id(), osthread->lwp_id());
 892   }
 893 
 894   // Initial thread state is INITIALIZED, not SUSPENDED
 895   osthread->set_state(INITIALIZED);
 896 
 897   return osthread;
 898 }
 899 
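// Establish the HotSpot signal mask for the current thread: save the caller's
// mask, unblock the signals the VM needs, and ensure that only the VM thread
// receives BREAK_SIGNAL (unless -Xrs/ReduceSignalUsage is in effect).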
 900 void os::Solaris::hotspot_sigmask(Thread* thread) {
 901   // Save caller's signal mask
 902   sigset_t sigmask;
 903   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
 904   OSThread *osthread = thread->osthread();
 905   osthread->set_caller_sigmask(sigmask);
 906 
 907   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
 908   if (!ReduceSignalUsage) {
 909     if (thread->is_VM_thread()) {
 910       // Only the VM thread handles BREAK_SIGNAL ...
 911       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
 912     } else {
 913       // ... all other threads block BREAK_SIGNAL
 914       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
 915       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
 916     }
 917   }
 918 }
 919 
 920 bool os::create_attached_thread(JavaThread* thread) {
 921 #ifdef ASSERT
 922   thread->verify_not_published();
 923 #endif
 924   OSThread* osthread = create_os_thread(thread, thr_self());
 925   if (osthread == NULL) {
 926     return false;
 927   }
 928 
 929   // Initial thread state is RUNNABLE
 930   osthread->set_state(RUNNABLE);
 931   thread->set_osthread(osthread);
 932 
 933   // initialize signal mask for this thread
 934   // and save the caller's signal mask
 935   os::Solaris::hotspot_sigmask(thread);
 936 
 937   return true;
 938 }
 939 
 940 bool os::create_main_thread(JavaThread* thread) {
 941 #ifdef ASSERT
 942   thread->verify_not_published();
 943 #endif
 944   if (_starting_thread == NULL) {
 945     _starting_thread = create_os_thread(thread, main_thread);
 946     if (_starting_thread == NULL) {
 947       return false;
 948     }
 949   }
 950 
 951   // The primordial thread is runnable from the start
 952   _starting_thread->set_state(RUNNABLE);
 953 
 954   thread->set_osthread(_starting_thread);
 955 
 956   // initialize signal mask for this thread
 957   // and save the caller's signal mask
 958   os::Solaris::hotspot_sigmask(thread);
 959 
 960   return true;
 961 }
 962 
 963 
 964 bool os::create_thread(Thread* thread, ThreadType thr_type,
 965                        size_t stack_size) {
 966   // Allocate the OSThread object
 967   OSThread* osthread = new OSThread(NULL, NULL);
 968   if (osthread == NULL) {
 969     return false;
 970   }
 971 
 972   if (ThreadPriorityVerbose) {
 973     char *thrtyp;
 974     switch (thr_type) {
 975     case vm_thread:
 976       thrtyp = (char *)"vm";
 977       break;
 978     case cgc_thread:
 979       thrtyp = (char *)"cgc";
 980       break;
 981     case pgc_thread:
 982       thrtyp = (char *)"pgc";
 983       break;
 984     case java_thread:
 985       thrtyp = (char *)"java";
 986       break;
 987     case compiler_thread:
 988       thrtyp = (char *)"compiler";
 989       break;
 990     case watcher_thread:
 991       thrtyp = (char *)"watcher";
 992       break;
 993     default:
 994       thrtyp = (char *)"unknown";
 995       break;
 996     }
 997     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
 998   }
 999 
1000   // Calculate stack size if it's not specified by caller.
1001   if (stack_size == 0) {
1002     // The default stack size is 1M (2M for LP64).
1003     stack_size = (BytesPerWord >> 2) * K * K;
1004 
1005     switch (thr_type) {
1006     case os::java_thread:
1007       // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag.
1008       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
1009       break;
1010     case os::compiler_thread:
1011       if (CompilerThreadStackSize > 0) {
1012         stack_size = (size_t)(CompilerThreadStackSize * K);
1013         break;
1014       } // else fall through:
1015         // use VMThreadStackSize if CompilerThreadStackSize is not defined
1016     case os::vm_thread:
1017     case os::pgc_thread:
1018     case os::cgc_thread:
1019     case os::watcher_thread:
1020       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1021       break;
1022     }
1023   }
1024   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1025 
1026   // Initial state is ALLOCATED but not INITIALIZED
1027   osthread->set_state(ALLOCATED);
1028 
1029   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
1030     // We have a lot of threads. Check whether we still have enough unreserved
1031     // address space by trying to reserve a chunk (VirtualMemoryBangSize) and
1032     // releasing it again.
1033     const size_t VirtualMemoryBangSize = 20*K*K;
1034     char* mem = os::reserve_memory(VirtualMemoryBangSize);
1035     if (mem == NULL) {
1036       delete osthread;
1037       return false;
1038     } else {
1039       // Release the memory again
1040       os::release_memory(mem, VirtualMemoryBangSize);
1041     }
1042   }
1043 
1044   // Setup osthread because the child thread may need it.
1045   thread->set_osthread(osthread);
1046 
1047   // Create the Solaris thread
1048   thread_t tid = 0;
1049   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
1050   int      status;
1051 
1052   // Mark that we don't have an lwp or thread id yet.
1053   // In case we attempt to set the priority before the thread starts.
1054   osthread->set_lwp_id(-1);
1055   osthread->set_thread_id(-1);
1056 
1057   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1058   if (status != 0) {
1059     if (PrintMiscellaneous && (Verbose || WizardMode)) {
1060       perror("os::create_thread");
1061     }
1062     thread->set_osthread(NULL);
1063     // Need to clean up stuff we've allocated so far
1064     delete osthread;
1065     return false;
1066   }
1067 
1068   Atomic::inc(&os::Solaris::_os_thread_count);
1069 
1070   // Store info on the Solaris thread into the OSThread
1071   osthread->set_thread_id(tid);
1072 
1073   // Remember that we created this thread so we can set priority on it
1074   osthread->set_vm_created();
1075 
1076   // Initial thread state is INITIALIZED, not SUSPENDED
1077   osthread->set_state(INITIALIZED);
1078 
1079   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1080   return true;
1081 }
1082 
1083 // SIGJVM1 and SIGJVM2 are defined on Solaris 10 and later.  Defining them here
1084 // allows builds on earlier versions of Solaris to take advantage of the newly
1085 // reserved Solaris JVM signals.  When SIGJVM1 and SIGJVM2 are available,
1086 // INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2, and -XX:+UseAltSigs
1087 // does nothing since these signals should have no conflict.
1088 #if !defined(SIGJVM1)
1089   #define SIGJVM1 39
1090   #define SIGJVM2 40
1091 #endif
1092 
1093 debug_only(static bool signal_sets_initialized = false);
1094 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1095 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1096 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1097 
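// Return true if the current disposition of "sig" is SIG_IGN.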
1098 bool os::Solaris::is_sig_ignored(int sig) {
1099   struct sigaction oact;
1100   sigaction(sig, (struct sigaction*)NULL, &oact);
1101   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
1102                                  : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
1103   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
1104     return true;
1105   } else {
1106     return false;
1107   }
1108 }
1109 
1110 // Note: SIGRTMIN is a macro that calls sysconf(), so the SIGRTMIN value for
1111 // the system is detected dynamically at runtime, not at build time.
1112 static bool isJVM1available() {
1113   return SIGJVM1 < SIGRTMIN;
1114 }
1115 
1116 void os::Solaris::signal_sets_init() {
1117   // Should also have an assertion stating we are still single-threaded.
1118   assert(!signal_sets_initialized, "Already initialized");
1119   // Fill in signals that are necessarily unblocked for all threads in
1120   // the VM. Currently, we unblock the following signals:
1121   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1122   //                         by -Xrs (=ReduceSignalUsage));
1123   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1124   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1125   // the dispositions or masks wrt these signals.
1126   // Programs embedding the VM that want to use the above signals for their
1127   // own purposes must, at this time, use the "-Xrs" option to prevent
1128   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1129   // (See bug 4345157, and other related bugs).
1130   // In reality, though, unblocking these signals is really a nop, since
1131   // these signals are not blocked by default.
1132   sigemptyset(&unblocked_sigs);
1133   sigemptyset(&allowdebug_blocked_sigs);
1134   sigaddset(&unblocked_sigs, SIGILL);
1135   sigaddset(&unblocked_sigs, SIGSEGV);
1136   sigaddset(&unblocked_sigs, SIGBUS);
1137   sigaddset(&unblocked_sigs, SIGFPE);
1138 
1139   if (isJVM1available()) {
1140     os::Solaris::set_SIGinterrupt(SIGJVM1);
1141     os::Solaris::set_SIGasync(SIGJVM2);
1142   } else if (UseAltSigs) {
1143     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1144     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1145   } else {
1146     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1147     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1148   }
1149 
1150   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1151   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1152 
1153   if (!ReduceSignalUsage) {
1154     if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1155       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1156       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1157     }
1158     if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1159       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1160       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1161     }
1162     if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1163       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1164       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1165     }
1166   }
1167   // Fill in signals that are blocked by all but the VM thread.
1168   sigemptyset(&vm_sigs);
1169   if (!ReduceSignalUsage) {
1170     sigaddset(&vm_sigs, BREAK_SIGNAL);
1171   }
1172   debug_only(signal_sets_initialized = true);
1173 
1174   // For diagnostics only used in run_periodic_checks
1175   sigemptyset(&check_signal_done);
1176 }
1177 
1178 // These are signals that are unblocked while a thread is running Java.
1179 // (For some reason, they get blocked by default.)
1180 sigset_t* os::Solaris::unblocked_signals() {
1181   assert(signal_sets_initialized, "Not initialized");
1182   return &unblocked_sigs;
1183 }
1184 
1185 // These are the signals that are blocked while a (non-VM) thread is
1186 // running Java. Only the VM thread handles these signals.
1187 sigset_t* os::Solaris::vm_signals() {
1188   assert(signal_sets_initialized, "Not initialized");
1189   return &vm_sigs;
1190 }
1191 
1192 // These are signals that are blocked during cond_wait to allow debugger in
1193 sigset_t* os::Solaris::allowdebug_blocked_signals() {
1194   assert(signal_sets_initialized, "Not initialized");
1195   return &allowdebug_blocked_sigs;
1196 }
1197 
1198 
1199 void _handle_uncaught_cxx_exception() {
1200   VMError err("An uncaught C++ exception");
1201   err.report_and_die();
1202 }
1203 
1204 
1205 // First crack at OS-specific initialization, from inside the new thread.
1206 void os::initialize_thread(Thread* thr) {
1207   int r = thr_main();
1208   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
1209   if (r) {
1210     JavaThread* jt = (JavaThread *)thr;
1211     assert(jt != NULL, "Sanity check");
1212     size_t stack_size;
1213     address base = jt->stack_base();
1214     if (Arguments::created_by_java_launcher()) {
1215       // Use 2MB to allow for Solaris 7 64 bit mode.
1216       stack_size = JavaThread::stack_size_at_create() == 0
1217         ? 2048*K : JavaThread::stack_size_at_create();
1218 
1219       // There are rare cases when we may have already used more than
1220       // the basic stack size allotment before this method is invoked.
1221       // Attempt to allow for a normally sized java_stack.
1222       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1223       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1224     } else {
1225       // 6269555: If we were not created by a Java launcher, i.e. if we are
1226       // running embedded in a native application, treat the primordial thread
1227       // as much like a native attached thread as possible.  This means using
1228       // the current stack size from thr_stksegment(), unless it is too large
1229       // to reliably setup guard pages.  A reasonable max size is 8MB.
1230       size_t current_size = current_stack_size();
1231       // This should never happen, but just in case....
1232       if (current_size == 0) current_size = 2 * K * K;
1233       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1234     }
1235     address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
1236     stack_size = (size_t)(base - bottom);
1237 
1238     assert(stack_size > 0, "Stack size calculation problem");
1239 
1240     if (stack_size > jt->stack_size()) {
1241 #ifndef PRODUCT
1242       struct rlimit limits;
1243       getrlimit(RLIMIT_STACK, &limits);
1244       size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1245       assert(size >= jt->stack_size(), "Stack size problem in main thread");
1246 #endif
1247       tty->print_cr("Stack size of %d Kb exceeds current limit of %d Kb.\n"
1248                     "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1249                     "See limit(1) to increase the stack size limit.",
1250                     stack_size / K, jt->stack_size() / K);
1251       vm_exit(1);
1252     }
1253     assert(jt->stack_size() >= stack_size,
1254            "Attempt to map more stack than was allocated");
1255     jt->set_stack_size(stack_size);
1256   }
1257 
1258   // With the T2 libthread (T1 is no longer supported) threads are always bound
1259   // and we use stack banging in all cases.
1260 
1261   os::Solaris::init_thread_fpu_state();
1262   std::set_terminate(_handle_uncaught_cxx_exception);
1263 }
1264 
1265 
1266 
1267 // Free Solaris resources related to the OSThread
1268 void os::free_thread(OSThread* osthread) {
1269   assert(osthread != NULL, "os::free_thread but osthread not set");
1270 
1271 
1272   // We are told to free resources of the argument thread,
1273   // but we can only really operate on the current thread.
1274   // The main thread must take the VMThread down synchronously
1275   // before the main thread exits and frees up CodeHeap
1276   guarantee((Thread::current()->osthread() == osthread
1277              || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1278   if (Thread::current()->osthread() == osthread) {
1279     // Restore caller's signal mask
1280     sigset_t sigmask = osthread->caller_sigmask();
1281     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1282   }
1283   delete osthread;
1284 }
1285 
1286 void os::pd_start_thread(Thread* thread) {
1287   int status = thr_continue(thread->osthread()->thread_id());
1288   assert_status(status == 0, status, "thr_continue failed");
1289 }
1290 
1291 
1292 intx os::current_thread_id() {
1293   return (intx)thr_self();
1294 }
1295 
1296 static pid_t _initial_pid = 0;
1297 
1298 int os::current_process_id() {
1299   return (int)(_initial_pid ? _initial_pid : getpid());
1300 }
1301 
1302 int os::allocate_thread_local_storage() {
1303   // %%%       in Win32 this allocates a memory segment pointed to by a
1304   //           register.  Dan Stein can implement a similar feature in
1305   //           Solaris.  Alternatively, the VM can do the same thing
1306   //           explicitly: malloc some storage and keep the pointer in a
1307   //           register (which is part of the thread's context) (or keep it
1308   //           in TLS).
1309   // %%%       In current versions of Solaris, thr_self and TSD can
1310   //           be accessed via short sequences of displaced indirections.
1311   //           The value of thr_self is available as %g7(36).
1312   //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
1313   //           assuming that the current thread already has a value bound to k.
1314   //           It may be worth experimenting with such access patterns,
1315   //           and later having the parameters formally exported from a Solaris
1316   //           interface.  I think, however, that it will be faster to
1317   //           maintain the invariant that %g2 always contains the
1318   //           JavaThread in Java code, and have stubs simply
1319   //           treat %g2 as a caller-save register, preserving it in a %lN.
1320   thread_key_t tk;
1321   if (thr_keycreate(&tk, NULL)) {
1322     fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
1323                   "(%s)", strerror(errno)));
1324   }
1325   return int(tk);
1326 }
1327 
1328 void os::free_thread_local_storage(int index) {
1329   // %%% don't think we need anything here
1330   // if (pthread_key_delete((pthread_key_t) tk)) {
1331   //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
1332   // }
1333 }
1334 
1335 // The allocation libthread makes for tsd_common is a version-specific
1336 // small number - the point is that NO swap space is available.
1337 #define SMALLINT 32
1338 void os::thread_local_storage_at_put(int index, void* value) {
1339   // %%% this is used only in threadLocalStorage.cpp
1340   if (thr_setspecific((thread_key_t)index, value)) {
1341     if (errno == ENOMEM) {
1342       vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
1343                             "thr_setspecific: out of swap space");
1344     } else {
1345       fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1346                     "(%s)", strerror(errno)));
1347     }
1348   } else {
1349     ThreadLocalStorage::set_thread_in_slot((Thread *) value);
1350   }
1351 }
1352 
1353 // This function could be called before TLS is initialized, for example, when
1354 // VM receives an async signal or when VM causes a fatal error during
1355 // initialization. Return NULL if thr_getspecific() fails.
1356 void* os::thread_local_storage_at(int index) {
1357   // %%% this is used only in threadLocalStorage.cpp
1358   void* r = NULL;
1359   return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1360 }
1361 
1362 
1363 // gethrtime() should be monotonic according to the documentation,
1364 // but some virtualized platforms are known to break this guarantee.
1365 // getTimeNanos() must be guaranteed not to move backwards, so we
1366 // are forced to add a check here.
1367 inline hrtime_t getTimeNanos() {
1368   const hrtime_t now = gethrtime();
1369   const hrtime_t prev = max_hrtime;
1370   if (now <= prev) {
1371     return prev;   // same or retrograde time;
1372   }
1373   const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1374   assert(obsv >= prev, "invariant");   // Monotonicity
1375   // If the CAS succeeded then we're done and return "now".
1376   // If the CAS failed and the observed value "obsv" is >= now then
1377   // we should return "obsv".  If the CAS failed and now > obsv > prev then
1378   // some other thread raced this thread and installed a new value, in which case
1379   // we could either (a) retry the entire operation, (b) retry trying to install now
1380   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1381   // we might discard a higher "now" value in deference to a slightly lower but freshly
1382   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1383   // to (a) or (b) -- and greatly reduces coherence traffic.
1384   // We might also condition (c) on the magnitude of the delta between obsv and now.
1385   // Avoiding excessive CAS operations to hot RW locations is critical.
1386   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1387   return (prev == obsv) ? now : obsv;
1388 }
1389 
1390 // Time since start-up in seconds to a fine granularity.
1391 // Used by VMSelfDestructTimer and the MemProfiler.
1392 double os::elapsedTime() {
1393   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1394 }
1395 
1396 jlong os::elapsed_counter() {
1397   return (jlong)(getTimeNanos() - first_hrtime);
1398 }
1399 
1400 jlong os::elapsed_frequency() {
1401   return hrtime_hz;
1402 }
1403 
1404 // Return the real, user, and system times in seconds from an
1405 // arbitrary fixed point in the past.
1406 bool os::getTimesSecs(double* process_real_time,
1407                       double* process_user_time,
1408                       double* process_system_time) {
1409   struct tms ticks;
1410   clock_t real_ticks = times(&ticks);
1411 
1412   if (real_ticks == (clock_t) (-1)) {
1413     return false;
1414   } else {
1415     double ticks_per_second = (double) clock_tics_per_sec;
1416     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1417     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1418     // For consistency return the real time from getTimeNanos()
1419     // converted to seconds.
1420     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1421 
1422     return true;
1423   }
1424 }
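
// Illustrative usage sketch (not part of the VM sources): a caller can
// estimate process CPU load over an interval from two getTimesSecs()
// samples. The variable names are hypothetical.
//
//   double r0, u0, s0, r1, u1, s1;
//   if (os::getTimesSecs(&r0, &u0, &s0)) {
//     // ... do some work ...
//     if (os::getTimesSecs(&r1, &u1, &s1) && r1 > r0) {
//       double cpu_load = ((u1 - u0) + (s1 - s0)) / (r1 - r0);
//     }
//   }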
1425 
1426 bool os::supports_vtime() { return true; }
1427 
1428 bool os::enable_vtime() {
1429   int fd = ::open("/proc/self/ctl", O_WRONLY);
1430   if (fd == -1) {
1431     return false;
1432   }
1433 
1434   long cmd[] = { PCSET, PR_MSACCT };
1435   int res = ::write(fd, cmd, sizeof(long) * 2);
1436   ::close(fd);
1437   if (res != sizeof(long) * 2) {
1438     return false;
1439   }
1440   return true;
1441 }
1442 
1443 bool os::vtime_enabled() {
1444   int fd = ::open("/proc/self/status", O_RDONLY);
1445   if (fd == -1) {
1446     return false;
1447   }
1448 
1449   pstatus_t status;
1450   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1451   ::close(fd);
1452   if (res != sizeof(pstatus_t)) {
1453     return false;
1454   }
1455   return status.pr_flags & PR_MSACCT;
1456 }
1457 
1458 double os::elapsedVTime() {
1459   return (double)gethrvtime() / (double)hrtime_hz;
1460 }
1461 
1462 // Used internally for comparisons only.
1463 // getTimeMillis is guaranteed not to move backwards on Solaris.
1464 jlong getTimeMillis() {
1465   jlong nanotime = getTimeNanos();
1466   return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1467 }
1468 
1469 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1470 jlong os::javaTimeMillis() {
1471   timeval t;
1472   if (gettimeofday(&t, NULL) == -1) {
1473     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1474   }
1475   return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000;
1476 }
1477 
1478 jlong os::javaTimeNanos() {
1479   return (jlong)getTimeNanos();
1480 }
1481 
1482 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1483   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1484   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1485   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1486   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1487 }
1488 
1489 char * os::local_time_string(char *buf, size_t buflen) {
1490   struct tm t;
1491   time_t long_time;
1492   time(&long_time);
1493   localtime_r(&long_time, &t);
1494   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1495                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1496                t.tm_hour, t.tm_min, t.tm_sec);
1497   return buf;
1498 }
1499 
1500 // Note: os::shutdown() might be called very early during initialization, or
1501 // called from signal handler. Before adding something to os::shutdown(), make
1502 // sure it is async-safe and can handle a partially initialized VM.
1503 void os::shutdown() {
1504 
1505   // allow PerfMemory to attempt cleanup of any persistent resources
1506   perfMemory_exit();
1507 
1508   // needs to remove object in file system
1509   AttachListener::abort();
1510 
1511   // flush buffered output, finish log files
1512   ostream_abort();
1513 
1514   // Check for abort hook
1515   abort_hook_t abort_hook = Arguments::abort_hook();
1516   if (abort_hook != NULL) {
1517     abort_hook();
1518   }
1519 }
1520 
1521 // Note: os::abort() might be called very early during initialization, or
1522 // called from signal handler. Before adding something to os::abort(), make
1523 // sure it is async-safe and can handle a partially initialized VM.
1524 void os::abort(bool dump_core) {
1525   os::shutdown();
1526   if (dump_core) {
1527 #ifndef PRODUCT
1528     fdStream out(defaultStream::output_fd());
1529     out.print_raw("Current thread is ");
1530     char buf[16];
1531     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1532     out.print_raw_cr(buf);
1533     out.print_raw_cr("Dumping core ...");
1534 #endif
1535     ::abort(); // dump core (for debugging)
1536   }
1537 
1538   ::exit(1);
1539 }
1540 
1541 // Die immediately, no exit hook, no abort hook, no cleanup.
1542 void os::die() {
1543   ::abort(); // dump core (for debugging)
1544 }
1545 
1546 // DLL functions
1547 
1548 const char* os::dll_file_extension() { return ".so"; }
1549 
1550 // This must be hard coded because it's the system's temporary
1551 // directory, not the java application's temp directory (a la java.io.tmpdir).
1552 const char* os::get_temp_directory() { return "/tmp"; }
1553 
1554 static bool file_exists(const char* filename) {
1555   struct stat statbuf;
1556   if (filename == NULL || strlen(filename) == 0) {
1557     return false;
1558   }
1559   return os::stat(filename, &statbuf) == 0;
1560 }
1561 
1562 bool os::dll_build_name(char* buffer, size_t buflen,
1563                         const char* pname, const char* fname) {
1564   bool retval = false;
1565   const size_t pnamelen = pname ? strlen(pname) : 0;
1566 
1567   // Return error on buffer overflow.
1568   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1569     return retval;
1570   }
1571 
1572   if (pnamelen == 0) {
1573     snprintf(buffer, buflen, "lib%s.so", fname);
1574     retval = true;
1575   } else if (strchr(pname, *os::path_separator()) != NULL) {
1576     int n;
1577     char** pelements = split_path(pname, &n);
1578     if (pelements == NULL) {
1579       return false;
1580     }
1581     for (int i = 0; i < n; i++) {
1582       // really shouldn't be NULL but what the heck, check can't hurt
1583       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1584         continue; // skip the empty path values
1585       }
1586       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1587       if (file_exists(buffer)) {
1588         retval = true;
1589         break;
1590       }
1591     }
1592     // release the storage
1593     for (int i = 0; i < n; i++) {
1594       if (pelements[i] != NULL) {
1595         FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1596       }
1597     }
1598     if (pelements != NULL) {
1599       FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1600     }
1601   } else {
1602     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1603     retval = true;
1604   }
1605   return retval;
1606 }
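
// Illustrative usage sketch (not part of the VM sources): given a search
// path and an unadorned library name, dll_build_name() produces the
// platform-specific file name that can then be handed to os::dll_load().
// The "/usr/lib" and "verify" values below are hypothetical examples.
//
//   char buf[MAXPATHLEN];
//   char ebuf[1024];
//   if (os::dll_build_name(buf, sizeof(buf), "/usr/lib", "verify")) {
//     // buf now holds "/usr/lib/libverify.so"; for a ':'-separated search
//     // path the first entry containing the file would have been chosen.
//     void* handle = os::dll_load(buf, ebuf, sizeof(ebuf));
//   }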
1607 
1608 // check if addr is inside libjvm.so
1609 bool os::address_is_in_vm(address addr) {
1610   static address libjvm_base_addr;
1611   Dl_info dlinfo;
1612 
1613   if (libjvm_base_addr == NULL) {
1614     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1615       libjvm_base_addr = (address)dlinfo.dli_fbase;
1616     }
1617     assert(libjvm_base_addr != NULL, "Cannot obtain base address for libjvm");
1618   }
1619 
1620   if (dladdr((void *)addr, &dlinfo) != 0) {
1621     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1622   }
1623 
1624   return false;
1625 }
1626 
1627 typedef int (*dladdr1_func_type)(void *, Dl_info *, void **, int);
1628 static dladdr1_func_type dladdr1_func = NULL;
1629 
1630 bool os::dll_address_to_function_name(address addr, char *buf,
1631                                       int buflen, int * offset) {
1632   // buf is not optional, but offset is optional
1633   assert(buf != NULL, "sanity check");
1634 
1635   Dl_info dlinfo;
1636 
1637   // dladdr1_func was initialized in os::init()
1638   if (dladdr1_func != NULL) {
1639     // yes, we have dladdr1
1640 
1641     // Support for dladdr1 is checked at runtime; it may be
1642     // available even if the vm is built on a machine that does
1643     // not have dladdr1 support.  Make sure there is a value for
1644     // RTLD_DL_SYMENT.
1645 #ifndef RTLD_DL_SYMENT
1646   #define RTLD_DL_SYMENT 1
1647 #endif
1648 #ifdef _LP64
1649     Elf64_Sym * info;
1650 #else
1651     Elf32_Sym * info;
1652 #endif
1653     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1654                      RTLD_DL_SYMENT) != 0) {
1655       // see if we have a matching symbol that covers our address
1656       if (dlinfo.dli_saddr != NULL &&
1657           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1658         if (dlinfo.dli_sname != NULL) {
1659           if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1660             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1661           }
1662           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1663           return true;
1664         }
1665       }
1666       // no matching symbol so try for just file info
1667       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1668         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1669                             buf, buflen, offset, dlinfo.dli_fname)) {
1670           return true;
1671         }
1672       }
1673     }
1674     buf[0] = '\0';
1675     if (offset != NULL) *offset  = -1;
1676     return false;
1677   }
1678 
1679   // no, only dladdr is available
1680   if (dladdr((void *)addr, &dlinfo) != 0) {
1681     // see if we have a matching symbol
1682     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1683       if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1684         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1685       }
1686       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1687       return true;
1688     }
1689     // no matching symbol so try for just file info
1690     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1691       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1692                           buf, buflen, offset, dlinfo.dli_fname)) {
1693         return true;
1694       }
1695     }
1696   }
1697   buf[0] = '\0';
1698   if (offset != NULL) *offset  = -1;
1699   return false;
1700 }
1701 
1702 bool os::dll_address_to_library_name(address addr, char* buf,
1703                                      int buflen, int* offset) {
1704   // buf is not optional, but offset is optional
1705   assert(buf != NULL, "sanity check");
1706 
1707   Dl_info dlinfo;
1708 
1709   if (dladdr((void*)addr, &dlinfo) != 0) {
1710     if (dlinfo.dli_fname != NULL) {
1711       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1712     }
1713     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1714       *offset = addr - (address)dlinfo.dli_fbase;
1715     }
1716     return true;
1717   }
1718 
1719   buf[0] = '\0';
1720   if (offset) *offset = -1;
1721   return false;
1722 }
1723 
1724 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1725   Dl_info dli;
1726   // Sanity check: make sure we can locate our own module.
1727   if (dladdr(CAST_FROM_FN_PTR(void *, os::get_loaded_modules_info), &dli) == 0 ||
1728       dli.dli_fname == NULL) {
1729     return 1;
1730   }
1731 
1732   void * handle = dlopen(dli.dli_fname, RTLD_LAZY);
1733   if (handle == NULL) {
1734     return 1;
1735   }
1736 
1737   Link_map *map = NULL;
1738   // dlinfo() returns -1 on failure and leaves map untouched, so check both.
1739   if (dlinfo(handle, RTLD_DI_LINKMAP, &map) == -1 || map == NULL) {
1740     dlclose(handle);
1741     return 1;
1742   }
1743 
1744   while (map->l_prev != NULL) {
1745     map = map->l_prev;
1746   }
1747 
1748   while (map != NULL) {
1749     // Iterate through all map entries and call callback with fields of interest
1750     if (callback(map->l_name, (address)map->l_addr, (address)0, param)) {
1751       dlclose(handle);
1752       return 1;
1753     }
1754     map = map->l_next;
1755   }
1756 
1757   dlclose(handle);
1758   return 0;
1759 }
1760 
1761 int _print_dll_info_cb(const char * name, address base_address, address top_address, void * param) {
1762   outputStream * out = (outputStream *) param;
1763   out->print_cr(PTR_FORMAT " \t%s", base_address, name);
1764   return 0;
1765 }
1766 
1767 void os::print_dll_info(outputStream * st) {
1768   st->print_cr("Dynamic libraries:"); st->flush();
1769   if (get_loaded_modules_info(_print_dll_info_cb, (void *)st)) {
1770     st->print_cr("Error: Cannot print dynamic libraries.");
1771   }
1772 }
1773 
1774 // Loads a .dll/.so and, in case of error, checks whether the
1775 // .dll/.so was built for the same architecture that HotSpot
1776 // is running on.
1777 
1778 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1779   void * result= ::dlopen(filename, RTLD_LAZY);
1780   if (result != NULL) {
1781     // Successful loading
1782     return result;
1783   }
1784 
1785   Elf32_Ehdr elf_head;
1786 
1787   // Read system error message into ebuf
1788   // It may or may not be overwritten below
1789   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1790   ebuf[ebuflen-1]='\0';
1791   int diag_msg_max_length=ebuflen-strlen(ebuf);
1792   char* diag_msg_buf=ebuf+strlen(ebuf);
1793 
1794   if (diag_msg_max_length==0) {
1795     // No more space in ebuf for additional diagnostics message
1796     return NULL;
1797   }
1798 
1799 
1800   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1801 
1802   if (file_descriptor < 0) {
1803     // Can't open library, report dlerror() message
1804     return NULL;
1805   }
1806 
1807   bool failed_to_read_elf_head=
1808     (sizeof(elf_head)!=
1809      (::read(file_descriptor, &elf_head,sizeof(elf_head))));
1810 
1811   ::close(file_descriptor);
1812   if (failed_to_read_elf_head) {
1813     // file i/o error - report dlerror() msg
1814     return NULL;
1815   }
1816 
1817   typedef struct {
1818     Elf32_Half  code;         // Actual value as defined in elf.h
1819     Elf32_Half  compat_class; // Compatibility of architectures from the VM's point of view
1820     char        elf_class;    // 32 or 64 bit
1821     char        endianess;    // MSB or LSB
1822     char*       name;         // String representation
1823   } arch_t;
1824 
1825   static const arch_t arch_array[]={
1826     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1827     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1828     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1829     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1830     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1831     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1832     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1833     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1834     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1835     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1836   };
1837 
1838 #if  (defined IA32)
1839   static  Elf32_Half running_arch_code=EM_386;
1840 #elif   (defined AMD64)
1841   static  Elf32_Half running_arch_code=EM_X86_64;
1842 #elif  (defined IA64)
1843   static  Elf32_Half running_arch_code=EM_IA_64;
1844 #elif  (defined __sparc) && (defined _LP64)
1845   static  Elf32_Half running_arch_code=EM_SPARCV9;
1846 #elif  (defined __sparc) && (!defined _LP64)
1847   static  Elf32_Half running_arch_code=EM_SPARC;
1848 #elif  (defined __powerpc64__)
1849   static  Elf32_Half running_arch_code=EM_PPC64;
1850 #elif  (defined __powerpc__)
1851   static  Elf32_Half running_arch_code=EM_PPC;
1852 #elif (defined ARM)
1853   static  Elf32_Half running_arch_code=EM_ARM;
1854 #else
1855   #error Method os::dll_load requires that one of the following is defined:\
1856        IA32, AMD64, IA64, __sparc, __powerpc__, ARM
1857 #endif
1858 
1859   // Identify the compatibility class for the VM's architecture and the library's architecture.
1860   // Obtain string descriptions for the architectures.
1861 
1862   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1863   int running_arch_index=-1;
1864 
1865   for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) {
1866     if (running_arch_code == arch_array[i].code) {
1867       running_arch_index    = i;
1868     }
1869     if (lib_arch.code == arch_array[i].code) {
1870       lib_arch.compat_class = arch_array[i].compat_class;
1871       lib_arch.name         = arch_array[i].name;
1872     }
1873   }
1874 
1875   assert(running_arch_index != -1,
1876          "Didn't find running architecture code (running_arch_code) in arch_array");
1877   if (running_arch_index == -1) {
1878     // Even though detection of the running architecture failed,
1879     // we can still continue and report the dlerror() message.
1880     return NULL;
1881   }
1882 
1883   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1884     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1885     return NULL;
1886   }
1887 
1888   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1889     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1890     return NULL;
1891   }
1892 
1893   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1894     if (lib_arch.name!=NULL) {
1895       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1896                  " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1897                  lib_arch.name, arch_array[running_arch_index].name);
1898     } else {
1899       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1900                  " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1901                  lib_arch.code,
1902                  arch_array[running_arch_index].name);
1903     }
1904   }
1905 
1906   return NULL;
1907 }
1908 
1909 void* os::dll_lookup(void* handle, const char* name) {
1910   return dlsym(handle, name);
1911 }
1912 
1913 void* os::get_default_process_handle() {
1914   return (void*)::dlopen(NULL, RTLD_LAZY);
1915 }
1916 
1917 int os::stat(const char *path, struct stat *sbuf) {
1918   char pathbuf[MAX_PATH];
1919   if (strlen(path) > MAX_PATH - 1) {
1920     errno = ENAMETOOLONG;
1921     return -1;
1922   }
1923   os::native_path(strcpy(pathbuf, path));
1924   return ::stat(pathbuf, sbuf);
1925 }
1926 
1927 static bool _print_ascii_file(const char* filename, outputStream* st) {
1928   int fd = ::open(filename, O_RDONLY);
1929   if (fd == -1) {
1930     return false;
1931   }
1932 
1933   char buf[32];
1934   int bytes;
1935   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
1936     st->print_raw(buf, bytes);
1937   }
1938 
1939   ::close(fd);
1940 
1941   return true;
1942 }
1943 
1944 void os::print_os_info_brief(outputStream* st) {
1945   os::Solaris::print_distro_info(st);
1946 
1947   os::Posix::print_uname_info(st);
1948 
1949   os::Solaris::print_libversion_info(st);
1950 }
1951 
1952 void os::print_os_info(outputStream* st) {
1953   st->print("OS:");
1954 
1955   os::Solaris::print_distro_info(st);
1956 
1957   os::Posix::print_uname_info(st);
1958 
1959   os::Solaris::print_libversion_info(st);
1960 
1961   os::Posix::print_rlimit_info(st);
1962 
1963   os::Posix::print_load_average(st);
1964 }
1965 
1966 void os::Solaris::print_distro_info(outputStream* st) {
1967   if (!_print_ascii_file("/etc/release", st)) {
1968     st->print("Solaris");
1969   }
1970   st->cr();
1971 }
1972 
1973 void os::Solaris::print_libversion_info(outputStream* st) {
1974   st->print("  (T2 libthread)");
1975   st->cr();
1976 }
1977 
1978 static bool check_addr0(outputStream* st) {
1979   bool status = false;
1980   int fd = ::open("/proc/self/map",O_RDONLY);
1981   if (fd >= 0) {
1982     prmap_t p;
1983     while (::read(fd, &p, sizeof(p)) > 0) {
1984       if (p.pr_vaddr == 0x0) {
1985         st->print("Warning: Address: " PTR_FORMAT ", Size: " SIZE_FORMAT "K, ", p.pr_vaddr, p.pr_size/1024);
1986         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
1987         st->print("Access:");
1988         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
1989         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
1990         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
1991         st->cr();
1992         status = true;
1993       }
1994     }
1995     ::close(fd);
1996   }
1997   return status;
1998 }
1999 
2000 void os::pd_print_cpu_info(outputStream* st) {
2001   // Nothing to do for now.
2002 }
2003 
2004 void os::print_memory_info(outputStream* st) {
2005   st->print("Memory:");
2006   st->print(" %dk page", os::vm_page_size()>>10);
2007   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
2008   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
2009   st->cr();
2010   (void) check_addr0(st);
2011 }
2012 
2013 void os::print_siginfo(outputStream* st, void* siginfo) {
2014   const siginfo_t* si = (const siginfo_t*)siginfo;
2015 
2016   os::Posix::print_siginfo_brief(st, si);
2017 
2018   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2019       UseSharedSpaces) {
2020     FileMapInfo* mapinfo = FileMapInfo::current_info();
2021     if (mapinfo->is_in_shared_space(si->si_addr)) {
2022       st->print("\n\nError accessing class data sharing archive."   \
2023                 " Mapped file inaccessible during execution, "      \
2024                 "possible disk/network problem.");
2025     }
2026   }
2027   st->cr();
2028 }
2029 
2030 // Moved here from the signal-handling group below, because we need them
2031 // for diagnostic prints.
2032 #define OLDMAXSIGNUM 32
2033 static int Maxsignum = 0;
2034 static int *ourSigFlags = NULL;
2035 
2036 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2037 
2038 int os::Solaris::get_our_sigflags(int sig) {
2039   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2040   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2041   return ourSigFlags[sig];
2042 }
2043 
2044 void os::Solaris::set_our_sigflags(int sig, int flags) {
2045   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2046   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2047   ourSigFlags[sig] = flags;
2048 }
2049 
2050 
2051 static const char* get_signal_handler_name(address handler,
2052                                            char* buf, int buflen) {
2053   int offset;
2054   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2055   if (found) {
2056     // skip directory names
2057     const char *p1, *p2;
2058     p1 = buf;
2059     size_t len = strlen(os::file_separator());
2060     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2061     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2062   } else {
2063     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2064   }
2065   return buf;
2066 }
2067 
2068 static void print_signal_handler(outputStream* st, int sig,
2069                                  char* buf, size_t buflen) {
2070   struct sigaction sa;
2071 
2072   sigaction(sig, NULL, &sa);
2073 
2074   st->print("%s: ", os::exception_name(sig, buf, buflen));
2075 
2076   address handler = (sa.sa_flags & SA_SIGINFO)
2077                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2078                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
2079 
2080   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2081     st->print("SIG_DFL");
2082   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2083     st->print("SIG_IGN");
2084   } else {
2085     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2086   }
2087 
2088   st->print(", sa_mask[0]=");
2089   os::Posix::print_signal_set_short(st, &sa.sa_mask);
2090 
2091   address rh = VMError::get_resetted_sighandler(sig);
2092   // Maybe the handler was reset by VMError?
2093   if (rh != NULL) {
2094     handler = rh;
2095     sa.sa_flags = VMError::get_resetted_sigflags(sig);
2096   }
2097 
2098   st->print(", sa_flags=");
2099   os::Posix::print_sa_flags(st, sa.sa_flags);
2100 
2101   // Check: is it our handler?
2102   if (handler == CAST_FROM_FN_PTR(address, signalHandler) ||
2103       handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
2104     // It is our signal handler
2105     // check for flags
2106     if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
2107       st->print(
2108                 ", flags were changed from " PTR32_FORMAT ", consider using jsig library",
2109                 os::Solaris::get_our_sigflags(sig));
2110     }
2111   }
2112   st->cr();
2113 }
2114 
2115 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2116   st->print_cr("Signal Handlers:");
2117   print_signal_handler(st, SIGSEGV, buf, buflen);
2118   print_signal_handler(st, SIGBUS , buf, buflen);
2119   print_signal_handler(st, SIGFPE , buf, buflen);
2120   print_signal_handler(st, SIGPIPE, buf, buflen);
2121   print_signal_handler(st, SIGXFSZ, buf, buflen);
2122   print_signal_handler(st, SIGILL , buf, buflen);
2123   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2124   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2125   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2126   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2127   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2128   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2129   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2130   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2131 }
2132 
2133 static char saved_jvm_path[MAXPATHLEN] = { 0 };
2134 
2135 // Find the full path to the current module, libjvm.so
2136 void os::jvm_path(char *buf, jint buflen) {
2137   // Error checking.
2138   if (buflen < MAXPATHLEN) {
2139     assert(false, "must use a large-enough buffer");
2140     buf[0] = '\0';
2141     return;
2142   }
2143   // Lazy resolve the path to current module.
2144   if (saved_jvm_path[0] != 0) {
2145     strcpy(buf, saved_jvm_path);
2146     return;
2147   }
2148 
2149   Dl_info dlinfo;
2150   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2151   assert(ret != 0, "cannot locate libjvm");
2152   if (ret != 0 && dlinfo.dli_fname != NULL) {
2153     realpath((char *)dlinfo.dli_fname, buf);
2154   } else {
2155     buf[0] = '\0';
2156     return;
2157   }
2158 
2159   if (Arguments::sun_java_launcher_is_altjvm()) {
2160     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
2161     // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
2162     // If "/jre/lib/" appears at the right place in the string, then
2163     // assume we are installed in a JDK and we're done.  Otherwise, check
2164     // for a JAVA_HOME environment variable and fix up the path so it
2165     // looks like libjvm.so is installed there (append a fake suffix
2166     // hotspot/libjvm.so).
2167     const char *p = buf + strlen(buf) - 1;
2168     for (int count = 0; p > buf && count < 5; ++count) {
2169       for (--p; p > buf && *p != '/'; --p)
2170         /* empty */ ;
2171     }
2172 
2173     if (strncmp(p, "/jre/lib/", 9) != 0) {
2174       // Look for JAVA_HOME in the environment.
2175       char* java_home_var = ::getenv("JAVA_HOME");
2176       if (java_home_var != NULL && java_home_var[0] != 0) {
2177         char cpu_arch[12];
2178         char* jrelib_p;
2179         int   len;
2180         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2181 #ifdef _LP64
2182         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2183         if (strcmp(cpu_arch, "sparc") == 0) {
2184           strcat(cpu_arch, "v9");
2185         } else if (strcmp(cpu_arch, "i386") == 0) {
2186           strcpy(cpu_arch, "amd64");
2187         }
2188 #endif
2189         // Check the current module name "libjvm.so".
2190         p = strrchr(buf, '/');
2191         assert(strstr(p, "/libjvm") == p, "invalid library name");
2192 
2193         realpath(java_home_var, buf);
2194         // determine if this is a legacy image or modules image
2195         // modules image doesn't have "jre" subdirectory
2196         len = strlen(buf);
2197         assert(len < buflen, "Ran out of buffer space");
2198         jrelib_p = buf + len;
2199         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2200         if (0 != access(buf, F_OK)) {
2201           snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2202         }
2203 
2204         if (0 == access(buf, F_OK)) {
2205           // Use current module name "libjvm.so"
2206           len = strlen(buf);
2207           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2208         } else {
2209           // Go back to path of .so
2210           realpath((char *)dlinfo.dli_fname, buf);
2211         }
2212       }
2213     }
2214   }
2215 
2216   strncpy(saved_jvm_path, buf, MAXPATHLEN);
2217   saved_jvm_path[MAXPATHLEN - 1] = '\0';
2218 }
2219 
2220 
2221 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2222   // no prefix required, not even "_"
2223 }
2224 
2225 
2226 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2227   // no suffix required
2228 }
2229 
2230 // This method is a copy of JDK's sysGetLastErrorString
2231 // from src/solaris/hpi/src/system_md.c
2232 
2233 size_t os::lasterror(char *buf, size_t len) {
2234   if (errno == 0)  return 0;
2235 
2236   const char *s = ::strerror(errno);
2237   size_t n = ::strlen(s);
2238   if (n >= len) {
2239     n = len - 1;
2240   }
2241   ::strncpy(buf, s, n);
2242   buf[n] = '\0';
2243   return n;
2244 }
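
// Illustrative usage sketch (not part of the VM sources): os::lasterror()
// copies the strerror() text for the current errno, so it is typically
// called right after a failing system call. 'path' is a hypothetical name.
//
//   if (::unlink(path) == -1) {
//     char msg[256];
//     if (os::lasterror(msg, sizeof(msg)) > 0) {
//       warning("unlink of %s failed: %s", path, msg);
//     }
//   }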
2245 
2246 
2247 // sun.misc.Signal
2248 
2249 extern "C" {
2250   static void UserHandler(int sig, void *siginfo, void *context) {
2251     // Ctrl-C is pressed during error reporting, likely because the error
2252     // handler fails to abort. Let VM die immediately.
2253     if (sig == SIGINT && is_error_reported()) {
2254       os::die();
2255     }
2256 
2257     os::signal_notify(sig);
2258     // We do not need to reinstate the signal handler each time...
2259   }
2260 }
2261 
2262 void* os::user_handler() {
2263   return CAST_FROM_FN_PTR(void*, UserHandler);
2264 }
2265 
2266 class Semaphore : public StackObj {
2267  public:
2268   Semaphore();
2269   ~Semaphore();
2270   void signal();
2271   void wait();
2272   bool trywait();
2273   bool timedwait(unsigned int sec, int nsec);
2274  private:
2275   sema_t _semaphore;
2276 };
2277 
2278 
2279 Semaphore::Semaphore() {
2280   sema_init(&_semaphore, 0, NULL, NULL);
2281 }
2282 
2283 Semaphore::~Semaphore() {
2284   sema_destroy(&_semaphore);
2285 }
2286 
2287 void Semaphore::signal() {
2288   sema_post(&_semaphore);
2289 }
2290 
2291 void Semaphore::wait() {
2292   sema_wait(&_semaphore);
2293 }
2294 
2295 bool Semaphore::trywait() {
2296   return sema_trywait(&_semaphore) == 0;
2297 }
2298 
2299 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2300   struct timespec ts;
2301   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2302 
2303   while (1) {
2304     int result = sema_timedwait(&_semaphore, &ts);
2305     if (result == 0) {
2306       return true;
2307     } else if (errno == EINTR) {
2308       continue;
2309     } else if (errno == ETIME) {
2310       return false;
2311     } else {
2312       return false;
2313     }
2314   }
2315 }
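
// Illustrative usage sketch (not part of the VM sources): the Semaphore
// class above is a thin wrapper over sema_t, used producer/consumer style.
//
//   Semaphore sem;                  // counter starts at 0
//   // producer:  sem.signal();
//   // consumer:  sem.wait();                        // block until signaled
//   //      or:   if (sem.timedwait(1, 0)) { ... }   // wait up to 1 second
//
// timedwait() takes a relative (sec, nsec) pair; unpackTime() (defined
// elsewhere in this file) converts it into the absolute deadline that
// sema_timedwait() expects.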
2316 
2317 extern "C" {
2318   typedef void (*sa_handler_t)(int);
2319   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2320 }
2321 
2322 void* os::signal(int signal_number, void* handler) {
2323   struct sigaction sigAct, oldSigAct;
2324   sigfillset(&(sigAct.sa_mask));
2325   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2326   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2327 
2328   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
2329     // -1 means registration failed
2330     return (void *)-1;
2331   }
2332 
2333   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2334 }
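
// Illustrative usage sketch (not part of the VM sources): os::signal()
// installs a handler and returns the previous one, so a caller can restore
// the old disposition later. SIGUSR2 and my_handler are hypothetical.
//
//   void* old = os::signal(SIGUSR2, CAST_FROM_FN_PTR(void*, my_handler));
//   if (old == (void*)-1) {
//     // registration failed
//   } else {
//     // ... later, put the previous handler back:
//     os::signal(SIGUSR2, old);
//   }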
2335 
2336 void os::signal_raise(int signal_number) {
2337   raise(signal_number);
2338 }
2339 
2340 // The following code was moved from os.cpp to make this
2341 // code platform specific, which it is by its very nature.
2342 
2343 // a counter for each possible signal value
2344 static int Sigexit = 0;
2345 static int Maxlibjsigsigs;
2346 static jint *pending_signals = NULL;
2347 static int *preinstalled_sigs = NULL;
2348 static struct sigaction *chainedsigactions = NULL;
2349 static sema_t sig_sem;
2350 typedef int (*version_getting_t)();
2351 version_getting_t os::Solaris::get_libjsig_version = NULL;
2352 static int libjsigversion = 0;
2353 
2354 int os::sigexitnum_pd() {
2355   assert(Sigexit > 0, "signal memory not yet initialized");
2356   return Sigexit;
2357 }
2358 
2359 void os::Solaris::init_signal_mem() {
2360   // Initialize signal structures
2361   Maxsignum = SIGRTMAX;
2362   Sigexit = Maxsignum+1;
2363   assert(Maxsignum > 0, "Unable to obtain max signal number");
2364 
2365   Maxlibjsigsigs = Maxsignum;
2366 
2367   // pending_signals has one int per signal
2368   // The additional signal is for SIGEXIT - exit signal to signal_thread
2369   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2370   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2371 
2372   if (UseSignalChaining) {
2373     chainedsigactions = (struct sigaction *)os::malloc(sizeof(struct sigaction)
2374                                                    * (Maxsignum + 1), mtInternal);
2375     memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2376     preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2377     memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2378   }
2379   ourSigFlags = (int*)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2380   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2381 }
2382 
2383 void os::signal_init_pd() {
2384   int ret;
2385 
2386   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2387   assert(ret == 0, "sema_init() failed");
2388 }
2389 
2390 void os::signal_notify(int signal_number) {
2391   int ret;
2392 
2393   Atomic::inc(&pending_signals[signal_number]);
2394   ret = ::sema_post(&sig_sem);
2395   assert(ret == 0, "sema_post() failed");
2396 }
2397 
2398 static int check_pending_signals(bool wait_for_signal) {
2399   int ret;
2400   while (true) {
2401     for (int i = 0; i < Sigexit + 1; i++) {
2402       jint n = pending_signals[i];
2403       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2404         return i;
2405       }
2406     }
2407     if (!wait_for_signal) {
2408       return -1;
2409     }
2410     JavaThread *thread = JavaThread::current();
2411     ThreadBlockInVM tbivm(thread);
2412 
2413     bool threadIsSuspended;
2414     do {
2415       thread->set_suspend_equivalent();
2416       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2417       while ((ret = ::sema_wait(&sig_sem)) == EINTR)
2418         ;
2419       assert(ret == 0, "sema_wait() failed");
2420 
2421       // were we externally suspended while we were waiting?
2422       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2423       if (threadIsSuspended) {
2424         // The semaphore has been incremented, but while we were waiting
2425         // another thread suspended us. We don't want to continue running
2426         // while suspended because that would surprise the thread that
2427         // suspended us.
2428         ret = ::sema_post(&sig_sem);
2429         assert(ret == 0, "sema_post() failed");
2430 
2431         thread->java_suspend_self();
2432       }
2433     } while (threadIsSuspended);
2434   }
2435 }
2436 
2437 int os::signal_lookup() {
2438   return check_pending_signals(false);
2439 }
2440 
2441 int os::signal_wait() {
2442   return check_pending_signals(true);
2443 }
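
// Illustrative sketch (not part of the VM sources): signal_notify() and
// check_pending_signals() form an async-signal-safe hand-off. The producer
// side (a signal handler) only bumps a per-signal counter and posts the
// semaphore; the consumer drains the counters with a CAS, so no
// notification is lost even if several arrive back to back:
//
//   // producer, from a handler:  os::signal_notify(sig);
//   // consumer loop:
//   //   int sig = os::signal_wait();   // blocks on sig_sem
//   //   ... dispatch sig to the Java-level handler ...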
2444 
2445 ////////////////////////////////////////////////////////////////////////////////
2446 // Virtual Memory
2447 
2448 static int page_size = -1;
2449 
2450 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2451 // clear this var if support is not available.
2452 static bool has_map_align = true;
2453 
2454 int os::vm_page_size() {
2455   assert(page_size != -1, "must call os::init");
2456   return page_size;
2457 }
2458 
2459 // Solaris allocates memory by pages.
2460 int os::vm_allocation_granularity() {
2461   assert(page_size != -1, "must call os::init");
2462   return page_size;
2463 }
2464 
2465 static bool recoverable_mmap_error(int err) {
2466   // See if the error is one we can let the caller handle. This
2467   // list of errno values comes from the Solaris mmap(2) man page.
2468   switch (err) {
2469   case EBADF:
2470   case EINVAL:
2471   case ENOTSUP:
2472     // let the caller deal with these errors
2473     return true;
2474 
2475   default:
2476     // Any remaining errors on this OS can cause our reserved mapping
2477     // to be lost. That can cause confusion where different data
2478     // structures think they have the same memory mapped. The worst
2479     // scenario is if both the VM and a library think they have the
2480     // same memory mapped.
2481     return false;
2482   }
2483 }
2484 
2485 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2486                                     int err) {
2487   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2488           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2489           strerror(err), err);
2490 }
2491 
2492 static void warn_fail_commit_memory(char* addr, size_t bytes,
2493                                     size_t alignment_hint, bool exec,
2494                                     int err) {
2495   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2496           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2497           alignment_hint, exec, strerror(err), err);
2498 }
2499 
2500 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2501   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2502   size_t size = bytes;
2503   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2504   if (res != NULL) {
2505     if (UseNUMAInterleaving) {
2506       numa_make_global(addr, bytes);
2507     }
2508     return 0;
2509   }
2510 
2511   int err = errno;  // save errno from mmap() call in mmap_chunk()
2512 
2513   if (!recoverable_mmap_error(err)) {
2514     warn_fail_commit_memory(addr, bytes, exec, err);
2515     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2516   }
2517 
2518   return err;
2519 }
2520 
2521 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2522   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2523 }
2524 
2525 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2526                                   const char* mesg) {
2527   assert(mesg != NULL, "mesg must be specified");
2528   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2529   if (err != 0) {
2530     // the caller wants all commit errors to exit with the specified mesg:
2531     warn_fail_commit_memory(addr, bytes, exec, err);
2532     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2533   }
2534 }
2535 
2536 size_t os::Solaris::page_size_for_alignment(size_t alignment) {
2537   assert(is_size_aligned(alignment, (size_t) vm_page_size()),
2538          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
2539                  alignment, (size_t) vm_page_size()));
2540 
2541   for (int i = 0; _page_sizes[i] != 0; i++) {
2542     if (is_size_aligned(alignment, _page_sizes[i])) {
2543       return _page_sizes[i];
2544     }
2545   }
2546 
2547   return (size_t) vm_page_size();
2548 }
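
// Illustrative example (not part of the VM sources): assuming
// _page_sizes = { 4M, 64K, 8K, 0 } and an 8K vm_page_size(),
// page_size_for_alignment(4*M) returns 4M, page_size_for_alignment(128*K)
// returns 64K, and page_size_for_alignment(8*K) returns 8K. The alignment
// must itself be a multiple of vm_page_size(), which the assert enforces.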
2549 
2550 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2551                                     size_t alignment_hint, bool exec) {
2552   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2553   if (err == 0 && UseLargePages && alignment_hint > 0) {
2554     assert(is_size_aligned(bytes, alignment_hint),
2555            err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint));
2556 
2557     // The syscall memcntl requires an exact page size (see man memcntl for details).
2558     size_t page_size = page_size_for_alignment(alignment_hint);
2559     if (page_size > (size_t) vm_page_size()) {
2560       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2561     }
2562   }
2563   return err;
2564 }
2565 
2566 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2567                           bool exec) {
2568   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2569 }
2570 
2571 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2572                                   size_t alignment_hint, bool exec,
2573                                   const char* mesg) {
2574   assert(mesg != NULL, "mesg must be specified");
2575   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2576   if (err != 0) {
2577     // the caller wants all commit errors to exit with the specified mesg:
2578     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2579     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2580   }
2581 }
2582 
2583 // Uncommit the pages in a specified region.
2584 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2585   if (madvise(addr, bytes, MADV_FREE) < 0) {
2586     debug_only(warning("MADV_FREE failed."));
2587     return;
2588   }
2589 }
2590 
2591 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2592   return os::commit_memory(addr, size, !ExecMem);
2593 }
2594 
2595 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2596   return os::uncommit_memory(addr, size);
2597 }
2598 
2599 // Change the page size in a given range.
2600 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2601   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2602   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2603   if (UseLargePages) {
2604     Solaris::setup_large_pages(addr, bytes, alignment_hint);
2605   }
2606 }
2607 
2608 // Tell the OS to make the range local to the first-touching LWP
2609 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2610   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2611   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2612     debug_only(warning("MADV_ACCESS_LWP failed."));
2613   }
2614 }
2615 
2616 // Tell the OS that this range would be accessed from different LWPs.
2617 void os::numa_make_global(char *addr, size_t bytes) {
2618   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2619   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2620     debug_only(warning("MADV_ACCESS_MANY failed."));
2621   }
2622 }
2623 
2624 // Get the number of the locality groups.
2625 size_t os::numa_get_groups_num() {
2626   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2627   return n != -1 ? n : 1;
2628 }
2629 
2630 // Get a list of leaf locality groups. A leaf lgroup is a group that
2631 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
2632 // board. An LWP is assigned to one of these groups upon creation.
2633 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2634   if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2635     ids[0] = 0;
2636     return 1;
2637   }
2638   int result_size = 0, top = 1, bottom = 0, cur = 0;
2639   for (int k = 0; k < size; k++) {
2640     int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2641                                    (Solaris::lgrp_id_t*)&ids[top], size - top);
2642     if (r == -1) {
2643       ids[0] = 0;
2644       return 1;
2645     }
2646     if (!r) {
2647       // That's a leaf node.
2648       assert(bottom <= cur, "Sanity check");
2649       // Check if the node has memory
2650       if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2651                                   NULL, 0, LGRP_RSRC_MEM) > 0) {
2652         ids[bottom++] = ids[cur];
2653       }
2654     }
2655     top += r;
2656     cur++;
2657   }
2658   if (bottom == 0) {
2659     // Handle the situation when the OS reports no memory available.
2660     // Assume UMA architecture.
2661     ids[0] = 0;
2662     return 1;
2663   }
2664   return bottom;
2665 }
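
// Illustrative sketch (not part of the VM sources): the loop above is a
// breadth-first traversal of the lgroup hierarchy that reuses ids[] as its
// work queue. 'cur' is the head being expanded, 'top' is the tail where
// newly found children are appended, and 'bottom' compacts the leaves
// (child count r == 0) that actually own memory to the front of the array.
// For a root with two CPU/memory boards, ids[] evolves roughly as
// { root } -> { root, board0, board1 } and the call returns 2 with
// ids[] = { board0, board1, ... }.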
2666 
2667 // Detect a topology change. This typically happens when CPUs are plugged or unplugged.
2668 bool os::numa_topology_changed() {
2669   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2670   if (is_stale != -1 && is_stale) {
2671     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2672     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2673     assert(c != 0, "Failure to initialize LGRP API");
2674     Solaris::set_lgrp_cookie(c);
2675     return true;
2676   }
2677   return false;
2678 }
2679 
2680 // Get the group id of the current LWP.
2681 int os::numa_get_group_id() {
2682   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2683   if (lgrp_id == -1) {
2684     return 0;
2685   }
2686   const int size = os::numa_get_groups_num();
2687   int *ids = (int*)alloca(size * sizeof(int));
2688 
2689   // Get the ids of all lgroups with memory; r is the count.
2690   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2691                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2692   if (r <= 0) {
2693     return 0;
2694   }
2695   return ids[os::random() % r];
2696 }
2697 
2698 // Request information about the page.
2699 bool os::get_page_info(char *start, page_info* info) {
2700   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2701   uint64_t addr = (uintptr_t)start;
2702   uint64_t outdata[2];
2703   uint_t validity = 0;
2704 
2705   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2706     return false;
2707   }
2708 
2709   info->size = 0;
2710   info->lgrp_id = -1;
2711 
2712   if ((validity & 1) != 0) {
2713     if ((validity & 2) != 0) {
2714       info->lgrp_id = outdata[0];
2715     }
2716     if ((validity & 4) != 0) {
2717       info->size = outdata[1];
2718     }
2719     return true;
2720   }
2721   return false;
2722 }
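
// Illustrative usage sketch (not part of the VM sources): a caller can ask
// which lgroup and page size back a given address. 'heap_base' is a
// hypothetical address.
//
//   page_info info;
//   if (os::get_page_info(heap_base, &info)) {
//     // info.lgrp_id is the locality group (-1 if unknown) and
//     // info.size is the backing page size (0 if unknown).
//   }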
2723 
2724 // Scan the pages from start to end until a page different than
2725 // the one described in the info parameter is encountered.
2726 char *os::scan_pages(char *start, char* end, page_info* page_expected,
2727                      page_info* page_found) {
2728   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2729   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2730   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2731   uint_t validity[MAX_MEMINFO_CNT];
2732 
2733   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2734   uint64_t p = (uint64_t)start;
2735   while (p < (uint64_t)end) {
2736     addrs[0] = p;
2737     size_t addrs_count = 1;
2738     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2739       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2740       addrs_count++;
2741     }
2742 
2743     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2744       return NULL;
2745     }
2746 
2747     size_t i = 0;
2748     for (; i < addrs_count; i++) {
2749       if ((validity[i] & 1) != 0) {
2750         if ((validity[i] & 4) != 0) {
2751           if (outdata[types * i + 1] != page_expected->size) {
2752             break;
2753           }
2754         } else if (page_expected->size != 0) {
2755           break;
2756         }
2757 
2758         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2759           if (outdata[types * i] != page_expected->lgrp_id) {
2760             break;
2761           }
2762         }
2763       } else {
2764         return NULL;
2765       }
2766     }
2767 
2768     if (i < addrs_count) {
2769       if ((validity[i] & 2) != 0) {
2770         page_found->lgrp_id = outdata[types * i];
2771       } else {
2772         page_found->lgrp_id = -1;
2773       }
2774       if ((validity[i] & 4) != 0) {
2775         page_found->size = outdata[types * i + 1];
2776       } else {
2777         page_found->size = 0;
2778       }
2779       return (char*)addrs[i];
2780     }
2781 
2782     p = addrs[addrs_count - 1] + page_size;
2783   }
2784   return end;
2785 }
2786 
2787 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2788   size_t size = bytes;
2789   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2790   // uncommitted page. Otherwise, the read/write might succeed if we
2791   // have enough swap space to back the physical page.
2792   return
2793     NULL != Solaris::mmap_chunk(addr, size,
2794                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2795                                 PROT_NONE);
2796 }
2797 
2798 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2799   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2800 
2801   if (b == MAP_FAILED) {
2802     return NULL;
2803   }
2804   return b;
2805 }
2806 
2807 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes,
2808                              size_t alignment_hint, bool fixed) {
2809   char* addr = requested_addr;
2810   int flags = MAP_PRIVATE | MAP_NORESERVE;
2811 
2812   assert(!(fixed && (alignment_hint > 0)),
2813          "alignment hint meaningless with fixed mmap");
2814 
2815   if (fixed) {
2816     flags |= MAP_FIXED;
2817   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
2818     flags |= MAP_ALIGN;
2819     addr = (char*) alignment_hint;
2820   }
2821 
2822   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2823   // uncommitted page. Otherwise, the read/write might succeed if we
2824   // have enough swap space to back the physical page.
2825   return mmap_chunk(addr, bytes, flags, PROT_NONE);
2826 }
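
// Illustrative sketch (not part of the VM sources): with MAP_ALIGN the
// address argument is reinterpreted by Solaris as an alignment request
// rather than a placement hint, so for example
//
//   // reserve 16M of uncommitted address space on a 4M boundary
//   char* base = os::Solaris::anon_mmap(NULL, 16*M, 4*M, false);
//
// yields a PROT_NONE, MAP_NORESERVE mapping whose base address is a
// multiple of 4M (provided has_map_align is true and the hint exceeds the
// default page size).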
2827 
2828 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
2829                             size_t alignment_hint) {
2830   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint,
2831                                   (requested_addr != NULL));
2832 
2833   guarantee(requested_addr == NULL || requested_addr == addr,
2834             "OS failed to return requested mmap address.");
2835   return addr;
2836 }
2837 
2838 // Reserve memory at an arbitrary address, only if that area is
2839 // available (and not reserved for something else).
2840 
2841 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2842   const int max_tries = 10;
2843   char* base[max_tries];
2844   size_t size[max_tries];
2845 
2846   // Solaris adds a gap between mmap'ed regions.  The size of the gap
2847   // is dependent on the requested size and the MMU.  Our initial gap
2848   // value here is just a guess and will be corrected later.
2849   bool had_top_overlap = false;
2850   bool have_adjusted_gap = false;
2851   size_t gap = 0x400000;
2852 
2853   // Assert only that the size is a multiple of the page size, since
2854   // that's all that mmap requires, and since that's all we really know
2855   // about at this low abstraction level.  If we need higher alignment,
2856   // we can either pass an alignment to this method or verify alignment
2857   // in one of the methods further up the call chain.  See bug 5044738.
2858   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2859 
2860   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2861   // Give it a try, if the kernel honors the hint we can return immediately.
2862   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2863 
2864   volatile int err = errno;
2865   if (addr == requested_addr) {
2866     return addr;
2867   } else if (addr != NULL) {
2868     pd_unmap_memory(addr, bytes);
2869   }
2870 
2871   if (PrintMiscellaneous && Verbose) {
2872     char buf[256];
2873     buf[0] = '\0';
2874     if (addr == NULL) {
2875       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
2876     }
2877     warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2878             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2879             "%s", bytes, requested_addr, addr, buf);
2880   }
2881 
2882   // Address hint method didn't work.  Fall back to the old method.
2883   // In theory, once SNV becomes our oldest supported platform, this
2884   // code will no longer be needed.
2885   //
2886   // Repeatedly allocate blocks until the block is allocated at the
2887   // right spot. Give up after max_tries.
2888   int i;
2889   for (i = 0; i < max_tries; ++i) {
2890     base[i] = reserve_memory(bytes);
2891 
2892     if (base[i] != NULL) {
2893       // Is this the block we wanted?
2894       if (base[i] == requested_addr) {
2895         size[i] = bytes;
2896         break;
2897       }
2898 
2899       // check that the gap value is right
2900       if (had_top_overlap && !have_adjusted_gap) {
2901         size_t actual_gap = base[i-1] - base[i] - bytes;
2902         if (gap != actual_gap) {
2903           // adjust the gap value and retry the last 2 allocations
2904           assert(i > 0, "gap adjustment code problem");
2905           have_adjusted_gap = true;  // adjust the gap only once, just in case
2906           gap = actual_gap;
2907           if (PrintMiscellaneous && Verbose) {
2908             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2909           }
2910           unmap_memory(base[i], bytes);
2911           unmap_memory(base[i-1], size[i-1]);
2912           i-=2;
2913           continue;
2914         }
2915       }
2916 
2917       // Does this overlap the block we wanted? Give back the overlapped
2918       // parts and try again.
2919       //
2920       // There is still a bug in this code: if top_overlap == bytes,
2921       // the overlap is offset from requested region by the value of gap.
2922       // In this case giving back the overlapped part will not work,
2923       // because we'll give back the entire block at base[i] and
2924       // therefore the subsequent allocation will not generate a new gap.
2925       // This could be fixed with a new algorithm that used larger
2926       // or variable size chunks to find the requested region -
2927       // but such a change would introduce additional complications.
2928       // It's rare enough that the planets align for this bug,
2929       // so we'll just wait for a fix for 6204603/5003415 which
2930       // will provide a mmap flag to allow us to avoid this business.
2931 
2932       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2933       if (top_overlap >= 0 && top_overlap < bytes) {
2934         had_top_overlap = true;
2935         unmap_memory(base[i], top_overlap);
2936         base[i] += top_overlap;
2937         size[i] = bytes - top_overlap;
2938       } else {
2939         size_t bottom_overlap = base[i] + bytes - requested_addr;
2940         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2941           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
2942             warning("attempt_reserve_memory_at: possible alignment bug");
2943           }
2944           unmap_memory(requested_addr, bottom_overlap);
2945           size[i] = bytes - bottom_overlap;
2946         } else {
2947           size[i] = bytes;
2948         }
2949       }
2950     }
2951   }
2952 
2953   // Give back the unused reserved pieces.
2954 
2955   for (int j = 0; j < i; ++j) {
2956     if (base[j] != NULL) {
2957       unmap_memory(base[j], size[j]);
2958     }
2959   }
2960 
2961   return (i < max_tries) ? requested_addr : NULL;
2962 }
2963 
2964 bool os::pd_release_memory(char* addr, size_t bytes) {
2965   size_t size = bytes;
2966   return munmap(addr, size) == 0;
2967 }
2968 
2969 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
2970   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
2971          "addr must be page aligned");
2972   int retVal = mprotect(addr, bytes, prot);
2973   return retVal == 0;
2974 }
2975 
2976 // Protect memory (Used to pass readonly pages through
2977 // JNI GetArray<type>Elements with empty arrays.)
2978 // Also, used for serialization page and for compressed oops null pointer
2979 // checking.
2980 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2981                         bool is_committed) {
2982   unsigned int p = 0;
2983   switch (prot) {
2984   case MEM_PROT_NONE: p = PROT_NONE; break;
2985   case MEM_PROT_READ: p = PROT_READ; break;
2986   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2987   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2988   default:
2989     ShouldNotReachHere();
2990   }
2991   // is_committed is unused.
2992   return solaris_mprotect(addr, bytes, p);
2993 }
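
// For example, protect_memory(page_start, os::vm_page_size(), MEM_PROT_READ, true)
// translates MEM_PROT_READ into PROT_READ and applies it to the (page-aligned,
// hypothetical) address page_start via solaris_mprotect(); is_committed is ignored.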
2994 
2995 // guard_memory and unguard_memory only happen within stack guard pages.
2996 // Since ISM pertains only to the heap, guard and unguard memory should not
2997 // happen with an ISM region.
2998 bool os::guard_memory(char* addr, size_t bytes) {
2999   return solaris_mprotect(addr, bytes, PROT_NONE);
3000 }
3001 
3002 bool os::unguard_memory(char* addr, size_t bytes) {
3003   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
3004 }
3005 
3006 // Large page support
3007 static size_t _large_page_size = 0;
3008 
3009 // Insertion sort for small arrays (descending order).
3010 static void insertion_sort_descending(size_t* array, int len) {
3011   for (int i = 0; i < len; i++) {
3012     size_t val = array[i];
3013     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
3014       size_t tmp = array[key];
3015       array[key] = array[key - 1];
3016       array[key - 1] = tmp;
3017     }
3018   }
3019 }
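
// For example, an input of { 8K, 4M, 64K, 512K } is rearranged in place to
// { 4M, 512K, 64K, 8K }.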
3020 
3021 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
3022   const unsigned int usable_count = VM_Version::page_size_count();
3023   if (usable_count == 1) {
3024     return false;
3025   }
3026 
3027   // Find the right getpagesizes interface.  When Solaris 11 is the minimum
3028   // build platform, getpagesizes() (without the '2') can be called directly.
3029   typedef int (*gps_t)(size_t[], int);
3030   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3031   if (gps_func == NULL) {
3032     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3033     if (gps_func == NULL) {
3034       if (warn) {
3035         warning("MPSS is not supported by the operating system.");
3036       }
3037       return false;
3038     }
3039   }
3040 
3041   // Fill the array of page sizes.
3042   int n = (*gps_func)(_page_sizes, page_sizes_max);
3043   assert(n > 0, "Solaris bug?");
3044 
3045   if (n == page_sizes_max) {
3046     // Add a sentinel value (necessary only if the array was completely filled
3047     // since it is static (zeroed at initialization)).
3048     _page_sizes[--n] = 0;
3049     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3050   }
3051   assert(_page_sizes[n] == 0, "missing sentinel");
3052   trace_page_sizes("available page sizes", _page_sizes, n);
3053 
3054   if (n == 1) return false;     // Only one page size available.
3055 
3056   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3057   // select up to usable_count elements.  First sort the array, find the first
3058   // acceptable value, then copy the usable sizes to the top of the array and
3059   // trim the rest.  Make sure to include the default page size :-).
3060   //
3061   // A better policy could get rid of the 4M limit by taking the sizes of the
3062   // important VM memory regions (java heap and possibly the code cache) into
3063   // account.
3064   insertion_sort_descending(_page_sizes, n);
3065   const size_t size_limit =
3066     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3067   int beg;
3068   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */;
3069   const int end = MIN2((int)usable_count, n) - 1;
3070   for (int cur = 0; cur < end; ++cur, ++beg) {
3071     _page_sizes[cur] = _page_sizes[beg];
3072   }
3073   _page_sizes[end] = vm_page_size();
3074   _page_sizes[end + 1] = 0;
3075 
3076   if (_page_sizes[end] > _page_sizes[end - 1]) {
3077     // Default page size is not the smallest; sort again.
3078     insertion_sort_descending(_page_sizes, end + 1);
3079   }
3080   *page_size = _page_sizes[0];
3081 
3082   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3083   return true;
3084 }
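
// Worked example for mpss_sanity_check() (hypothetical values): suppose
// getpagesizes() reports { 8K, 64K, 512K, 4M } (n == 4), usable_count == 3,
// LargePageSizeInBytes is at its default and vm_page_size() == 8K.  After
// sorting, _page_sizes == { 4M, 512K, 64K, 8K }; nothing exceeds the 4M limit,
// so beg == 0 and end == 2.  The copy loop keeps { 4M, 512K }, the default page
// size is stored at index 2 and a sentinel at index 3, giving
// { 4M, 512K, 8K, 0 } and *page_size == 4M.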
3085 
3086 void os::large_page_init() {
3087   if (UseLargePages) {
3088     // print a warning if any large page related flag is specified on command line
3089     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
3090                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3091 
3092     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3093   }
3094 }
3095 
3096 bool os::Solaris::is_valid_page_size(size_t bytes) {
3097   for (int i = 0; _page_sizes[i] != 0; i++) {
3098     if (_page_sizes[i] == bytes) {
3099       return true;
3100     }
3101   }
3102   return false;
3103 }
3104 
3105 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3106   assert(is_valid_page_size(align), err_msg(SIZE_FORMAT " is not a valid page size", align));
3107   assert(is_ptr_aligned((void*) start, align),
3108          err_msg(PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align));
3109   assert(is_size_aligned(bytes, align),
3110          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align));
3111 
3112   // Signal to OS that we want large pages for addresses
3113   // from addr, addr + bytes
3114   struct memcntl_mha mpss_struct;
3115   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3116   mpss_struct.mha_pagesize = align;
3117   mpss_struct.mha_flags = 0;
3118   // Upon successful completion, memcntl() returns 0
3119   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3120     debug_only(warning("Attempt to use MPSS failed."));
3121     return false;
3122   }
3123   return true;
3124 }
3125 
3126 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3127   fatal("os::reserve_memory_special should not be called on Solaris.");
3128   return NULL;
3129 }
3130 
3131 bool os::release_memory_special(char* base, size_t bytes) {
3132   fatal("os::release_memory_special should not be called on Solaris.");
3133   return false;
3134 }
3135 
3136 size_t os::large_page_size() {
3137   return _large_page_size;
3138 }
3139 
3140 // MPSS allows an application to commit large page memory on demand; with ISM
3141 // the entire memory region must be allocated as shared memory.
3142 bool os::can_commit_large_page_memory() {
3143   return true;
3144 }
3145 
3146 bool os::can_execute_large_page_memory() {
3147   return true;
3148 }
3149 
3150 // Read calls from inside the vm need to perform state transitions
3151 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3152   size_t res;
3153   JavaThread* thread = (JavaThread*)Thread::current();
3154   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
3155   ThreadBlockInVM tbiv(thread);
3156   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3157   return res;
3158 }
3159 
3160 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
3161   size_t res;
3162   JavaThread* thread = (JavaThread*)Thread::current();
3163   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
3164   ThreadBlockInVM tbiv(thread);
3165   RESTARTABLE(::pread(fd, buf, (size_t) nBytes, offset), res);
3166   return res;
3167 }
3168 
3169 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3170   size_t res;
3171   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
3172          "Assumed _thread_in_native");
3173   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3174   return res;
3175 }
3176 
3177 void os::naked_short_sleep(jlong ms) {
3178   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3179 
3180   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3181   // Solaris requires -lrt for this.
3182   usleep((ms * 1000));
3183 
3184   return;
3185 }
3186 
3187 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3188 void os::infinite_sleep() {
3189   while (true) {    // sleep forever ...
3190     ::sleep(100);   // ... 100 seconds at a time
3191   }
3192 }
3193 
3194 // Used to convert frequent JVM_Yield() to nops
3195 bool os::dont_yield() {
3196   if (DontYieldALot) {
3197     static hrtime_t last_time = 0;
3198     hrtime_t diff = getTimeNanos() - last_time;
3199 
3200     if (diff < DontYieldALotInterval * 1000000) {
3201       return true;
3202     }
3203 
3204     last_time += diff;
3205 
3206     return false;
3207   } else {
3208     return false;
3209   }
3210 }
3211 
3212 // Note that yield semantics are defined by the scheduling class to which
3213 // the thread currently belongs.  Typically, yield will not yield to
3214 // other equal or higher priority threads that reside on the dispatch queues
3215 // of other CPUs.
3216 
3217 void os::naked_yield() {
3218   thr_yield();
3219 }
3220 
3221 // Interface for setting lwp priorities.  If we are using T2 libthread,
3222 // which forces the use of BoundThreads or we manually set UseBoundThreads,
3223 // all of our threads will be assigned to real lwp's.  Using the thr_setprio
3224 // function is meaningless in this mode, so we must adjust the real lwp's priority.
3225 // The routines below implement the getting and setting of lwp priorities.
3226 //
3227 // Note: T2 is now the only supported libthread. The UseBoundThreads flag is
3228 //       being deprecated and all threads are now BoundThreads.
3229 //
3230 // Note: There are three priority scales used on Solaris.  Java priorities,
3231 //       which range from 1 to 10, the libthread "thr_setprio" scale, which
3232 //       ranges from 0 to 127, and the current scheduling class of the process
3233 //       we are running in.  This is typically from -60 to +60.
3234 //       The setting of the lwp priorities is done after a call to thr_setprio,
3235 //       so Java priorities are mapped to libthread priorities and we map from
3236 //       the latter to lwp priorities.  We don't keep priorities stored in
3237 //       Java priorities since some of our worker threads want to set priorities
3238 //       higher than all Java threads.
3239 //
3240 // For related information:
3241 // (1)  man -s 2 priocntl
3242 // (2)  man -s 4 priocntl
3243 // (3)  man dispadmin
3244 // =    librt.so
3245 // =    libthread/common/rtsched.c - thrp_setlwpprio().
3246 // =    ps -cL <pid> ... to validate priority.
3247 // =    sched_get_priority_min and _max
3248 //              pthread_create
3249 //              sched_setparam
3250 //              pthread_setschedparam
3251 //
3252 // Assumptions:
3253 // +    We assume that all threads in the process belong to the same
3254 //              scheduling class.   I.e., a homogeneous process.
3255 // +    Must be root or in the IA group to change the "interactive" attribute.
3256 //              Priocntl() will fail silently.  The only indication of failure is when
3257 //              we read-back the value and notice that it hasn't changed.
3258 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3259 // +    For RT, change timeslice as well.  Invariant:
3260 //              constant "priority integral"
3261 //              Konst == TimeSlice * (60-Priority)
3262 //              Given a priority, compute appropriate timeslice.
3263 // +    Higher numerical values have higher priority.
3264 
3265 // sched class attributes
3266 typedef struct {
3267   int   schedPolicy;              // classID
3268   int   maxPrio;
3269   int   minPrio;
3270 } SchedInfo;
3271 
3272 
3273 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3274 
3275 #ifdef ASSERT
3276 static int  ReadBackValidate = 1;
3277 #endif
3278 static int  myClass     = 0;
3279 static int  myMin       = 0;
3280 static int  myMax       = 0;
3281 static int  myCur       = 0;
3282 static bool priocntl_enable = false;
3283 
3284 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
3285 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3286 
3287 
3288 // lwp_priocntl_init
3289 //
3290 // Try to determine the priority scale for our process.
3291 //
3292 // Return errno or 0 if OK.
3293 //
3294 static int lwp_priocntl_init() {
3295   int rslt;
3296   pcinfo_t ClassInfo;
3297   pcparms_t ParmInfo;
3298   int i;
3299 
3300   if (!UseThreadPriorities) return 0;
3301 
3302   // If ThreadPriorityPolicy is 1, switch tables
3303   if (ThreadPriorityPolicy == 1) {
3304     for (i = 0; i < CriticalPriority+1; i++)
3305       os::java_to_os_priority[i] = prio_policy1[i];
3306   }
3307   if (UseCriticalJavaThreadPriority) {
3308     // MaxPriority always maps to the FX scheduling class and criticalPrio.
3309     // See set_native_priority() and set_lwp_class_and_priority().
3310     // Save original MaxPriority mapping in case attempt to
3311     // use critical priority fails.
3312     java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3313     // Set negative to distinguish from other priorities
3314     os::java_to_os_priority[MaxPriority] = -criticalPrio;
3315   }
3316 
3317   // Get IDs for a set of well-known scheduling classes.
3318   // TODO-FIXME: GETCLINFO returns the current # of classes in
3319   // the system.  We should have a loop that iterates over the
3320   // classID values, which are known to be "small" integers.
3321 
3322   strcpy(ClassInfo.pc_clname, "TS");
3323   ClassInfo.pc_cid = -1;
3324   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3325   if (rslt < 0) return errno;
3326   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3327   tsLimits.schedPolicy = ClassInfo.pc_cid;
3328   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3329   tsLimits.minPrio = -tsLimits.maxPrio;
3330 
3331   strcpy(ClassInfo.pc_clname, "IA");
3332   ClassInfo.pc_cid = -1;
3333   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3334   if (rslt < 0) return errno;
3335   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3336   iaLimits.schedPolicy = ClassInfo.pc_cid;
3337   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3338   iaLimits.minPrio = -iaLimits.maxPrio;
3339 
3340   strcpy(ClassInfo.pc_clname, "RT");
3341   ClassInfo.pc_cid = -1;
3342   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3343   if (rslt < 0) return errno;
3344   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3345   rtLimits.schedPolicy = ClassInfo.pc_cid;
3346   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3347   rtLimits.minPrio = 0;
3348 
3349   strcpy(ClassInfo.pc_clname, "FX");
3350   ClassInfo.pc_cid = -1;
3351   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3352   if (rslt < 0) return errno;
3353   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3354   fxLimits.schedPolicy = ClassInfo.pc_cid;
3355   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3356   fxLimits.minPrio = 0;
3357 
3358   // Query our "current" scheduling class.
3359   // This will normally be IA, TS or, rarely, FX or RT.
3360   memset(&ParmInfo, 0, sizeof(ParmInfo));
3361   ParmInfo.pc_cid = PC_CLNULL;
3362   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3363   if (rslt < 0) return errno;
3364   myClass = ParmInfo.pc_cid;
3365 
3366   // We now know our scheduling classId, get specific information
3367   // about the class.
3368   ClassInfo.pc_cid = myClass;
3369   ClassInfo.pc_clname[0] = 0;
3370   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3371   if (rslt < 0) return errno;
3372 
3373   if (ThreadPriorityVerbose) {
3374     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3375   }
3376 
3377   memset(&ParmInfo, 0, sizeof(pcparms_t));
3378   ParmInfo.pc_cid = PC_CLNULL;
3379   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3380   if (rslt < 0) return errno;
3381 
3382   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3383     myMin = rtLimits.minPrio;
3384     myMax = rtLimits.maxPrio;
3385   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3386     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3387     myMin = iaLimits.minPrio;
3388     myMax = iaLimits.maxPrio;
3389     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3390   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3391     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3392     myMin = tsLimits.minPrio;
3393     myMax = tsLimits.maxPrio;
3394     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3395   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3396     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3397     myMin = fxLimits.minPrio;
3398     myMax = fxLimits.maxPrio;
3399     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3400   } else {
3401     // No clue - punt
3402     if (ThreadPriorityVerbose) {
3403       tty->print_cr("Unknown scheduling class: %s ... \n",
3404                     ClassInfo.pc_clname);
3405     }
3406     return EINVAL;      // no clue, punt
3407   }
3408 
3409   if (ThreadPriorityVerbose) {
3410     tty->print_cr("Thread priority Range: [%d..%d]\n", myMin, myMax);
3411   }
3412 
3413   priocntl_enable = true;  // Enable changing priorities
3414   return 0;
3415 }
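
// For example, for a process running in the TS class (the common case),
// lwp_priocntl_init() ends up with myClass == tsLimits.schedPolicy and
// [myMin..myMax] == [tsLimits.minPrio..tsLimits.maxPrio], further clamped by
// ts_uprilim.  The numeric limits come from priocntl() and are typically
// -60..60, but that is configuration dependent.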
3416 
3417 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3418 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3419 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3420 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3421 
3422 
3423 // scale_to_lwp_priority
3424 //
3425 // Convert from the libthread "thr_setprio" scale to our current
3426 // lwp scheduling class scale.
3427 //
3428 static int scale_to_lwp_priority(int rMin, int rMax, int x) {
3429   int v;
3430 
3431   if (x == 127) return rMax;            // avoid round-down
3432   v = (((x*(rMax-rMin)))/128)+rMin;
3433   return v;
3434 }
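
// Worked example (hypothetical class limits rMin == -60, rMax == 60, as is
// typical for the TS class): x == 0 maps to -60, x == 64 maps to
// (64 * 120) / 128 - 60 == 0, and x == 127 takes the early return and maps to
// 60; without that special case, integer division would round 127 down to 59.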
3435 
3436 
3437 // set_lwp_class_and_priority
3438 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3439                                int newPrio, int new_class, bool scale) {
3440   int rslt;
3441   int Actual, Expected, prv;
3442   pcparms_t ParmInfo;                   // for GET-SET
3443 #ifdef ASSERT
3444   pcparms_t ReadBack;                   // for readback
3445 #endif
3446 
3447   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3448   // Query current values.
3449   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3450   // Cache "pcparms_t" in global ParmCache.
3451   // TODO: elide set-to-same-value
3452 
3453   // If something went wrong on init, don't change priorities.
3454   if (!priocntl_enable) {
3455     if (ThreadPriorityVerbose) {
3456       tty->print_cr("Trying to set priority but init failed, ignoring");
3457     }
3458     return EINVAL;
3459   }
3460 
3461   // If lwp hasn't started yet, just return;
3462   // the _start routine will call us again.
3463   if (lwpid <= 0) {
3464     if (ThreadPriorityVerbose) {
3465       tty->print_cr("deferring the set_lwp_class_and_priority of thread "
3466                     INTPTR_FORMAT " to %d, lwpid not set",
3467                     ThreadID, newPrio);
3468     }
3469     return 0;
3470   }
3471 
3472   if (ThreadPriorityVerbose) {
3473     tty->print_cr ("set_lwp_class_and_priority("
3474                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3475                    ThreadID, lwpid, newPrio);
3476   }
3477 
3478   memset(&ParmInfo, 0, sizeof(pcparms_t));
3479   ParmInfo.pc_cid = PC_CLNULL;
3480   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3481   if (rslt < 0) return errno;
3482 
3483   int cur_class = ParmInfo.pc_cid;
3484   ParmInfo.pc_cid = (id_t)new_class;
3485 
3486   if (new_class == rtLimits.schedPolicy) {
3487     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3488     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3489                                                        rtLimits.maxPrio, newPrio)
3490                                : newPrio;
3491     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3492     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3493     if (ThreadPriorityVerbose) {
3494       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3495     }
3496   } else if (new_class == iaLimits.schedPolicy) {
3497     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3498     int maxClamped     = MIN2(iaLimits.maxPrio,
3499                               cur_class == new_class
3500                               ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3501     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3502                                                        maxClamped, newPrio)
3503                                : newPrio;
3504     iaInfo->ia_uprilim = cur_class == new_class
3505                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3506     iaInfo->ia_mode    = IA_NOCHANGE;
3507     if (ThreadPriorityVerbose) {
3508       tty->print_cr("IA: [%d...%d] %d->%d\n",
3509                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3510     }
3511   } else if (new_class == tsLimits.schedPolicy) {
3512     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3513     int maxClamped     = MIN2(tsLimits.maxPrio,
3514                               cur_class == new_class
3515                               ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3516     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3517                                                        maxClamped, newPrio)
3518                                : newPrio;
3519     tsInfo->ts_uprilim = cur_class == new_class
3520                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3521     if (ThreadPriorityVerbose) {
3522       tty->print_cr("TS: [%d...%d] %d->%d\n",
3523                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3524     }
3525   } else if (new_class == fxLimits.schedPolicy) {
3526     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3527     int maxClamped     = MIN2(fxLimits.maxPrio,
3528                               cur_class == new_class
3529                               ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3530     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3531                                                        maxClamped, newPrio)
3532                                : newPrio;
3533     fxInfo->fx_uprilim = cur_class == new_class
3534                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3535     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3536     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3537     if (ThreadPriorityVerbose) {
3538       tty->print_cr("FX: [%d...%d] %d->%d\n",
3539                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3540     }
3541   } else {
3542     if (ThreadPriorityVerbose) {
3543       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3544     }
3545     return EINVAL;    // no clue, punt
3546   }
3547 
3548   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3549   if (ThreadPriorityVerbose && rslt) {
3550     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3551   }
3552   if (rslt < 0) return errno;
3553 
3554 #ifdef ASSERT
3555   // Sanity check: read back what we just attempted to set.
3556   // In theory it could have changed in the interim ...
3557   //
3558   // The priocntl system call is tricky.
3559   // Sometimes it'll validate the priority value argument and
3560   // return EINVAL if unhappy.  At other times it fails silently.
3561   // Readbacks are prudent.
3562 
3563   if (!ReadBackValidate) return 0;
3564 
3565   memset(&ReadBack, 0, sizeof(pcparms_t));
3566   ReadBack.pc_cid = PC_CLNULL;
3567   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3568   assert(rslt >= 0, "priocntl failed");
3569   Actual = Expected = 0xBAD;
3570   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3571   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3572     Actual   = RTPRI(ReadBack)->rt_pri;
3573     Expected = RTPRI(ParmInfo)->rt_pri;
3574   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3575     Actual   = IAPRI(ReadBack)->ia_upri;
3576     Expected = IAPRI(ParmInfo)->ia_upri;
3577   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3578     Actual   = TSPRI(ReadBack)->ts_upri;
3579     Expected = TSPRI(ParmInfo)->ts_upri;
3580   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3581     Actual   = FXPRI(ReadBack)->fx_upri;
3582     Expected = FXPRI(ParmInfo)->fx_upri;
3583   } else {
3584     if (ThreadPriorityVerbose) {
3585       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3586                     ParmInfo.pc_cid);
3587     }
3588   }
3589 
3590   if (Actual != Expected) {
3591     if (ThreadPriorityVerbose) {
3592       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3593                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3594     }
3595   }
3596 #endif
3597 
3598   return 0;
3599 }
3600 
3601 // Solaris only gives access to 128 real priorities at a time,
3602 // so we expand Java's ten to fill this range.  This would be better
3603 // if we dynamically adjusted relative priorities.
3604 //
3605 // The ThreadPriorityPolicy option allows us to select 2 different
3606 // priority scales.
3607 //
3608 // ThreadPriorityPolicy=0
3609 // Since Solaris' default priority is MaximumPriority, we do not
3610 // set a priority lower than Max unless a priority lower than
3611 // NormPriority is requested.
3612 //
3613 // ThreadPriorityPolicy=1
3614 // This mode causes the priority table to get filled with
3615 // linear values.  NormPriority gets mapped to 50% of the
3616 // maximum priority and so on.  This will cause VM threads
3617 // to get unfair treatment relative to other Solaris processes
3618 // which do not explicitly alter their thread priorities.
3619 
3620 int os::java_to_os_priority[CriticalPriority + 1] = {
3621   -99999,         // 0 Entry should never be used
3622 
3623   0,              // 1 MinPriority
3624   32,             // 2
3625   64,             // 3
3626 
3627   96,             // 4
3628   127,            // 5 NormPriority
3629   127,            // 6
3630 
3631   127,            // 7
3632   127,            // 8
3633   127,            // 9 NearMaxPriority
3634 
3635   127,            // 10 MaxPriority
3636 
3637   -criticalPrio   // 11 CriticalPriority
3638 };
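
// Example: with this default table (ThreadPriorityPolicy == 0), Java priorities
// 5 (NormPriority) through 10 (MaxPriority) all map to 127 on the thr_setprio
// scale, and set_lwp_class_and_priority() then scales 127 to the top of the
// current class's range via scale_to_lwp_priority().  With
// -XX:+UseCriticalJavaThreadPriority, MaxPriority is remapped to -criticalPrio,
// which requests the FX class at priority 60 (see set_native_priority()).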
3639 
3640 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3641   OSThread* osthread = thread->osthread();
3642 
3643   // Save requested priority in case the thread hasn't been started
3644   osthread->set_native_priority(newpri);
3645 
3646   // Check for critical priority request
3647   bool fxcritical = false;
3648   if (newpri == -criticalPrio) {
3649     fxcritical = true;
3650     newpri = criticalPrio;
3651   }
3652 
3653   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3654   if (!UseThreadPriorities) return OS_OK;
3655 
3656   int status = 0;
3657 
3658   if (!fxcritical) {
3659     // Use thr_setprio only if we have a priority that thr_setprio understands
3660     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3661   }
3662 
3663   int lwp_status =
3664           set_lwp_class_and_priority(osthread->thread_id(),
3665                                      osthread->lwp_id(),
3666                                      newpri,
3667                                      fxcritical ? fxLimits.schedPolicy : myClass,
3668                                      !fxcritical);
3669   if (lwp_status != 0 && fxcritical) {
3670     // Try again, this time without changing the scheduling class
3671     newpri = java_MaxPriority_to_os_priority;
3672     lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3673                                             osthread->lwp_id(),
3674                                             newpri, myClass, false);
3675   }
3676   status |= lwp_status;
3677   return (status == 0) ? OS_OK : OS_ERR;
3678 }
3679 
3680 
3681 OSReturn os::get_native_priority(const Thread* const thread,
3682                                  int *priority_ptr) {
3683   int p;
3684   if (!UseThreadPriorities) {
3685     *priority_ptr = NormalPriority;
3686     return OS_OK;
3687   }
3688   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3689   if (status != 0) {
3690     return OS_ERR;
3691   }
3692   *priority_ptr = p;
3693   return OS_OK;
3694 }
3695 
3696 
3697 // Hint to the underlying OS that a task switch would not be good.
3698 // Void return because it's a hint and can fail.
3699 void os::hint_no_preempt() {
3700   schedctl_start(schedctl_init());
3701 }
3702 
3703 static void resume_clear_context(OSThread *osthread) {
3704   osthread->set_ucontext(NULL);
3705 }
3706 
3707 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3708   osthread->set_ucontext(context);
3709 }
3710 
3711 static Semaphore sr_semaphore;
3712 
3713 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
3714   // Save and restore errno to avoid confusing native code with EINTR
3715   // after sigsuspend.
3716   int old_errno = errno;
3717 
3718   OSThread* osthread = thread->osthread();
3719   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3720 
3721   os::SuspendResume::State current = osthread->sr.state();
3722   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3723     suspend_save_context(osthread, uc);
3724 
3725     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3726     os::SuspendResume::State state = osthread->sr.suspended();
3727     if (state == os::SuspendResume::SR_SUSPENDED) {
3728       sigset_t suspend_set;  // signals for sigsuspend()
3729 
3730       // get current set of blocked signals and unblock resume signal
3731       thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
3732       sigdelset(&suspend_set, os::Solaris::SIGasync());
3733 
3734       sr_semaphore.signal();
3735       // wait here until we are resumed
3736       while (1) {
3737         sigsuspend(&suspend_set);
3738 
3739         os::SuspendResume::State result = osthread->sr.running();
3740         if (result == os::SuspendResume::SR_RUNNING) {
3741           sr_semaphore.signal();
3742           break;
3743         }
3744       }
3745 
3746     } else if (state == os::SuspendResume::SR_RUNNING) {
3747       // request was cancelled, continue
3748     } else {
3749       ShouldNotReachHere();
3750     }
3751 
3752     resume_clear_context(osthread);
3753   } else if (current == os::SuspendResume::SR_RUNNING) {
3754     // request was cancelled, continue
3755   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3756     // ignore
3757   } else {
3758     // ignore
3759   }
3760 
3761   errno = old_errno;
3762 }
3763 
3764 void os::print_statistics() {
3765 }
3766 
3767 int os::message_box(const char* title, const char* message) {
3768   int i;
3769   fdStream err(defaultStream::error_fd());
3770   for (i = 0; i < 78; i++) err.print_raw("=");
3771   err.cr();
3772   err.print_raw_cr(title);
3773   for (i = 0; i < 78; i++) err.print_raw("-");
3774   err.cr();
3775   err.print_raw_cr(message);
3776   for (i = 0; i < 78; i++) err.print_raw("=");
3777   err.cr();
3778 
3779   char buf[16];
3780   // Prevent process from exiting upon "read error" without consuming all CPU
3781   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3782 
3783   return buf[0] == 'y' || buf[0] == 'Y';
3784 }
3785 
3786 static int sr_notify(OSThread* osthread) {
3787   int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
3788   assert_status(status == 0, status, "thr_kill");
3789   return status;
3790 }
3791 
3792 // "Randomly" selected value for how long we want to spin
3793 // before bailing out on suspending a thread; also how often
3794 // we send a signal to a thread we want to resume.
3795 static const int RANDOMLY_LARGE_INTEGER = 1000000;
3796 static const int RANDOMLY_LARGE_INTEGER2 = 100;
3797 
3798 static bool do_suspend(OSThread* osthread) {
3799   assert(osthread->sr.is_running(), "thread should be running");
3800   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
3801 
3802   // mark as suspended and send signal
3803   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3804     // failed to switch, state wasn't running?
3805     ShouldNotReachHere();
3806     return false;
3807   }
3808 
3809   if (sr_notify(osthread) != 0) {
3810     ShouldNotReachHere();
3811   }
3812 
3813   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3814   while (true) {
3815     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
3816       break;
3817     } else {
3818       // timeout
3819       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3820       if (cancelled == os::SuspendResume::SR_RUNNING) {
3821         return false;
3822       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3823         // make sure that we consume the signal on the semaphore as well
3824         sr_semaphore.wait();
3825         break;
3826       } else {
3827         ShouldNotReachHere();
3828         return false;
3829       }
3830     }
3831   }
3832 
3833   guarantee(osthread->sr.is_suspended(), "Must be suspended");
3834   return true;
3835 }
3836 
3837 static void do_resume(OSThread* osthread) {
3838   assert(osthread->sr.is_suspended(), "thread should be suspended");
3839   assert(!sr_semaphore.trywait(), "invalid semaphore state");
3840 
3841   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3842     // failed to switch to WAKEUP_REQUEST
3843     ShouldNotReachHere();
3844     return;
3845   }
3846 
3847   while (true) {
3848     if (sr_notify(osthread) == 0) {
3849       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
3850         if (osthread->sr.is_running()) {
3851           return;
3852         }
3853       }
3854     } else {
3855       ShouldNotReachHere();
3856     }
3857   }
3858 
3859   guarantee(osthread->sr.is_running(), "Must be running!");
3860 }
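
// Summary of the suspend/resume handshake implemented by do_suspend(),
// do_resume() and SR_handler() above:
//   1. do_suspend() moves the state to SR_SUSPEND_REQUEST and sends SIGasync
//      via sr_notify().
//   2. SR_handler() runs in the target thread, saves the ucontext, moves the
//      state to SR_SUSPENDED, posts sr_semaphore and blocks in sigsuspend().
//   3. do_resume() moves the state to SR_WAKEUP_REQUEST and signals the thread
//      again.
//   4. SR_handler() returns from sigsuspend(), moves the state to SR_RUNNING,
//      posts sr_semaphore and clears the saved context.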
3861 
3862 void os::SuspendedThreadTask::internal_do_task() {
3863   if (do_suspend(_thread->osthread())) {
3864     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3865     do_task(context);
3866     do_resume(_thread->osthread());
3867   }
3868 }
3869 
3870 class PcFetcher : public os::SuspendedThreadTask {
3871  public:
3872   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3873   ExtendedPC result();
3874  protected:
3875   void do_task(const os::SuspendedThreadTaskContext& context);
3876  private:
3877   ExtendedPC _epc;
3878 };
3879 
3880 ExtendedPC PcFetcher::result() {
3881   guarantee(is_done(), "task is not done yet.");
3882   return _epc;
3883 }
3884 
3885 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3886   Thread* thread = context.thread();
3887   OSThread* osthread = thread->osthread();
3888   if (osthread->ucontext() != NULL) {
3889     _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
3890   } else {
3891     // NULL context is unexpected, double-check this is the VMThread
3892     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3893   }
3894 }
3895 
3896 // A lightweight implementation that does not suspend the target thread and
3897 // thus returns only a hint. Used for profiling only!
3898 ExtendedPC os::get_thread_pc(Thread* thread) {
3899   // Make sure that it is called by the watcher and the Threads lock is owned.
3900   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
3901   // For now, is only used to profile the VM Thread
3902   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3903   PcFetcher fetcher(thread);
3904   fetcher.run();
3905   return fetcher.result();
3906 }
3907 
3908 
3909 // This does not do anything on Solaris. This is basically a hook for being
3910 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
3911 void os::os_exception_wrapper(java_call_t f, JavaValue* value,
3912                               methodHandle* method, JavaCallArguments* args,
3913                               Thread* thread) {
3914   f(value, method, args, thread);
3915 }
3916 
3917 // This routine may be used by user applications as a "hook" to catch signals.
3918 // The user-defined signal handler must pass unrecognized signals to this
3919 // routine, and if it returns true (non-zero), then the signal handler must
3920 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
3921 // routine will never return false (zero), but instead will execute a VM panic
3922 // routine to kill the process.
3923 //
3924 // If this routine returns false, it is OK to call it again.  This allows
3925 // the user-defined signal handler to perform checks either before or after
3926 // the VM performs its own checks.  Naturally, the user code would be making
3927 // a serious error if it tried to handle an exception (such as a null check
3928 // or breakpoint) that the VM was generating for its own correct operation.
3929 //
3930 // This routine may recognize any of the following kinds of signals:
3931 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
3932 // os::Solaris::SIGasync
3933 // It should be consulted by handlers for any of those signals.
3934 // It explicitly does not recognize os::Solaris::SIGinterrupt
3935 //
3936 // The caller of this routine must pass in the three arguments supplied
3937 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
3938 // field of the structure passed to sigaction().  This routine assumes that
3939 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3940 //
3941 // Note that the VM will print warnings if it detects conflicting signal
3942 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3943 //
3944 extern "C" JNIEXPORT int JVM_handle_solaris_signal(int signo,
3945                                                    siginfo_t* siginfo,
3946                                                    void* ucontext,
3947                                                    int abort_if_unrecognized);
3948 
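// Illustrative sketch of a user-defined handler that forwards unrecognized
// signals to the VM as described above (hypothetical application code, not
// part of this file):
//
//   extern "C" void my_app_handler(int sig, siginfo_t* info, void* uc) {
//     if (JVM_handle_solaris_signal(sig, info, uc, 0 /* abort_if_unrecognized */)) {
//       return;  // the VM recognized and handled the signal
//     }
//     // ... application-specific handling for signals the VM did not claim ...
//   }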
3949 
3950 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
3951   int orig_errno = errno;  // Preserve errno value over signal handler.
3952   JVM_handle_solaris_signal(sig, info, ucVoid, true);
3953   errno = orig_errno;
3954 }
3955 
3956 // Do not delete - if guarantee is ever removed,  a signal handler (even empty)
3957 // is needed to provoke threads blocked on IO to return an EINTR
3958 // Note: this explicitly does NOT call JVM_handle_solaris_signal and
3959 // does NOT participate in signal chaining due to requirement for
3960 // NOT setting SA_RESTART to make EINTR work.
3961 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
3962   if (UseSignalChaining) {
3963     struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
3964     if (actp && actp->sa_handler) {
3965       vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
3966     }
3967   }
3968 }
3969 
3970 // This boolean allows users to forward their own non-matching signals
3971 // to JVM_handle_solaris_signal, harmlessly.
3972 bool os::Solaris::signal_handlers_are_installed = false;
3973 
3974 // For signal-chaining
3975 bool os::Solaris::libjsig_is_loaded = false;
3976 typedef struct sigaction *(*get_signal_t)(int);
3977 get_signal_t os::Solaris::get_signal_action = NULL;
3978 
3979 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
3980   struct sigaction *actp = NULL;
3981 
3982   if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
3983     // Retrieve the old signal handler from libjsig
3984     actp = (*get_signal_action)(sig);
3985   }
3986   if (actp == NULL) {
3987     // Retrieve the preinstalled signal handler from jvm
3988     actp = get_preinstalled_handler(sig);
3989   }
3990 
3991   return actp;
3992 }
3993 
3994 static bool call_chained_handler(struct sigaction *actp, int sig,
3995                                  siginfo_t *siginfo, void *context) {
3996   // Call the old signal handler
3997   if (actp->sa_handler == SIG_DFL) {
3998     // It's more reasonable to let jvm treat it as an unexpected exception
3999     // instead of taking the default action.
4000     return false;
4001   } else if (actp->sa_handler != SIG_IGN) {
4002     if ((actp->sa_flags & SA_NODEFER) == 0) {
4003       // automatically block the signal
4004       sigaddset(&(actp->sa_mask), sig);
4005     }
4006 
4007     sa_handler_t hand;
4008     sa_sigaction_t sa;
4009     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
4010     // retrieve the chained handler
4011     if (siginfo_flag_set) {
4012       sa = actp->sa_sigaction;
4013     } else {
4014       hand = actp->sa_handler;
4015     }
4016 
4017     if ((actp->sa_flags & SA_RESETHAND) != 0) {
4018       actp->sa_handler = SIG_DFL;
4019     }
4020 
4021     // try to honor the signal mask
4022     sigset_t oset;
4023     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
4024 
4025     // call into the chained handler
4026     if (siginfo_flag_set) {
4027       (*sa)(sig, siginfo, context);
4028     } else {
4029       (*hand)(sig);
4030     }
4031 
4032     // restore the signal mask
4033     thr_sigsetmask(SIG_SETMASK, &oset, 0);
4034   }
4035   // Tell jvm's signal handler the signal is taken care of.
4036   return true;
4037 }
4038 
4039 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4040   bool chained = false;
4041   // signal-chaining
4042   if (UseSignalChaining) {
4043     struct sigaction *actp = get_chained_signal_action(sig);
4044     if (actp != NULL) {
4045       chained = call_chained_handler(actp, sig, siginfo, context);
4046     }
4047   }
4048   return chained;
4049 }
4050 
4051 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4052   assert((chainedsigactions != (struct sigaction *)NULL) &&
4053          (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
4054   if (preinstalled_sigs[sig] != 0) {
4055     return &chainedsigactions[sig];
4056   }
4057   return NULL;
4058 }
4059 
4060 void os::Solaris::save_preinstalled_handler(int sig,
4061                                             struct sigaction& oldAct) {
4062   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4063   assert((chainedsigactions != (struct sigaction *)NULL) &&
4064          (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
4065   chainedsigactions[sig] = oldAct;
4066   preinstalled_sigs[sig] = 1;
4067 }
4068 
4069 void os::Solaris::set_signal_handler(int sig, bool set_installed,
4070                                      bool oktochain) {
4071   // Check for overwrite.
4072   struct sigaction oldAct;
4073   sigaction(sig, (struct sigaction*)NULL, &oldAct);
4074   void* oldhand =
4075       oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
4076                           : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
4077   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4078       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4079       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4080     if (AllowUserSignalHandlers || !set_installed) {
4081       // Do not overwrite; user takes responsibility to forward to us.
4082       return;
4083     } else if (UseSignalChaining) {
4084       if (oktochain) {
4085         // save the old handler in jvm
4086         save_preinstalled_handler(sig, oldAct);
4087       } else {
4088         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4089       }
4090       // libjsig also interposes the sigaction() call below and saves the
4091       // old sigaction on its own.
4092     } else {
4093       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4094                     "%#lx for signal %d.", (long)oldhand, sig));
4095     }
4096   }
4097 
4098   struct sigaction sigAct;
4099   sigfillset(&(sigAct.sa_mask));
4100   sigAct.sa_handler = SIG_DFL;
4101 
4102   sigAct.sa_sigaction = signalHandler;
4103   // Handle SIGSEGV on alternate signal stack if
4104   // not using stack banging
4105   if (!UseStackBanging && sig == SIGSEGV) {
4106     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4107   } else if (sig == os::Solaris::SIGinterrupt()) {
4108     // Interruptible i/o requires SA_RESTART cleared so EINTR
4109     // is returned instead of restarting system calls
4110     sigemptyset(&sigAct.sa_mask);
4111     sigAct.sa_handler = NULL;
4112     sigAct.sa_flags = SA_SIGINFO;
4113     sigAct.sa_sigaction = sigINTRHandler;
4114   } else {
4115     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4116   }
4117   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4118 
4119   sigaction(sig, &sigAct, &oldAct);
4120 
4121   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4122                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4123   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4124 }
4125 
4126 
4127 #define DO_SIGNAL_CHECK(sig)                      \
4128   do {                                            \
4129     if (!sigismember(&check_signal_done, sig)) {  \
4130       os::Solaris::check_signal_handler(sig);     \
4131     }                                             \
4132   } while (0)
4133 
4134 // This method is a periodic task to check for misbehaving JNI applications
4135 // under CheckJNI; we can add any periodic checks here.
4136 
4137 void os::run_periodic_checks() {
4138   // A big source of grief is hijacking virt. addr 0x0 on Solaris,
4139   // thereby preventing NULL checks.
4140   if (!check_addr0_done) check_addr0_done = check_addr0(tty);
4141 
4142   if (check_signals == false) return;
4143 
4144   // SEGV and BUS, if overridden, could potentially prevent
4145   // generation of hs*.log in the event of a crash; debugging
4146   // such a case can be very challenging, so we absolutely
4147   // check for the following for good measure:
4148   DO_SIGNAL_CHECK(SIGSEGV);
4149   DO_SIGNAL_CHECK(SIGILL);
4150   DO_SIGNAL_CHECK(SIGFPE);
4151   DO_SIGNAL_CHECK(SIGBUS);
4152   DO_SIGNAL_CHECK(SIGPIPE);
4153   DO_SIGNAL_CHECK(SIGXFSZ);
4154 
4155   // ReduceSignalUsage allows the user to override these handlers;
4156   // see comments at the very top and in jvm_solaris.h
4157   if (!ReduceSignalUsage) {
4158     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4159     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4160     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4161     DO_SIGNAL_CHECK(BREAK_SIGNAL);
4162   }
4163 
4164   // See comments above for using JVM1/JVM2 and UseAltSigs
4165   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4166   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4167 
4168 }
4169 
4170 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4171 
4172 static os_sigaction_t os_sigaction = NULL;
4173 
4174 void os::Solaris::check_signal_handler(int sig) {
4175   char buf[O_BUFLEN];
4176   address jvmHandler = NULL;
4177 
4178   struct sigaction act;
4179   if (os_sigaction == NULL) {
4180     // only trust the default sigaction, in case it has been interposed
4181     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4182     if (os_sigaction == NULL) return;
4183   }
4184 
4185   os_sigaction(sig, (struct sigaction*)NULL, &act);
4186 
4187   address thisHandler = (act.sa_flags & SA_SIGINFO)
4188     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4189     : CAST_FROM_FN_PTR(address, act.sa_handler);
4190 
4191 
4192   switch (sig) {
4193   case SIGSEGV:
4194   case SIGBUS:
4195   case SIGFPE:
4196   case SIGPIPE:
4197   case SIGXFSZ:
4198   case SIGILL:
4199     jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4200     break;
4201 
4202   case SHUTDOWN1_SIGNAL:
4203   case SHUTDOWN2_SIGNAL:
4204   case SHUTDOWN3_SIGNAL:
4205   case BREAK_SIGNAL:
4206     jvmHandler = (address)user_handler();
4207     break;
4208 
4209   default:
4210     int intrsig = os::Solaris::SIGinterrupt();
4211     int asynsig = os::Solaris::SIGasync();
4212 
4213     if (sig == intrsig) {
4214       jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4215     } else if (sig == asynsig) {
4216       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4217     } else {
4218       return;
4219     }
4220     break;
4221   }
4222 
4223 
4224   if (thisHandler != jvmHandler) {
4225     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4226     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4227     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4228     // No need to check this sig any longer
4229     sigaddset(&check_signal_done, sig);
4230     // Running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned to SIG_IGN
4231     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
4232       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
4233                     exception_name(sig, buf, O_BUFLEN));
4234     }
4235   } else if (os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4236     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4237     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4238     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
4239     // No need to check this sig any longer
4240     sigaddset(&check_signal_done, sig);
4241   }
4242 
4243   // Print all the signal handler state
4244   if (sigismember(&check_signal_done, sig)) {
4245     print_signal_handlers(tty, buf, O_BUFLEN);
4246   }
4247 
4248 }
4249 
4250 void os::Solaris::install_signal_handlers() {
4251   bool libjsigdone = false;
4252   signal_handlers_are_installed = true;
4253 
4254   // signal-chaining
4255   typedef void (*signal_setting_t)();
4256   signal_setting_t begin_signal_setting = NULL;
4257   signal_setting_t end_signal_setting = NULL;
4258   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4259                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4260   if (begin_signal_setting != NULL) {
4261     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4262                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4263     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4264                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4265     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4266                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4267     libjsig_is_loaded = true;
4268     if (os::Solaris::get_libjsig_version != NULL) {
4269       libjsigversion =  (*os::Solaris::get_libjsig_version)();
4270     }
4271     assert(UseSignalChaining, "should enable signal-chaining");
4272   }
4273   if (libjsig_is_loaded) {
4274     // Tell libjsig jvm is setting signal handlers
4275     (*begin_signal_setting)();
4276   }
4277 
4278   set_signal_handler(SIGSEGV, true, true);
4279   set_signal_handler(SIGPIPE, true, true);
4280   set_signal_handler(SIGXFSZ, true, true);
4281   set_signal_handler(SIGBUS, true, true);
4282   set_signal_handler(SIGILL, true, true);
4283   set_signal_handler(SIGFPE, true, true);
4284 
4285 
4286   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4287 
4288     // Pre-1.4.1 libjsig is limited to chaining signals <= 32, so it
4289     // cannot register overridable signals, which might be > 32
4290     if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
4291       // Tell libjsig jvm has finished setting signal handlers
4292       (*end_signal_setting)();
4293       libjsigdone = true;
4294     }
4295   }
4296 
4297   // Never ok to chain our SIGinterrupt
4298   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4299   set_signal_handler(os::Solaris::SIGasync(), true, true);
4300 
4301   if (libjsig_is_loaded && !libjsigdone) {
4302     // Tell libjsig jvm has finished setting signal handlers
4303     (*end_signal_setting)();
4304   }
4305 
4306   // We don't activate the signal checker if libjsig is in place; we trust ourselves,
4307   // and if UserSignalHandler is installed all bets are off.
4308   // Log that signal checking is off only if -verbose:jni is specified.
4309   if (CheckJNICalls) {
4310     if (libjsig_is_loaded) {
4311       if (PrintJNIResolving) {
4312         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4313       }
4314       check_signals = false;
4315     }
4316     if (AllowUserSignalHandlers) {
4317       if (PrintJNIResolving) {
4318         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4319       }
4320       check_signals = false;
4321     }
4322   }
4323 }
4324 
4325 
4326 void report_error(const char* file_name, int line_no, const char* title,
4327                   const char* format, ...);
4328 
4329 const char * signames[] = {
4330   "SIG0",
4331   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4332   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4333   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4334   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4335   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4336   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4337   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4338   "SIGCANCEL", "SIGLOST"
4339 };
4340 
4341 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4342   if (0 < exception_code && exception_code <= SIGRTMAX) {
4343     // signal
4344     if (exception_code < sizeof(signames)/sizeof(const char*)) {
4345       jio_snprintf(buf, size, "%s", signames[exception_code]);
4346     } else {
4347       jio_snprintf(buf, size, "SIG%d", exception_code);
4348     }
4349     return buf;
4350   } else {
4351     return NULL;
4352   }
4353 }
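
// For example, exception_name(SIGSEGV, buf, sizeof(buf)) yields "SIGSEGV",
// while a signal number beyond the signames table (but still <= SIGRTMAX)
// yields the generic form "SIG<num>".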
4354 
4355 // (Static) wrapper for getisax(2) call.
4356 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4357 
4358 // (Static) wrappers for the liblgrp API
4359 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4360 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4361 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4362 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4363 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4364 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4365 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4366 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4367 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4368 
4369 // (Static) wrapper for meminfo() call.
4370 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4371 
4372 static address resolve_symbol_lazy(const char* name) {
4373   address addr = (address) dlsym(RTLD_DEFAULT, name);
4374   if (addr == NULL) {
4375     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4376     addr = (address) dlsym(RTLD_NEXT, name);
4377   }
4378   return addr;
4379 }
4380 
4381 static address resolve_symbol(const char* name) {
4382   address addr = resolve_symbol_lazy(name);
4383   if (addr == NULL) {
4384     fatal(dlerror());
4385   }
4386   return addr;
4387 }
4388 
4389 void os::Solaris::libthread_init() {
4390   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4391 
4392   lwp_priocntl_init();
4393 
4394   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4395   if (func == NULL) {
4396     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
4397     // Guarantee that this VM is running on a new enough OS (5.6 or
4398     // later) that it will have a new enough libthread.so.
4399     guarantee(func != NULL, "libthread.so is too old.");
4400   }
4401 
4402   int size;
4403   void (*handler_info_func)(address *, int *);
4404   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4405   handler_info_func(&handler_start, &size);
4406   handler_end = handler_start + size;
4407 }
4408 
4409 
4410 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4411 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4412 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4413 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4414 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4415 int os::Solaris::_mutex_scope = USYNC_THREAD;
4416 
4417 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4418 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4419 int_fnP_cond_tP os::Solaris::_cond_signal;
4420 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4421 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4422 int_fnP_cond_tP os::Solaris::_cond_destroy;
4423 int os::Solaris::_cond_scope = USYNC_THREAD;
4424 
4425 void os::Solaris::synchronization_init() {
4426   if (UseLWPSynchronization) {
4427     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4428     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4429     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4430     os::Solaris::set_mutex_init(lwp_mutex_init);
4431     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4432     os::Solaris::set_mutex_scope(USYNC_THREAD);
4433 
4434     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4435     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4436     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4437     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4438     os::Solaris::set_cond_init(lwp_cond_init);
4439     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4440     os::Solaris::set_cond_scope(USYNC_THREAD);
4441   } else {
4442     os::Solaris::set_mutex_scope(USYNC_THREAD);
4443     os::Solaris::set_cond_scope(USYNC_THREAD);
4444 
4445     if (UsePthreads) {
4446       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4447       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4448       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4449       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4450       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4451 
4452       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4453       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4454       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4455       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4456       os::Solaris::set_cond_init(pthread_cond_default_init);
4457       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4458     } else {
4459       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4460       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4461       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4462       os::Solaris::set_mutex_init(::mutex_init);
4463       os::Solaris::set_mutex_destroy(::mutex_destroy);
4464 
4465       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4466       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4467       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4468       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4469       os::Solaris::set_cond_init(::cond_init);
4470       os::Solaris::set_cond_destroy(::cond_destroy);
4471     }
4472   }
4473 }
4474 
4475 bool os::Solaris::liblgrp_init() {
4476   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4477   if (handle != NULL) {
4478     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4479     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4480     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4481     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4482     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4483     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4484     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4485     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4486                                                       dlsym(handle, "lgrp_cookie_stale")));
4487 
4488     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4489     set_lgrp_cookie(c);
4490     return true;
4491   }
4492   return false;
4493 }
4494 
4495 void os::Solaris::misc_sym_init() {
4496   address func;
4497 
4498   // getisax
4499   func = resolve_symbol_lazy("getisax");
4500   if (func != NULL) {
4501     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4502   }
4503 
4504   // meminfo
4505   func = resolve_symbol_lazy("meminfo");
4506   if (func != NULL) {
4507     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4508   }
4509 }
4510 
4511 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4512   assert(_getisax != NULL, "_getisax not set");
4513   return _getisax(array, n);
4514 }
4515 
4516 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4517 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4518 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4519 
4520 void init_pset_getloadavg_ptr(void) {
4521   pset_getloadavg_ptr =
4522     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4523   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4524     warning("pset_getloadavg function not found");
4525   }
4526 }
4527 
4528 int os::Solaris::_dev_zero_fd = -1;
4529 
4530 // this is called _before_ the global arguments have been parsed
4531 void os::init(void) {
4532   _initial_pid = getpid();
4533 
4534   max_hrtime = first_hrtime = gethrtime();
4535 
4536   init_random(1234567);
4537 
4538   page_size = sysconf(_SC_PAGESIZE);
4539   if (page_size == -1) {
4540     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4541                   strerror(errno)));
4542   }
4543   init_page_sizes((size_t) page_size);
4544 
4545   Solaris::initialize_system_info();
4546 
4547   // Initialize misc. symbols as soon as possible, so we can use them
4548   // if we need them.
4549   Solaris::misc_sym_init();
4550 
4551   int fd = ::open("/dev/zero", O_RDWR);
4552   if (fd < 0) {
4553     fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4554   } else {
4555     Solaris::set_dev_zero_fd(fd);
4556 
4557     // Close on exec, child won't inherit.
4558     fcntl(fd, F_SETFD, FD_CLOEXEC);
4559   }
4560 
4561   clock_tics_per_sec = CLK_TCK;
4562 
4563   // check if dladdr1() exists; dladdr1 can provide more information than
4564   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4565   // and is available on linker patches for 5.7 and 5.8.
4566   // libdl.so must have been loaded, this call is just an entry lookup
4567   void * hdl = dlopen("libdl.so", RTLD_NOW);
4568   if (hdl) {
4569     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4570   }
4571 
4572   // (Solaris only) this switches to calls that actually do locking.
4573   ThreadCritical::initialize();
4574 
4575   main_thread = thr_self();
4576 
4577   // Constant minimum stack size allowed. It must be at least
4578   // the minimum of what the OS supports (thr_min_stack()), and
4579   // enough to allow the thread to get to user bytecode execution.
4580   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4581   // If the pagesize of the VM is greater than 8K, determine the appropriate
4582   // number of initial guard pages.  The user can change this with the
4583   // command line arguments, if needed.
4584   if (vm_page_size() > 8*K) {
4585     StackYellowPages = 1;
4586     StackRedPages = 1;
4587     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4588   }
4589 }
4590 
4591 // To install functions for atexit system call
4592 extern "C" {
4593   static void perfMemory_exit_helper() {
4594     perfMemory_exit();
4595   }
4596 }
4597 
4598 // this is called _after_ the global arguments have been parsed
4599 jint os::init_2(void) {
4600   // try to enable extended file IO ASAP, see 6431278
4601   os::Solaris::try_enable_extended_io();
4602 
4603   // Allocate a single page and mark it as readable for safepoint polling.  Also
4604   // use this first mmap call to check support for MAP_ALIGN.
4605   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4606                                                       page_size,
4607                                                       MAP_PRIVATE | MAP_ALIGN,
4608                                                       PROT_READ);
4609   if (polling_page == NULL) {
4610     has_map_align = false;
4611     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
4612                                                 PROT_READ);
4613   }
4614 
4615   os::set_polling_page(polling_page);
4616 
4617 #ifndef PRODUCT
4618   if (Verbose && PrintMiscellaneous) {
4619     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n",
4620                (intptr_t)polling_page);
4621   }
4622 #endif
4623 
4624   if (!UseMembar) {
4625     address mem_serialize_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE);
4626     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
4627     os::set_memory_serialize_page(mem_serialize_page);
4628 
4629 #ifndef PRODUCT
4630     if (Verbose && PrintMiscellaneous) {
4631       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n",
4632                  (intptr_t)mem_serialize_page);
4633     }
4634 #endif
4635   }
4636 
4637   // Check minimum allowable stack size for thread creation and to initialize
4638   // the java system classes, including StackOverflowError - depends on page
4639   // size.  Add a page for compiler2 recursion in main thread.
4640   // Add in 2*BytesPerWord times page size to account for VM stack during
4641   // class initialization depending on 32 or 64 bit VM.
4642   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
4643                                         (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
4644                                         2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
4645 
4646   size_t threadStackSizeInBytes = ThreadStackSize * K;
4647   if (threadStackSizeInBytes != 0 &&
4648       threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
4649     tty->print_cr("\nThe stack size specified is too small. Specify at least %dk",
4650                   os::Solaris::min_stack_allowed/K);
4651     return JNI_ERR;
4652   }
4653 
4654   // With a 64kb page size (or any page size larger than 8kb) the
4655   // usable default stack size is quite a bit less.  Increase the
4656   // stack for such pages; this increases virtual memory
4657   // fragmentation (since we're not creating the stack on a power of
4658   // 2 boundary).  The real fix for this should be to fix the guard
4659   // page mechanism.
4660 
4661   if (vm_page_size() > 8*K) {
4662     threadStackSizeInBytes = (threadStackSizeInBytes != 0)
4663        ? threadStackSizeInBytes +
4664          ((StackYellowPages + StackRedPages) * vm_page_size())
4665        : 0;
4666     ThreadStackSize = threadStackSizeInBytes/K;
4667   }
4668 
4669   // Make the stack size a multiple of the page size so that
4670   // the yellow/red zones can be guarded.
4671   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
4672                                                 vm_page_size()));
4673 
4674   Solaris::libthread_init();
4675 
4676   if (UseNUMA) {
4677     if (!Solaris::liblgrp_init()) {
4678       UseNUMA = false;
4679     } else {
4680       size_t lgrp_limit = os::numa_get_groups_num();
4681       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
4682       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
4683       FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
4684       if (lgrp_num < 2) {
4685         // There's only one locality group, disable NUMA.
4686         UseNUMA = false;
4687       }
4688     }
4689     if (!UseNUMA && ForceNUMA) {
4690       UseNUMA = true;
4691     }
4692   }
4693 
4694   Solaris::signal_sets_init();
4695   Solaris::init_signal_mem();
4696   Solaris::install_signal_handlers();
4697 
4698   if (libjsigversion < JSIG_VERSION_1_4_1) {
4699     Maxlibjsigsigs = OLDMAXSIGNUM;
4700   }
4701 
4702   // initialize synchronization primitives to use either thread or
4703   // lwp synchronization (controlled by UseLWPSynchronization)
4704   Solaris::synchronization_init();
4705 
4706   if (MaxFDLimit) {
4707     // set the number of file descriptors to max. print out error
4708     // if getrlimit/setrlimit fails but continue regardless.
4709     struct rlimit nbr_files;
4710     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4711     if (status != 0) {
4712       if (PrintMiscellaneous && (Verbose || WizardMode)) {
4713         perror("os::init_2 getrlimit failed");
4714       }
4715     } else {
4716       nbr_files.rlim_cur = nbr_files.rlim_max;
4717       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4718       if (status != 0) {
4719         if (PrintMiscellaneous && (Verbose || WizardMode)) {
4720           perror("os::init_2 setrlimit failed");
4721         }
4722       }
4723     }
4724   }
4725 
4726   // Calculate the theoretical max. number of threads to guard against
4727   // artificial out-of-memory situations, where all available address
4728   // space has been reserved by thread stacks. Default stack size is 1Mb.
4729   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
4730     JavaThread::stack_size_at_create() : (1*K*K);
4731   assert(pre_thread_stack_size != 0, "Must have a stack");
4732   // Solaris limits a (32-bit) user program to 4Gb of address space. Calculate the
4733   // thread limit at which we should start doing virtual memory banging: currently,
4734   // when the thread stacks have used all but 200Mb of that space.
4735   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
4736   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
4737 
4738   // at-exit methods are called in the reverse order of their registration.
4739   // In Solaris 7 and earlier, atexit functions are called on return from
4740   // main or as a result of a call to exit(3C). There can be only 32 of
4741   // these functions registered and atexit() does not set errno. In Solaris
4742   // 8 and later, there is no limit to the number of functions registered
4743   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
4744   // functions are called upon dlclose(3DL) in addition to return from main
4745   // and exit(3C).
4746 
4747   if (PerfAllowAtExitRegistration) {
4748     // only register atexit functions if PerfAllowAtExitRegistration is set.
4749     // atexit functions can be delayed until process exit time, which
4750     // can be problematic for embedded VM situations. Embedded VMs should
4751     // call DestroyJavaVM() to assure that VM resources are released.
4752 
4753     // note: perfMemory_exit_helper atexit function may be removed in
4754     // the future if the appropriate cleanup code can be added to the
4755     // VM_Exit VMOperation's doit method.
4756     if (atexit(perfMemory_exit_helper) != 0) {
4757       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4758     }
4759   }
4760 
4761   // Init pset_loadavg function pointer
4762   init_pset_getloadavg_ptr();
4763 
4764   return JNI_OK;
4765 }
4766 
4767 // Mark the polling page as unreadable
4768 void os::make_polling_page_unreadable(void) {
4769   if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0) {
4770     fatal("Could not disable polling page");
4771   }
4772 }
4773 
4774 // Mark the polling page as readable
4775 void os::make_polling_page_readable(void) {
4776   if (mprotect((char *)_polling_page, page_size, PROT_READ) != 0) {
4777     fatal("Could not enable polling page");
4778   }
4779 }
4780 
4781 // OS interface.
4782 
4783 bool os::check_heap(bool force) { return true; }
4784 
4785 // Is a (classpath) directory empty?
4786 bool os::dir_is_empty(const char* path) {
4787   DIR *dir = NULL;
4788   struct dirent *ptr;
4789 
4790   dir = opendir(path);
4791   if (dir == NULL) return true;
4792 
4793   // Scan the directory
4794   bool result = true;
4795   char buf[sizeof(struct dirent) + MAX_PATH];
4796   struct dirent *dbuf = (struct dirent *) buf;
4797   while (result && (ptr = os::readdir(dir, dbuf)) != NULL) {
4798     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4799       result = false;
4800     }
4801   }
4802   closedir(dir);
4803   return result;
4804 }
4805 
4806 // This code originates from JDK's sysOpen and open64_w
4807 // from src/solaris/hpi/src/system_md.c
4808 
4809 int os::open(const char *path, int oflag, int mode) {
4810   if (strlen(path) > MAX_PATH - 1) {
4811     errno = ENAMETOOLONG;
4812     return -1;
4813   }
4814   int fd;
4815 
4816   fd = ::open64(path, oflag, mode);
4817   if (fd == -1) return -1;
4818 
4819   // If the open succeeded, the file might still be a directory
4820   {
4821     struct stat64 buf64;
4822     int ret = ::fstat64(fd, &buf64);
4823     int st_mode = buf64.st_mode;
4824 
4825     if (ret != -1) {
4826       if ((st_mode & S_IFMT) == S_IFDIR) {
4827         errno = EISDIR;
4828         ::close(fd);
4829         return -1;
4830       }
4831     } else {
4832       ::close(fd);
4833       return -1;
4834     }
4835   }
4836 
4837   // 32-bit Solaris systems suffer from:
4838   //
4839   // - an historical default soft limit of 256 per-process file
4840   //   descriptors that is too low for many Java programs.
4841   //
4842   // - a design flaw where file descriptors created using stdio
4843   //   fopen must be less than 256, _even_ when the first limit above
4844   //   has been raised.  This can cause calls to fopen (but not calls to
4845   //   open, for example) to fail mysteriously, perhaps in 3rd party
4846   //   native code (although the JDK itself uses fopen).  One can hardly
4847   //   criticize them for using this most standard of all functions.
4848   //
4849   // We attempt to make everything work anyway by:
4850   //
4851   // - raising the soft limit on per-process file descriptors beyond
4852   //   256
4853   //
4854   // - As of Solaris 10u4, we can request that Solaris raise the 256
4855   //   stdio fopen limit by calling function enable_extended_FILE_stdio.
4856   //   This is done in init_2 and recorded in enabled_extended_FILE_stdio
4857   //
4858   // - If we are stuck on an old (pre 10u4) Solaris system, we can
4859   //   workaround the bug by remapping non-stdio file descriptors below
4860   //   256 to ones beyond 256, which is done below.
4861   //
4862   // See:
4863   // 1085341: 32-bit stdio routines should support file descriptors >255
4864   // 6533291: Work around 32-bit Solaris stdio limit of 256 open files
4865   // 6431278: Netbeans crash on 32 bit Solaris: need to call
4866   //          enable_extended_FILE_stdio() in VM initialisation
4867   // Giri Mandalika's blog
4868   // http://technopark02.blogspot.com/2005_05_01_archive.html
4869   //
4870 #ifndef  _LP64
4871   if ((!enabled_extended_FILE_stdio) && fd < 256) {
4872     int newfd = ::fcntl(fd, F_DUPFD, 256);
4873     if (newfd != -1) {
4874       ::close(fd);
4875       fd = newfd;
4876     }
4877   }
4878 #endif // 32-bit Solaris
4879 
4880   // All file descriptors that are opened in the JVM and not
4881   // specifically destined for a subprocess should have the
4882   // close-on-exec flag set.  If we don't set it, then careless 3rd
4883   // party native code might fork and exec without closing all
4884   // appropriate file descriptors (e.g. as we do in closeDescriptors in
4885   // UNIXProcess.c), and this in turn might:
4886   //
4887   // - cause end-of-file to fail to be detected on some file
4888   //   descriptors, resulting in mysterious hangs, or
4889   //
4890   // - might cause an fopen in the subprocess to fail on a system
4891   //   suffering from bug 1085341.
4892   //
4893   // (Yes, the default setting of the close-on-exec flag is a Unix
4894   // design flaw)
4895   //
4896   // See:
4897   // 1085341: 32-bit stdio routines should support file descriptors >255
4898   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4899   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4900   //
4901 #ifdef FD_CLOEXEC
4902   {
4903     int flags = ::fcntl(fd, F_GETFD);
4904     if (flags != -1) {
4905       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4906     }
4907   }
4908 #endif
4909 
4910   return fd;
4911 }
4912 
4913 // create binary file, rewriting existing file if required
4914 int os::create_binary_file(const char* path, bool rewrite_existing) {
4915   int oflags = O_WRONLY | O_CREAT;
4916   if (!rewrite_existing) {
4917     oflags |= O_EXCL;
4918   }
4919   return ::open64(path, oflags, S_IREAD | S_IWRITE);
4920 }
4921 
4922 // return current position of file pointer
4923 jlong os::current_file_offset(int fd) {
4924   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4925 }
4926 
4927 // move file pointer to the specified offset
4928 jlong os::seek_to_file_offset(int fd, jlong offset) {
4929   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4930 }
4931 
4932 jlong os::lseek(int fd, jlong offset, int whence) {
4933   return (jlong) ::lseek64(fd, offset, whence);
4934 }
4935 
4936 char * os::native_path(char *path) {
4937   return path;
4938 }
4939 
4940 int os::ftruncate(int fd, jlong length) {
4941   return ::ftruncate64(fd, length);
4942 }
4943 
4944 int os::fsync(int fd)  {
4945   RESTARTABLE_RETURN_INT(::fsync(fd));
4946 }
4947 
4948 int os::available(int fd, jlong *bytes) {
4949   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
4950          "Assumed _thread_in_native");
4951   jlong cur, end;
4952   int mode;
4953   struct stat64 buf64;
4954 
4955   if (::fstat64(fd, &buf64) >= 0) {
4956     mode = buf64.st_mode;
4957     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4958       int n, ioctl_return;
4959 
4960       RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
4961       if (ioctl_return >= 0) {
4962         *bytes = n;
4963         return 1;
4964       }
4965     }
4966   }
4967   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4968     return 0;
4969   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4970     return 0;
4971   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4972     return 0;
4973   }
4974   *bytes = end - cur;
4975   return 1;
4976 }
4977 
4978 // Map a block of memory.
4979 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4980                         char *addr, size_t bytes, bool read_only,
4981                         bool allow_exec) {
4982   int prot;
4983   int flags;
4984 
4985   if (read_only) {
4986     prot = PROT_READ;
4987     flags = MAP_SHARED;
4988   } else {
4989     prot = PROT_READ | PROT_WRITE;
4990     flags = MAP_PRIVATE;
4991   }
4992 
4993   if (allow_exec) {
4994     prot |= PROT_EXEC;
4995   }
4996 
4997   if (addr != NULL) {
4998     flags |= MAP_FIXED;
4999   }
5000 
5001   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5002                                      fd, file_offset);
5003   if (mapped_address == MAP_FAILED) {
5004     return NULL;
5005   }
5006   return mapped_address;
5007 }
5008 
5009 
5010 // Remap a block of memory.
5011 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5012                           char *addr, size_t bytes, bool read_only,
5013                           bool allow_exec) {
5014   // same as map_memory() on this OS
5015   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5016                         allow_exec);
5017 }
5018 
5019 
5020 // Unmap a block of memory.
5021 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5022   return munmap(addr, bytes) == 0;
5023 }
5024 
5025 void os::pause() {
5026   char filename[MAX_PATH];
5027   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5028     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);  // don't treat the file name as a format string
5029   } else {
5030     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5031   }
5032 
5033   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5034   if (fd != -1) {
5035     struct stat buf;
5036     ::close(fd);
5037     while (::stat(filename, &buf) == 0) {
5038       (void)::poll(NULL, 0, 100);
5039     }
5040   } else {
5041     jio_fprintf(stderr,
5042                 "Could not open pause file '%s', continuing immediately.\n", filename);
5043   }
5044 }
5045 
5046 #ifndef PRODUCT
5047 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5048 // Turn this on if you need to trace synch operations.
5049 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5050 // and call record_synch_enable and record_synch_disable
5051 // around the computation of interest.
5052 
5053 void record_synch(char* name, bool returning);  // defined below
5054 
5055 class RecordSynch {
5056   char* _name;
5057  public:
5058   RecordSynch(char* name) :_name(name) { record_synch(_name, false); }
5059   ~RecordSynch()                       { record_synch(_name, true); }
5060 };
5061 
5062 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
5063 extern "C" ret name params {                                    \
5064   typedef ret name##_t params;                                  \
5065   static name##_t* implem = NULL;                               \
5066   static int callcount = 0;                                     \
5067   if (implem == NULL) {                                         \
5068     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
5069     if (implem == NULL)  fatal(dlerror());                      \
5070   }                                                             \
5071   ++callcount;                                                  \
5072   RecordSynch _rs(#name);                                       \
5073   inner;                                                        \
5074   return implem args;                                           \
5075 }
5076 // in dbx, examine callcounts this way:
5077 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
5078 
5079 #define CHECK_POINTER_OK(p) \
5080   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
5081 #define CHECK_MU \
5082   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5083 #define CHECK_CV \
5084   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5085 #define CHECK_P(p) \
5086   if (!CHECK_POINTER_OK(p))  fatal("Pointer must be in C heap only.");
5087 
5088 #define CHECK_MUTEX(mutex_op) \
5089   CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5090 
5091 CHECK_MUTEX(   mutex_lock)
5092 CHECK_MUTEX(  _mutex_lock)
5093 CHECK_MUTEX( mutex_unlock)
5094 CHECK_MUTEX(_mutex_unlock)
5095 CHECK_MUTEX( mutex_trylock)
5096 CHECK_MUTEX(_mutex_trylock)
5097 
5098 #define CHECK_COND(cond_op) \
5099   CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU; CHECK_CV);
5100 
5101 CHECK_COND( cond_wait);
5102 CHECK_COND(_cond_wait);
5103 CHECK_COND(_cond_wait_cancel);
5104 
5105 #define CHECK_COND2(cond_op) \
5106   CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU; CHECK_CV);
5107 
5108 CHECK_COND2( cond_timedwait);
5109 CHECK_COND2(_cond_timedwait);
5110 CHECK_COND2(_cond_timedwait_cancel);
5111 
5112 // do the _lwp_* versions too
5113 #define mutex_t lwp_mutex_t
5114 #define cond_t  lwp_cond_t
5115 CHECK_MUTEX(  _lwp_mutex_lock)
5116 CHECK_MUTEX(  _lwp_mutex_unlock)
5117 CHECK_MUTEX(  _lwp_mutex_trylock)
5118 CHECK_MUTEX( __lwp_mutex_lock)
5119 CHECK_MUTEX( __lwp_mutex_unlock)
5120 CHECK_MUTEX( __lwp_mutex_trylock)
5121 CHECK_MUTEX(___lwp_mutex_lock)
5122 CHECK_MUTEX(___lwp_mutex_unlock)
5123 
5124 CHECK_COND(  _lwp_cond_wait);
5125 CHECK_COND( __lwp_cond_wait);
5126 CHECK_COND(___lwp_cond_wait);
5127 
5128 CHECK_COND2(  _lwp_cond_timedwait);
5129 CHECK_COND2( __lwp_cond_timedwait);
5130 #undef mutex_t
5131 #undef cond_t
5132 
5133 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5134 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5135 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5136 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5137 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5138 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5139 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5140 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5141 
5142 
5143 // recording machinery:
5144 
5145 enum { RECORD_SYNCH_LIMIT = 200 };
5146 char* record_synch_name[RECORD_SYNCH_LIMIT];
5147 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5148 bool record_synch_returning[RECORD_SYNCH_LIMIT];
5149 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5150 int record_synch_count = 0;
5151 bool record_synch_enabled = false;
5152 
5153 // in dbx, examine recorded data this way:
5154 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5155 
5156 void record_synch(char* name, bool returning) {
5157   if (record_synch_enabled) {
5158     if (record_synch_count < RECORD_SYNCH_LIMIT) {
5159       record_synch_name[record_synch_count] = name;
5160       record_synch_returning[record_synch_count] = returning;
5161       record_synch_thread[record_synch_count] = thr_self();
5162       record_synch_arg0ptr[record_synch_count] = &name;
5163       record_synch_count++;
5164     }
5165     // put more checking code here:
5166     // ...
5167   }
5168 }
5169 
5170 void record_synch_enable() {
5171   // start collecting trace data, if not already doing so
5172   if (!record_synch_enabled)  record_synch_count = 0;
5173   record_synch_enabled = true;
5174 }
5175 
5176 void record_synch_disable() {
5177   // stop collecting trace data
5178   record_synch_enabled = false;
5179 }
5180 
5181 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5182 #endif // PRODUCT
5183 
5184 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5185 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5186                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
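     // Descriptive note: the two constants above use the classic null-pointer
     // offsetof trick.  thr_time_off is the byte offset of pr_utime within
     // prusage_t, and thr_time_size spans from pr_utime up to pr_ttime; given
     // the Solaris prusage_t layout (pr_utime, then pr_stime, then pr_ttime),
     // the single pread() in os::thread_cpu_time() below reads both the user
     // and system CPU time fields in one call.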
5187 
5188 
5189 // JVMTI & JVM monitoring and management support
5190 // The thread_cpu_time() and current_thread_cpu_time() are only
5191 // supported if is_thread_cpu_time_supported() returns true.
5192 // They are not supported on Solaris T1.
5193 
5194 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5195 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5196 // of a thread.
5197 //
5198 // current_thread_cpu_time() and thread_cpu_time(Thread *)
5199 // returns the fast estimate available on the platform.
5200 
5201 // hrtime_t gethrvtime() return value includes
5202 // user time but does not include system time
5203 jlong os::current_thread_cpu_time() {
5204   return (jlong) gethrvtime();
5205 }
5206 
5207 jlong os::thread_cpu_time(Thread *thread) {
5208   // return user level CPU time only to be consistent with
5209   // what current_thread_cpu_time returns.
5210   // thread_cpu_time_info() must be changed if this changes
5211   return os::thread_cpu_time(thread, false /* user time only */);
5212 }
5213 
5214 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5215   if (user_sys_cpu_time) {
5216     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5217   } else {
5218     return os::current_thread_cpu_time();
5219   }
5220 }
5221 
5222 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5223   char proc_name[64];
5224   int count;
5225   prusage_t prusage;
5226   jlong lwp_time;
5227   int fd;
5228 
5229   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5230           getpid(),
5231           thread->osthread()->lwp_id());
5232   fd = ::open(proc_name, O_RDONLY);
5233   if (fd == -1) return -1;
5234 
5235   do {
5236     count = ::pread(fd,
5237                     (void *)&prusage.pr_utime,
5238                     thr_time_size,
5239                     thr_time_off);
5240   } while (count < 0 && errno == EINTR);
5241   ::close(fd);
5242   if (count < 0) return -1;
5243 
5244   if (user_sys_cpu_time) {
5245     // user + system CPU time
5246     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5247                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5248                  (jlong)prusage.pr_stime.tv_nsec +
5249                  (jlong)prusage.pr_utime.tv_nsec;
5250   } else {
5251     // user level CPU time only
5252     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5253                 (jlong)prusage.pr_utime.tv_nsec;
5254   }
5255 
5256   return (lwp_time);
5257 }
5258 
5259 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5260   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5261   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5262   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5263   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5264 }
5265 
5266 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5267   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5268   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5269   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5270   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5271 }
5272 
5273 bool os::is_thread_cpu_time_supported() {
5274   return true;
5275 }
5276 
5277 // System loadavg support.  Returns -1 if load average cannot be obtained.
5278 // Return the load average for our processor set if the primitive exists
5279 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
5280 int os::loadavg(double loadavg[], int nelem) {
5281   if (pset_getloadavg_ptr != NULL) {
5282     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5283   } else {
5284     return ::getloadavg(loadavg, nelem);
5285   }
5286 }
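     // Usage sketch (illustrative): per the comment above, a negative return
     // value means the load average could not be obtained:
     //   double avg[3];
     //   if (os::loadavg(avg, 3) < 0) { /* load average unavailable */ }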
5287 
5288 //---------------------------------------------------------------------------------
5289 
5290 bool os::find(address addr, outputStream* st) {
5291   Dl_info dlinfo;
5292   memset(&dlinfo, 0, sizeof(dlinfo));
5293   if (dladdr(addr, &dlinfo) != 0) {
5294     st->print(PTR_FORMAT ": ", addr);
5295     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5296       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5297     } else if (dlinfo.dli_fbase != NULL) {
5298       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5299     } else {
5300       st->print("<absolute address>");
5301     }
5302     if (dlinfo.dli_fname != NULL) {
5303       st->print(" in %s", dlinfo.dli_fname);
5304     }
5305     if (dlinfo.dli_fbase != NULL) {
5306       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5307     }
5308     st->cr();
5309 
5310     if (Verbose) {
5311       // decode some bytes around the PC
5312       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5313       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5314       address       lowest = (address) dlinfo.dli_sname;
5315       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
5316       if (begin < lowest)  begin = lowest;
5317       Dl_info dlinfo2;
5318       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5319           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
5320         end = (address) dlinfo2.dli_saddr;
5321       }
5322       Disassembler::decode(begin, end, st);
5323     }
5324     return true;
5325   }
5326   return false;
5327 }
5328 
5329 // Following function has been added to support HotSparc's libjvm.so running
5330 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
5331 // src/solaris/hpi/native_threads in the EVM codebase.
5332 //
5333 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5334 // libraries and should thus be removed. We will leave it behind for a while
5335 // until we no longer want to be able to run on top of 1.3.0 Solaris production
5336 // JDK. See 4341971.
5337 
5338 #define STACK_SLACK 0x800
5339 
5340 extern "C" {
5341   intptr_t sysThreadAvailableStackWithSlack() {
5342     stack_t st;
5343     intptr_t retval, stack_top;
5344     retval = thr_stksegment(&st);
5345     assert(retval == 0, "incorrect return value from thr_stksegment");
5346     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5347     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
5348     stack_top=(intptr_t)st.ss_sp-st.ss_size;
5349     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5350   }
5351 }
5352 
5353 // ObjectMonitor park-unpark infrastructure ...
5354 //
5355 // We implement Solaris and Linux PlatformEvents with the
5356 // obvious condvar-mutex-flag triple.
5357 // Another alternative that works quite well is pipes:
5358 // Each PlatformEvent consists of a pipe-pair.
5359 // The thread associated with the PlatformEvent
5360 // calls park(), which reads from the input end of the pipe.
5361 // Unpark() writes into the other end of the pipe.
5362 // The write-side of the pipe must be set NDELAY.
5363 // Unfortunately pipes consume a large # of handles.
5364 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5365 // Using pipes for the 1st few threads might be workable, however.
5366 //
5367 // park() is permitted to return spuriously.
5368 // Callers of park() should wrap the call to park() in
5369 // an appropriate loop.  A litmus test for the correct
5370 // usage of park is the following: if park() were modified
5371 // to immediately return 0 your code should still work,
5372 // albeit degenerating to a spin loop.
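     // Illustrative caller pattern (condition_is_satisfied() is a
     // hypothetical predicate owned by the caller, not defined here):
     //   while (!condition_is_satisfied()) {
     //     ev->park();   // may return spuriously; the loop re-checks
     //   }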
5373 //
5374 // In a sense, park()-unpark() just provides more polite spinning
5375 // and polling with the key difference over naive spinning being
5376 // that a parked thread needs to be explicitly unparked() in order
5377 // to wake up and to poll the underlying condition.
5378 //
5379 // Assumption:
5380 //    Only one parker can exist on an event, which is why we allocate
5381 //    them per-thread. Multiple unparkers can coexist.
5382 //
5383 // _Event transitions in park()
5384 //   -1 => -1 : illegal
5385 //    1 =>  0 : pass - return immediately
5386 //    0 => -1 : block; then set _Event to 0 before returning
5387 //
5388 // _Event transitions in unpark()
5389 //    0 => 1 : just return
5390 //    1 => 1 : just return
5391 //   -1 => either 0 or 1; must signal target thread
5392 //         That is, we can safely transition _Event from -1 to either
5393 //         0 or 1.
5394 //
5395 // _Event serves as a restricted-range semaphore.
5396 //   -1 : thread is blocked, i.e. there is a waiter
5397 //    0 : neutral: thread is running or ready,
5398 //        could have been signaled after a wait started
5399 //    1 : signaled - thread is running or ready
5400 //
5401 // Another possible encoding of _Event would be with
5402 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5403 //
5404 // TODO-FIXME: add DTRACE probes for:
5405 // 1.   Tx parks
5406 // 2.   Ty unparks Tx
5407 // 3.   Tx resumes from park
5408 
5409 
5410 // value determined through experimentation
5411 #define ROUNDINGFIX 11
5412 
5413 // utility to compute the abstime argument to timedwait.
5414 // TODO-FIXME: switch from compute_abstime() to unpackTime().
5415 
5416 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5417   // millis is the relative timeout time
5418   // abstime will be the absolute timeout time
5419   if (millis < 0)  millis = 0;
5420   struct timeval now;
5421   int status = gettimeofday(&now, NULL);
5422   assert(status == 0, "gettimeofday");
5423   jlong seconds = millis / 1000;
5424   jlong max_wait_period;
5425 
5426   if (UseLWPSynchronization) {
5427     // forward port of fix for 4275818 (not sleeping long enough)
5428     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5429     // _lwp_cond_timedwait() used a round_down algorithm rather
5430     // than a round_up. For millis less than our roundfactor
5431     // it rounded down to 0 which doesn't meet the spec.
5432     // For millis > roundfactor we may return a bit sooner, but
5433     // since we can not accurately identify the patch level and
5434     // this has already been fixed in Solaris 9 and 8 we will
5435     // leave it alone rather than always rounding down.
5436 
5437     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
5438     // It appears that when we go directly through Solaris _lwp_cond_timedwait()
5439     // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
5440     max_wait_period = 21000000;
5441   } else {
5442     max_wait_period = 50000000;
5443   }
5444   millis %= 1000;
5445   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5446     seconds = max_wait_period;
5447   }
5448   abstime->tv_sec = now.tv_sec  + seconds;
5449   long       usec = now.tv_usec + millis * 1000;
5450   if (usec >= 1000000) {
5451     abstime->tv_sec += 1;
5452     usec -= 1000000;
5453   }
5454   abstime->tv_nsec = usec * 1000;
5455   return abstime;
5456 }
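     // Worked example (illustrative): for millis == 2500 with gettimeofday()
     // returning { tv_sec = T, tv_usec = 0 }, seconds == 2 and the remaining
     // millis == 500, so the result is { tv_sec = T + 2, tv_nsec = 500000000 },
     // i.e. "now" plus 2.5 seconds.  (Timeouts below ROUNDINGFIX are bumped up
     // first when UseLWPSynchronization is set.)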
5457 
5458 void os::PlatformEvent::park() {           // AKA: down()
5459   // Transitions for _Event:
5460   //   -1 => -1 : illegal
5461   //    1 =>  0 : pass - return immediately
5462   //    0 => -1 : block; then set _Event to 0 before returning
5463 
5464   // Invariant: Only the thread associated with the Event/PlatformEvent
5465   // may call park().
5466   assert(_nParked == 0, "invariant");
5467 
5468   int v;
5469   for (;;) {
5470     v = _Event;
5471     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5472   }
5473   guarantee(v >= 0, "invariant");
5474   if (v == 0) {
5475     // Do this the hard way by blocking ...
5476     // See http://monaco.sfbay/detail.jsf?cr=5094058.
5477     // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5478     // Only for SPARC >= V8PlusA
5479 #if defined(__sparc) && defined(COMPILER2)
5480     if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5481 #endif
5482     int status = os::Solaris::mutex_lock(_mutex);
5483     assert_status(status == 0, status, "mutex_lock");
5484     guarantee(_nParked == 0, "invariant");
5485     ++_nParked;
5486     while (_Event < 0) {
5487       // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5488       // Treat this the same as if the wait was interrupted
5489       // With usr/lib/lwp going to kernel, always handle ETIME
5490       status = os::Solaris::cond_wait(_cond, _mutex);
5491       if (status == ETIME) status = EINTR;
5492       assert_status(status == 0 || status == EINTR, status, "cond_wait");
5493     }
5494     --_nParked;
5495     _Event = 0;
5496     status = os::Solaris::mutex_unlock(_mutex);
5497     assert_status(status == 0, status, "mutex_unlock");
5498     // Paranoia to ensure our locked and lock-free paths interact
5499     // correctly with each other.
5500     OrderAccess::fence();
5501   }
5502 }
5503 
5504 int os::PlatformEvent::park(jlong millis) {
5505   // Transitions for _Event:
5506   //   -1 => -1 : illegal
5507   //    1 =>  0 : pass - return immediately
5508   //    0 => -1 : block; then set _Event to 0 before returning
5509 
5510   guarantee(_nParked == 0, "invariant");
5511   int v;
5512   for (;;) {
5513     v = _Event;
5514     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5515   }
5516   guarantee(v >= 0, "invariant");
5517   if (v != 0) return OS_OK;
5518 
5519   int ret = OS_TIMEOUT;
5520   timestruc_t abst;
5521   compute_abstime(&abst, millis);
5522 
5523   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5524   // For Solaris SPARC set fprs.FEF=0 prior to parking.
5525   // Only for SPARC >= V8PlusA
5526 #if defined(__sparc) && defined(COMPILER2)
5527   if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5528 #endif
5529   int status = os::Solaris::mutex_lock(_mutex);
5530   assert_status(status == 0, status, "mutex_lock");
5531   guarantee(_nParked == 0, "invariant");
5532   ++_nParked;
5533   while (_Event < 0) {
5534     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5535     assert_status(status == 0 || status == EINTR ||
5536                   status == ETIME || status == ETIMEDOUT,
5537                   status, "cond_timedwait");
5538     if (!FilterSpuriousWakeups) break;                // previous semantics
5539     if (status == ETIME || status == ETIMEDOUT) break;
5540     // We consume and ignore EINTR and spurious wakeups.
5541   }
5542   --_nParked;
5543   if (_Event >= 0) ret = OS_OK;
5544   _Event = 0;
5545   status = os::Solaris::mutex_unlock(_mutex);
5546   assert_status(status == 0, status, "mutex_unlock");
5547   // Paranoia to ensure our locked and lock-free paths interact
5548   // correctly with each other.
5549   OrderAccess::fence();
5550   return ret;
5551 }
5552 
5553 void os::PlatformEvent::unpark() {
5554   // Transitions for _Event:
5555   //    0 => 1 : just return
5556   //    1 => 1 : just return
5557   //   -1 => either 0 or 1; must signal target thread
5558   //         That is, we can safely transition _Event from -1 to either
5559   //         0 or 1.
5560   // See also: "Semaphores in Plan 9" by Mullender & Cox
5561   //
5562   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5563   // that it will take two back-to-back park() calls for the owning
5564   // thread to block. This has the benefit of forcing a spurious return
5565   // from the first park() call after an unpark() call which will help
5566   // shake out uses of park() and unpark() without condition variables.
5567 
5568   if (Atomic::xchg(1, &_Event) >= 0) return;
5569 
5570   // If the thread associated with the event was parked, wake it.
5571   // Wait for the thread assoc with the PlatformEvent to vacate.
5572   int status = os::Solaris::mutex_lock(_mutex);
5573   assert_status(status == 0, status, "mutex_lock");
5574   int AnyWaiters = _nParked;
5575   status = os::Solaris::mutex_unlock(_mutex);
5576   assert_status(status == 0, status, "mutex_unlock");
5577   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5578   if (AnyWaiters != 0) {
5579     // Note that we signal() *after* dropping the lock for "immortal" Events.
5580     // This is safe and avoids a common class of  futile wakeups.  In rare
5581     // circumstances this can cause a thread to return prematurely from
5582     // cond_{timed}wait() but the spurious wakeup is benign and the victim
5583     // will simply re-test the condition and re-park itself.
5584     // This provides particular benefit if the underlying platform does not
5585     // provide wait morphing.
5586     status = os::Solaris::cond_signal(_cond);
5587     assert_status(status == 0, status, "cond_signal");
5588   }
5589 }
5590 
5591 // JSR166
5592 // -------------------------------------------------------
5593 
5594 // The solaris and linux implementations of park/unpark are fairly
5595 // conservative for now, but can be improved. They currently use a
5596 // mutex/condvar pair, plus _counter.
5597 // Park decrements _counter if > 0, else does a condvar wait.  Unpark
5598 // sets count to 1 and signals condvar.  Only one thread ever waits
5599 // on the condvar. Contention seen when trying to park implies that someone
5600 // is unparking you, so don't wait. And spurious returns are fine, so there
5601 // is no need to track notifications.
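     // Illustrative consequence of the scheme above: an unpark() that arrives
     // before the park() just leaves _counter at 1, so the next park()
     // consumes the permit and returns immediately (p is a hypothetical
     // Parker* used only for this sketch):
     //   p->unpark();           // _counter: 0 -> 1
     //   p->park(false, 0);     // sees the permit, returns without blocking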
5602 
5603 #define MAX_SECS 100000000
5604 
5605 // This code is common to linux and solaris and will be moved to a
5606 // common place in dolphin.
5607 //
5608 // The passed in time value is either a relative time in nanoseconds
5609 // or an absolute time in milliseconds. Either way it has to be unpacked
5610 // into suitable seconds and nanoseconds components and stored in the
5611 // given timespec structure.
5612 // Given time is a 64-bit value and the time_t used in the timespec is only
5613 // a signed-32-bit value (except on 64-bit Linux) we have to watch for
5614 // overflow if times far in the future are given. Further, on Solaris versions
5615 // prior to 10 there is a restriction (see cond_timedwait) that the specified
5616 // number of seconds, in abstime, is less than current_time  + 100,000,000.
5617 // As it will be 28 years before "now + 100000000" will overflow we can
5618 // ignore overflow and just impose a hard-limit on seconds using the value
5619 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
5620 // years from "now".
5621 //
5622 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5623   assert(time > 0, "convertTime");
5624 
5625   struct timeval now;
5626   int status = gettimeofday(&now, NULL);
5627   assert(status == 0, "gettimeofday");
5628 
5629   time_t max_secs = now.tv_sec + MAX_SECS;
5630 
5631   if (isAbsolute) {
5632     jlong secs = time / 1000;
5633     if (secs > max_secs) {
5634       absTime->tv_sec = max_secs;
5635     } else {
5636       absTime->tv_sec = secs;
5637     }
5638     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5639   } else {
5640     jlong secs = time / NANOSECS_PER_SEC;
5641     if (secs >= MAX_SECS) {
5642       absTime->tv_sec = max_secs;
5643       absTime->tv_nsec = 0;
5644     } else {
5645       absTime->tv_sec = now.tv_sec + secs;
5646       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5647       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5648         absTime->tv_nsec -= NANOSECS_PER_SEC;
5649         ++absTime->tv_sec; // note: this must be <= max_secs
5650       }
5651     }
5652   }
5653   assert(absTime->tv_sec >= 0, "tv_sec < 0");
5654   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5655   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5656   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5657 }
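     // Worked example (illustrative): a relative timeout of 1.5ms, i.e.
     // time == 1500000 (ns) with isAbsolute == false and now.tv_usec == 0,
     // gives secs == 0, so absTime becomes { now.tv_sec, 1500000 }.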
5658 
5659 void Parker::park(bool isAbsolute, jlong time) {
5660   // Ideally we'd do something useful while spinning, such
5661   // as calling unpackTime().
5662 
5663   // Optional fast-path check:
5664   // Return immediately if a permit is available.
5665   // We depend on Atomic::xchg() having full barrier semantics
5666   // since we are doing a lock-free update to _counter.
5667   if (Atomic::xchg(0, &_counter) > 0) return;
5668 
5669   // Optional fast-exit: Check interrupt before trying to wait
5670   Thread* thread = Thread::current();
5671   assert(thread->is_Java_thread(), "Must be JavaThread");
5672   JavaThread *jt = (JavaThread *)thread;
5673   if (Thread::is_interrupted(thread, false)) {
5674     return;
5675   }
5676 
5677   // First, demultiplex/decode time arguments
5678   timespec absTime;
5679   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
5680     return;
5681   }
5682   if (time > 0) {
5683     // Warning: this code might be exposed to the old Solaris time
5684     // round-down bugs.  Grep "roundingFix" for details.
5685     unpackTime(&absTime, isAbsolute, time);
5686   }
5687 
5688   // Enter safepoint region
5689   // Beware of deadlocks such as 6317397.
5690   // The per-thread Parker:: _mutex is a classic leaf-lock.
5691   // In particular a thread must never block on the Threads_lock while
5692   // holding the Parker:: mutex.  If safepoints are pending both the
5693   // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
5694   ThreadBlockInVM tbivm(jt);
5695 
5696   // Don't wait if we cannot get the lock, since interference arises from
5697   // unblocking.  Also, check the interrupt state before trying to wait.
5698   if (Thread::is_interrupted(thread, false) ||
5699       os::Solaris::mutex_trylock(_mutex) != 0) {
5700     return;
5701   }
5702 
5703   int status;
5704 
5705   if (_counter > 0)  { // no wait needed
5706     _counter = 0;
5707     status = os::Solaris::mutex_unlock(_mutex);
5708     assert(status == 0, "invariant");
5709     // Paranoia to ensure our locked and lock-free paths interact
5710     // correctly with each other and Java-level accesses.
5711     OrderAccess::fence();
5712     return;
5713   }
5714 
5715 #ifdef ASSERT
5716   // Don't catch signals while blocked; let the running threads have the signals.
5717   // (This allows a debugger to break into the running thread.)
5718   sigset_t oldsigs;
5719   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
5720   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
5721 #endif
5722 
5723   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5724   jt->set_suspend_equivalent();
5725   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5726 
5727   // Do this the hard way by blocking ...
5728   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5729   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5730   // Only for SPARC >= V8PlusA
5731 #if defined(__sparc) && defined(COMPILER2)
5732   if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5733 #endif
5734 
5735   if (time == 0) {
5736     status = os::Solaris::cond_wait(_cond, _mutex);
5737   } else {
5738     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
5739   }
5740   // Note that an untimed cond_wait() can sometimes return ETIME on older
5741   // versions of Solaris.
5742   assert_status(status == 0 || status == EINTR ||
5743                 status == ETIME || status == ETIMEDOUT,
5744                 status, "cond_timedwait");
5745 
5746 #ifdef ASSERT
5747   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
5748 #endif
5749   _counter = 0;
5750   status = os::Solaris::mutex_unlock(_mutex);
5751   assert_status(status == 0, status, "mutex_unlock");
5752   // Paranoia to ensure our locked and lock-free paths interact
5753   // correctly with each other and Java-level accesses.
5754   OrderAccess::fence();
5755 
5756   // If externally suspended while waiting, re-suspend
5757   if (jt->handle_special_suspend_equivalent_condition()) {
5758     jt->java_suspend_self();
5759   }
5760 }
5761 
5762 void Parker::unpark() {
5763   int status = os::Solaris::mutex_lock(_mutex);
5764   assert(status == 0, "invariant");
5765   const int s = _counter;
5766   _counter = 1;
5767   status = os::Solaris::mutex_unlock(_mutex);
5768   assert(status == 0, "invariant");
5769 
5770   if (s < 1) {
5771     status = os::Solaris::cond_signal(_cond);
5772     assert(status == 0, "invariant");
5773   }
5774 }
5775 
5776 extern char** environ;
5777 
5778 // Run the specified command in a separate process. Return its exit value,
5779 // or -1 on failure (e.g. can't fork a new process).
5780 // Unlike system(), this function can be called from a signal handler. It
5781 // doesn't block SIGINT et al.
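// A typical (illustrative, hypothetical) use from VM error reporting code:
//
//   char cmd[64];
//   jio_snprintf(cmd, sizeof(cmd), "pstack %d", os::current_process_id());
//   os::fork_and_exec(cmd);   // best effort; exit status usually ignored
//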
5782 int os::fork_and_exec(char* cmd) {
5783   char * argv[4];
5784   argv[0] = (char *)"sh";
5785   argv[1] = (char *)"-c";
5786   argv[2] = cmd;
5787   argv[3] = NULL;
5788 
5789   // fork() is async-signal-safe; fork1() is not, so it cannot be used in a signal handler
5790   pid_t pid;
5791   Thread* t = ThreadLocalStorage::get_thread_slow();
5792   if (t != NULL && t->is_inside_signal_handler()) {
5793     pid = fork();
5794   } else {
5795     pid = fork1();
5796   }
5797 
5798   if (pid < 0) {
5799     // fork failed
5800     warning("fork failed: %s", strerror(errno));
5801     return -1;
5802 
5803   } else if (pid == 0) {
5804     // child process
5805 
5806     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
5807     execve("/usr/bin/sh", argv, environ);
5808 
5809     // execve failed
5810     _exit(-1);
5811 
5812   } else {
5813     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
5814     // care about the actual exit code, for now.
5815 
5816     int status;
5817 
5818     // Wait for the child process to exit.  This returns immediately if
5819     // the child has already exited.
5820     while (waitpid(pid, &status, 0) < 0) {
5821       switch (errno) {
5822       case ECHILD: return 0;
5823       case EINTR: break;
5824       default: return -1;
5825       }
5826     }
5827 
5828     if (WIFEXITED(status)) {
5829       // The child exited normally; get its exit code.
5830       return WEXITSTATUS(status);
5831     } else if (WIFSIGNALED(status)) {
5832       // The child exited because of a signal.
5833       // The best value to return is 0x80 + signal number,
5834       // because that is what all Unix shells do, and because
5835       // it allows callers to distinguish between process exit and
5836       // process death by signal.
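      // For example, a child killed by SIGKILL (signal 9) yields
      // 0x80 + 9 = 137, the same value a shell would report in $?.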
5837       return 0x80 + WTERMSIG(status);
5838     } else {
5839       // Unknown exit code; pass it through
5840       return status;
5841     }
5842   }
5843 }
5844 
5845 // is_headless_jre()
5846 //
5847 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
5848 // in order to report whether we are running in a headless JRE.
5849 //
5850 // Since JDK 8, xawt/libmawt.so has been moved into the same directory
5851 // as libawt.so and renamed libawt_xawt.so.
5852 //
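// For example (the arch directory is illustrative): if libjvm.so lives in
// .../jre/lib/sparcv9/server/, the code below strips the last two path
// components and probes .../jre/lib/sparcv9/xawt/libmawt.so and
// .../jre/lib/sparcv9/libawt_xawt.so; if neither exists, the JRE is
// considered headless.
//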
5853 bool os::is_headless_jre() {
5854   struct stat statbuf;
5855   char buf[MAXPATHLEN];
5856   char libmawtpath[MAXPATHLEN];
5857   const char *xawtstr  = "/xawt/libmawt.so";
5858   const char *new_xawtstr = "/libawt_xawt.so";
5859   char *p;
5860 
5861   // Get path to libjvm.so
5862   os::jvm_path(buf, sizeof(buf));
5863 
5864   // Get rid of libjvm.so
5865   p = strrchr(buf, '/');
5866   if (p == NULL) {
5867     return false;
5868   } else {
5869     *p = '\0';
5870   }
5871 
5872   // Get rid of client or server
5873   p = strrchr(buf, '/');
5874   if (p == NULL) {
5875     return false;
5876   } else {
5877     *p = '\0';
5878   }
5879 
5880   // check xawt/libmawt.so
5881   strcpy(libmawtpath, buf);
5882   strcat(libmawtpath, xawtstr);
5883   if (::stat(libmawtpath, &statbuf) == 0) return false;
5884 
5885   // check libawt_xawt.so
5886   strcpy(libmawtpath, buf);
5887   strcat(libmawtpath, new_xawtstr);
5888   if (::stat(libmawtpath, &statbuf) == 0) return false;
5889 
5890   return true;
5891 }
5892 
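// The I/O wrappers below assert _thread_in_native because a Java thread must
// not block in a system call while in a VM-internal thread state, where it
// would hold up safepoints.  RESTARTABLE retries a call that fails with EINTR.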
5893 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
5894   size_t res;
5895   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5896          "Assumed _thread_in_native");
5897   RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
5898   return res;
5899 }
5900 
5901 int os::close(int fd) {
5902   return ::close(fd);
5903 }
5904 
5905 int os::socket_close(int fd) {
5906   return ::close(fd);
5907 }
5908 
5909 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5910   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5911          "Assumed _thread_in_native");
5912   RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
5913 }
5914 
5915 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5916   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5917          "Assumed _thread_in_native");
5918   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5919 }
5920 
5921 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5922   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5923 }
5924 
5925 // As both poll and select can be interrupted by signals, we have to be
5926 // prepared to restart the system call after updating the timeout, unless
5927 // a poll() is done with timeout == -1, in which case we repeat with this
5928 // "wait forever" value.
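//
// The retry pattern is, in essence (sketch only -- see the RESTARTABLE and
// RESTARTABLE_RETURN_INT macros used by the wrappers above):
//
//   do {
//     result = syscall(...);
//   } while (result == OS_ERR && errno == EINTR);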
5929 
5930 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
5931   int _result;
5932   _result = ::connect(fd, him, len);
5933 
5934   // On Solaris, when a connect() call is interrupted, the connection
5935   // can be established asynchronously (see 6343810). Subsequent calls
5936   // to connect() must check the errno value which has the semantic
5937   // described below (copied from the connect() man page). Handling
5938   // of asynchronously established connections is required for both
5939   // blocking and non-blocking sockets.
5940   //     EINTR            The  connection  attempt  was   interrupted
5941   //                      before  any data arrived by the delivery of
5942   //                      a signal. The connection, however, will  be
5943   //                      established asynchronously.
5944   //
5945   //     EINPROGRESS      The socket is non-blocking, and the connec-
5946   //                      tion  cannot  be completed immediately.
5947   //
5948   //     EALREADY         The socket is non-blocking,  and a previous
5949   //                      connection  attempt  has  not yet been com-
5950   //                      pleted.
5951   //
5952   //     EISCONN          The socket is already connected.
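  // Restarting connect() after EINTR changes the errno a caller would see
  // from a single call, so EALREADY is mapped back to EINPROGRESS and
  // EISCONN is treated as success below.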
5953   if (_result == OS_ERR && errno == EINTR) {
5954     // restarting a connect() changes its errno semantics
5955     RESTARTABLE(::connect(fd, him, len), _result);
5956     // undo these changes
5957     if (_result == OS_ERR) {
5958       if (errno == EALREADY) {
5959         errno = EINPROGRESS; // fall through
5960       } else if (errno == EISCONN) {
5961         errno = 0;
5962         return OS_OK;
5963       }
5964     }
5965   }
5966   return _result;
5967 }
5968 
5969 // Get the default path to the core file
5970 // Returns the length of the string
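// (On Solaris the default core file is simply "core" in the process's current
// working directory unless coreadm(1M) has changed the core file pattern,
// which is why the current directory is returned here.)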
5971 int os::get_core_path(char* buffer, size_t bufferSize) {
5972   const char* p = get_current_directory(buffer, bufferSize);
5973 
5974   if (p == NULL) {
5975     assert(p != NULL, "failed to get current directory");
5976     return 0;
5977   }
5978 
5979   return strlen(buffer);
5980 }
5981 
5982 #ifndef PRODUCT
5983 void TestReserveMemorySpecial_test() {
5984   // No tests available for this platform
5985 }
5986 #endif
--- EOF ---