1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "jvm_solaris.h"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/filemap.hpp"
  37 #include "mutex_solaris.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "os_share_solaris.hpp"
  40 #include "os_solaris.inline.hpp"
  41 #include "prims/jniFastGetField.hpp"
  42 #include "prims/jvm.h"
  43 #include "prims/jvm_misc.hpp"
  44 #include "runtime/arguments.hpp"
  45 #include "runtime/atomic.inline.hpp"
  46 #include "runtime/extendedPC.hpp"
  47 #include "runtime/globals.hpp"
  48 #include "runtime/interfaceSupport.hpp"
  49 #include "runtime/java.hpp"
  50 #include "runtime/javaCalls.hpp"
  51 #include "runtime/mutexLocker.hpp"
  52 #include "runtime/objectMonitor.hpp"
  53 #include "runtime/orderAccess.inline.hpp"
  54 #include "runtime/osThread.hpp"
  55 #include "runtime/perfMemory.hpp"
  56 #include "runtime/sharedRuntime.hpp"
  57 #include "runtime/statSampler.hpp"
  58 #include "runtime/stubRoutines.hpp"
  59 #include "runtime/thread.inline.hpp"
  60 #include "runtime/threadCritical.hpp"
  61 #include "runtime/timer.hpp"
  62 #include "runtime/vm_version.hpp"
  63 #include "services/attachListener.hpp"
  64 #include "services/memTracker.hpp"
  65 #include "services/runtimeService.hpp"
  66 #include "utilities/decoder.hpp"
  67 #include "utilities/defaultStream.hpp"
  68 #include "utilities/events.hpp"
  69 #include "utilities/growableArray.hpp"
  70 #include "utilities/vmError.hpp"
  71 
  72 // put OS-includes here
  73 # include <dlfcn.h>
  74 # include <errno.h>
  75 # include <exception>
  76 # include <link.h>
  77 # include <poll.h>
  78 # include <pthread.h>
  79 # include <pwd.h>
  80 # include <schedctl.h>
  81 # include <setjmp.h>
  82 # include <signal.h>
  83 # include <stdio.h>
  84 # include <alloca.h>
  85 # include <sys/filio.h>
  86 # include <sys/ipc.h>
  87 # include <sys/lwp.h>
  88 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
  89 # include <sys/mman.h>
  90 # include <sys/processor.h>
  91 # include <sys/procset.h>
  92 # include <sys/pset.h>
  93 # include <sys/resource.h>
  94 # include <sys/shm.h>
  95 # include <sys/socket.h>
  96 # include <sys/stat.h>
  97 # include <sys/systeminfo.h>
  98 # include <sys/time.h>
  99 # include <sys/times.h>
 100 # include <sys/types.h>
 101 # include <sys/wait.h>
 102 # include <sys/utsname.h>
 103 # include <thread.h>
 104 # include <unistd.h>
 105 # include <sys/priocntl.h>
 106 # include <sys/rtpriocntl.h>
 107 # include <sys/tspriocntl.h>
 108 # include <sys/iapriocntl.h>
 109 # include <sys/fxpriocntl.h>
 110 # include <sys/loadavg.h>
 111 # include <string.h>
 112 # include <stdio.h>
 113 
 114 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
 115 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
 116 
 117 #define MAX_PATH (2 * K)
 118 
 119 // for timer info max values which include all bits
 120 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 121 
 122 
// Definitions from <sys/mman.h> and <sys/lgrp_user.h>, provided here so we
// can compile on older systems whose headers do not supply them.
 125 
 126 #ifndef MADV_ACCESS_LWP
 127   #define  MADV_ACCESS_LWP   7       /* next LWP to access heavily */
 128 #endif
 129 #ifndef MADV_ACCESS_MANY
 130   #define  MADV_ACCESS_MANY  8       /* many processes to access heavily */
 131 #endif
 132 
 133 #ifndef LGRP_RSRC_CPU
 134   #define LGRP_RSRC_CPU      0       /* CPU resources */
 135 #endif
 136 #ifndef LGRP_RSRC_MEM
 137   #define LGRP_RSRC_MEM      1       /* memory resources */
 138 #endif
 139 
 140 // see thr_setprio(3T) for the basis of these numbers
 141 #define MinimumPriority 0
 142 #define NormalPriority  64
 143 #define MaximumPriority 127
 144 
 145 // Values for ThreadPriorityPolicy == 1
 146 int prio_policy1[CriticalPriority+1] = {
 147   -99999,  0, 16,  32,  48,  64,
 148           80, 96, 112, 124, 127, 127 };
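// For illustration: under ThreadPriorityPolicy == 1 the table above is indexed
// by the Java thread priority, so index 0 is an unused sentinel (-99999),
// indices 1..10 spread the ordinary Java priorities roughly evenly across the
// 0..127 thr_setprio range documented above, and the final entry is used for
// CriticalPriority.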
 149 
 150 // System parameters used internally
 151 static clock_t clock_tics_per_sec = 100;
 152 
 153 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
 154 static bool enabled_extended_FILE_stdio = false;
 155 
// For diagnostics to print a message once; see run_periodic_checks.
 157 static bool check_addr0_done = false;
 158 static sigset_t check_signal_done;
 159 static bool check_signals = true;
 160 
 161 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
 162 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
 163 
 164 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
 165 
 166 
 167 // "default" initializers for missing libc APIs
 168 extern "C" {
 169   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 170   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
 171 
 172   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 173   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
 174 }
 175 
 176 // "default" initializers for pthread-based synchronization
 177 extern "C" {
 178   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 179   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 180 }
 181 
 182 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 183 
 184 // Thread Local Storage
 185 // This is common to all Solaris platforms so it is defined here,
 186 // in this common file.
 187 // The declarations are in the os_cpu threadLS*.hpp files.
 188 //
 189 // Static member initialization for TLS
 190 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
 191 
 192 #ifndef PRODUCT
 193   #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
 194 
 195 int ThreadLocalStorage::_tcacheHit = 0;
 196 int ThreadLocalStorage::_tcacheMiss = 0;
 197 
 198 void ThreadLocalStorage::print_statistics() {
 199   int total = _tcacheMiss+_tcacheHit;
 200   tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
 201                 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
 202 }
 203   #undef _PCT
 204 #endif // PRODUCT
 205 
 206 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
 207                                                         int index) {
 208   Thread *thread = get_thread_slow();
 209   if (thread != NULL) {
 210     address sp = os::current_stack_pointer();
 211     guarantee(thread->_stack_base == NULL ||
 212               (sp <= thread->_stack_base &&
 213               sp >= thread->_stack_base - thread->_stack_size) ||
 214               is_error_reported(),
 215               "sp must be inside of selected thread stack");
 216 
 217     thread->set_self_raw_id(raw_id);  // mark for quick retrieval
 218     _get_thread_cache[index] = thread;
 219   }
 220   return thread;
 221 }
 222 
 223 
 224 static const double all_zero[sizeof(Thread) / sizeof(double) + 1] = {0};
 225 #define NO_CACHED_THREAD ((Thread*)all_zero)
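// NO_CACHED_THREAD is a distinct non-NULL sentinel: all_zero is a zero-filled
// block at least as large as a Thread, so the sentinel can never compare equal
// to a real Thread*, and it is what pd_init()/pd_set_thread() store in empty
// cache slots.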
 226 
 227 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
 228 
 229   // Store the new value before updating the cache to prevent a race
 230   // between get_thread_via_cache_slowly() and this store operation.
 231   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
 232 
 233   // Update thread cache with new thread if setting on thread create,
 234   // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
 235   uintptr_t raw = pd_raw_thread_id();
 236   int ix = pd_cache_index(raw);
 237   _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
 238 }
 239 
 240 void ThreadLocalStorage::pd_init() {
 241   for (int i = 0; i < _pd_cache_size; i++) {
 242     _get_thread_cache[i] = NO_CACHED_THREAD;
 243   }
 244 }
 245 
 246 // Invalidate all the caches (happens to be the same as pd_init).
 247 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
 248 
 249 #undef NO_CACHED_THREAD
 250 
 251 // END Thread Local Storage
 252 
 253 static inline size_t adjust_stack_size(address base, size_t size) {
 254   if ((ssize_t)size < 0) {
 255     // 4759953: Compensate for ridiculous stack size.
 256     size = max_intx;
 257   }
 258   if (size > (size_t)base) {
 259     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
 260     size = (size_t)base;
 261   }
 262   return size;
 263 }
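// For example, an "unlimited" or otherwise huge rlim_cur that appears negative
// when viewed as ssize_t is clamped to max_intx (4759953), and a size larger
// than the numeric value of the stack base is clamped so that base - size
// cannot wrap below address zero (4812466).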
 264 
 265 static inline stack_t get_stack_info() {
 266   stack_t st;
 267   int retval = thr_stksegment(&st);
 268   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
 269   assert(retval == 0, "incorrect return value from thr_stksegment");
 270   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
 271   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
 272   return st;
 273 }
 274 
 275 address os::current_stack_base() {
 276   int r = thr_main();
 277   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
 278   bool is_primordial_thread = r;
 279 
  // Workaround 4352906: avoid calls to thr_stksegment by the primordial
  // thread after the first one (it looks like we trash some data, causing
  // the value for ss_sp to be incorrect).
 283   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
 284     stack_t st = get_stack_info();
 285     if (is_primordial_thread) {
 286       // cache initial value of stack base
 287       os::Solaris::_main_stack_base = (address)st.ss_sp;
 288     }
 289     return (address)st.ss_sp;
 290   } else {
 291     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
 292     return os::Solaris::_main_stack_base;
 293   }
 294 }
 295 
 296 size_t os::current_stack_size() {
 297   size_t size;
 298 
 299   int r = thr_main();
 300   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
 301   if (!r) {
 302     size = get_stack_info().ss_size;
 303   } else {
 304     struct rlimit limits;
 305     getrlimit(RLIMIT_STACK, &limits);
 306     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
 307   }
 308   // base may not be page aligned
 309   address base = current_stack_base();
  address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
 311   return (size_t)(base - bottom);
 312 }
 313 
 314 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
 315   return localtime_r(clock, res);
 316 }
 317 
 318 void os::Solaris::try_enable_extended_io() {
 319   typedef int (*enable_extended_FILE_stdio_t)(int, int);
 320 
 321   if (!UseExtendedFileIO) {
 322     return;
 323   }
 324 
 325   enable_extended_FILE_stdio_t enabler =
 326     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
 327                                          "enable_extended_FILE_stdio");
 328   if (enabler) {
 329     enabler(-1, -1);
 330   }
 331 }
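// enable_extended_FILE_stdio() is looked up above with dlsym() rather than
// being linked against directly, so the VM still loads on Solaris releases
// older than 10u4 where libc does not export that symbol; on such systems the
// lookup fails and extended FILE stdio is simply left disabled.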
 332 
 333 static int _processors_online = 0;
 334 
 335 jint os::Solaris::_os_thread_limit = 0;
 336 volatile jint os::Solaris::_os_thread_count = 0;
 337 
 338 julong os::available_memory() {
 339   return Solaris::available_memory();
 340 }
 341 
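// _SC_AVPHYS_PAGES reports the number of physical pages not currently in use,
// so the value below is the amount of currently free physical memory in bytes.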
 342 julong os::Solaris::available_memory() {
 343   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
 344 }
 345 
 346 julong os::Solaris::_physical_memory = 0;
 347 
 348 julong os::physical_memory() {
 349   return Solaris::physical_memory();
 350 }
 351 
 352 static hrtime_t first_hrtime = 0;
 353 static const hrtime_t hrtime_hz = 1000*1000*1000;
 354 static volatile hrtime_t max_hrtime = 0;
 355 
 356 
 357 void os::Solaris::initialize_system_info() {
 358   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
 359   _processors_online = sysconf(_SC_NPROCESSORS_ONLN);
 360   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) *
 361                                      (julong)sysconf(_SC_PAGESIZE);
 362 }
 363 
 364 int os::active_processor_count() {
 365   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 366   pid_t pid = getpid();
 367   psetid_t pset = PS_NONE;
 368   // Are we running in a processor set or is there any processor set around?
 369   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
 370     uint_t pset_cpus;
 371     // Query the number of cpus available to us.
 372     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
 373       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
 374       _processors_online = pset_cpus;
 375       return pset_cpus;
 376     }
 377   }
 378   // Otherwise return number of online cpus
 379   return online_cpus;
 380 }
 381 
 382 static bool find_processors_in_pset(psetid_t        pset,
 383                                     processorid_t** id_array,
 384                                     uint_t*         id_length) {
 385   bool result = false;
 386   // Find the number of processors in the processor set.
 387   if (pset_info(pset, NULL, id_length, NULL) == 0) {
 388     // Make up an array to hold their ids.
 389     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 390     // Fill in the array with their processor ids.
 391     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
 392       result = true;
 393     }
 394   }
 395   return result;
 396 }
 397 
 398 // Callers of find_processors_online() must tolerate imprecise results --
 399 // the system configuration can change asynchronously because of DR
 400 // or explicit psradm operations.
 401 //
 402 // We also need to take care that the loop (below) terminates as the
 403 // number of processors online can change between the _SC_NPROCESSORS_ONLN
 404 // request and the loop that builds the list of processor ids.   Unfortunately
 405 // there's no reliable way to determine the maximum valid processor id,
 406 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
 407 // man pages, which claim the processor id set is "sparse, but
 408 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
 409 // exit the loop.
 410 //
 411 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
 412 // not available on S8.0.
 413 
 414 static bool find_processors_online(processorid_t** id_array,
 415                                    uint*           id_length) {
 416   const processorid_t MAX_PROCESSOR_ID = 100000;
 417   // Find the number of processors online.
 418   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
 419   // Make up an array to hold their ids.
 420   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 421   // Processors need not be numbered consecutively.
 422   long found = 0;
 423   processorid_t next = 0;
 424   while (found < *id_length && next < MAX_PROCESSOR_ID) {
 425     processor_info_t info;
 426     if (processor_info(next, &info) == 0) {
 427       // NB, PI_NOINTR processors are effectively online ...
 428       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
 429         (*id_array)[found] = next;
 430         found += 1;
 431       }
 432     }
 433     next += 1;
 434   }
 435   if (found < *id_length) {
 436     // The loop above didn't identify the expected number of processors.
 437     // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
 438     // and re-running the loop, above, but there's no guarantee of progress
 439     // if the system configuration is in flux.  Instead, we just return what
 440     // we've got.  Note that in the worst case find_processors_online() could
 441     // return an empty set.  (As a fall-back in the case of the empty set we
 442     // could just return the ID of the current processor).
 443     *id_length = found;
 444   }
 445 
 446   return true;
 447 }
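// Note for callers: both helpers above return a heap-allocated id_array that
// must be released with FREE_C_HEAP_ARRAY, and find_processors_online() may
// legitimately report fewer ids (possibly zero) than were initially online.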
 448 
 449 static bool assign_distribution(processorid_t* id_array,
 450                                 uint           id_length,
 451                                 uint*          distribution,
 452                                 uint           distribution_length) {
 453   // We assume we can assign processorid_t's to uint's.
 454   assert(sizeof(processorid_t) == sizeof(uint),
 455          "can't convert processorid_t to uint");
 456   // Quick check to see if we won't succeed.
 457   if (id_length < distribution_length) {
 458     return false;
 459   }
 460   // Assign processor ids to the distribution.
 461   // Try to shuffle processors to distribute work across boards,
 462   // assuming 4 processors per board.
 463   const uint processors_per_board = ProcessDistributionStride;
 464   // Find the maximum processor id.
 465   processorid_t max_id = 0;
 466   for (uint m = 0; m < id_length; m += 1) {
 467     max_id = MAX2(max_id, id_array[m]);
 468   }
 469   // The next id, to limit loops.
 470   const processorid_t limit_id = max_id + 1;
 471   // Make up markers for available processors.
 472   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
 473   for (uint c = 0; c < limit_id; c += 1) {
 474     available_id[c] = false;
 475   }
 476   for (uint a = 0; a < id_length; a += 1) {
 477     available_id[id_array[a]] = true;
 478   }
 479   // Step by "boards", then by "slot", copying to "assigned".
 480   // NEEDS_CLEANUP: The assignment of processors should be stateful,
 481   //                remembering which processors have been assigned by
 482   //                previous calls, etc., so as to distribute several
  //                independent calls of this method.  It would be nice
  //                to have an API that lets us ask how many processes
  //                are bound to a processor, but we don't have that
  //                either.
 487   //                In the short term, "board" is static so that
 488   //                subsequent distributions don't all start at board 0.
 489   static uint board = 0;
 490   uint assigned = 0;
 491   // Until we've found enough processors ....
 492   while (assigned < distribution_length) {
 493     // ... find the next available processor in the board.
 494     for (uint slot = 0; slot < processors_per_board; slot += 1) {
 495       uint try_id = board * processors_per_board + slot;
 496       if ((try_id < limit_id) && (available_id[try_id] == true)) {
 497         distribution[assigned] = try_id;
 498         available_id[try_id] = false;
 499         assigned += 1;
 500         break;
 501       }
 502     }
 503     board += 1;
 504     if (board * processors_per_board + 0 >= limit_id) {
 505       board = 0;
 506     }
 507   }
 508   if (available_id != NULL) {
 509     FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
 510   }
 511   return true;
 512 }
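// Worked example (assuming ProcessDistributionStride == 4 and that this is the
// first call, so the static 'board' starts at 0): with online processors
// {0..7} and a request for 4 slots, the walk assigns {0, 4, 1, 5}, i.e. it
// alternates between the two "boards" instead of filling board 0 first.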
 513 
 514 void os::set_native_thread_name(const char *name) {
 515   // Not yet implemented.
 516   return;
 517 }
 518 
 519 bool os::distribute_processes(uint length, uint* distribution) {
 520   bool result = false;
 521   // Find the processor id's of all the available CPUs.
 522   processorid_t* id_array  = NULL;
 523   uint           id_length = 0;
 524   // There are some races between querying information and using it,
 525   // since processor sets can change dynamically.
 526   psetid_t pset = PS_NONE;
 527   // Are we running in a processor set?
 528   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
 529     result = find_processors_in_pset(pset, &id_array, &id_length);
 530   } else {
 531     result = find_processors_online(&id_array, &id_length);
 532   }
 533   if (result == true) {
 534     if (id_length >= length) {
 535       result = assign_distribution(id_array, id_length, distribution, length);
 536     } else {
 537       result = false;
 538     }
 539   }
 540   if (id_array != NULL) {
 541     FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
 542   }
 543   return result;
 544 }
 545 
 546 bool os::bind_to_processor(uint processor_id) {
 547   // We assume that a processorid_t can be stored in a uint.
 548   assert(sizeof(uint) == sizeof(processorid_t),
 549          "can't convert uint to processorid_t");
 550   int bind_result =
 551     processor_bind(P_LWPID,                       // bind LWP.
 552                    P_MYID,                        // bind current LWP.
 553                    (processorid_t) processor_id,  // id.
 554                    NULL);                         // don't return old binding.
 555   return (bind_result == 0);
 556 }
 557 
 558 bool os::getenv(const char* name, char* buffer, int len) {
 559   char* val = ::getenv(name);
 560   if (val == NULL || strlen(val) + 1 > len) {
 561     if (len > 0) buffer[0] = 0; // return a null string
 562     return false;
 563   }
 564   strcpy(buffer, val);
 565   return true;
 566 }
 567 
 568 
// Return true if the process is running with special privileges, i.e. its
// real and effective user or group ids differ (e.g. a setuid/setgid binary).
 570 
 571 bool os::have_special_privileges() {
 572   static bool init = false;
 573   static bool privileges = false;
 574   if (!init) {
 575     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 576     init = true;
 577   }
 578   return privileges;
 579 }
 580 
 581 
 582 void os::init_system_properties_values() {
 583   // The next steps are taken in the product version:
 584   //
 585   // Obtain the JAVA_HOME value from the location of libjvm.so.
 586   // This library should be located at:
 587   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
 588   //
 589   // If "/jre/lib/" appears at the right place in the path, then we
 590   // assume libjvm.so is installed in a JDK and we use this path.
 591   //
 592   // Otherwise exit with message: "Could not create the Java virtual machine."
 593   //
 594   // The following extra steps are taken in the debugging version:
 595   //
  // If "/jre/lib/" does NOT appear at the right place in the path,
  // then instead of exiting we check for the $JAVA_HOME environment variable.
 598   //
 599   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
 600   // then we append a fake suffix "hotspot/libjvm.so" to this path so
 601   // it looks like libjvm.so is installed there
 602   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
 603   //
 604   // Otherwise exit.
 605   //
 606   // Important note: if the location of libjvm.so changes this
 607   // code needs to be changed accordingly.
 608 
 609 // Base path of extensions installed on the system.
 610 #define SYS_EXT_DIR     "/usr/jdk/packages"
 611 #define EXTENSIONS_DIR  "/lib/ext"
 612 #define ENDORSED_DIR    "/lib/endorsed"
 613 
 614   char cpu_arch[12];
 615   // Buffer that fits several sprintfs.
 616   // Note that the space for the colon and the trailing null are provided
 617   // by the nulls included by the sizeof operator.
 618   const size_t bufsize =
 619     MAX4((size_t)MAXPATHLEN,  // For dll_dir & friends.
 620          sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch), // invariant ld_library_path
 621          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
 622          (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
 623   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 624 
 625   // sysclasspath, java_home, dll_dir
 626   {
 627     char *pslash;
 628     os::jvm_path(buf, bufsize);
 629 
 630     // Found the full path to libjvm.so.
 631     // Now cut the path to <java_home>/jre if we can.
 632     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 633     pslash = strrchr(buf, '/');
 634     if (pslash != NULL) {
 635       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 636     }
 637     Arguments::set_dll_dir(buf);
 638 
 639     if (pslash != NULL) {
 640       pslash = strrchr(buf, '/');
 641       if (pslash != NULL) {
 642         *pslash = '\0';          // Get rid of /<arch>.
 643         pslash = strrchr(buf, '/');
 644         if (pslash != NULL) {
 645           *pslash = '\0';        // Get rid of /lib.
 646         }
 647       }
 648     }
 649     Arguments::set_java_home(buf);
 650     set_boot_path('/', ':');
 651   }
 652 
 653   // Where to look for native libraries.
 654   {
 655     // Use dlinfo() to determine the correct java.library.path.
 656     //
 657     // If we're launched by the Java launcher, and the user
 658     // does not set java.library.path explicitly on the commandline,
 659     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
 660     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
 661     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
 662     // /usr/lib), which is exactly what we want.
 663     //
 664     // If the user does set java.library.path, it completely
 665     // overwrites this setting, and always has.
 666     //
 667     // If we're not launched by the Java launcher, we may
 668     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
 669     // settings.  Again, dlinfo does exactly what we want.
 670 
 671     Dl_serinfo     info_sz, *info = &info_sz;
 672     Dl_serpath     *path;
 673     char           *library_path;
 674     char           *common_path = buf;
 675 
 676     // Determine search path count and required buffer size.
 677     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
 678       FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 679       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
 680     }
 681 
 682     // Allocate new buffer and initialize.
 683     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
 684     info->dls_size = info_sz.dls_size;
 685     info->dls_cnt = info_sz.dls_cnt;
 686 
 687     // Obtain search path information.
 688     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
 689       FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 690       FREE_C_HEAP_ARRAY(char, info, mtInternal);
 691       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
 692     }
 693 
 694     path = &info->dls_serpath[0];
 695 
 696     // Note: Due to a legacy implementation, most of the library path
    // is set in the launcher. This was to accommodate linking restrictions
 698     // on legacy Solaris implementations (which are no longer supported).
 699     // Eventually, all the library path setting will be done here.
 700     //
 701     // However, to prevent the proliferation of improperly built native
 702     // libraries, the new path component /usr/jdk/packages is added here.
 703 
 704     // Determine the actual CPU architecture.
 705     sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
 706 #ifdef _LP64
 707     // If we are a 64-bit vm, perform the following translations:
 708     //   sparc   -> sparcv9
 709     //   i386    -> amd64
 710     if (strcmp(cpu_arch, "sparc") == 0) {
 711       strcat(cpu_arch, "v9");
 712     } else if (strcmp(cpu_arch, "i386") == 0) {
 713       strcpy(cpu_arch, "amd64");
 714     }
 715 #endif
 716 
 717     // Construct the invariant part of ld_library_path.
 718     sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
 719 
 720     // Struct size is more than sufficient for the path components obtained
 721     // through the dlinfo() call, so only add additional space for the path
 722     // components explicitly added here.
 723     size_t library_path_size = info->dls_size + strlen(common_path);
 724     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
 725     library_path[0] = '\0';
 726 
 727     // Construct the desired Java library path from the linker's library
 728     // search path.
 729     //
 730     // For compatibility, it is optimal that we insert the additional path
 731     // components specific to the Java VM after those components specified
 732     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
 733     // infrastructure.
 734     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
 735       strcpy(library_path, common_path);
 736     } else {
 737       int inserted = 0;
 738       int i;
 739       for (i = 0; i < info->dls_cnt; i++, path++) {
 740         uint_t flags = path->dls_flags & LA_SER_MASK;
 741         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
 742           strcat(library_path, common_path);
 743           strcat(library_path, os::path_separator());
 744           inserted = 1;
 745         }
 746         strcat(library_path, path->dls_name);
 747         strcat(library_path, os::path_separator());
 748       }
 749       // Eliminate trailing path separator.
 750       library_path[strlen(library_path)-1] = '\0';
 751     }
 752 
 753     // happens before argument parsing - can't use a trace flag
 754     // tty->print_raw("init_system_properties_values: native lib path: ");
 755     // tty->print_raw_cr(library_path);
 756 
 757     // Callee copies into its own buffer.
 758     Arguments::set_library_path(library_path);
 759 
 760     FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
 761     FREE_C_HEAP_ARRAY(char, info, mtInternal);
 762   }
 763 
 764   // Extensions directories.
 765   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
 766   Arguments::set_ext_dirs(buf);
 767 
 768   // Endorsed standards default directory.
 769   sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
 770   Arguments::set_endorsed_dirs(buf);
 771 
 772   FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 773 
 774 #undef SYS_EXT_DIR
 775 #undef EXTENSIONS_DIR
 776 #undef ENDORSED_DIR
 777 }
 778 
 779 void os::breakpoint() {
 780   BREAKPOINT;
 781 }
 782 
 783 bool os::obsolete_option(const JavaVMOption *option) {
 784   if (!strncmp(option->optionString, "-Xt", 3)) {
 785     return true;
 786   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
 787     return true;
 788   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
 789     return true;
 790   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
 791     return true;
 792   }
 793   return false;
 794 }
 795 
 796 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
 797   address  stackStart  = (address)thread->stack_base();
 798   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
 799   if (sp < stackStart && sp >= stackEnd) return true;
 800   return false;
 801 }
 802 
 803 extern "C" void breakpoint() {
 804   // use debugger to set breakpoint here
 805 }
 806 
 807 static thread_t main_thread;
 808 
 809 // Thread start routine for all new Java threads
 810 extern "C" void* java_start(void* thread_addr) {
 811   // Try to randomize the cache line index of hot stack frames.
 812   // This helps when threads of the same stack traces evict each other's
 813   // cache lines. The threads can be either from the same JVM instance, or
 814   // from different JVM instances. The benefit is especially true for
 815   // processors with hyperthreading technology.
 816   static int counter = 0;
 817   int pid = os::current_process_id();
 818   alloca(((pid ^ counter++) & 7) * 128);
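  // The expression above shifts the frame by (pid ^ counter) & 7 slots of 128
  // bytes each, i.e. one of eight offsets in the range 0..896 bytes, which is
  // enough to spread the hot frames of identical threads across cache lines.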
 819 
 820   int prio;
 821   Thread* thread = (Thread*)thread_addr;
 822   OSThread* osthr = thread->osthread();
 823 
 824   osthr->set_lwp_id(_lwp_self());  // Store lwp in case we are bound
 825   thread->_schedctl = (void *) schedctl_init();
 826 
 827   if (UseNUMA) {
 828     int lgrp_id = os::numa_get_group_id();
 829     if (lgrp_id != -1) {
 830       thread->set_lgrp_id(lgrp_id);
 831     }
 832   }
 833 
 834   // If the creator called set priority before we started,
 835   // we need to call set_native_priority now that we have an lwp.
 836   // We used to get the priority from thr_getprio (we called
 837   // thr_setprio way back in create_thread) and pass it to
 838   // set_native_priority, but Solaris scales the priority
 839   // in java_to_os_priority, so when we read it back here,
 840   // we pass trash to set_native_priority instead of what's
 841   // in java_to_os_priority. So we save the native priority
 842   // in the osThread and recall it here.
 843 
 844   if (osthr->thread_id() != -1) {
 845     if (UseThreadPriorities) {
 846       int prio = osthr->native_priority();
 847       if (ThreadPriorityVerbose) {
 848         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
 849                       INTPTR_FORMAT ", setting priority: %d\n",
 850                       osthr->thread_id(), osthr->lwp_id(), prio);
 851       }
 852       os::set_native_priority(thread, prio);
 853     }
 854   } else if (ThreadPriorityVerbose) {
 855     warning("Can't set priority in _start routine, thread id hasn't been set\n");
 856   }
 857 
 858   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 859 
 860   // initialize signal mask for this thread
 861   os::Solaris::hotspot_sigmask(thread);
 862 
 863   thread->run();
 864 
 865   // One less thread is executing
 866   // When the VMThread gets here, the main thread may have already exited
 867   // which frees the CodeHeap containing the Atomic::dec code
 868   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 869     Atomic::dec(&os::Solaris::_os_thread_count);
 870   }
 871 
 872   if (UseDetachedThreads) {
 873     thr_exit(NULL);
 874     ShouldNotReachHere();
 875   }
 876   return NULL;
 877 }
 878 
 879 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
 880   // Allocate the OSThread object
 881   OSThread* osthread = new OSThread(NULL, NULL);
 882   if (osthread == NULL) return NULL;
 883 
 884   // Store info on the Solaris thread into the OSThread
 885   osthread->set_thread_id(thread_id);
 886   osthread->set_lwp_id(_lwp_self());
 887   thread->_schedctl = (void *) schedctl_init();
 888 
 889   if (UseNUMA) {
 890     int lgrp_id = os::numa_get_group_id();
 891     if (lgrp_id != -1) {
 892       thread->set_lgrp_id(lgrp_id);
 893     }
 894   }
 895 
 896   if (ThreadPriorityVerbose) {
 897     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
 898                   osthread->thread_id(), osthread->lwp_id());
 899   }
 900 
 901   // Initial thread state is INITIALIZED, not SUSPENDED
 902   osthread->set_state(INITIALIZED);
 903 
 904   return osthread;
 905 }
 906 
 907 void os::Solaris::hotspot_sigmask(Thread* thread) {
  // Save caller's signal mask
 909   sigset_t sigmask;
 910   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
 911   OSThread *osthread = thread->osthread();
 912   osthread->set_caller_sigmask(sigmask);
 913 
 914   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
 915   if (!ReduceSignalUsage) {
 916     if (thread->is_VM_thread()) {
 917       // Only the VM thread handles BREAK_SIGNAL ...
 918       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
 919     } else {
 920       // ... all other threads block BREAK_SIGNAL
 921       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
 922       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
 923     }
 924   }
 925 }
 926 
 927 bool os::create_attached_thread(JavaThread* thread) {
 928 #ifdef ASSERT
 929   thread->verify_not_published();
 930 #endif
 931   OSThread* osthread = create_os_thread(thread, thr_self());
 932   if (osthread == NULL) {
 933     return false;
 934   }
 935 
 936   // Initial thread state is RUNNABLE
 937   osthread->set_state(RUNNABLE);
 938   thread->set_osthread(osthread);
 939 
 940   // initialize signal mask for this thread
 941   // and save the caller's signal mask
 942   os::Solaris::hotspot_sigmask(thread);
 943 
 944   return true;
 945 }
 946 
 947 bool os::create_main_thread(JavaThread* thread) {
 948 #ifdef ASSERT
 949   thread->verify_not_published();
 950 #endif
 951   if (_starting_thread == NULL) {
 952     _starting_thread = create_os_thread(thread, main_thread);
 953     if (_starting_thread == NULL) {
 954       return false;
 955     }
 956   }
 957 
  // The primordial thread is runnable from the start
 959   _starting_thread->set_state(RUNNABLE);
 960 
 961   thread->set_osthread(_starting_thread);
 962 
 963   // initialize signal mask for this thread
 964   // and save the caller's signal mask
 965   os::Solaris::hotspot_sigmask(thread);
 966 
 967   return true;
 968 }
 969 
 970 
 971 bool os::create_thread(Thread* thread, ThreadType thr_type,
 972                        size_t stack_size) {
 973   // Allocate the OSThread object
 974   OSThread* osthread = new OSThread(NULL, NULL);
 975   if (osthread == NULL) {
 976     return false;
 977   }
 978 
 979   if (ThreadPriorityVerbose) {
 980     char *thrtyp;
 981     switch (thr_type) {
 982     case vm_thread:
 983       thrtyp = (char *)"vm";
 984       break;
 985     case cgc_thread:
 986       thrtyp = (char *)"cgc";
 987       break;
 988     case pgc_thread:
 989       thrtyp = (char *)"pgc";
 990       break;
 991     case java_thread:
 992       thrtyp = (char *)"java";
 993       break;
 994     case compiler_thread:
 995       thrtyp = (char *)"compiler";
 996       break;
 997     case watcher_thread:
 998       thrtyp = (char *)"watcher";
 999       break;
1000     default:
1001       thrtyp = (char *)"unknown";
1002       break;
1003     }
1004     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
1005   }
1006 
1007   // Calculate stack size if it's not specified by caller.
1008   if (stack_size == 0) {
    // The default stack size is 1M (2M for LP64).
1010     stack_size = (BytesPerWord >> 2) * K * K;
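    // For example, BytesPerWord == 8 (LP64) gives (8 >> 2) * K * K == 2M,
    // while BytesPerWord == 4 gives 1M, matching the comment above.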
1011 
1012     switch (thr_type) {
1013     case os::java_thread:
1014       // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
1015       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
1016       break;
1017     case os::compiler_thread:
1018       if (CompilerThreadStackSize > 0) {
1019         stack_size = (size_t)(CompilerThreadStackSize * K);
1020         break;
1021       } // else fall through:
1022         // use VMThreadStackSize if CompilerThreadStackSize is not defined
1023     case os::vm_thread:
1024     case os::pgc_thread:
1025     case os::cgc_thread:
1026     case os::watcher_thread:
1027       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1028       break;
1029     }
1030   }
1031   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1032 
1033   // Initial state is ALLOCATED but not INITIALIZED
1034   osthread->set_state(ALLOCATED);
1035 
1036   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We have lots of threads. Check whether we still have some address space
    // left: we need at least 20Mb (VirtualMemoryBangSize) of unreserved address
    // space, which we check by trying to reserve (and then releasing) that much.
1040     const size_t VirtualMemoryBangSize = 20*K*K;
1041     char* mem = os::reserve_memory(VirtualMemoryBangSize);
1042     if (mem == NULL) {
1043       delete osthread;
1044       return false;
1045     } else {
1046       // Release the memory again
1047       os::release_memory(mem, VirtualMemoryBangSize);
1048     }
1049   }
1050 
1051   // Setup osthread because the child thread may need it.
1052   thread->set_osthread(osthread);
1053 
1054   // Create the Solaris thread
1055   thread_t tid = 0;
1056   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
1057   int      status;
1058 
1059   // Mark that we don't have an lwp or thread id yet.
1060   // In case we attempt to set the priority before the thread starts.
1061   osthread->set_lwp_id(-1);
1062   osthread->set_thread_id(-1);
1063 
1064   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1065   if (status != 0) {
1066     if (PrintMiscellaneous && (Verbose || WizardMode)) {
1067       perror("os::create_thread");
1068     }
1069     thread->set_osthread(NULL);
1070     // Need to clean up stuff we've allocated so far
1071     delete osthread;
1072     return false;
1073   }
1074 
1075   Atomic::inc(&os::Solaris::_os_thread_count);
1076 
1077   // Store info on the Solaris thread into the OSThread
1078   osthread->set_thread_id(tid);
1079 
1080   // Remember that we created this thread so we can set priority on it
1081   osthread->set_vm_created();
1082 
1083   // Initial thread state is INITIALIZED, not SUSPENDED
1084   osthread->set_state(INITIALIZED);
1085 
1086   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1087   return true;
1088 }
1089 
// SIGJVM1 and SIGJVM2 are defined by Solaris 10 and later. Defining them here
// as well allows builds on earlier versions of Solaris to take advantage of
// the newly reserved Solaris JVM signals. When SIGJVM1/SIGJVM2 are available,
// INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2, and -XX:+UseAltSigs
// does nothing since these signals should have no conflict with other uses.
//
1095 #if !defined(SIGJVM1)
1096   #define SIGJVM1 39
1097   #define SIGJVM2 40
1098 #endif
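// On Solaris 10 and later the system signal headers define SIGJVM1/SIGJVM2
// themselves; the fallback values above (39 and 40) only matter when building
// on older releases, and isJVM1available() below still checks at runtime
// whether the running system actually reserves them (SIGJVM1 < SIGRTMIN).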
1099 
1100 debug_only(static bool signal_sets_initialized = false);
1101 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1102 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1103 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1104 
1105 bool os::Solaris::is_sig_ignored(int sig) {
1106   struct sigaction oact;
1107   sigaction(sig, (struct sigaction*)NULL, &oact);
1108   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
1109                                  : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
1110   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
1111     return true;
1112   } else {
1113     return false;
1114   }
1115 }
1116 
// Note: SIGRTMIN is a macro that calls sysconf(), so the SIGRTMIN value for
// the running system is detected dynamically at runtime, not at build time.
1119 static bool isJVM1available() {
1120   return SIGJVM1 < SIGRTMIN;
1121 }
1122 
1123 void os::Solaris::signal_sets_init() {
1124   // Should also have an assertion stating we are still single-threaded.
1125   assert(!signal_sets_initialized, "Already initialized");
1126   // Fill in signals that are necessarily unblocked for all threads in
1127   // the VM. Currently, we unblock the following signals:
1128   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1129   //                         by -Xrs (=ReduceSignalUsage));
1130   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1131   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1132   // the dispositions or masks wrt these signals.
1133   // Programs embedding the VM that want to use the above signals for their
1134   // own purposes must, at this time, use the "-Xrs" option to prevent
1135   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1136   // (See bug 4345157, and other related bugs).
1137   // In reality, though, unblocking these signals is really a nop, since
1138   // these signals are not blocked by default.
1139   sigemptyset(&unblocked_sigs);
1140   sigemptyset(&allowdebug_blocked_sigs);
1141   sigaddset(&unblocked_sigs, SIGILL);
1142   sigaddset(&unblocked_sigs, SIGSEGV);
1143   sigaddset(&unblocked_sigs, SIGBUS);
1144   sigaddset(&unblocked_sigs, SIGFPE);
1145 
  if (isJVM1available()) {
1147     os::Solaris::set_SIGinterrupt(SIGJVM1);
1148     os::Solaris::set_SIGasync(SIGJVM2);
1149   } else if (UseAltSigs) {
1150     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1151     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1152   } else {
1153     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1154     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1155   }
1156 
1157   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1158   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1159 
1160   if (!ReduceSignalUsage) {
1161     if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1162       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1163       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1164     }
1165     if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1166       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1167       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1168     }
1169     if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1170       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1171       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1172     }
1173   }
1174   // Fill in signals that are blocked by all but the VM thread.
1175   sigemptyset(&vm_sigs);
1176   if (!ReduceSignalUsage) {
1177     sigaddset(&vm_sigs, BREAK_SIGNAL);
1178   }
1179   debug_only(signal_sets_initialized = true);
1180 
1181   // For diagnostics only used in run_periodic_checks
1182   sigemptyset(&check_signal_done);
1183 }
1184 
1185 // These are signals that are unblocked while a thread is running Java.
1186 // (For some reason, they get blocked by default.)
1187 sigset_t* os::Solaris::unblocked_signals() {
1188   assert(signal_sets_initialized, "Not initialized");
1189   return &unblocked_sigs;
1190 }
1191 
1192 // These are the signals that are blocked while a (non-VM) thread is
1193 // running Java. Only the VM thread handles these signals.
1194 sigset_t* os::Solaris::vm_signals() {
1195   assert(signal_sets_initialized, "Not initialized");
1196   return &vm_sigs;
1197 }
1198 
1199 // These are signals that are blocked during cond_wait to allow debugger in
1200 sigset_t* os::Solaris::allowdebug_blocked_signals() {
1201   assert(signal_sets_initialized, "Not initialized");
1202   return &allowdebug_blocked_sigs;
1203 }
1204 
1205 
1206 void _handle_uncaught_cxx_exception() {
1207   VMError err("An uncaught C++ exception");
1208   err.report_and_die();
1209 }
1210 
1211 
1212 // First crack at OS-specific initialization, from inside the new thread.
1213 void os::initialize_thread(Thread* thr) {
1214   int r = thr_main();
1215   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
1216   if (r) {
1217     JavaThread* jt = (JavaThread *)thr;
1218     assert(jt != NULL, "Sanity check");
1219     size_t stack_size;
1220     address base = jt->stack_base();
1221     if (Arguments::created_by_java_launcher()) {
1222       // Use 2MB to allow for Solaris 7 64 bit mode.
1223       stack_size = JavaThread::stack_size_at_create() == 0
1224         ? 2048*K : JavaThread::stack_size_at_create();
1225 
1226       // There are rare cases when we may have already used more than
1227       // the basic stack size allotment before this method is invoked.
1228       // Attempt to allow for a normally sized java_stack.
1229       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1230       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1231     } else {
1232       // 6269555: If we were not created by a Java launcher, i.e. if we are
1233       // running embedded in a native application, treat the primordial thread
1234       // as much like a native attached thread as possible.  This means using
1235       // the current stack size from thr_stksegment(), unless it is too large
1236       // to reliably setup guard pages.  A reasonable max size is 8MB.
1237       size_t current_size = current_stack_size();
1238       // This should never happen, but just in case....
1239       if (current_size == 0) current_size = 2 * K * K;
1240       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1241     }
    address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
1243     stack_size = (size_t)(base - bottom);
1244 
1245     assert(stack_size > 0, "Stack size calculation problem");
1246 
1247     if (stack_size > jt->stack_size()) {
1248 #ifndef PRODUCT
1249       struct rlimit limits;
1250       getrlimit(RLIMIT_STACK, &limits);
1251       size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1252       assert(size >= jt->stack_size(), "Stack size problem in main thread");
1253 #endif
      tty->print_cr("Stack size of " SIZE_FORMAT " Kb exceeds current limit of " SIZE_FORMAT " Kb.\n"
                    "(Stack sizes are rounded up to a multiple of the system page size.)\n"
                    "See limit(1) to increase the stack size limit.",
                    stack_size / K, jt->stack_size() / K);
1258       vm_exit(1);
1259     }
1260     assert(jt->stack_size() >= stack_size,
1261            "Attempt to map more stack than was allocated");
1262     jt->set_stack_size(stack_size);
1263   }
1264 
1265   // With the T2 libthread (T1 is no longer supported) threads are always bound
1266   // and we use stackbanging in all cases.
1267 
1268   os::Solaris::init_thread_fpu_state();
1269   std::set_terminate(_handle_uncaught_cxx_exception);
1270 }
1271 
1272 
1273 
1274 // Free Solaris resources related to the OSThread
1275 void os::free_thread(OSThread* osthread) {
1276   assert(osthread != NULL, "os::free_thread but osthread not set");
1277 
1278 
1279   // We are told to free resources of the argument thread,
1280   // but we can only really operate on the current thread.
1281   // The main thread must take the VMThread down synchronously
1282   // before the main thread exits and frees up CodeHeap
1283   guarantee((Thread::current()->osthread() == osthread
1284              || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1285   if (Thread::current()->osthread() == osthread) {
1286     // Restore caller's signal mask
1287     sigset_t sigmask = osthread->caller_sigmask();
1288     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1289   }
1290   delete osthread;
1291 }
1292 
1293 void os::pd_start_thread(Thread* thread) {
1294   int status = thr_continue(thread->osthread()->thread_id());
1295   assert_status(status == 0, status, "thr_continue failed");
1296 }
1297 
1298 
1299 intx os::current_thread_id() {
1300   return (intx)thr_self();
1301 }
1302 
1303 static pid_t _initial_pid = 0;
1304 
1305 int os::current_process_id() {
1306   return (int)(_initial_pid ? _initial_pid : getpid());
1307 }
1308 
1309 int os::allocate_thread_local_storage() {
1310   // %%%       in Win32 this allocates a memory segment pointed to by a
1311   //           register.  Dan Stein can implement a similar feature in
1312   //           Solaris.  Alternatively, the VM can do the same thing
1313   //           explicitly: malloc some storage and keep the pointer in a
1314   //           register (which is part of the thread's context) (or keep it
1315   //           in TLS).
1316   // %%%       In current versions of Solaris, thr_self and TSD can
1317   //           be accessed via short sequences of displaced indirections.
1318   //           The value of thr_self is available as %g7(36).
1319   //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
1320   //           assuming that the current thread already has a value bound to k.
1321   //           It may be worth experimenting with such access patterns,
1322   //           and later having the parameters formally exported from a Solaris
1323   //           interface.  I think, however, that it will be faster to
1324   //           maintain the invariant that %g2 always contains the
1325   //           JavaThread in Java code, and have stubs simply
1326   //           treat %g2 as a caller-save register, preserving it in a %lN.
1327   thread_key_t tk;
1328   if (thr_keycreate(&tk, NULL)) {
1329     fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
1330                   "(%s)", strerror(errno)));
1331   }
1332   return int(tk);
1333 }
1334 
1335 void os::free_thread_local_storage(int index) {
1336   // %%% don't think we need anything here
1337   // if (pthread_key_delete((pthread_key_t) tk)) {
1338   //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
1339   // }
1340 }
1341 
// libthread's allocation for tsd_common is a version-specific small number;
// the point is that NO swap space is available.
1344 #define SMALLINT 32
1345 void os::thread_local_storage_at_put(int index, void* value) {
1346   // %%% this is used only in threadLocalStorage.cpp
1347   if (thr_setspecific((thread_key_t)index, value)) {
1348     if (errno == ENOMEM) {
1349       vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
1350                             "thr_setspecific: out of swap space");
1351     } else {
1352       fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1353                     "(%s)", strerror(errno)));
1354     }
1355   } else {
1356     ThreadLocalStorage::set_thread_in_slot((Thread *) value);
1357   }
1358 }
1359 
1360 // This function could be called before TLS is initialized, for example, when
1361 // VM receives an async signal or when VM causes a fatal error during
1362 // initialization. Return NULL if thr_getspecific() fails.
1363 void* os::thread_local_storage_at(int index) {
1364   // %%% this is used only in threadLocalStorage.cpp
1365   void* r = NULL;
1366   return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1367 }
1368 
1369 
1370 // gethrtime() should be monotonic according to the documentation,
1371 // but some virtualized platforms are known to break this guarantee.
1372 // getTimeNanos() must be guaranteed not to move backwards, so we
1373 // are forced to add a check here.
1374 inline hrtime_t getTimeNanos() {
1375   const hrtime_t now = gethrtime();
1376   const hrtime_t prev = max_hrtime;
1377   if (now <= prev) {
1378     return prev;   // same or retrograde time;
1379   }
1380   const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1381   assert(obsv >= prev, "invariant");   // Monotonicity
1382   // If the CAS succeeded then we're done and return "now".
1383   // If the CAS failed and the observed value "obsv" is >= now then
1384   // we should return "obsv".  If the CAS failed and now > obsv > prv then
1385   // some other thread raced this thread and installed a new value, in which case
1386   // we could either (a) retry the entire operation, (b) retry trying to install now
1387   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1388   // we might discard a higher "now" value in deference to a slightly lower but freshly
1389   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1390   // to (a) or (b) -- and greatly reduces coherence traffic.
1391   // We might also condition (c) on the magnitude of the delta between obsv and now.
1392   // Avoiding excessive CAS operations to hot RW locations is critical.
1393   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1394   return (prev == obsv) ? now : obsv;
1395 }
1396 
1397 // Time since start-up in seconds to a fine granularity.
1398 // Used by VMSelfDestructTimer and the MemProfiler.
1399 double os::elapsedTime() {
1400   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1401 }
1402 
1403 jlong os::elapsed_counter() {
1404   return (jlong)(getTimeNanos() - first_hrtime);
1405 }
1406 
1407 jlong os::elapsed_frequency() {
1408   return hrtime_hz;
1409 }
1410 
1411 // Return the real, user, and system times in seconds from an
1412 // arbitrary fixed point in the past.
1413 bool os::getTimesSecs(double* process_real_time,
1414                       double* process_user_time,
1415                       double* process_system_time) {
1416   struct tms ticks;
1417   clock_t real_ticks = times(&ticks);
1418 
1419   if (real_ticks == (clock_t) (-1)) {
1420     return false;
1421   } else {
1422     double ticks_per_second = (double) clock_tics_per_sec;
1423     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1424     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1425     // For consistency return the real time from getTimeNanos()
1426     // converted to seconds.
1427     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1428 
1429     return true;
1430   }
1431 }
1432 
1433 bool os::supports_vtime() { return true; }
1434 
1435 bool os::enable_vtime() {
1436   int fd = ::open("/proc/self/ctl", O_WRONLY);
1437   if (fd == -1) {
1438     return false;
1439   }
1440 
1441   long cmd[] = { PCSET, PR_MSACCT };
1442   int res = ::write(fd, cmd, sizeof(long) * 2);
1443   ::close(fd);
1444   if (res != sizeof(long) * 2) {
1445     return false;
1446   }
1447   return true;
1448 }
1449 
1450 bool os::vtime_enabled() {
1451   int fd = ::open("/proc/self/status", O_RDONLY);
1452   if (fd == -1) {
1453     return false;
1454   }
1455 
1456   pstatus_t status;
1457   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1458   ::close(fd);
1459   if (res != sizeof(pstatus_t)) {
1460     return false;
1461   }
1462   return status.pr_flags & PR_MSACCT;
1463 }
1464 
1465 double os::elapsedVTime() {
1466   return (double)gethrvtime() / (double)hrtime_hz;
1467 }
1468 
1469 // Used internally for comparisons only
1470 // getTimeMillis is guaranteed not to move backwards on Solaris
1471 jlong getTimeMillis() {
1472   jlong nanotime = getTimeNanos();
1473   return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1474 }
1475 
1476 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1477 jlong os::javaTimeMillis() {
1478   timeval t;
1479   if (gettimeofday(&t, NULL) == -1) {
1480     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1481   }
1482   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1483 }
1484 
1485 jlong os::javaTimeNanos() {
1486   return (jlong)getTimeNanos();
1487 }
1488 
1489 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1490   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1491   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1492   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1493   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1494 }
1495 
1496 char * os::local_time_string(char *buf, size_t buflen) {
1497   struct tm t;
1498   time_t long_time;
1499   time(&long_time);
1500   localtime_r(&long_time, &t);
1501   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1502                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1503                t.tm_hour, t.tm_min, t.tm_sec);
1504   return buf;
1505 }
1506 
1507 // Note: os::shutdown() might be called very early during initialization, or
1508 // called from signal handler. Before adding something to os::shutdown(), make
1509 // sure it is async-safe and can handle partially initialized VM.
1510 void os::shutdown() {
1511 
1512   // allow PerfMemory to attempt cleanup of any persistent resources
1513   perfMemory_exit();
1514 
1515   // needs to remove object in file system
1516   AttachListener::abort();
1517 
1518   // flush buffered output, finish log files
1519   ostream_abort();
1520 
1521   // Check for abort hook
1522   abort_hook_t abort_hook = Arguments::abort_hook();
1523   if (abort_hook != NULL) {
1524     abort_hook();
1525   }
1526 }
1527 
1528 // Note: os::abort() might be called very early during initialization, or
1529 // called from signal handler. Before adding something to os::abort(), make
1530 // sure it is async-safe and can handle partially initialized VM.
1531 void os::abort(bool dump_core) {
1532   os::shutdown();
1533   if (dump_core) {
1534 #ifndef PRODUCT
1535     fdStream out(defaultStream::output_fd());
1536     out.print_raw("Current thread is ");
1537     char buf[16];
1538     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1539     out.print_raw_cr(buf);
1540     out.print_raw_cr("Dumping core ...");
1541 #endif
1542     ::abort(); // dump core (for debugging)
1543   }
1544 
1545   ::exit(1);
1546 }
1547 
1548 // Die immediately, no exit hook, no abort hook, no cleanup.
1549 void os::die() {
1550   ::abort(); // dump core (for debugging)
1551 }
1552 
1553 // DLL functions
1554 
1555 const char* os::dll_file_extension() { return ".so"; }
1556 
1557 // This must be hard coded because it's the system's temporary
1558 // directory, not the java application's temp directory (a la java.io.tmpdir).
1559 const char* os::get_temp_directory() { return "/tmp"; }
1560 
1561 static bool file_exists(const char* filename) {
1562   struct stat statbuf;
1563   if (filename == NULL || strlen(filename) == 0) {
1564     return false;
1565   }
1566   return os::stat(filename, &statbuf) == 0;
1567 }
1568 
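     // Construct the platform-dependent library name ("lib<fname>.so") in 'buffer'.
     // If 'pname' is a path-separated list, each directory is tried in turn and the
     // first existing file wins; returns false on buffer overflow or if nothing on
     // the list exists.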
1569 bool os::dll_build_name(char* buffer, size_t buflen,
1570                         const char* pname, const char* fname) {
1571   bool retval = false;
1572   const size_t pnamelen = pname ? strlen(pname) : 0;
1573 
1574   // Return error on buffer overflow.
1575   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1576     return retval;
1577   }
1578 
1579   if (pnamelen == 0) {
1580     snprintf(buffer, buflen, "lib%s.so", fname);
1581     retval = true;
1582   } else if (strchr(pname, *os::path_separator()) != NULL) {
1583     int n;
1584     char** pelements = split_path(pname, &n);
1585     if (pelements == NULL) {
1586       return false;
1587     }
1588     for (int i = 0; i < n; i++) {
1589       // really shouldn't be NULL but what the heck, check can't hurt
1590       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1591         continue; // skip the empty path values
1592       }
1593       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1594       if (file_exists(buffer)) {
1595         retval = true;
1596         break;
1597       }
1598     }
1599     // release the storage
1600     for (int i = 0; i < n; i++) {
1601       if (pelements[i] != NULL) {
1602         FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1603       }
1604     }
1605     if (pelements != NULL) {
1606       FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1607     }
1608   } else {
1609     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1610     retval = true;
1611   }
1612   return retval;
1613 }
1614 
1615 // check if addr is inside libjvm.so
1616 bool os::address_is_in_vm(address addr) {
1617   static address libjvm_base_addr;
1618   Dl_info dlinfo;
1619 
1620   if (libjvm_base_addr == NULL) {
1621     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1622       libjvm_base_addr = (address)dlinfo.dli_fbase;
1623     }
1624     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1625   }
1626 
1627   if (dladdr((void *)addr, &dlinfo) != 0) {
1628     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1629   }
1630 
1631   return false;
1632 }
1633 
1634 typedef int (*dladdr1_func_type)(void *, Dl_info *, void **, int);
1635 static dladdr1_func_type dladdr1_func = NULL;
1636 
1637 bool os::dll_address_to_function_name(address addr, char *buf,
1638                                       int buflen, int * offset) {
1639   // buf is not optional, but offset is optional
1640   assert(buf != NULL, "sanity check");
1641 
1642   Dl_info dlinfo;
1643 
1644   // dladdr1_func was initialized in os::init()
1645   if (dladdr1_func != NULL) {
1646     // yes, we have dladdr1
1647 
1648     // Support for dladdr1 is checked at runtime; it may be
1649     // available even if the vm is built on a machine that does
1650     // not have dladdr1 support.  Make sure there is a value for
1651     // RTLD_DL_SYMENT.
1652 #ifndef RTLD_DL_SYMENT
1653   #define RTLD_DL_SYMENT 1
1654 #endif
1655 #ifdef _LP64
1656     Elf64_Sym * info;
1657 #else
1658     Elf32_Sym * info;
1659 #endif
1660     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1661                      RTLD_DL_SYMENT) != 0) {
1662       // see if we have a matching symbol that covers our address
1663       if (dlinfo.dli_saddr != NULL &&
1664           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1665         if (dlinfo.dli_sname != NULL) {
1666           if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1667             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1668           }
1669           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1670           return true;
1671         }
1672       }
1673       // no matching symbol so try for just file info
1674       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1675         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1676                             buf, buflen, offset, dlinfo.dli_fname)) {
1677           return true;
1678         }
1679       }
1680     }
1681     buf[0] = '\0';
1682     if (offset != NULL) *offset  = -1;
1683     return false;
1684   }
1685 
1686   // no, only dladdr is available
1687   if (dladdr((void *)addr, &dlinfo) != 0) {
1688     // see if we have a matching symbol
1689     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1690       if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1691         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1692       }
1693       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1694       return true;
1695     }
1696     // no matching symbol so try for just file info
1697     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1698       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1699                           buf, buflen, offset, dlinfo.dli_fname)) {
1700         return true;
1701       }
1702     }
1703   }
1704   buf[0] = '\0';
1705   if (offset != NULL) *offset  = -1;
1706   return false;
1707 }
1708 
1709 bool os::dll_address_to_library_name(address addr, char* buf,
1710                                      int buflen, int* offset) {
1711   // buf is not optional, but offset is optional
1712   assert(buf != NULL, "sanity check");
1713 
1714   Dl_info dlinfo;
1715 
1716   if (dladdr((void*)addr, &dlinfo) != 0) {
1717     if (dlinfo.dli_fname != NULL) {
1718       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1719     }
1720     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1721       *offset = addr - (address)dlinfo.dli_fbase;
1722     }
1723     return true;
1724   }
1725 
1726   buf[0] = '\0';
1727   if (offset) *offset = -1;
1728   return false;
1729 }
1730 
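     // Walk the runtime linker's link map (obtained via dlinfo(RTLD_DI_LINKMAP))
     // from its head and invoke 'callback' for every loaded object. Returns 0 on
     // success, or 1 on failure or when the callback terminates the walk by
     // returning non-zero.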
1731 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1732   Dl_info dli;
1733   // Resolve our own module first; bail out if that fails.
1734   if (dladdr(CAST_FROM_FN_PTR(void *, os::get_loaded_modules_info), &dli) == 0 ||
1735       dli.dli_fname == NULL) {
1736     return 1;
1737   }
1738 
1739   void * handle = dlopen(dli.dli_fname, RTLD_LAZY);
1740   if (handle == NULL) {
1741     return 1;
1742   }
1743 
1744   Link_map *map;
1745   dlinfo(handle, RTLD_DI_LINKMAP, &map);
1746   if (map == NULL) {
1747     dlclose(handle);
1748     return 1;
1749   }
1750 
1751   while (map->l_prev != NULL) {
1752     map = map->l_prev;
1753   }
1754 
1755   while (map != NULL) {
1756     // Iterate through all map entries and call callback with fields of interest
1757     if (callback(map->l_name, (address)map->l_addr, (address)0, param)) {
1758       dlclose(handle);
1759       return 1;
1760     }
1761     map = map->l_next;
1762   }
1763 
1764   dlclose(handle);
1765   return 0;
1766 }
1767 
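     // Callback for get_loaded_modules_info(): prints one loaded module per line
     // (base address and name) to the outputStream passed in 'param'.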
1768 int _print_dll_info_cb(const char * name, address base_address, address top_address, void * param) {
1769   outputStream * out = (outputStream *) param;
1770   out->print_cr(PTR_FORMAT " \t%s", base_address, name);
1771   return 0;
1772 }
1773 
1774 void os::print_dll_info(outputStream * st) {
1775   st->print_cr("Dynamic libraries:"); st->flush();
1776   if (get_loaded_modules_info(_print_dll_info_cb, (void *)st)) {
1777     st->print_cr("Error: Cannot print dynamic libraries.");
1778   }
1779 }
1780 
1781 // Loads a .dll/.so and, in case of error, checks whether the
1782 // .dll/.so was built for the same architecture that HotSpot is
1783 // running on.
1784 
1785 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1786   void * result= ::dlopen(filename, RTLD_LAZY);
1787   if (result != NULL) {
1788     // Successful loading
1789     return result;
1790   }
1791 
1792   Elf32_Ehdr elf_head;
1793 
1794   // Read system error message into ebuf
1795   // It may or may not be overwritten below
1796   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1797   ebuf[ebuflen-1]='\0';
1798   int diag_msg_max_length=ebuflen-strlen(ebuf);
1799   char* diag_msg_buf=ebuf+strlen(ebuf);
1800 
1801   if (diag_msg_max_length==0) {
1802     // No more space in ebuf for additional diagnostics message
1803     return NULL;
1804   }
1805 
1806 
1807   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1808 
1809   if (file_descriptor < 0) {
1810     // Can't open library, report dlerror() message
1811     return NULL;
1812   }
1813 
1814   bool failed_to_read_elf_head=
1815     (sizeof(elf_head)!=
1816      (::read(file_descriptor, &elf_head,sizeof(elf_head))));
1817 
1818   ::close(file_descriptor);
1819   if (failed_to_read_elf_head) {
1820     // file i/o error - report dlerror() msg
1821     return NULL;
1822   }
1823 
1824   typedef struct {
1825     Elf32_Half  code;         // Actual value as defined in elf.h
1826     Elf32_Half  compat_class; // Compatibility class of the architecture, from the VM's point of view
1827     char        elf_class;    // 32 or 64 bit
1828     char        endianness;   // MSB or LSB
1829     char*       name;         // String representation
1830   } arch_t;
1831 
1832   static const arch_t arch_array[]={
1833     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1834     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1835     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1836     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1837     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1838     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1839     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1840     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1841     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1842     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1843   };
1844 
1845 #if  (defined IA32)
1846   static  Elf32_Half running_arch_code=EM_386;
1847 #elif   (defined AMD64)
1848   static  Elf32_Half running_arch_code=EM_X86_64;
1849 #elif  (defined IA64)
1850   static  Elf32_Half running_arch_code=EM_IA_64;
1851 #elif  (defined __sparc) && (defined _LP64)
1852   static  Elf32_Half running_arch_code=EM_SPARCV9;
1853 #elif  (defined __sparc) && (!defined _LP64)
1854   static  Elf32_Half running_arch_code=EM_SPARC;
1855 #elif  (defined __powerpc64__)
1856   static  Elf32_Half running_arch_code=EM_PPC64;
1857 #elif  (defined __powerpc__)
1858   static  Elf32_Half running_arch_code=EM_PPC;
1859 #elif (defined ARM)
1860   static  Elf32_Half running_arch_code=EM_ARM;
1861 #else
1862   #error Method os::dll_load requires that one of the following is defined:\
1863        IA32, AMD64, IA64, __sparc, __powerpc__, __powerpc64__, ARM
1864 #endif
1865 
1866   // Identify the compatibility class for the VM's architecture and the
1867   // library's architecture, and obtain string descriptions for both.
1868 
1869   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1870   int running_arch_index=-1;
1871 
1872   for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) {
1873     if (running_arch_code == arch_array[i].code) {
1874       running_arch_index    = i;
1875     }
1876     if (lib_arch.code == arch_array[i].code) {
1877       lib_arch.compat_class = arch_array[i].compat_class;
1878       lib_arch.name         = arch_array[i].name;
1879     }
1880   }
1881 
1882   assert(running_arch_index != -1,
1883          "Didn't find running architecture code (running_arch_code) in arch_array");
1884   if (running_arch_index == -1) {
1885     // Even though running architecture detection failed
1886     // we may still continue with reporting dlerror() message
1887     return NULL;
1888   }
1889 
1890   if (lib_arch.endianness != arch_array[running_arch_index].endianness) {
1891     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1892     return NULL;
1893   }
1894 
1895   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1896     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1897     return NULL;
1898   }
1899 
1900   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1901     if (lib_arch.name!=NULL) {
1902       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1903                  " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1904                  lib_arch.name, arch_array[running_arch_index].name);
1905     } else {
1906       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1907                  " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1908                  lib_arch.code,
1909                  arch_array[running_arch_index].name);
1910     }
1911   }
1912 
1913   return NULL;
1914 }
1915 
1916 void* os::dll_lookup(void* handle, const char* name) {
1917   return dlsym(handle, name);
1918 }
1919 
1920 void* os::get_default_process_handle() {
1921   return (void*)::dlopen(NULL, RTLD_LAZY);
1922 }
1923 
1924 int os::stat(const char *path, struct stat *sbuf) {
1925   char pathbuf[MAX_PATH];
1926   if (strlen(path) > MAX_PATH - 1) {
1927     errno = ENAMETOOLONG;
1928     return -1;
1929   }
1930   os::native_path(strcpy(pathbuf, path));
1931   return ::stat(pathbuf, sbuf);
1932 }
1933 
1934 static bool _print_ascii_file(const char* filename, outputStream* st) {
1935   int fd = ::open(filename, O_RDONLY);
1936   if (fd == -1) {
1937     return false;
1938   }
1939 
1940   char buf[32];
1941   int bytes;
1942   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
1943     st->print_raw(buf, bytes);
1944   }
1945 
1946   ::close(fd);
1947 
1948   return true;
1949 }
1950 
1951 void os::print_os_info_brief(outputStream* st) {
1952   os::Solaris::print_distro_info(st);
1953 
1954   os::Posix::print_uname_info(st);
1955 
1956   os::Solaris::print_libversion_info(st);
1957 }
1958 
1959 void os::print_os_info(outputStream* st) {
1960   st->print("OS:");
1961 
1962   os::Solaris::print_distro_info(st);
1963 
1964   os::Posix::print_uname_info(st);
1965 
1966   os::Solaris::print_libversion_info(st);
1967 
1968   os::Posix::print_rlimit_info(st);
1969 
1970   os::Posix::print_load_average(st);
1971 }
1972 
1973 void os::Solaris::print_distro_info(outputStream* st) {
1974   if (!_print_ascii_file("/etc/release", st)) {
1975     st->print("Solaris");
1976   }
1977   st->cr();
1978 }
1979 
1980 void os::Solaris::print_libversion_info(outputStream* st) {
1981   st->print("  (T2 libthread)");
1982   st->cr();
1983 }
1984 
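     // Scan /proc/self/map for a mapping at virtual address 0 and print a warning
     // if one exists; such a mapping can hide NULL pointer bugs. Returns true if a
     // zero-address mapping was found.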
1985 static bool check_addr0(outputStream* st) {
1986   jboolean status = false;
1987   int fd = ::open("/proc/self/map",O_RDONLY);
1988   if (fd >= 0) {
1989     prmap_t p;
1990     while (::read(fd, &p, sizeof(p)) > 0) {
1991       if (p.pr_vaddr == 0x0) {
1992         st->print("Warning: Address: 0x%x, Size: %dK, ", p.pr_vaddr, p.pr_size/1024);
1993         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
1994         st->print("Access:");
1995         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
1996         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
1997         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
1998         st->cr();
1999         status = true;
2000       }
2001     }
2002     ::close(fd);
2003   }
2004   return status;
2005 }
2006 
2007 void os::pd_print_cpu_info(outputStream* st) {
2008   // Nothing to do for now.
2009 }
2010 
2011 void os::print_memory_info(outputStream* st) {
2012   st->print("Memory:");
2013   st->print(" %dk page", os::vm_page_size()>>10);
2014   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
2015   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
2016   st->cr();
2017   (void) check_addr0(st);
2018 }
2019 
2020 void os::print_siginfo(outputStream* st, void* siginfo) {
2021   const siginfo_t* si = (const siginfo_t*)siginfo;
2022 
2023   os::Posix::print_siginfo_brief(st, si);
2024 
2025   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2026       UseSharedSpaces) {
2027     FileMapInfo* mapinfo = FileMapInfo::current_info();
2028     if (mapinfo->is_in_shared_space(si->si_addr)) {
2029       st->print("\n\nError accessing class data sharing archive."   \
2030                 " Mapped file inaccessible during execution, "      \
2031                 " possible disk/network problem.");
2032     }
2033   }
2034   st->cr();
2035 }
2036 
2037 // Moved here from the signal-handling code below because the diagnostic
2038 // printing routines need them.
2039 #define OLDMAXSIGNUM 32
2040 static int Maxsignum = 0;
2041 static int *ourSigFlags = NULL;
2042 
2043 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2044 
2045 int os::Solaris::get_our_sigflags(int sig) {
2046   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2047   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2048   return ourSigFlags[sig];
2049 }
2050 
2051 void os::Solaris::set_our_sigflags(int sig, int flags) {
2052   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2053   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2054   ourSigFlags[sig] = flags;
2055 }
2056 
2057 
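     // Format 'handler' as "<library>+0x<offset>" when the containing library can
     // be resolved, otherwise print the raw handler address.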
2058 static const char* get_signal_handler_name(address handler,
2059                                            char* buf, int buflen) {
2060   int offset;
2061   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2062   if (found) {
2063     // skip directory names
2064     const char *p1, *p2;
2065     p1 = buf;
2066     size_t len = strlen(os::file_separator());
2067     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2068     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2069   } else {
2070     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2071   }
2072   return buf;
2073 }
2074 
2075 static void print_signal_handler(outputStream* st, int sig,
2076                                  char* buf, size_t buflen) {
2077   struct sigaction sa;
2078 
2079   sigaction(sig, NULL, &sa);
2080 
2081   st->print("%s: ", os::exception_name(sig, buf, buflen));
2082 
2083   address handler = (sa.sa_flags & SA_SIGINFO)
2084                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2085                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
2086 
2087   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2088     st->print("SIG_DFL");
2089   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2090     st->print("SIG_IGN");
2091   } else {
2092     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2093   }
2094 
2095   st->print(", sa_mask[0]=");
2096   os::Posix::print_signal_set_short(st, &sa.sa_mask);
2097 
2098   address rh = VMError::get_resetted_sighandler(sig);
2099   // Maybe the handler was reset by VMError?
2100   if (rh != NULL) {
2101     handler = rh;
2102     sa.sa_flags = VMError::get_resetted_sigflags(sig);
2103   }
2104 
2105   st->print(", sa_flags=");
2106   os::Posix::print_sa_flags(st, sa.sa_flags);
2107 
2108   // Check: is it our handler?
2109   if (handler == CAST_FROM_FN_PTR(address, signalHandler) ||
2110       handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
2111     // It is our signal handler
2112     // check for flags
2113     if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
2114       st->print(
2115                 ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
2116                 os::Solaris::get_our_sigflags(sig));
2117     }
2118   }
2119   st->cr();
2120 }
2121 
2122 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2123   st->print_cr("Signal Handlers:");
2124   print_signal_handler(st, SIGSEGV, buf, buflen);
2125   print_signal_handler(st, SIGBUS , buf, buflen);
2126   print_signal_handler(st, SIGFPE , buf, buflen);
2127   print_signal_handler(st, SIGPIPE, buf, buflen);
2128   print_signal_handler(st, SIGXFSZ, buf, buflen);
2129   print_signal_handler(st, SIGILL , buf, buflen);
2130   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2131   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2132   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2133   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2134   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2135   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2136   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2137   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2138 }
2139 
2140 static char saved_jvm_path[MAXPATHLEN] = { 0 };
2141 
2142 // Find the full path to the current module, libjvm.so
2143 void os::jvm_path(char *buf, jint buflen) {
2144   // Error checking.
2145   if (buflen < MAXPATHLEN) {
2146     assert(false, "must use a large-enough buffer");
2147     buf[0] = '\0';
2148     return;
2149   }
2150   // Lazy resolve the path to current module.
2151   if (saved_jvm_path[0] != 0) {
2152     strcpy(buf, saved_jvm_path);
2153     return;
2154   }
2155 
2156   Dl_info dlinfo;
2157   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2158   assert(ret != 0, "cannot locate libjvm");
2159   if (ret != 0 && dlinfo.dli_fname != NULL) {
2160     realpath((char *)dlinfo.dli_fname, buf);
2161   } else {
2162     buf[0] = '\0';
2163     return;
2164   }
2165 
2166   if (Arguments::sun_java_launcher_is_altjvm()) {
2167     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
2168     // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
2169     // If "/jre/lib/" appears at the right place in the string, then
2170     // assume we are installed in a JDK and we're done.  Otherwise, check
2171     // for a JAVA_HOME environment variable and fix up the path so it
2172     // looks like libjvm.so is installed there (append a fake suffix
2173     // hotspot/libjvm.so).
2174     const char *p = buf + strlen(buf) - 1;
2175     for (int count = 0; p > buf && count < 5; ++count) {
2176       for (--p; p > buf && *p != '/'; --p)
2177         /* empty */ ;
2178     }
2179 
2180     if (strncmp(p, "/jre/lib/", 9) != 0) {
2181       // Look for JAVA_HOME in the environment.
2182       char* java_home_var = ::getenv("JAVA_HOME");
2183       if (java_home_var != NULL && java_home_var[0] != 0) {
2184         char cpu_arch[12];
2185         char* jrelib_p;
2186         int   len;
2187         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2188 #ifdef _LP64
2189         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2190         if (strcmp(cpu_arch, "sparc") == 0) {
2191           strcat(cpu_arch, "v9");
2192         } else if (strcmp(cpu_arch, "i386") == 0) {
2193           strcpy(cpu_arch, "amd64");
2194         }
2195 #endif
2196         // Check the current module name "libjvm.so".
2197         p = strrchr(buf, '/');
2198         assert(strstr(p, "/libjvm") == p, "invalid library name");
2199 
2200         realpath(java_home_var, buf);
2201         // determine if this is a legacy image or modules image
2202         // modules image doesn't have "jre" subdirectory
2203         len = strlen(buf);
2204         assert(len < buflen, "Ran out of buffer space");
2205         jrelib_p = buf + len;
2206         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2207         if (0 != access(buf, F_OK)) {
2208           snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2209         }
2210 
2211         if (0 == access(buf, F_OK)) {
2212           // Use current module name "libjvm.so"
2213           len = strlen(buf);
2214           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2215         } else {
2216           // Go back to path of .so
2217           realpath((char *)dlinfo.dli_fname, buf);
2218         }
2219       }
2220     }
2221   }
2222 
2223   strncpy(saved_jvm_path, buf, MAXPATHLEN);
2224 }
2225 
2226 
2227 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2228   // no prefix required, not even "_"
2229 }
2230 
2231 
2232 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2233   // no suffix required
2234 }
2235 
2236 // This method is a copy of JDK's sysGetLastErrorString
2237 // from src/solaris/hpi/src/system_md.c
2238 
2239 size_t os::lasterror(char *buf, size_t len) {
2240   if (errno == 0)  return 0;
2241 
2242   const char *s = ::strerror(errno);
2243   size_t n = ::strlen(s);
2244   if (n >= len) {
2245     n = len - 1;
2246   }
2247   ::strncpy(buf, s, n);
2248   buf[n] = '\0';
2249   return n;
2250 }
2251 
2252 
2253 // sun.misc.Signal
2254 
2255 extern "C" {
2256   static void UserHandler(int sig, void *siginfo, void *context) {
2257     // Ctrl-C is pressed during error reporting, likely because the error
2258     // handler fails to abort. Let VM die immediately.
2259     if (sig == SIGINT && is_error_reported()) {
2260       os::die();
2261     }
2262 
2263     os::signal_notify(sig);
2264     // We do not need to reinstate the signal handler each time...
2265   }
2266 }
2267 
2268 void* os::user_handler() {
2269   return CAST_FROM_FN_PTR(void*, UserHandler);
2270 }
2271 
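     // Thin C++ wrapper around a Solaris sema_t.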
2272 class Semaphore : public StackObj {
2273  public:
2274   Semaphore();
2275   ~Semaphore();
2276   void signal();
2277   void wait();
2278   bool trywait();
2279   bool timedwait(unsigned int sec, int nsec);
2280  private:
2281   sema_t _semaphore;
2282 };
2283 
2284 
2285 Semaphore::Semaphore() {
2286   sema_init(&_semaphore, 0, NULL, NULL);
2287 }
2288 
2289 Semaphore::~Semaphore() {
2290   sema_destroy(&_semaphore);
2291 }
2292 
2293 void Semaphore::signal() {
2294   sema_post(&_semaphore);
2295 }
2296 
2297 void Semaphore::wait() {
2298   sema_wait(&_semaphore);
2299 }
2300 
2301 bool Semaphore::trywait() {
2302   return sema_trywait(&_semaphore) == 0;
2303 }
2304 
2305 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2306   struct timespec ts;
2307   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2308 
2309   while (1) {
2310     int result = sema_timedwait(&_semaphore, &ts);
2311     if (result == 0) {
2312       return true;
2313     } else if (errno == EINTR) {
2314       continue;
2315     } else if (errno == ETIME) {
2316       return false;
2317     } else {
2318       return false;
2319     }
2320   }
2321 }
2322 
2323 extern "C" {
2324   typedef void (*sa_handler_t)(int);
2325   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2326 }
2327 
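     // Install 'handler' for 'signal_number' via sigaction() and return the
     // previously installed handler, or (void *)-1 if registration failed.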
2328 void* os::signal(int signal_number, void* handler) {
2329   struct sigaction sigAct, oldSigAct;
2330   sigfillset(&(sigAct.sa_mask));
2331   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2332   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2333 
2334   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
2335     // -1 means registration failed
2336     return (void *)-1;
2337   }
2338 
2339   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2340 }
2341 
2342 void os::signal_raise(int signal_number) {
2343   raise(signal_number);
2344 }
2345 
2346 // The following code is moved from os.cpp for making this
2347 // code platform specific, which it is by its very nature.
2348 
2349 // a counter for each possible signal value
2350 static int Sigexit = 0;
2351 static int Maxlibjsigsigs;
2352 static jint *pending_signals = NULL;
2353 static int *preinstalled_sigs = NULL;
2354 static struct sigaction *chainedsigactions = NULL;
2355 static sema_t sig_sem;
2356 typedef int (*version_getting_t)();
2357 version_getting_t os::Solaris::get_libjsig_version = NULL;
2358 static int libjsigversion = 0;
2359 
2360 int os::sigexitnum_pd() {
2361   assert(Sigexit > 0, "signal memory not yet initialized");
2362   return Sigexit;
2363 }
2364 
2365 void os::Solaris::init_signal_mem() {
2366   // Initialize signal structures
2367   Maxsignum = SIGRTMAX;
2368   Sigexit = Maxsignum+1;
2369   assert(Maxsignum >0, "Unable to obtain max signal number");
2370 
2371   Maxlibjsigsigs = Maxsignum;
2372 
2373   // pending_signals has one int per signal
2374   // The additional signal is for SIGEXIT - exit signal to signal_thread
2375   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2376   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2377 
2378   if (UseSignalChaining) {
2379     chainedsigactions = (struct sigaction *)os::malloc(sizeof(struct sigaction)
2380                                                        * (Maxsignum + 1), mtInternal);
2381     memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2382     preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2383     memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2384   }
2385   ourSigFlags = (int*)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2386   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2387 }
2388 
2389 void os::signal_init_pd() {
2390   int ret;
2391 
2392   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2393   assert(ret == 0, "sema_init() failed");
2394 }
2395 
2396 void os::signal_notify(int signal_number) {
2397   int ret;
2398 
2399   Atomic::inc(&pending_signals[signal_number]);
2400   ret = ::sema_post(&sig_sem);
2401   assert(ret == 0, "sema_post() failed");
2402 }
2403 
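     // Atomically claim one pending signal and return its number. If
     // wait_for_signal is false, return -1 when nothing is pending; otherwise
     // block on sig_sem until a signal is posted, cooperating with external
     // thread suspension while blocked.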
2404 static int check_pending_signals(bool wait_for_signal) {
2405   int ret;
2406   while (true) {
2407     for (int i = 0; i < Sigexit + 1; i++) {
2408       jint n = pending_signals[i];
2409       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2410         return i;
2411       }
2412     }
2413     if (!wait_for_signal) {
2414       return -1;
2415     }
2416     JavaThread *thread = JavaThread::current();
2417     ThreadBlockInVM tbivm(thread);
2418 
2419     bool threadIsSuspended;
2420     do {
2421       thread->set_suspend_equivalent();
2422       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2423       while ((ret = ::sema_wait(&sig_sem)) == EINTR)
2424         ;
2425       assert(ret == 0, "sema_wait() failed");
2426 
2427       // were we externally suspended while we were waiting?
2428       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2429       if (threadIsSuspended) {
2430         // The semaphore has been incremented, but while we were waiting
2431         // another thread suspended us. We don't want to continue running
2432         // while suspended because that would surprise the thread that
2433         // suspended us.
2434         ret = ::sema_post(&sig_sem);
2435         assert(ret == 0, "sema_post() failed");
2436 
2437         thread->java_suspend_self();
2438       }
2439     } while (threadIsSuspended);
2440   }
2441 }
2442 
2443 int os::signal_lookup() {
2444   return check_pending_signals(false);
2445 }
2446 
2447 int os::signal_wait() {
2448   return check_pending_signals(true);
2449 }
2450 
2451 ////////////////////////////////////////////////////////////////////////////////
2452 // Virtual Memory
2453 
2454 static int page_size = -1;
2455 
2456 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2457 // clear this var if support is not available.
2458 static bool has_map_align = true;
2459 
2460 int os::vm_page_size() {
2461   assert(page_size != -1, "must call os::init");
2462   return page_size;
2463 }
2464 
2465 // Solaris allocates memory by pages.
2466 int os::vm_allocation_granularity() {
2467   assert(page_size != -1, "must call os::init");
2468   return page_size;
2469 }
2470 
2471 static bool recoverable_mmap_error(int err) {
2472   // See if the error is one we can let the caller handle. This
2473   // list of errno values comes from the Solaris mmap(2) man page.
2474   switch (err) {
2475   case EBADF:
2476   case EINVAL:
2477   case ENOTSUP:
2478     // let the caller deal with these errors
2479     return true;
2480 
2481   default:
2482     // Any remaining errors on this OS can cause our reserved mapping
2483     // to be lost. That can cause confusion where different data
2484     // structures think they have the same memory mapped. The worst
2485     // scenario is if both the VM and a library think they have the
2486     // same memory mapped.
2487     return false;
2488   }
2489 }
2490 
2491 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2492                                     int err) {
2493   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2494           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2495           strerror(err), err);
2496 }
2497 
2498 static void warn_fail_commit_memory(char* addr, size_t bytes,
2499                                     size_t alignment_hint, bool exec,
2500                                     int err) {
2501   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2502           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2503           alignment_hint, exec, strerror(err), err);
2504 }
2505 
2506 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2507   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2508   size_t size = bytes;
2509   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2510   if (res != NULL) {
2511     if (UseNUMAInterleaving) {
2512       numa_make_global(addr, bytes);
2513     }
2514     return 0;
2515   }
2516 
2517   int err = errno;  // save errno from mmap() call in mmap_chunk()
2518 
2519   if (!recoverable_mmap_error(err)) {
2520     warn_fail_commit_memory(addr, bytes, exec, err);
2521     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2522   }
2523 
2524   return err;
2525 }
2526 
2527 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2528   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2529 }
2530 
2531 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2532                                   const char* mesg) {
2533   assert(mesg != NULL, "mesg must be specified");
2534   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2535   if (err != 0) {
2536     // the caller wants all commit errors to exit with the specified mesg:
2537     warn_fail_commit_memory(addr, bytes, exec, err);
2538     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2539   }
2540 }
2541 
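     // Return the first entry of _page_sizes (kept in descending order) that
     // 'alignment' is a multiple of, i.e. the largest usable page size; falls
     // back to the default page size.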
2542 size_t os::Solaris::page_size_for_alignment(size_t alignment) {
2543   assert(is_size_aligned(alignment, (size_t) vm_page_size()),
2544          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
2545                  alignment, (size_t) vm_page_size()));
2546 
2547   for (int i = 0; _page_sizes[i] != 0; i++) {
2548     if (is_size_aligned(alignment, _page_sizes[i])) {
2549       return _page_sizes[i];
2550     }
2551   }
2552 
2553   return (size_t) vm_page_size();
2554 }
2555 
2556 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2557                                     size_t alignment_hint, bool exec) {
2558   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2559   if (err == 0 && UseLargePages && alignment_hint > 0) {
2560     assert(is_size_aligned(bytes, alignment_hint),
2561            err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint));
2562 
2563     // The syscall memcntl requires an exact page size (see man memcntl for details).
2564     size_t page_size = page_size_for_alignment(alignment_hint);
2565     if (page_size > (size_t) vm_page_size()) {
2566       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2567     }
2568   }
2569   return err;
2570 }
2571 
2572 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2573                           bool exec) {
2574   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2575 }
2576 
2577 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2578                                   size_t alignment_hint, bool exec,
2579                                   const char* mesg) {
2580   assert(mesg != NULL, "mesg must be specified");
2581   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2582   if (err != 0) {
2583     // the caller wants all commit errors to exit with the specified mesg:
2584     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2585     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2586   }
2587 }
2588 
2589 // Uncommit the pages in a specified region.
2590 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2591   if (madvise(addr, bytes, MADV_FREE) < 0) {
2592     debug_only(warning("MADV_FREE failed."));
2593     return;
2594   }
2595 }
2596 
2597 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2598   return os::commit_memory(addr, size, !ExecMem);
2599 }
2600 
2601 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2602   return os::uncommit_memory(addr, size);
2603 }
2604 
2605 // Change the page size in a given range.
2606 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2607   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2608   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2609   if (UseLargePages) {
2610     Solaris::setup_large_pages(addr, bytes, alignment_hint);
2611   }
2612 }
2613 
2614 // Tell the OS to make the range local to the first-touching LWP
2615 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2616   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2617   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2618     debug_only(warning("MADV_ACCESS_LWP failed."));
2619   }
2620 }
2621 
2622 // Tell the OS that this range would be accessed from different LWPs.
2623 void os::numa_make_global(char *addr, size_t bytes) {
2624   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2625   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2626     debug_only(warning("MADV_ACCESS_MANY failed."));
2627   }
2628 }
2629 
2630 // Get the number of the locality groups.
2631 size_t os::numa_get_groups_num() {
2632   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2633   return n != -1 ? n : 1;
2634 }
2635 
2636 // Get a list of leaf locality groups. A leaf lgroup is a group that
2637 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
2638 // board. An LWP is assigned to one of these groups upon creation.
2639 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2640   if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2641     ids[0] = 0;
2642     return 1;
2643   }
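       // Breadth-first walk of the lgroup hierarchy. ids[] serves both as the
       // work queue (entries cur..top are still to be expanded) and as the
       // result list (entries 0..bottom are leaves that have memory).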
2644   int result_size = 0, top = 1, bottom = 0, cur = 0;
2645   for (int k = 0; k < size; k++) {
2646     int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2647                                    (Solaris::lgrp_id_t*)&ids[top], size - top);
2648     if (r == -1) {
2649       ids[0] = 0;
2650       return 1;
2651     }
2652     if (!r) {
2653       // That's a leaf node.
2654       assert(bottom <= cur, "Sanity check");
2655       // Check if the node has memory
2656       if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2657                                   NULL, 0, LGRP_RSRC_MEM) > 0) {
2658         ids[bottom++] = ids[cur];
2659       }
2660     }
2661     top += r;
2662     cur++;
2663   }
2664   if (bottom == 0) {
2665     // Handle the situation when the OS reports no memory available.
2666     // Assume UMA architecture.
2667     ids[0] = 0;
2668     return 1;
2669   }
2670   return bottom;
2671 }
2672 
2673 // Detect the topology change. Typically happens during CPU plugging-unplugging.
2674 bool os::numa_topology_changed() {
2675   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2676   if (is_stale != -1 && is_stale) {
2677     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2678     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2679     assert(c != 0, "Failure to initialize LGRP API");
2680     Solaris::set_lgrp_cookie(c);
2681     return true;
2682   }
2683   return false;
2684 }
2685 
2686 // Get the group id of the current LWP.
2687 int os::numa_get_group_id() {
2688   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2689   if (lgrp_id == -1) {
2690     return 0;
2691   }
2692   const int size = os::numa_get_groups_num();
2693   int *ids = (int*)alloca(size * sizeof(int));
2694 
2695   // Get the ids of all lgroups with memory; r is the count.
2696   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2697                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2698   if (r <= 0) {
2699     return 0;
2700   }
2701   return ids[os::random() % r];
2702 }
2703 
2704 // Request information about the page.
2705 bool os::get_page_info(char *start, page_info* info) {
2706   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2707   uint64_t addr = (uintptr_t)start;
2708   uint64_t outdata[2];
2709   uint_t validity = 0;
2710 
2711   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2712     return false;
2713   }
2714 
2715   info->size = 0;
2716   info->lgrp_id = -1;
2717 
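       // Per meminfo(2), bit 0 of 'validity' means the address itself was valid;
       // each following bit corresponds, in order, to an entry of info_types, so
       // bit 1 (0x2) covers MEMINFO_VLGRP and bit 2 (0x4) covers MEMINFO_VPAGESIZE.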
2718   if ((validity & 1) != 0) {
2719     if ((validity & 2) != 0) {
2720       info->lgrp_id = outdata[0];
2721     }
2722     if ((validity & 4) != 0) {
2723       info->size = outdata[1];
2724     }
2725     return true;
2726   }
2727   return false;
2728 }
2729 
2730 // Scan the pages from start to end until a page different than
2731 // the one described in the info parameter is encountered.
2732 char *os::scan_pages(char *start, char* end, page_info* page_expected,
2733                      page_info* page_found) {
2734   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2735   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2736   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2737   uint_t validity[MAX_MEMINFO_CNT];
2738 
2739   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2740   uint64_t p = (uint64_t)start;
2741   while (p < (uint64_t)end) {
2742     addrs[0] = p;
2743     size_t addrs_count = 1;
2744     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2745       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2746       addrs_count++;
2747     }
2748 
2749     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2750       return NULL;
2751     }
2752 
2753     size_t i = 0;
2754     for (; i < addrs_count; i++) {
2755       if ((validity[i] & 1) != 0) {
2756         if ((validity[i] & 4) != 0) {
2757           if (outdata[types * i + 1] != page_expected->size) {
2758             break;
2759           }
2760         } else if (page_expected->size != 0) {
2761           break;
2762         }
2763 
2764         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2765           if (outdata[types * i] != page_expected->lgrp_id) {
2766             break;
2767           }
2768         }
2769       } else {
2770         return NULL;
2771       }
2772     }
2773 
2774     if (i < addrs_count) {
2775       if ((validity[i] & 2) != 0) {
2776         page_found->lgrp_id = outdata[types * i];
2777       } else {
2778         page_found->lgrp_id = -1;
2779       }
2780       if ((validity[i] & 4) != 0) {
2781         page_found->size = outdata[types * i + 1];
2782       } else {
2783         page_found->size = 0;
2784       }
2785       return (char*)addrs[i];
2786     }
2787 
2788     p = addrs[addrs_count - 1] + page_size;
2789   }
2790   return end;
2791 }
2792 
2793 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2794   size_t size = bytes;
2795   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2796   // uncommitted page. Otherwise, the read/write might succeed if we
2797   // have enough swap space to back the physical page.
2798   return
2799     NULL != Solaris::mmap_chunk(addr, size,
2800                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2801                                 PROT_NONE);
2802 }
2803 
2804 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2805   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2806 
2807   if (b == MAP_FAILED) {
2808     return NULL;
2809   }
2810   return b;
2811 }
2812 
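     // Reserve (but do not commit) an anonymous MAP_NORESERVE mapping. For
     // non-fixed requests with a large enough alignment hint, MAP_ALIGN (when
     // available) lets the kernel choose a suitably aligned address.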
2813 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes,
2814                              size_t alignment_hint, bool fixed) {
2815   char* addr = requested_addr;
2816   int flags = MAP_PRIVATE | MAP_NORESERVE;
2817 
2818   assert(!(fixed && (alignment_hint > 0)),
2819          "alignment hint meaningless with fixed mmap");
2820 
2821   if (fixed) {
2822     flags |= MAP_FIXED;
2823   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
2824     flags |= MAP_ALIGN;
2825     addr = (char*) alignment_hint;
2826   }
2827 
2828   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2829   // uncommitted page. Otherwise, the read/write might succeed if we
2830   // have enough swap space to back the physical page.
2831   return mmap_chunk(addr, bytes, flags, PROT_NONE);
2832 }
2833 
2834 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
2835                             size_t alignment_hint) {
2836   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint,
2837                                   (requested_addr != NULL));
2838 
2839   guarantee(requested_addr == NULL || requested_addr == addr,
2840             "OS failed to return requested mmap address.");
2841   return addr;
2842 }
2843 
2844 // Reserve memory at an arbitrary address, only if that area is
2845 // available (and not reserved for something else).
2846 
2847 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2848   const int max_tries = 10;
2849   char* base[max_tries];
2850   size_t size[max_tries];
2851 
2852   // Solaris adds a gap between mmap'ed regions.  The size of the gap
2853   // is dependent on the requested size and the MMU.  Our initial gap
2854   // value here is just a guess and will be corrected later.
2855   bool had_top_overlap = false;
2856   bool have_adjusted_gap = false;
2857   size_t gap = 0x400000;
2858 
2859   // Assert only that the size is a multiple of the page size, since
2860   // that's all that mmap requires, and since that's all we really know
2861   // about at this low abstraction level.  If we need higher alignment,
2862   // we can either pass an alignment to this method or verify alignment
2863   // in one of the methods further up the call chain.  See bug 5044738.
2864   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2865 
2866   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2867   // Give it a try, if the kernel honors the hint we can return immediately.
2868   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2869 
2870   volatile int err = errno;
2871   if (addr == requested_addr) {
2872     return addr;
2873   } else if (addr != NULL) {
2874     pd_unmap_memory(addr, bytes);
2875   }
2876 
2877   if (PrintMiscellaneous && Verbose) {
2878     char buf[256];
2879     buf[0] = '\0';
2880     if (addr == NULL) {
2881       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
2882     }
2883     warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2884             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2885             "%s", bytes, requested_addr, addr, buf);
2886   }
2887 
2888   // Address hint method didn't work.  Fall back to the old method.
2889   // In theory, once SNV becomes our oldest supported platform, this
2890   // code will no longer be needed.
2891   //
2892   // Repeatedly allocate blocks until the block is allocated at the
2893   // right spot. Give up after max_tries.
2894   int i;
2895   for (i = 0; i < max_tries; ++i) {
2896     base[i] = reserve_memory(bytes);
2897 
2898     if (base[i] != NULL) {
2899       // Is this the block we wanted?
2900       if (base[i] == requested_addr) {
2901         size[i] = bytes;
2902         break;
2903       }
2904 
2905       // check that the gap value is right
2906       if (had_top_overlap && !have_adjusted_gap) {
2907         size_t actual_gap = base[i-1] - base[i] - bytes;
2908         if (gap != actual_gap) {
2909           // adjust the gap value and retry the last 2 allocations
2910           assert(i > 0, "gap adjustment code problem");
2911           have_adjusted_gap = true;  // adjust the gap only once, just in case
2912           gap = actual_gap;
2913           if (PrintMiscellaneous && Verbose) {
2914             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2915           }
2916           unmap_memory(base[i], bytes);
2917           unmap_memory(base[i-1], size[i-1]);
2918           i-=2;
2919           continue;
2920         }
2921       }
2922 
2923       // Does this overlap the block we wanted? Give back the overlapped
2924       // parts and try again.
2925       //
2926       // There is still a bug in this code: if top_overlap == bytes,
2927       // the overlap is offset from requested region by the value of gap.
2928       // In this case giving back the overlapped part will not work,
2929       // because we'll give back the entire block at base[i] and
2930       // therefore the subsequent allocation will not generate a new gap.
2931       // This could be fixed with a new algorithm that used larger
2932       // or variable size chunks to find the requested region -
2933       // but such a change would introduce additional complications.
2934       // It's rare enough that the planets align for this bug,
2935       // so we'll just wait for a fix for 6204603/5003415 which
2936       // will provide a mmap flag to allow us to avoid this business.
2937 
2938       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2939       if (top_overlap >= 0 && top_overlap < bytes) {
2940         had_top_overlap = true;
2941         unmap_memory(base[i], top_overlap);
2942         base[i] += top_overlap;
2943         size[i] = bytes - top_overlap;
2944       } else {
2945         size_t bottom_overlap = base[i] + bytes - requested_addr;
2946         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2947           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
2948             warning("attempt_reserve_memory_at: possible alignment bug");
2949           }
2950           unmap_memory(requested_addr, bottom_overlap);
2951           size[i] = bytes - bottom_overlap;
2952         } else {
2953           size[i] = bytes;
2954         }
2955       }
2956     }
2957   }
2958 
2959   // Give back the unused reserved pieces.
2960 
2961   for (int j = 0; j < i; ++j) {
2962     if (base[j] != NULL) {
2963       unmap_memory(base[j], size[j]);
2964     }
2965   }
2966 
2967   return (i < max_tries) ? requested_addr : NULL;
2968 }
2969 
2970 bool os::pd_release_memory(char* addr, size_t bytes) {
2971   size_t size = bytes;
2972   return munmap(addr, size) == 0;
2973 }
2974 
2975 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
2976   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
2977          "addr must be page aligned");
2978   int retVal = mprotect(addr, bytes, prot);
2979   return retVal == 0;
2980 }
2981 
2982 // Protect memory (Used to pass readonly pages through
2983 // JNI GetArray<type>Elements with empty arrays.)
2984 // Also, used for serialization page and for compressed oops null pointer
2985 // checking.
2986 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2987                         bool is_committed) {
2988   unsigned int p = 0;
2989   switch (prot) {
2990   case MEM_PROT_NONE: p = PROT_NONE; break;
2991   case MEM_PROT_READ: p = PROT_READ; break;
2992   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2993   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2994   default:
2995     ShouldNotReachHere();
2996   }
2997   // is_committed is unused.
2998   return solaris_mprotect(addr, bytes, p);
2999 }
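// Illustrative usage (hypothetical caller and variable names): to make a
// single committed page read-only, as is done for the serialization page:
//   os::protect_memory(page_addr, os::vm_page_size(), os::MEM_PROT_READ, true);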
3000 
3001 // guard_memory and unguard_memory only happen within stack guard pages.
3002 // Since ISM pertains only to the heap, guard and unguard memory should not
3003 // happen with an ISM region.
3004 bool os::guard_memory(char* addr, size_t bytes) {
3005   return solaris_mprotect(addr, bytes, PROT_NONE);
3006 }
3007 
3008 bool os::unguard_memory(char* addr, size_t bytes) {
3009   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
3010 }
3011 
3012 // Large page support
3013 static size_t _large_page_size = 0;
3014 
3015 // Insertion sort for small arrays (descending order).
3016 static void insertion_sort_descending(size_t* array, int len) {
3017   for (int i = 0; i < len; i++) {
3018     size_t val = array[i];
3019     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
3020       size_t tmp = array[key];
3021       array[key] = array[key - 1];
3022       array[key - 1] = tmp;
3023     }
3024   }
3025 }
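// For example (illustrative), applied to {64*K, 4*M, 8*K} with len == 3 this
// yields {4*M, 64*K, 8*K}, i.e. the page sizes in descending order.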
3026 
3027 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
3028   const unsigned int usable_count = VM_Version::page_size_count();
3029   if (usable_count == 1) {
3030     return false;
3031   }
3032 
3033   // Find the right getpagesizes interface.  When Solaris 11 is the minimum
3034   // build platform, getpagesizes() (without the '2') can be called directly.
3035   typedef int (*gps_t)(size_t[], int);
3036   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3037   if (gps_func == NULL) {
3038     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3039     if (gps_func == NULL) {
3040       if (warn) {
3041         warning("MPSS is not supported by the operating system.");
3042       }
3043       return false;
3044     }
3045   }
3046 
3047   // Fill the array of page sizes.
3048   int n = (*gps_func)(_page_sizes, page_sizes_max);
3049   assert(n > 0, "Solaris bug?");
3050 
3051   if (n == page_sizes_max) {
3052     // Add a sentinel value (necessary only if the array was completely filled
3053     // since it is static (zeroed at initialization)).
3054     _page_sizes[--n] = 0;
3055     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3056   }
3057   assert(_page_sizes[n] == 0, "missing sentinel");
3058   trace_page_sizes("available page sizes", _page_sizes, n);
3059 
3060   if (n == 1) return false;     // Only one page size available.
3061 
3062   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3063   // select up to usable_count elements.  First sort the array, find the first
3064   // acceptable value, then copy the usable sizes to the top of the array and
3065   // trim the rest.  Make sure to include the default page size :-).
3066   //
3067   // A better policy could get rid of the 4M limit by taking the sizes of the
3068   // important VM memory regions (java heap and possibly the code cache) into
3069   // account.
3070   insertion_sort_descending(_page_sizes, n);
3071   const size_t size_limit =
3072     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3073   int beg;
3074   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */;
3075   const int end = MIN2((int)usable_count, n) - 1;
3076   for (int cur = 0; cur < end; ++cur, ++beg) {
3077     _page_sizes[cur] = _page_sizes[beg];
3078   }
3079   _page_sizes[end] = vm_page_size();
3080   _page_sizes[end + 1] = 0;
3081 
3082   if (_page_sizes[end] > _page_sizes[end - 1]) {
3083     // Default page size is not the smallest; sort again.
3084     insertion_sort_descending(_page_sizes, end + 1);
3085   }
3086   *page_size = _page_sizes[0];
3087 
3088   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3089   return true;
3090 }
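// Illustrative walk-through (hypothetical numbers): if getpagesizes() reports
// {8K, 64K, 4M, 256M}, usable_count is 3, LargePageSizeInBytes is at its
// default and vm_page_size() is 8K, then after the descending sort the 256M
// entry is skipped (it exceeds the 4M limit), the usable sizes become
// {4M, 64K, 8K} with a trailing 0 sentinel, and *page_size is set to 4M.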
3091 
3092 void os::large_page_init() {
3093   if (UseLargePages) {
3094     // print a warning if any large page related flag is specified on command line
3095     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
3096                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3097 
3098     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3099   }
3100 }
3101 
3102 bool os::Solaris::is_valid_page_size(size_t bytes) {
3103   for (int i = 0; _page_sizes[i] != 0; i++) {
3104     if (_page_sizes[i] == bytes) {
3105       return true;
3106     }
3107   }
3108   return false;
3109 }
3110 
3111 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3112   assert(is_valid_page_size(align), err_msg(SIZE_FORMAT " is not a valid page size", align));
3113   assert(is_ptr_aligned((void*) start, align),
3114          err_msg(PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align));
3115   assert(is_size_aligned(bytes, align),
3116          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align));
3117 
3118   // Signal to OS that we want large pages for addresses
3119   // from addr, addr + bytes
3120   struct memcntl_mha mpss_struct;
3121   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3122   mpss_struct.mha_pagesize = align;
3123   mpss_struct.mha_flags = 0;
3124   // Upon successful completion, memcntl() returns 0
3125   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3126     debug_only(warning("Attempt to use MPSS failed."));
3127     return false;
3128   }
3129   return true;
3130 }
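// Illustrative usage (hypothetical caller and variable names): advise the
// kernel to back an aligned region with 4M pages, assuming 4M is one of the
// sizes reported in _page_sizes:
//   os::Solaris::setup_large_pages(heap_base, heap_bytes, 4 * M);
// Both heap_base and heap_bytes must be 4M-aligned, per the asserts above.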
3131 
3132 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3133   fatal("os::reserve_memory_special should not be called on Solaris.");
3134   return NULL;
3135 }
3136 
3137 bool os::release_memory_special(char* base, size_t bytes) {
3138   fatal("os::release_memory_special should not be called on Solaris.");
3139   return false;
3140 }
3141 
3142 size_t os::large_page_size() {
3143   return _large_page_size;
3144 }
3145 
3146 // MPSS allows the application to commit large page memory on demand; with ISM
3147 // the entire memory region must be allocated as shared memory.
3148 bool os::can_commit_large_page_memory() {
3149   return true;
3150 }
3151 
3152 bool os::can_execute_large_page_memory() {
3153   return true;
3154 }
3155 
3156 // Read calls from inside the vm need to perform state transitions
3157 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3158   size_t res;
3159   JavaThread* thread = (JavaThread*)Thread::current();
3160   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
3161   ThreadBlockInVM tbiv(thread);
3162   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3163   return res;
3164 }
3165 
3166 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3167   size_t res;
3168   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
3169          "Assumed _thread_in_native");
3170   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3171   return res;
3172 }
3173 
3174 void os::naked_short_sleep(jlong ms) {
3175   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3176 
3177   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3178   // Solaris requires -lrt for this.
3179   usleep((ms * 1000));
3180 
3181   return;
3182 }
3183 
3184 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3185 void os::infinite_sleep() {
3186   while (true) {    // sleep forever ...
3187     ::sleep(100);   // ... 100 seconds at a time
3188   }
3189 }
3190 
3191 // Used to convert frequent JVM_Yield() to nops
3192 bool os::dont_yield() {
3193   if (DontYieldALot) {
3194     static hrtime_t last_time = 0;
3195     hrtime_t diff = getTimeNanos() - last_time;
3196 
3197     if (diff < DontYieldALotInterval * 1000000) {
3198       return true;
3199     }
3200 
3201     last_time += diff;
3202 
3203     return false;
3204   } else {
3205     return false;
3206   }
3207 }
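// For example (illustrative), with DontYieldALotInterval set to 10 the check
// above compares the elapsed nanoseconds against 10 * 1000000, so any yield
// requested within 10 ms of the previous one is turned into a nop.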
3208 
3209 // Note that yield semantics are defined by the scheduling class to which
3210 // the thread currently belongs.  Typically, yield will _not_ yield to
3211 // other equal or higher priority threads that reside on the dispatch queues
3212 // of other CPUs.
3213 
3214 void os::naked_yield() {
3215   thr_yield();
3216 }
3217 
3218 // Interface for setting lwp priorities.  If we are using T2 libthread,
3219 // which forces the use of BoundThreads, or we manually set UseBoundThreads,
3220 // all of our threads will be assigned to real lwps.  Using the thr_setprio
3221 // function is meaningless in this mode, so we must adjust the real lwp's priority.
3222 // The routines below implement the getting and setting of lwp priorities.
3223 //
3224 // Note: T2 is now the only supported libthread. The UseBoundThreads flag is
3225 //       being deprecated and all threads are now BoundThreads.
3226 //
3227 // Note: There are three priority scales used on Solaris.  Java priorities,
3228 //       which range from 1 to 10; the libthread "thr_setprio" scale, which
3229 //       ranges from 0 to 127; and the current scheduling class of the process
3230 //       we are running in, which typically ranges from -60 to +60.
3231 //       The setting of the lwp priorities is done after a call to thr_setprio,
3232 //       so Java priorities are mapped to libthread priorities and we map from
3233 //       the latter to lwp priorities.  We don't keep priorities stored as
3234 //       Java priorities since some of our worker threads want to set priorities
3235 //       higher than all Java threads.
3236 //
3237 // For related information:
3238 // (1)  man -s 2 priocntl
3239 // (2)  man -s 4 priocntl
3240 // (3)  man dispadmin
3241 // =    librt.so
3242 // =    libthread/common/rtsched.c - thrp_setlwpprio().
3243 // =    ps -cL <pid> ... to validate priority.
3244 // =    sched_get_priority_min and _max
3245 //              pthread_create
3246 //              sched_setparam
3247 //              pthread_setschedparam
3248 //
3249 // Assumptions:
3250 // +    We assume that all threads in the process belong to the same
3251 //              scheduling class, i.e., a homogeneous process.
3252 // +    Must be root or in the IA group to change the "interactive" attribute.
3253 //              Priocntl() will fail silently.  The only indication of failure is when
3254 //              we read-back the value and notice that it hasn't changed.
3255 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3256 // +    For RT, change timeslice as well.  Invariant:
3257 //              constant "priority integral"
3258 //              Konst == TimeSlice * (60-Priority)
3259 //              Given a priority, compute appropriate timeslice.
3260 // +    Higher numerical values have higher priority.
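// For example (illustrative, with a hypothetical Konst of 1000), the RT
// invariant above gives: priority 50 -> TimeSlice = 1000 / (60 - 50) = 100,
// while priority 40 -> TimeSlice = 1000 / (60 - 40) = 50, so higher priorities
// also receive longer timeslices.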
3261 
3262 // sched class attributes
3263 typedef struct {
3264   int   schedPolicy;              // classID
3265   int   maxPrio;
3266   int   minPrio;
3267 } SchedInfo;
3268 
3269 
3270 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3271 
3272 #ifdef ASSERT
3273 static int  ReadBackValidate = 1;
3274 #endif
3275 static int  myClass     = 0;
3276 static int  myMin       = 0;
3277 static int  myMax       = 0;
3278 static int  myCur       = 0;
3279 static bool priocntl_enable = false;
3280 
3281 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
3282 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3283 
3284 
3285 // lwp_priocntl_init
3286 //
3287 // Try to determine the priority scale for our process.
3288 //
3289 // Return errno or 0 if OK.
3290 //
3291 static int lwp_priocntl_init() {
3292   int rslt;
3293   pcinfo_t ClassInfo;
3294   pcparms_t ParmInfo;
3295   int i;
3296 
3297   if (!UseThreadPriorities) return 0;
3298 
3299   // If ThreadPriorityPolicy is 1, switch tables
3300   if (ThreadPriorityPolicy == 1) {
3301     for (i = 0; i < CriticalPriority+1; i++)
3302       os::java_to_os_priority[i] = prio_policy1[i];
3303   }
3304   if (UseCriticalJavaThreadPriority) {
3305     // MaxPriority always maps to the FX scheduling class and criticalPrio.
3306     // See set_native_priority() and set_lwp_class_and_priority().
3307     // Save original MaxPriority mapping in case attempt to
3308     // use critical priority fails.
3309     java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3310     // Set negative to distinguish from other priorities
3311     os::java_to_os_priority[MaxPriority] = -criticalPrio;
3312   }
3313 
3314   // Get IDs for a set of well-known scheduling classes.
3315   // TODO-FIXME: GETCLINFO returns the current # of classes in the
3316   // system.  We should have a loop that iterates over the
3317   // classID values, which are known to be "small" integers.
3318 
3319   strcpy(ClassInfo.pc_clname, "TS");
3320   ClassInfo.pc_cid = -1;
3321   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3322   if (rslt < 0) return errno;
3323   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3324   tsLimits.schedPolicy = ClassInfo.pc_cid;
3325   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3326   tsLimits.minPrio = -tsLimits.maxPrio;
3327 
3328   strcpy(ClassInfo.pc_clname, "IA");
3329   ClassInfo.pc_cid = -1;
3330   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3331   if (rslt < 0) return errno;
3332   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3333   iaLimits.schedPolicy = ClassInfo.pc_cid;
3334   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3335   iaLimits.minPrio = -iaLimits.maxPrio;
3336 
3337   strcpy(ClassInfo.pc_clname, "RT");
3338   ClassInfo.pc_cid = -1;
3339   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3340   if (rslt < 0) return errno;
3341   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3342   rtLimits.schedPolicy = ClassInfo.pc_cid;
3343   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3344   rtLimits.minPrio = 0;
3345 
3346   strcpy(ClassInfo.pc_clname, "FX");
3347   ClassInfo.pc_cid = -1;
3348   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3349   if (rslt < 0) return errno;
3350   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3351   fxLimits.schedPolicy = ClassInfo.pc_cid;
3352   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3353   fxLimits.minPrio = 0;
3354 
3355   // Query our "current" scheduling class.
3356   // This will normally be IA, TS or, rarely, FX or RT.
3357   memset(&ParmInfo, 0, sizeof(ParmInfo));
3358   ParmInfo.pc_cid = PC_CLNULL;
3359   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3360   if (rslt < 0) return errno;
3361   myClass = ParmInfo.pc_cid;
3362 
3363   // We now know our scheduling classId, get specific information
3364   // about the class.
3365   ClassInfo.pc_cid = myClass;
3366   ClassInfo.pc_clname[0] = 0;
3367   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3368   if (rslt < 0) return errno;
3369 
3370   if (ThreadPriorityVerbose) {
3371     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3372   }
3373 
3374   memset(&ParmInfo, 0, sizeof(pcparms_t));
3375   ParmInfo.pc_cid = PC_CLNULL;
3376   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3377   if (rslt < 0) return errno;
3378 
3379   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3380     myMin = rtLimits.minPrio;
3381     myMax = rtLimits.maxPrio;
3382   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3383     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3384     myMin = iaLimits.minPrio;
3385     myMax = iaLimits.maxPrio;
3386     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3387   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3388     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3389     myMin = tsLimits.minPrio;
3390     myMax = tsLimits.maxPrio;
3391     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3392   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3393     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3394     myMin = fxLimits.minPrio;
3395     myMax = fxLimits.maxPrio;
3396     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3397   } else {
3398     // No clue - punt
3399     if (ThreadPriorityVerbose) {
3400       tty->print_cr("Unknown scheduling class: %s ... \n",
3401                     ClassInfo.pc_clname);
3402     }
3403     return EINVAL;      // no clue, punt
3404   }
3405 
3406   if (ThreadPriorityVerbose) {
3407     tty->print_cr("Thread priority Range: [%d..%d]\n", myMin, myMax);
3408   }
3409 
3410   priocntl_enable = true;  // Enable changing priorities
3411   return 0;
3412 }
3413 
3414 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3415 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3416 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3417 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3418 
3419 
3420 // scale_to_lwp_priority
3421 //
3422 // Convert from the libthread "thr_setprio" scale to our current
3423 // lwp scheduling class scale.
3424 //
3425 static int scale_to_lwp_priority(int rMin, int rMax, int x) {
3426   int v;
3427 
3428   if (x == 127) return rMax;            // avoid round-down
3429   v = (((x*(rMax-rMin)))/128)+rMin;
3430   return v;
3431 }
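// For example (illustrative), scaling libthread priority 64 into a TS lwp
// range of [-60, 60]: v = (64 * (60 - (-60))) / 128 + (-60) = 60 - 60 = 0,
// i.e. the middle of the libthread scale lands in the middle of the lwp scale.
// The x == 127 special case maps the top of the scale exactly to rMax.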
3432 
3433 
3434 // set_lwp_class_and_priority
3435 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3436                                int newPrio, int new_class, bool scale) {
3437   int rslt;
3438   int Actual, Expected, prv;
3439   pcparms_t ParmInfo;                   // for GET-SET
3440 #ifdef ASSERT
3441   pcparms_t ReadBack;                   // for readback
3442 #endif
3443 
3444   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3445   // Query current values.
3446   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3447   // Cache "pcparms_t" in global ParmCache.
3448   // TODO: elide set-to-same-value
3449 
3450   // If something went wrong on init, don't change priorities.
3451   if (!priocntl_enable) {
3452     if (ThreadPriorityVerbose) {
3453       tty->print_cr("Trying to set priority but init failed, ignoring");
3454     }
3455     return EINVAL;
3456   }
3457 
3458   // If the lwp hasn't started yet, just return;
3459   // the _start routine will call us again.
3460   if (lwpid <= 0) {
3461     if (ThreadPriorityVerbose) {
3462       tty->print_cr("deferring the set_lwp_class_and_priority of thread "
3463                     INTPTR_FORMAT " to %d, lwpid not set",
3464                     ThreadID, newPrio);
3465     }
3466     return 0;
3467   }
3468 
3469   if (ThreadPriorityVerbose) {
3470     tty->print_cr ("set_lwp_class_and_priority("
3471                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3472                    ThreadID, lwpid, newPrio);
3473   }
3474 
3475   memset(&ParmInfo, 0, sizeof(pcparms_t));
3476   ParmInfo.pc_cid = PC_CLNULL;
3477   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3478   if (rslt < 0) return errno;
3479 
3480   int cur_class = ParmInfo.pc_cid;
3481   ParmInfo.pc_cid = (id_t)new_class;
3482 
3483   if (new_class == rtLimits.schedPolicy) {
3484     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3485     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3486                                                        rtLimits.maxPrio, newPrio)
3487                                : newPrio;
3488     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3489     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3490     if (ThreadPriorityVerbose) {
3491       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3492     }
3493   } else if (new_class == iaLimits.schedPolicy) {
3494     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3495     int maxClamped     = MIN2(iaLimits.maxPrio,
3496                               cur_class == new_class
3497                               ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3498     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3499                                                        maxClamped, newPrio)
3500                                : newPrio;
3501     iaInfo->ia_uprilim = cur_class == new_class
3502                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3503     iaInfo->ia_mode    = IA_NOCHANGE;
3504     if (ThreadPriorityVerbose) {
3505       tty->print_cr("IA: [%d...%d] %d->%d\n",
3506                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3507     }
3508   } else if (new_class == tsLimits.schedPolicy) {
3509     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3510     int maxClamped     = MIN2(tsLimits.maxPrio,
3511                               cur_class == new_class
3512                               ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3513     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3514                                                        maxClamped, newPrio)
3515                                : newPrio;
3516     tsInfo->ts_uprilim = cur_class == new_class
3517                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3518     if (ThreadPriorityVerbose) {
3519       tty->print_cr("TS: [%d...%d] %d->%d\n",
3520                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3521     }
3522   } else if (new_class == fxLimits.schedPolicy) {
3523     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3524     int maxClamped     = MIN2(fxLimits.maxPrio,
3525                               cur_class == new_class
3526                               ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3527     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3528                                                        maxClamped, newPrio)
3529                                : newPrio;
3530     fxInfo->fx_uprilim = cur_class == new_class
3531                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3532     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3533     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3534     if (ThreadPriorityVerbose) {
3535       tty->print_cr("FX: [%d...%d] %d->%d\n",
3536                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3537     }
3538   } else {
3539     if (ThreadPriorityVerbose) {
3540       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3541     }
3542     return EINVAL;    // no clue, punt
3543   }
3544 
3545   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3546   if (ThreadPriorityVerbose && rslt) {
3547     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3548   }
3549   if (rslt < 0) return errno;
3550 
3551 #ifdef ASSERT
3552   // Sanity check: read back what we just attempted to set.
3553   // In theory it could have changed in the interim ...
3554   //
3555   // The priocntl system call is tricky.
3556   // Sometimes it'll validate the priority value argument and
3557   // return EINVAL if unhappy.  At other times it fails silently.
3558   // Readbacks are prudent.
3559 
3560   if (!ReadBackValidate) return 0;
3561 
3562   memset(&ReadBack, 0, sizeof(pcparms_t));
3563   ReadBack.pc_cid = PC_CLNULL;
3564   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3565   assert(rslt >= 0, "priocntl failed");
3566   Actual = Expected = 0xBAD;
3567   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3568   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3569     Actual   = RTPRI(ReadBack)->rt_pri;
3570     Expected = RTPRI(ParmInfo)->rt_pri;
3571   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3572     Actual   = IAPRI(ReadBack)->ia_upri;
3573     Expected = IAPRI(ParmInfo)->ia_upri;
3574   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3575     Actual   = TSPRI(ReadBack)->ts_upri;
3576     Expected = TSPRI(ParmInfo)->ts_upri;
3577   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3578     Actual   = FXPRI(ReadBack)->fx_upri;
3579     Expected = FXPRI(ParmInfo)->fx_upri;
3580   } else {
3581     if (ThreadPriorityVerbose) {
3582       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3583                     ParmInfo.pc_cid);
3584     }
3585   }
3586 
3587   if (Actual != Expected) {
3588     if (ThreadPriorityVerbose) {
3589       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3590                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3591     }
3592   }
3593 #endif
3594 
3595   return 0;
3596 }
3597 
3598 // Solaris only gives access to 128 real priorities at a time,
3599 // so we expand Java's ten to fill this range.  This would be better
3600 // if we dynamically adjusted relative priorities.
3601 //
3602 // The ThreadPriorityPolicy option allows us to select 2 different
3603 // priority scales.
3604 //
3605 // ThreadPriorityPolicy=0
3606 // Since Solaris' default priority is MaximumPriority, we do not
3607 // set a priority lower than Max unless a priority lower than
3608 // NormPriority is requested.
3609 //
3610 // ThreadPriorityPolicy=1
3611 // This mode causes the priority table to get filled with
3612 // linear values.  NormPriority gets mapped to 50% of the
3613 // maximum priority, and so on.  This will cause VM threads
3614 // to get unfair treatment against other Solaris processes
3615 // which do not explicitly alter their thread priorities.
3616 
3617 int os::java_to_os_priority[CriticalPriority + 1] = {
3618   -99999,         // 0 Entry should never be used
3619 
3620   0,              // 1 MinPriority
3621   32,             // 2
3622   64,             // 3
3623 
3624   96,             // 4
3625   127,            // 5 NormPriority
3626   127,            // 6
3627 
3628   127,            // 7
3629   127,            // 8
3630   127,            // 9 NearMaxPriority
3631 
3632   127,            // 10 MaxPriority
3633 
3634   -criticalPrio   // 11 CriticalPriority
3635 };
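// Under ThreadPriorityPolicy=1 this table is instead filled from prio_policy1
// (not shown here) in lwp_priocntl_init(), giving the roughly linear mapping
// described above, e.g. NormPriority maps to about half of 127.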
3636 
3637 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3638   OSThread* osthread = thread->osthread();
3639 
3640   // Save requested priority in case the thread hasn't been started
3641   osthread->set_native_priority(newpri);
3642 
3643   // Check for critical priority request
3644   bool fxcritical = false;
3645   if (newpri == -criticalPrio) {
3646     fxcritical = true;
3647     newpri = criticalPrio;
3648   }
3649 
3650   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3651   if (!UseThreadPriorities) return OS_OK;
3652 
3653   int status = 0;
3654 
3655   if (!fxcritical) {
3656     // Use thr_setprio only if we have a priority that thr_setprio understands
3657     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3658   }
3659 
3660   int lwp_status =
3661           set_lwp_class_and_priority(osthread->thread_id(),
3662                                      osthread->lwp_id(),
3663                                      newpri,
3664                                      fxcritical ? fxLimits.schedPolicy : myClass,
3665                                      !fxcritical);
3666   if (lwp_status != 0 && fxcritical) {
3667     // Try again, this time without changing the scheduling class
3668     newpri = java_MaxPriority_to_os_priority;
3669     lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3670                                             osthread->lwp_id(),
3671                                             newpri, myClass, false);
3672   }
3673   status |= lwp_status;
3674   return (status == 0) ? OS_OK : OS_ERR;
3675 }
3676 
3677 
3678 OSReturn os::get_native_priority(const Thread* const thread,
3679                                  int *priority_ptr) {
3680   int p;
3681   if (!UseThreadPriorities) {
3682     *priority_ptr = NormalPriority;
3683     return OS_OK;
3684   }
3685   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3686   if (status != 0) {
3687     return OS_ERR;
3688   }
3689   *priority_ptr = p;
3690   return OS_OK;
3691 }
3692 
3693 
3694 // Hint to the underlying OS that a task switch would not be good.
3695 // Void return because it's a hint and can fail.
3696 void os::hint_no_preempt() {
3697   schedctl_start(schedctl_init());
3698 }
3699 
3700 static void resume_clear_context(OSThread *osthread) {
3701   osthread->set_ucontext(NULL);
3702 }
3703 
3704 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3705   osthread->set_ucontext(context);
3706 }
3707 
3708 static Semaphore sr_semaphore;
3709 
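// Suspend/resume handshake, as implemented by SR_handler, do_suspend and
// do_resume below: do_suspend moves the target to SR_SUSPEND_REQUEST and sends
// SIGasync; the target's SR_handler saves its ucontext, moves to SR_SUSPENDED,
// posts sr_semaphore and parks in sigsuspend(); do_resume moves the target to
// SR_WAKEUP_REQUEST and signals it again, after which SR_handler transitions
// back to SR_RUNNING and posts sr_semaphore once more.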
3710 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
3711   // Save and restore errno to avoid confusing native code with EINTR
3712   // after sigsuspend.
3713   int old_errno = errno;
3714 
3715   OSThread* osthread = thread->osthread();
3716   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3717 
3718   os::SuspendResume::State current = osthread->sr.state();
3719   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3720     suspend_save_context(osthread, uc);
3721 
3722     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3723     os::SuspendResume::State state = osthread->sr.suspended();
3724     if (state == os::SuspendResume::SR_SUSPENDED) {
3725       sigset_t suspend_set;  // signals for sigsuspend()
3726 
3727       // get current set of blocked signals and unblock resume signal
3728       thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
3729       sigdelset(&suspend_set, os::Solaris::SIGasync());
3730 
3731       sr_semaphore.signal();
3732       // wait here until we are resumed
3733       while (1) {
3734         sigsuspend(&suspend_set);
3735 
3736         os::SuspendResume::State result = osthread->sr.running();
3737         if (result == os::SuspendResume::SR_RUNNING) {
3738           sr_semaphore.signal();
3739           break;
3740         }
3741       }
3742 
3743     } else if (state == os::SuspendResume::SR_RUNNING) {
3744       // request was cancelled, continue
3745     } else {
3746       ShouldNotReachHere();
3747     }
3748 
3749     resume_clear_context(osthread);
3750   } else if (current == os::SuspendResume::SR_RUNNING) {
3751     // request was cancelled, continue
3752   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3753     // ignore
3754   } else {
3755     // ignore
3756   }
3757 
3758   errno = old_errno;
3759 }
3760 
3761 void os::print_statistics() {
3762 }
3763 
3764 int os::message_box(const char* title, const char* message) {
3765   int i;
3766   fdStream err(defaultStream::error_fd());
3767   for (i = 0; i < 78; i++) err.print_raw("=");
3768   err.cr();
3769   err.print_raw_cr(title);
3770   for (i = 0; i < 78; i++) err.print_raw("-");
3771   err.cr();
3772   err.print_raw_cr(message);
3773   for (i = 0; i < 78; i++) err.print_raw("=");
3774   err.cr();
3775 
3776   char buf[16];
3777   // Prevent process from exiting upon "read error" without consuming all CPU
3778   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3779 
3780   return buf[0] == 'y' || buf[0] == 'Y';
3781 }
3782 
3783 static int sr_notify(OSThread* osthread) {
3784   int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
3785   assert_status(status == 0, status, "thr_kill");
3786   return status;
3787 }
3788 
3789 // "Randomly" selected value for how long we want to spin
3790 // before bailing out on suspending a thread, also how often
3791 // we send a signal to a thread we want to resume
3792 static const int RANDOMLY_LARGE_INTEGER = 1000000;
3793 static const int RANDOMLY_LARGE_INTEGER2 = 100;
3794 
3795 static bool do_suspend(OSThread* osthread) {
3796   assert(osthread->sr.is_running(), "thread should be running");
3797   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
3798 
3799   // mark as suspended and send signal
3800   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3801     // failed to switch, state wasn't running?
3802     ShouldNotReachHere();
3803     return false;
3804   }
3805 
3806   if (sr_notify(osthread) != 0) {
3807     ShouldNotReachHere();
3808   }
3809 
3810   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3811   while (true) {
3812     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
3813       break;
3814     } else {
3815       // timeout
3816       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3817       if (cancelled == os::SuspendResume::SR_RUNNING) {
3818         return false;
3819       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3820         // make sure that we consume the signal on the semaphore as well
3821         sr_semaphore.wait();
3822         break;
3823       } else {
3824         ShouldNotReachHere();
3825         return false;
3826       }
3827     }
3828   }
3829 
3830   guarantee(osthread->sr.is_suspended(), "Must be suspended");
3831   return true;
3832 }
3833 
3834 static void do_resume(OSThread* osthread) {
3835   assert(osthread->sr.is_suspended(), "thread should be suspended");
3836   assert(!sr_semaphore.trywait(), "invalid semaphore state");
3837 
3838   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3839     // failed to switch to WAKEUP_REQUEST
3840     ShouldNotReachHere();
3841     return;
3842   }
3843 
3844   while (true) {
3845     if (sr_notify(osthread) == 0) {
3846       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
3847         if (osthread->sr.is_running()) {
3848           return;
3849         }
3850       }
3851     } else {
3852       ShouldNotReachHere();
3853     }
3854   }
3855 
3856   guarantee(osthread->sr.is_running(), "Must be running!");
3857 }
3858 
3859 void os::SuspendedThreadTask::internal_do_task() {
3860   if (do_suspend(_thread->osthread())) {
3861     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3862     do_task(context);
3863     do_resume(_thread->osthread());
3864   }
3865 }
3866 
3867 class PcFetcher : public os::SuspendedThreadTask {
3868  public:
3869   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3870   ExtendedPC result();
3871  protected:
3872   void do_task(const os::SuspendedThreadTaskContext& context);
3873  private:
3874   ExtendedPC _epc;
3875 };
3876 
3877 ExtendedPC PcFetcher::result() {
3878   guarantee(is_done(), "task is not done yet.");
3879   return _epc;
3880 }
3881 
3882 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3883   Thread* thread = context.thread();
3884   OSThread* osthread = thread->osthread();
3885   if (osthread->ucontext() != NULL) {
3886     _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
3887   } else {
3888     // NULL context is unexpected, double-check this is the VMThread
3889     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3890   }
3891 }
3892 
3893 // Fetches the pc of the target thread by briefly suspending it via
3894 // PcFetcher, so the result is only a hint. Used for profiling only!
3895 ExtendedPC os::get_thread_pc(Thread* thread) {
3896   // Make sure that it is called by the watcher and the Threads lock is owned.
3897   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
3898   // For now, is only used to profile the VM Thread
3899   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3900   PcFetcher fetcher(thread);
3901   fetcher.run();
3902   return fetcher.result();
3903 }
3904 
3905 
3906 // This does not do anything on Solaris. This is basically a hook for being
3907 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
3908 void os::os_exception_wrapper(java_call_t f, JavaValue* value,
3909                               methodHandle* method, JavaCallArguments* args,
3910                               Thread* thread) {
3911   f(value, method, args, thread);
3912 }
3913 
3914 // This routine may be used by user applications as a "hook" to catch signals.
3915 // The user-defined signal handler must pass unrecognized signals to this
3916 // routine, and if it returns true (non-zero), then the signal handler must
3917 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
3918 // routine will never return false (zero), but instead will execute a VM panic
3919 // routine to kill the process.
3920 //
3921 // If this routine returns false, it is OK to call it again.  This allows
3922 // the user-defined signal handler to perform checks either before or after
3923 // the VM performs its own checks.  Naturally, the user code would be making
3924 // a serious error if it tried to handle an exception (such as a null check
3925 // or breakpoint) that the VM was generating for its own correct operation.
3926 //
3927 // This routine may recognize any of the following kinds of signals:
3928 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
3929 // os::Solaris::SIGasync
3930 // It should be consulted by handlers for any of those signals.
3931 // It explicitly does not recognize os::Solaris::SIGinterrupt
3932 //
3933 // The caller of this routine must pass in the three arguments supplied
3934 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
3935 // field of the structure passed to sigaction().  This routine assumes that
3936 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3937 //
3938 // Note that the VM will print warnings if it detects conflicting signal
3939 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3940 //
3941 extern "C" JNIEXPORT int JVM_handle_solaris_signal(int signo,
3942                                                    siginfo_t* siginfo,
3943                                                    void* ucontext,
3944                                                    int abort_if_unrecognized);
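// A minimal sketch of a user handler following the protocol above (the handler
// name and installation details are hypothetical, not part of the VM):
//
//   extern "C" void my_app_handler(int sig, siginfo_t* info, void* uc) {
//     if (JVM_handle_solaris_signal(sig, info, uc, 0 /* abort_if_unrecognized */)) {
//       return;  // the VM recognized and handled the signal
//     }
//     // ... application-specific handling of the unrecognized signal ...
//   }
//
// installed via sigaction() with sa_flags including SA_SIGINFO and SA_RESTART,
// as required by the comment above.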
3945 
3946 
3947 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
3948   int orig_errno = errno;  // Preserve errno value over signal handler.
3949   JVM_handle_solaris_signal(sig, info, ucVoid, true);
3950   errno = orig_errno;
3951 }
3952 
3953 // Do not delete - if guarantee is ever removed, a signal handler (even empty)
3954 // is needed to provoke threads blocked on IO to return with EINTR.
3955 // Note: this explicitly does NOT call JVM_handle_solaris_signal and
3956 // does NOT participate in signal chaining due to requirement for
3957 // NOT setting SA_RESTART to make EINTR work.
3958 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
3959   if (UseSignalChaining) {
3960     struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
3961     if (actp && actp->sa_handler) {
3962       vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
3963     }
3964   }
3965 }
3966 
3967 // This boolean allows users to forward their own non-matching signals
3968 // to JVM_handle_solaris_signal, harmlessly.
3969 bool os::Solaris::signal_handlers_are_installed = false;
3970 
3971 // For signal-chaining
3972 bool os::Solaris::libjsig_is_loaded = false;
3973 typedef struct sigaction *(*get_signal_t)(int);
3974 get_signal_t os::Solaris::get_signal_action = NULL;
3975 
3976 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
3977   struct sigaction *actp = NULL;
3978 
3979   if (libjsig_is_loaded && (sig <= Maxlibjsigsigs)) {
3980     // Retrieve the old signal handler from libjsig
3981     actp = (*get_signal_action)(sig);
3982   }
3983   if (actp == NULL) {
3984     // Retrieve the preinstalled signal handler from jvm
3985     actp = get_preinstalled_handler(sig);
3986   }
3987 
3988   return actp;
3989 }
3990 
3991 static bool call_chained_handler(struct sigaction *actp, int sig,
3992                                  siginfo_t *siginfo, void *context) {
3993   // Call the old signal handler
3994   if (actp->sa_handler == SIG_DFL) {
3995     // It's more reasonable to let jvm treat it as an unexpected exception
3996     // instead of taking the default action.
3997     return false;
3998   } else if (actp->sa_handler != SIG_IGN) {
3999     if ((actp->sa_flags & SA_NODEFER) == 0) {
4000       // automatically block the signal
4001       sigaddset(&(actp->sa_mask), sig);
4002     }
4003 
4004     sa_handler_t hand;
4005     sa_sigaction_t sa;
4006     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
4007     // retrieve the chained handler
4008     if (siginfo_flag_set) {
4009       sa = actp->sa_sigaction;
4010     } else {
4011       hand = actp->sa_handler;
4012     }
4013 
4014     if ((actp->sa_flags & SA_RESETHAND) != 0) {
4015       actp->sa_handler = SIG_DFL;
4016     }
4017 
4018     // try to honor the signal mask
4019     sigset_t oset;
4020     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
4021 
4022     // call into the chained handler
4023     if (siginfo_flag_set) {
4024       (*sa)(sig, siginfo, context);
4025     } else {
4026       (*hand)(sig);
4027     }
4028 
4029     // restore the signal mask
4030     thr_sigsetmask(SIG_SETMASK, &oset, 0);
4031   }
4032   // Tell jvm's signal handler the signal is taken care of.
4033   return true;
4034 }
4035 
4036 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4037   bool chained = false;
4038   // signal-chaining
4039   if (UseSignalChaining) {
4040     struct sigaction *actp = get_chained_signal_action(sig);
4041     if (actp != NULL) {
4042       chained = call_chained_handler(actp, sig, siginfo, context);
4043     }
4044   }
4045   return chained;
4046 }
4047 
4048 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4049   assert((chainedsigactions != (struct sigaction *)NULL) &&
4050          (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
4051   if (preinstalled_sigs[sig] != 0) {
4052     return &chainedsigactions[sig];
4053   }
4054   return NULL;
4055 }
4056 
4057 void os::Solaris::save_preinstalled_handler(int sig,
4058                                             struct sigaction& oldAct) {
4059   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4060   assert((chainedsigactions != (struct sigaction *)NULL) &&
4061          (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
4062   chainedsigactions[sig] = oldAct;
4063   preinstalled_sigs[sig] = 1;
4064 }
4065 
4066 void os::Solaris::set_signal_handler(int sig, bool set_installed,
4067                                      bool oktochain) {
4068   // Check for overwrite.
4069   struct sigaction oldAct;
4070   sigaction(sig, (struct sigaction*)NULL, &oldAct);
4071   void* oldhand =
4072       oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
4073                           : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
4074   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4075       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4076       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4077     if (AllowUserSignalHandlers || !set_installed) {
4078       // Do not overwrite; user takes responsibility to forward to us.
4079       return;
4080     } else if (UseSignalChaining) {
4081       if (oktochain) {
4082         // save the old handler in jvm
4083         save_preinstalled_handler(sig, oldAct);
4084       } else {
4085         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4086       }
4087       // libjsig also interposes the sigaction() call below and saves the
4088       // old sigaction on its own.
4089     } else {
4090       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4091                     "%#lx for signal %d.", (long)oldhand, sig));
4092     }
4093   }
4094 
4095   struct sigaction sigAct;
4096   sigfillset(&(sigAct.sa_mask));
4097   sigAct.sa_handler = SIG_DFL;
4098 
4099   sigAct.sa_sigaction = signalHandler;
4100   // Handle SIGSEGV on alternate signal stack if
4101   // not using stack banging
4102   if (!UseStackBanging && sig == SIGSEGV) {
4103     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4104   } else if (sig == os::Solaris::SIGinterrupt()) {
4105     // Interruptible i/o requires SA_RESTART cleared so EINTR
4106     // is returned instead of restarting system calls
4107     sigemptyset(&sigAct.sa_mask);
4108     sigAct.sa_handler = NULL;
4109     sigAct.sa_flags = SA_SIGINFO;
4110     sigAct.sa_sigaction = sigINTRHandler;
4111   } else {
4112     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4113   }
4114   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4115 
4116   sigaction(sig, &sigAct, &oldAct);
4117 
4118   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4119                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4120   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4121 }
4122 
4123 
4124 #define DO_SIGNAL_CHECK(sig)                      \
4125   do {                                            \
4126     if (!sigismember(&check_signal_done, sig)) {  \
4127       os::Solaris::check_signal_handler(sig);     \
4128     }                                             \
4129   } while (0)
4130 
4131 // This method is a periodic task to check for misbehaving JNI applications
4132 // under CheckJNI; we can add any periodic checks here.
4133 
4134 void os::run_periodic_checks() {
4135   // A big source of grief is hijacking virt. addr 0x0 on Solaris,
4136   // thereby preventing NULL checks.
4137   if (!check_addr0_done) check_addr0_done = check_addr0(tty);
4138 
4139   if (check_signals == false) return;
4140 
4141   // SEGV and BUS, if overridden, could potentially prevent
4142   // generation of hs*.log in the event of a crash; debugging
4143   // such a case can be very challenging, so we absolutely
4144   // check for the following for good measure:
4145   DO_SIGNAL_CHECK(SIGSEGV);
4146   DO_SIGNAL_CHECK(SIGILL);
4147   DO_SIGNAL_CHECK(SIGFPE);
4148   DO_SIGNAL_CHECK(SIGBUS);
4149   DO_SIGNAL_CHECK(SIGPIPE);
4150   DO_SIGNAL_CHECK(SIGXFSZ);
4151 
4152   // ReduceSignalUsage allows the user to override these handlers;
4153   // see comments at the very top and in jvm_solaris.h.
4154   if (!ReduceSignalUsage) {
4155     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4156     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4157     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4158     DO_SIGNAL_CHECK(BREAK_SIGNAL);
4159   }
4160 
4161   // See comments above for using JVM1/JVM2 and UseAltSigs
4162   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4163   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4164 
4165 }
4166 
4167 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4168 
4169 static os_sigaction_t os_sigaction = NULL;
4170 
4171 void os::Solaris::check_signal_handler(int sig) {
4172   char buf[O_BUFLEN];
4173   address jvmHandler = NULL;
4174 
4175   struct sigaction act;
4176   if (os_sigaction == NULL) {
4177     // only trust the default sigaction, in case it has been interposed
4178     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4179     if (os_sigaction == NULL) return;
4180   }
4181 
4182   os_sigaction(sig, (struct sigaction*)NULL, &act);
4183 
4184   address thisHandler = (act.sa_flags & SA_SIGINFO)
4185     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4186     : CAST_FROM_FN_PTR(address, act.sa_handler);
4187 
4188 
4189   switch (sig) {
4190   case SIGSEGV:
4191   case SIGBUS:
4192   case SIGFPE:
4193   case SIGPIPE:
4194   case SIGXFSZ:
4195   case SIGILL:
4196     jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4197     break;
4198 
4199   case SHUTDOWN1_SIGNAL:
4200   case SHUTDOWN2_SIGNAL:
4201   case SHUTDOWN3_SIGNAL:
4202   case BREAK_SIGNAL:
4203     jvmHandler = (address)user_handler();
4204     break;
4205 
4206   default:
4207     int intrsig = os::Solaris::SIGinterrupt();
4208     int asynsig = os::Solaris::SIGasync();
4209 
4210     if (sig == intrsig) {
4211       jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4212     } else if (sig == asynsig) {
4213       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4214     } else {
4215       return;
4216     }
4217     break;
4218   }
4219 
4220 
4221   if (thisHandler != jvmHandler) {
4222     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4223     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4224     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4225     // No need to check this sig any longer
4226     sigaddset(&check_signal_done, sig);
4227     // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned to SIG_IGN.
4228     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
4229       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
4230                     exception_name(sig, buf, O_BUFLEN));
4231     }
4232   } else if (os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4233     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4234     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4235     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
4236     // No need to check this sig any longer
4237     sigaddset(&check_signal_done, sig);
4238   }
4239 
4240   // Print all the signal handler state
4241   if (sigismember(&check_signal_done, sig)) {
4242     print_signal_handlers(tty, buf, O_BUFLEN);
4243   }
4244 
4245 }
4246 
4247 void os::Solaris::install_signal_handlers() {
4248   bool libjsigdone = false;
4249   signal_handlers_are_installed = true;
4250 
4251   // signal-chaining
4252   typedef void (*signal_setting_t)();
4253   signal_setting_t begin_signal_setting = NULL;
4254   signal_setting_t end_signal_setting = NULL;
4255   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4256                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4257   if (begin_signal_setting != NULL) {
4258     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4259                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4260     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4261                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4262     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4263                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4264     libjsig_is_loaded = true;
4265     if (os::Solaris::get_libjsig_version != NULL) {
4266       libjsigversion =  (*os::Solaris::get_libjsig_version)();
4267     }
4268     assert(UseSignalChaining, "should enable signal-chaining");
4269   }
4270   if (libjsig_is_loaded) {
4271     // Tell libjsig jvm is setting signal handlers
4272     (*begin_signal_setting)();
4273   }
4274 
4275   set_signal_handler(SIGSEGV, true, true);
4276   set_signal_handler(SIGPIPE, true, true);
4277   set_signal_handler(SIGXFSZ, true, true);
4278   set_signal_handler(SIGBUS, true, true);
4279   set_signal_handler(SIGILL, true, true);
4280   set_signal_handler(SIGFPE, true, true);
4281 
4282 
4283   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4284 
4285     // Pre-1.4.1 libjsig limited signal chaining to signals <= 32, so it
4286     // cannot register overridable signals, which might be > 32.
4287     if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
4288       // Tell libjsig jvm has finished setting signal handlers
4289       (*end_signal_setting)();
4290       libjsigdone = true;
4291     }
4292   }
4293 
4294   // It is never OK to chain our SIGinterrupt
4295   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4296   set_signal_handler(os::Solaris::SIGasync(), true, true);
4297 
4298   if (libjsig_is_loaded && !libjsigdone) {
4299     // Tell libjsig the jvm has finished setting signal handlers
4300     (*end_signal_setting)();
4301   }
4302 
4303   // We don't activate the signal checker if libjsig is in place; we trust ourselves,
4304   // and if a user signal handler is installed all bets are off.
4305   // Log that signal checking is off only if -verbose:jni is specified.
4306   if (CheckJNICalls) {
4307     if (libjsig_is_loaded) {
4308       if (PrintJNIResolving) {
4309         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4310       }
4311       check_signals = false;
4312     }
4313     if (AllowUserSignalHandlers) {
4314       if (PrintJNIResolving) {
4315         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4316       }
4317       check_signals = false;
4318     }
4319   }
4320 }
4321 
4322 
4323 void report_error(const char* file_name, int line_no, const char* title,
4324                   const char* format, ...);
4325 
4326 const char * signames[] = {
4327   "SIG0",
4328   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4329   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4330   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4331   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4332   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4333   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4334   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4335   "SIGCANCEL", "SIGLOST"
4336 };
4337 
4338 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4339   if (0 < exception_code && exception_code <= SIGRTMAX) {
4340     // signal
4341     if (exception_code < sizeof(signames)/sizeof(const char*)) {
4342       jio_snprintf(buf, size, "%s", signames[exception_code]);
4343     } else {
4344       jio_snprintf(buf, size, "SIG%d", exception_code);
4345     }
4346     return buf;
4347   } else {
4348     return NULL;
4349   }
4350 }
4351 
4352 // (Static) wrapper for getisax(2) call.
4353 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4354 
4355 // (Static) wrappers for the liblgrp API
4356 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4357 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4358 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4359 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4360 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4361 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4362 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4363 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4364 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4365 
4366 // (Static) wrapper for meminfo() call.
4367 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4368 
4369 static address resolve_symbol_lazy(const char* name) {
4370   address addr = (address) dlsym(RTLD_DEFAULT, name);
4371   if (addr == NULL) {
4372     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4373     addr = (address) dlsym(RTLD_NEXT, name);
4374   }
4375   return addr;
4376 }
4377 
4378 static address resolve_symbol(const char* name) {
4379   address addr = resolve_symbol_lazy(name);
4380   if (addr == NULL) {
4381     fatal(dlerror());
4382   }
4383   return addr;
4384 }
4385 
4386 void os::Solaris::libthread_init() {
4387   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4388 
4389   lwp_priocntl_init();
4390 
4391   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4392   if (func == NULL) {
4393     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
4394     // Guarantee that this VM is running on a new enough OS (5.6 or
4395     // later) that it will have a new enough libthread.so.
4396     guarantee(func != NULL, "libthread.so is too old.");
4397   }
4398 
4399   int size;
4400   void (*handler_info_func)(address *, int *);
4401   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4402   handler_info_func(&handler_start, &size);
4403   handler_end = handler_start + size;
4404 }
4405 
4406 
4407 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4408 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4409 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4410 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4411 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4412 int os::Solaris::_mutex_scope = USYNC_THREAD;
4413 
4414 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4415 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4416 int_fnP_cond_tP os::Solaris::_cond_signal;
4417 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4418 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4419 int_fnP_cond_tP os::Solaris::_cond_destroy;
4420 int os::Solaris::_cond_scope = USYNC_THREAD;
4421 
4422 void os::Solaris::synchronization_init() {
4423   if (UseLWPSynchronization) {
4424     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4425     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4426     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4427     os::Solaris::set_mutex_init(lwp_mutex_init);
4428     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4429     os::Solaris::set_mutex_scope(USYNC_THREAD);
4430 
4431     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4432     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4433     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4434     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4435     os::Solaris::set_cond_init(lwp_cond_init);
4436     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4437     os::Solaris::set_cond_scope(USYNC_THREAD);
4438   } else {
4439     os::Solaris::set_mutex_scope(USYNC_THREAD);
4440     os::Solaris::set_cond_scope(USYNC_THREAD);
4441 
4442     if (UsePthreads) {
4443       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4444       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4445       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4446       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4447       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4448 
4449       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4450       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4451       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4452       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4453       os::Solaris::set_cond_init(pthread_cond_default_init);
4454       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4455     } else {
4456       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4457       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4458       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4459       os::Solaris::set_mutex_init(::mutex_init);
4460       os::Solaris::set_mutex_destroy(::mutex_destroy);
4461 
4462       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4463       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4464       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4465       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4466       os::Solaris::set_cond_init(::cond_init);
4467       os::Solaris::set_cond_destroy(::cond_destroy);
4468     }
4469   }
4470 }
4471 
4472 bool os::Solaris::liblgrp_init() {
4473   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4474   if (handle != NULL) {
4475     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4476     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4477     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4478     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4479     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4480     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4481     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4482     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4483                                                       dlsym(handle, "lgrp_cookie_stale")));
4484 
4485     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4486     set_lgrp_cookie(c);
4487     return true;
4488   }
4489   return false;
4490 }
4491 
4492 void os::Solaris::misc_sym_init() {
4493   address func;
4494 
4495   // getisax
4496   func = resolve_symbol_lazy("getisax");
4497   if (func != NULL) {
4498     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4499   }
4500 
4501   // meminfo
4502   func = resolve_symbol_lazy("meminfo");
4503   if (func != NULL) {
4504     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4505   }
4506 }
4507 
4508 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4509   assert(_getisax != NULL, "_getisax not set");
4510   return _getisax(array, n);
4511 }
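
     #ifdef ILLUSTRATIVE_SKETCHES
     // Illustrative sketch only.  ILLUSTRATIVE_SKETCHES is a hypothetical guard
     // macro that is never defined, so this code is not part of any build.  It
     // shows how a caller might probe a hardware capability bit through the
     // getisax wrapper above once misc_sym_init() has resolved the symbol.  The
     // AV_* mask to test comes from <sys/auxv.h> and is supplied by the caller;
     // os::Solaris::supports_getisax() is assumed to report whether getisax(2)
     // was resolved.
     static bool cpu_has_capability(uint32_t av_mask) {
       if (!os::Solaris::supports_getisax()) {
         return false;                 // getisax(2) is not available on this OS
       }
       uint32_t avs[2] = { 0, 0 };
       uint_t avn = os::Solaris::getisax(avs, 2);
       return avn > 0 && (avs[0] & av_mask) != 0;
     }
     #endif // ILLUSTRATIVE_SKETCHES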
4512 
4513 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4514 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4515 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4516 
4517 void init_pset_getloadavg_ptr(void) {
4518   pset_getloadavg_ptr =
4519     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4520   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4521     warning("pset_getloadavg function not found");
4522   }
4523 }
4524 
4525 int os::Solaris::_dev_zero_fd = -1;
4526 
4527 // this is called _before_ the global arguments have been parsed
4528 void os::init(void) {
4529   _initial_pid = getpid();
4530 
4531   max_hrtime = first_hrtime = gethrtime();
4532 
4533   init_random(1234567);
4534 
4535   page_size = sysconf(_SC_PAGESIZE);
4536   if (page_size == -1) {
4537     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4538                   strerror(errno)));
4539   }
4540   init_page_sizes((size_t) page_size);
4541 
4542   Solaris::initialize_system_info();
4543 
4544   // Initialize misc. symbols as soon as possible, so we can use them
4545   // if we need them.
4546   Solaris::misc_sym_init();
4547 
4548   int fd = ::open("/dev/zero", O_RDWR);
4549   if (fd < 0) {
4550     fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4551   } else {
4552     Solaris::set_dev_zero_fd(fd);
4553 
4554     // Close on exec, child won't inherit.
4555     fcntl(fd, F_SETFD, FD_CLOEXEC);
4556   }
4557 
4558   clock_tics_per_sec = CLK_TCK;
4559 
4560   // check if dladdr1() exists; dladdr1 can provide more information than
4561   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4562   // and is available on linker patches for 5.7 and 5.8.
4563   // libdl.so must already have been loaded; this call is just an entry lookup
4564   void * hdl = dlopen("libdl.so", RTLD_NOW);
4565   if (hdl) {
4566     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4567   }
4568 
4569   // (Solaris only) this switches to calls that actually do locking.
4570   ThreadCritical::initialize();
4571 
4572   main_thread = thr_self();
4573 
4574   // Constant minimum stack size allowed. It must be at least as
4575   // large as the OS-supported minimum (thr_min_stack()), and large
4576   // enough to allow the thread to get to user bytecode execution.
4577   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4578   // If the pagesize of the VM is greater than 8K determine the appropriate
4579   // number of initial guard pages.  The user can change this with the
4580   // command line arguments, if needed.
4581   if (vm_page_size() > 8*K) {
4582     StackYellowPages = 1;
4583     StackRedPages = 1;
4584     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4585   }
4586 }
4587 
4588 // To install functions for atexit system call
4589 extern "C" {
4590   static void perfMemory_exit_helper() {
4591     perfMemory_exit();
4592   }
4593 }
4594 
4595 // this is called _after_ the global arguments have been parsed
4596 jint os::init_2(void) {
4597   // try to enable extended file IO ASAP, see 6431278
4598   os::Solaris::try_enable_extended_io();
4599 
4600   // Allocate a single page and mark it as readable for safepoint polling.  Also
4601   // use this first mmap call to check support for MAP_ALIGN.
4602   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4603                                                       page_size,
4604                                                       MAP_PRIVATE | MAP_ALIGN,
4605                                                       PROT_READ);
4606   if (polling_page == NULL) {
4607     has_map_align = false;
4608     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
4609                                                 PROT_READ);
4610   }
4611 
4612   os::set_polling_page(polling_page);
4613 
4614 #ifndef PRODUCT
4615   if (Verbose && PrintMiscellaneous) {
4616     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n",
4617                (intptr_t)polling_page);
4618   }
4619 #endif
4620 
4621   if (!UseMembar) {
4622     address mem_serialize_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE);
4623     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
4624     os::set_memory_serialize_page(mem_serialize_page);
4625 
4626 #ifndef PRODUCT
4627     if (Verbose && PrintMiscellaneous) {
4628       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n",
4629                  (intptr_t)mem_serialize_page);
4630     }
4631 #endif
4632   }
4633 
4634   // Check minimum allowable stack size for thread creation and to initialize
4635   // the java system classes, including StackOverflowError - depends on page
4636   // size.  Add a page for compiler2 recursion in main thread.
4637   // Add in 2*BytesPerWord times page size to account for VM stack during
4638   // class initialization depending on 32 or 64 bit VM.
4639   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
4640                                         (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
4641                                         2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
4642 
4643   size_t threadStackSizeInBytes = ThreadStackSize * K;
4644   if (threadStackSizeInBytes != 0 &&
4645       threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
4646     tty->print_cr("\nThe stack size specified is too small; specify at least %dk",
4647                   os::Solaris::min_stack_allowed/K);
4648     return JNI_ERR;
4649   }
4650 
4651   // With 64kb pages the usable default stack size is quite a bit
4652   // less.  Increase the stack for 64kb (or any larger than 8kb)
4653   // pages; this increases virtual memory fragmentation, since
4654   // we're not creating the stack on a power of 2 boundary.
4655   // The real fix for this should be to fix the guard page
4656   // mechanism.
4657 
4658   if (vm_page_size() > 8*K) {
4659     threadStackSizeInBytes = (threadStackSizeInBytes != 0)
4660        ? threadStackSizeInBytes +
4661          ((StackYellowPages + StackRedPages) * vm_page_size())
4662        : 0;
4663     ThreadStackSize = threadStackSizeInBytes/K;
4664   }
4665 
4666   // Make the stack size a multiple of the page size so that
4667   // the yellow/red zones can be guarded.
4668   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
4669                                                 vm_page_size()));
4670 
4671   Solaris::libthread_init();
4672 
4673   if (UseNUMA) {
4674     if (!Solaris::liblgrp_init()) {
4675       UseNUMA = false;
4676     } else {
4677       size_t lgrp_limit = os::numa_get_groups_num();
4678       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
4679       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
4680       FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
4681       if (lgrp_num < 2) {
4682         // There's only one locality group, disable NUMA.
4683         UseNUMA = false;
4684       }
4685     }
4686     if (!UseNUMA && ForceNUMA) {
4687       UseNUMA = true;
4688     }
4689   }
4690 
4691   Solaris::signal_sets_init();
4692   Solaris::init_signal_mem();
4693   Solaris::install_signal_handlers();
4694 
4695   if (libjsigversion < JSIG_VERSION_1_4_1) {
4696     Maxlibjsigsigs = OLDMAXSIGNUM;
4697   }
4698 
4699   // initialize synchronization primitives to use either thread or
4700   // lwp synchronization (controlled by UseLWPSynchronization)
4701   Solaris::synchronization_init();
4702 
4703   if (MaxFDLimit) {
4704     // set the number of file descriptors to the maximum; print an error
4705     // if getrlimit/setrlimit fails, but continue regardless.
4706     struct rlimit nbr_files;
4707     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4708     if (status != 0) {
4709       if (PrintMiscellaneous && (Verbose || WizardMode)) {
4710         perror("os::init_2 getrlimit failed");
4711       }
4712     } else {
4713       nbr_files.rlim_cur = nbr_files.rlim_max;
4714       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4715       if (status != 0) {
4716         if (PrintMiscellaneous && (Verbose || WizardMode)) {
4717           perror("os::init_2 setrlimit failed");
4718         }
4719       }
4720     }
4721   }
4722 
4723   // Calculate the theoretical maximum number of threads to guard against
4724   // artificial out-of-memory situations, where all available address
4725   // space has been reserved by thread stacks. Default stack size is 1Mb.
4726   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
4727     JavaThread::stack_size_at_create() : (1*K*K);
4728   assert(pre_thread_stack_size != 0, "Must have a stack");
4729   // Solaris gives user programs at most 4Gb of address space. Calculate the
4730   // thread limit at which we should start doing Virtual Memory banging;
4731   // currently that is when the threads have used all but 200Mb of space.
4732   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
4733   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
4734 
4735   // at-exit methods are called in the reverse order of their registration.
4736   // In Solaris 7 and earlier, atexit functions are called on return from
4737   // main or as a result of a call to exit(3C). There can be only 32 of
4738   // these functions registered and atexit() does not set errno. In Solaris
4739   // 8 and later, there is no limit to the number of functions registered
4740   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
4741   // functions are called upon dlclose(3DL) in addition to return from main
4742   // and exit(3C).
4743 
4744   if (PerfAllowAtExitRegistration) {
4745     // only register atexit functions if PerfAllowAtExitRegistration is set.
4746     // atexit functions can be delayed until process exit time, which
4747     // can be problematic for embedded VM situations. Embedded VMs should
4748     // call DestroyJavaVM() to assure that VM resources are released.
4749 
4750     // note: perfMemory_exit_helper atexit function may be removed in
4751     // the future if the appropriate cleanup code can be added to the
4752     // VM_Exit VMOperation's doit method.
4753     if (atexit(perfMemory_exit_helper) != 0) {
4754       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4755     }
4756   }
4757 
4758   // Init pset_loadavg function pointer
4759   init_pset_getloadavg_ptr();
4760 
4761   return JNI_OK;
4762 }
4763 
4764 void os::init_3(void) {
4765   return;
4766 }
4767 
4768 // Mark the polling page as unreadable
4769 void os::make_polling_page_unreadable(void) {
4770   if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0) {
4771     fatal("Could not disable polling page");
4772   }
4773 }
4774 
4775 // Mark the polling page as readable
4776 void os::make_polling_page_readable(void) {
4777   if (mprotect((char *)_polling_page, page_size, PROT_READ) != 0) {
4778     fatal("Could not enable polling page");
4779   }
4780 }
4781 
4782 // OS interface.
4783 
4784 bool os::check_heap(bool force) { return true; }
4785 
4786 // Is a (classpath) directory empty?
4787 bool os::dir_is_empty(const char* path) {
4788   DIR *dir = NULL;
4789   struct dirent *ptr;
4790 
4791   dir = opendir(path);
4792   if (dir == NULL) return true;
4793 
4794   // Scan the directory
4795   bool result = true;
4796   char buf[sizeof(struct dirent) + MAX_PATH];
4797   struct dirent *dbuf = (struct dirent *) buf;
4798   while (result && (ptr = readdir(dir, dbuf)) != NULL) {
4799     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4800       result = false;
4801     }
4802   }
4803   closedir(dir);
4804   return result;
4805 }
4806 
4807 // This code originates from JDK's sysOpen and open64_w
4808 // from src/solaris/hpi/src/system_md.c
4809 
4810 int os::open(const char *path, int oflag, int mode) {
4811   if (strlen(path) > MAX_PATH - 1) {
4812     errno = ENAMETOOLONG;
4813     return -1;
4814   }
4815   int fd;
4816 
4817   fd = ::open64(path, oflag, mode);
4818   if (fd == -1) return -1;
4819 
4820   // If the open succeeded, the file might still be a directory
4821   {
4822     struct stat64 buf64;
4823     int ret = ::fstat64(fd, &buf64);
4824     int st_mode = buf64.st_mode;
4825 
4826     if (ret != -1) {
4827       if ((st_mode & S_IFMT) == S_IFDIR) {
4828         errno = EISDIR;
4829         ::close(fd);
4830         return -1;
4831       }
4832     } else {
4833       ::close(fd);
4834       return -1;
4835     }
4836   }
4837 
4838   // 32-bit Solaris systems suffer from:
4839   //
4840   // - an historical default soft limit of 256 per-process file
4841   //   descriptors that is too low for many Java programs.
4842   //
4843   // - a design flaw where file descriptors created using stdio
4844   //   fopen must be less than 256, _even_ when the first limit above
4845   //   has been raised.  This can cause calls to fopen (but not calls to
4846   //   open, for example) to fail mysteriously, perhaps in 3rd party
4847   //   native code (although the JDK itself uses fopen).  One can hardly
4848   //   criticize them for using this most standard of all functions.
4849   //
4850   // We attempt to make everything work anyway by:
4851   //
4852   // - raising the soft limit on per-process file descriptors beyond
4853   //   256
4854   //
4855   // - As of Solaris 10u4, we can request that Solaris raise the 256
4856   //   stdio fopen limit by calling function enable_extended_FILE_stdio.
4857   //   This is done in init_2 and recorded in enabled_extended_FILE_stdio
4858   //
4859   // - If we are stuck on an old (pre 10u4) Solaris system, we can
4860   //   work around the bug by remapping non-stdio file descriptors below
4861   //   256 to ones beyond 256, which is done below.
4862   //
4863   // See:
4864   // 1085341: 32-bit stdio routines should support file descriptors >255
4865   // 6533291: Work around 32-bit Solaris stdio limit of 256 open files
4866   // 6431278: Netbeans crash on 32 bit Solaris: need to call
4867   //          enable_extended_FILE_stdio() in VM initialisation
4868   // Giri Mandalika's blog
4869   // http://technopark02.blogspot.com/2005_05_01_archive.html
4870   //
4871 #ifndef  _LP64
4872   if ((!enabled_extended_FILE_stdio) && fd < 256) {
4873     int newfd = ::fcntl(fd, F_DUPFD, 256);
4874     if (newfd != -1) {
4875       ::close(fd);
4876       fd = newfd;
4877     }
4878   }
4879 #endif // 32-bit Solaris
4880 
4881   // All file descriptors that are opened in the JVM and not
4882   // specifically destined for a subprocess should have the
4883   // close-on-exec flag set.  If we don't set it, then careless 3rd
4884   // party native code might fork and exec without closing all
4885   // appropriate file descriptors (e.g. as we do in closeDescriptors in
4886   // UNIXProcess.c), and this in turn might:
4887   //
4888   // - cause end-of-file to fail to be detected on some file
4889   //   descriptors, resulting in mysterious hangs, or
4890   //
4891   // - might cause an fopen in the subprocess to fail on a system
4892   //   suffering from bug 1085341.
4893   //
4894   // (Yes, the default setting of the close-on-exec flag is a Unix
4895   // design flaw)
4896   //
4897   // See:
4898   // 1085341: 32-bit stdio routines should support file descriptors >255
4899   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4900   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4901   //
4902 #ifdef FD_CLOEXEC
4903   {
4904     int flags = ::fcntl(fd, F_GETFD);
4905     if (flags != -1) {
4906       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4907     }
4908   }
4909 #endif
4910 
4911   return fd;
4912 }
4913 
4914 // create binary file, rewriting existing file if required
4915 int os::create_binary_file(const char* path, bool rewrite_existing) {
4916   int oflags = O_WRONLY | O_CREAT;
4917   if (!rewrite_existing) {
4918     oflags |= O_EXCL;
4919   }
4920   return ::open64(path, oflags, S_IREAD | S_IWRITE);
4921 }
4922 
4923 // return current position of file pointer
4924 jlong os::current_file_offset(int fd) {
4925   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4926 }
4927 
4928 // move file pointer to the specified offset
4929 jlong os::seek_to_file_offset(int fd, jlong offset) {
4930   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4931 }
4932 
4933 jlong os::lseek(int fd, jlong offset, int whence) {
4934   return (jlong) ::lseek64(fd, offset, whence);
4935 }
4936 
4937 char * os::native_path(char *path) {
4938   return path;
4939 }
4940 
4941 int os::ftruncate(int fd, jlong length) {
4942   return ::ftruncate64(fd, length);
4943 }
4944 
4945 int os::fsync(int fd)  {
4946   RESTARTABLE_RETURN_INT(::fsync(fd));
4947 }
4948 
4949 int os::available(int fd, jlong *bytes) {
4950   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
4951          "Assumed _thread_in_native");
4952   jlong cur, end;
4953   int mode;
4954   struct stat64 buf64;
4955 
4956   if (::fstat64(fd, &buf64) >= 0) {
4957     mode = buf64.st_mode;
4958     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4959       int n, ioctl_return;
4960 
4961       RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
4962       if (ioctl_return >= 0) {
4963         *bytes = n;
4964         return 1;
4965       }
4966     }
4967   }
4968   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4969     return 0;
4970   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4971     return 0;
4972   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4973     return 0;
4974   }
4975   *bytes = end - cur;
4976   return 1;
4977 }
4978 
4979 // Map a block of memory.
4980 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4981                         char *addr, size_t bytes, bool read_only,
4982                         bool allow_exec) {
4983   int prot;
4984   int flags;
4985 
4986   if (read_only) {
4987     prot = PROT_READ;
4988     flags = MAP_SHARED;
4989   } else {
4990     prot = PROT_READ | PROT_WRITE;
4991     flags = MAP_PRIVATE;
4992   }
4993 
4994   if (allow_exec) {
4995     prot |= PROT_EXEC;
4996   }
4997 
4998   if (addr != NULL) {
4999     flags |= MAP_FIXED;
5000   }
5001 
5002   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5003                                      fd, file_offset);
5004   if (mapped_address == MAP_FAILED) {
5005     return NULL;
5006   }
5007   return mapped_address;
5008 }
5009 
5010 
5011 // Remap a block of memory.
5012 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5013                           char *addr, size_t bytes, bool read_only,
5014                           bool allow_exec) {
5015   // same as map_memory() on this OS
5016   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5017                         allow_exec);
5018 }
5019 
5020 
5021 // Unmap a block of memory.
5022 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5023   return munmap(addr, bytes) == 0;
5024 }
5025 
5026 void os::pause() {
5027   char filename[MAX_PATH];
5028   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5029     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
5030   } else {
5031     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5032   }
5033 
5034   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5035   if (fd != -1) {
5036     struct stat buf;
5037     ::close(fd);
5038     while (::stat(filename, &buf) == 0) {
5039       (void)::poll(NULL, 0, 100);
5040     }
5041   } else {
5042     jio_fprintf(stderr,
5043                 "Could not open pause file '%s', continuing immediately.\n", filename);
5044   }
5045 }
5046 
5047 #ifndef PRODUCT
5048 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5049 // Turn this on if you need to trace synch operations.
5050 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5051 // and call record_synch_enable and record_synch_disable
5052 // around the computation of interest.
5053 
5054 void record_synch(char* name, bool returning);  // defined below
5055 
5056 class RecordSynch {
5057   char* _name;
5058  public:
5059   RecordSynch(char* name) :_name(name) { record_synch(_name, false); }
5060   ~RecordSynch()                       { record_synch(_name, true); }
5061 };
5062 
5063 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
5064 extern "C" ret name params {                                    \
5065   typedef ret name##_t params;                                  \
5066   static name##_t* implem = NULL;                               \
5067   static int callcount = 0;                                     \
5068   if (implem == NULL) {                                         \
5069     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
5070     if (implem == NULL)  fatal(dlerror());                      \
5071   }                                                             \
5072   ++callcount;                                                  \
5073   RecordSynch _rs(#name);                                       \
5074   inner;                                                        \
5075   return implem args;                                           \
5076 }
5077 // in dbx, examine callcounts this way:
5078 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
5079 
5080 #define CHECK_POINTER_OK(p) \
5081   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
5082 #define CHECK_MU \
5083   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5084 #define CHECK_CV \
5085   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5086 #define CHECK_P(p) \
5087   if (!CHECK_POINTER_OK(p))  fatal(false,  "Pointer must be in C heap only.");
5088 
5089 #define CHECK_MUTEX(mutex_op) \
5090   CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5091 
5092 CHECK_MUTEX(   mutex_lock)
5093 CHECK_MUTEX(  _mutex_lock)
5094 CHECK_MUTEX( mutex_unlock)
5095 CHECK_MUTEX(_mutex_unlock)
5096 CHECK_MUTEX( mutex_trylock)
5097 CHECK_MUTEX(_mutex_trylock)
5098 
5099 #define CHECK_COND(cond_op) \
5100   CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU; CHECK_CV);
5101 
5102 CHECK_COND( cond_wait);
5103 CHECK_COND(_cond_wait);
5104 CHECK_COND(_cond_wait_cancel);
5105 
5106 #define CHECK_COND2(cond_op) \
5107   CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU; CHECK_CV);
5108 
5109 CHECK_COND2( cond_timedwait);
5110 CHECK_COND2(_cond_timedwait);
5111 CHECK_COND2(_cond_timedwait_cancel);
5112 
5113 // do the _lwp_* versions too
5114 #define mutex_t lwp_mutex_t
5115 #define cond_t  lwp_cond_t
5116 CHECK_MUTEX(  _lwp_mutex_lock)
5117 CHECK_MUTEX(  _lwp_mutex_unlock)
5118 CHECK_MUTEX(  _lwp_mutex_trylock)
5119 CHECK_MUTEX( __lwp_mutex_lock)
5120 CHECK_MUTEX( __lwp_mutex_unlock)
5121 CHECK_MUTEX( __lwp_mutex_trylock)
5122 CHECK_MUTEX(___lwp_mutex_lock)
5123 CHECK_MUTEX(___lwp_mutex_unlock)
5124 
5125 CHECK_COND(  _lwp_cond_wait);
5126 CHECK_COND( __lwp_cond_wait);
5127 CHECK_COND(___lwp_cond_wait);
5128 
5129 CHECK_COND2(  _lwp_cond_timedwait);
5130 CHECK_COND2( __lwp_cond_timedwait);
5131 #undef mutex_t
5132 #undef cond_t
5133 
5134 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5135 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5136 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5137 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5138 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5139 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5140 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5141 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5142 
5143 
5144 // recording machinery:
5145 
5146 enum { RECORD_SYNCH_LIMIT = 200 };
5147 char* record_synch_name[RECORD_SYNCH_LIMIT];
5148 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5149 bool record_synch_returning[RECORD_SYNCH_LIMIT];
5150 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5151 int record_synch_count = 0;
5152 bool record_synch_enabled = false;
5153 
5154 // in dbx, examine recorded data this way:
5155 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5156 
5157 void record_synch(char* name, bool returning) {
5158   if (record_synch_enabled) {
5159     if (record_synch_count < RECORD_SYNCH_LIMIT) {
5160       record_synch_name[record_synch_count] = name;
5161       record_synch_returning[record_synch_count] = returning;
5162       record_synch_thread[record_synch_count] = thr_self();
5163       record_synch_arg0ptr[record_synch_count] = &name;
5164       record_synch_count++;
5165     }
5166     // put more checking code here:
5167     // ...
5168   }
5169 }
5170 
5171 void record_synch_enable() {
5172   // start collecting trace data, if not already doing so
5173   if (!record_synch_enabled)  record_synch_count = 0;
5174   record_synch_enabled = true;
5175 }
5176 
5177 void record_synch_disable() {
5178   // stop collecting trace data
5179   record_synch_enabled = false;
5180 }
5181 
5182 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5183 #endif // PRODUCT
5184 
5185 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5186 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5187                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5188 
5189 
5190 // JVMTI & JVM monitoring and management support
5191 // The thread_cpu_time() and current_thread_cpu_time() are only
5192 // supported if is_thread_cpu_time_supported() returns true.
5193 // They are not supported on Solaris T1.
5194 
5195 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5196 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5197 // of a thread.
5198 //
5199 // current_thread_cpu_time() and thread_cpu_time(Thread *)
5200 // return the fast estimate available on the platform.
5201 
5202 // hrtime_t gethrvtime() return value includes
5203 // user time but does not include system time
5204 jlong os::current_thread_cpu_time() {
5205   return (jlong) gethrvtime();
5206 }
5207 
5208 jlong os::thread_cpu_time(Thread *thread) {
5209   // return user level CPU time only to be consistent with
5210   // what current_thread_cpu_time returns.
5211   // thread_cpu_time_info() must be changed if this changes
5212   return os::thread_cpu_time(thread, false /* user time only */);
5213 }
5214 
5215 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5216   if (user_sys_cpu_time) {
5217     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5218   } else {
5219     return os::current_thread_cpu_time();
5220   }
5221 }
5222 
5223 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5224   char proc_name[64];
5225   int count;
5226   prusage_t prusage;
5227   jlong lwp_time;
5228   int fd;
5229 
5230   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5231           getpid(),
5232           thread->osthread()->lwp_id());
5233   fd = ::open(proc_name, O_RDONLY);
5234   if (fd == -1) return -1;
5235 
5236   do {
5237     count = ::pread(fd,
5238                     (void *)&prusage.pr_utime,
5239                     thr_time_size,
5240                     thr_time_off);
5241   } while (count < 0 && errno == EINTR);
5242   ::close(fd);
5243   if (count < 0) return -1;
5244 
5245   if (user_sys_cpu_time) {
5246     // user + system CPU time
5247     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5248                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5249                  (jlong)prusage.pr_stime.tv_nsec +
5250                  (jlong)prusage.pr_utime.tv_nsec;
5251   } else {
5252     // user level CPU time only
5253     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5254                 (jlong)prusage.pr_utime.tv_nsec;
5255   }
5256 
5257   return (lwp_time);
5258 }
5259 
5260 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5261   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5262   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5263   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5264   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5265 }
5266 
5267 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5268   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5269   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5270   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5271   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5272 }
5273 
5274 bool os::is_thread_cpu_time_supported() {
5275   return true;
5276 }
5277 
5278 // System loadavg support.  Returns -1 if load average cannot be obtained.
5279 // Return the load average for our processor set if the primitive exists
5280 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
5281 int os::loadavg(double loadavg[], int nelem) {
5282   if (pset_getloadavg_ptr != NULL) {
5283     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5284   } else {
5285     return ::getloadavg(loadavg, nelem);
5286   }
5287 }
5288 
5289 //---------------------------------------------------------------------------------
5290 
5291 bool os::find(address addr, outputStream* st) {
5292   Dl_info dlinfo;
5293   memset(&dlinfo, 0, sizeof(dlinfo));
5294   if (dladdr(addr, &dlinfo) != 0) {
5295     st->print(PTR_FORMAT ": ", addr);
5296     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5297       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5298     } else if (dlinfo.dli_fbase != NULL) {
5299       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5300     } else {
5301       st->print("<absolute address>");
5302     }
5303     if (dlinfo.dli_fname != NULL) {
5304       st->print(" in %s", dlinfo.dli_fname);
5305     }
5306     if (dlinfo.dli_fbase != NULL) {
5307       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5308     }
5309     st->cr();
5310 
5311     if (Verbose) {
5312       // decode some bytes around the PC
5313       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5314       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5315       address       lowest = (address) dlinfo.dli_sname;
5316       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
5317       if (begin < lowest)  begin = lowest;
5318       Dl_info dlinfo2;
5319       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5320           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
5321         end = (address) dlinfo2.dli_saddr;
5322       }
5323       Disassembler::decode(begin, end, st);
5324     }
5325     return true;
5326   }
5327   return false;
5328 }
5329 
5330 // Following function has been added to support HotSparc's libjvm.so running
5331 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
5332 // src/solaris/hpi/native_threads in the EVM codebase.
5333 //
5334 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5335 // libraries and should thus be removed. We will leave it behind for a while
5336 // until we no longer want to be able to run on top of 1.3.0 Solaris production
5337 // JDK. See 4341971.
5338 
5339 #define STACK_SLACK 0x800
5340 
5341 extern "C" {
5342   intptr_t sysThreadAvailableStackWithSlack() {
5343     stack_t st;
5344     intptr_t retval, stack_top;
5345     retval = thr_stksegment(&st);
5346     assert(retval == 0, "incorrect return value from thr_stksegment");
5347     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5348     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
5349     stack_top = (intptr_t)st.ss_sp - st.ss_size;
5350     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5351   }
5352 }
5353 
5354 // ObjectMonitor park-unpark infrastructure ...
5355 //
5356 // We implement Solaris and Linux PlatformEvents with the
5357 // obvious condvar-mutex-flag triple.
5358 // Another alternative that works quite well is pipes:
5359 // Each PlatformEvent consists of a pipe-pair.
5360 // The thread associated with the PlatformEvent
5361 // calls park(), which reads from the input end of the pipe.
5362 // Unpark() writes into the other end of the pipe.
5363 // The write-side of the pipe must be set NDELAY.
5364 // Unfortunately pipes consume a large # of handles.
5365 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5366 // Using pipes for the 1st few threads might be workable, however (see the pipe-based sketch after this comment block).
5367 //
5368 // park() is permitted to return spuriously.
5369 // Callers of park() should wrap the call to park() in
5370 // an appropriate loop.  A litmus test for the correct
5371 // usage of park is the following: if park() were modified
5372 // to immediately return 0 your code should still work,
5373 // albeit degenerating to a spin loop (a caller-side sketch of this loop follows this comment block).
5374 //
5375 // An interesting optimization for park() is to use a trylock()
5376 // to attempt to acquire the mutex.  If the trylock() fails
5377 // then we know that a concurrent unpark() operation is in-progress.
5378 // In that case the park() code could simply set _count to 0
5379 // and return immediately.  The subsequent park() operation *might*
5380 // return immediately.  That's harmless as the caller of park() is
5381 // expected to loop.  By using trylock() we will have avoided a
5382 // context switch caused by contention on the per-thread mutex.
5383 //
5384 // TODO-FIXME:
5385 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
5386 //     objectmonitor implementation.
5387 // 2.  Collapse the JSR166 parker event, and the
5388 //     objectmonitor ParkEvent into a single "Event" construct.
5389 // 3.  In park() and unpark() add:
5390 //     assert (Thread::current() == AssociatedWith).
5391 // 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
5392 //     1-out-of-N park() operations will return immediately.
5393 //
5394 // _Event transitions in park()
5395 //   -1 => -1 : illegal
5396 //    1 =>  0 : pass - return immediately
5397 //    0 => -1 : block
5398 //
5399 // _Event serves as a restricted-range semaphore.
5400 //
5401 // Another possible encoding of _Event would be with
5402 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5403 //
5404 // TODO-FIXME: add DTRACE probes for:
5405 // 1.   Tx parks
5406 // 2.   Ty unparks Tx
5407 // 3.   Tx resumes from park
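
     #ifdef ILLUSTRATIVE_SKETCHES
     // Illustrative sketch only.  ILLUSTRATIVE_SKETCHES is a hypothetical guard
     // macro that is never defined, so this code is not part of any build.  It
     // sketches the pipe-based alternative mentioned above: park() blocks
     // reading one byte from the pipe, unpark() writes one byte, and the write
     // side is put into non-blocking (NDELAY-style) mode so repeated unparks
     // cannot fill the pipe buffer.  Error handling and EINTR retries are
     // omitted for brevity.
     class PipeEventSketch {
       int _fds[2];                     // _fds[0] = read end, _fds[1] = write end
      public:
       PipeEventSketch() {
         if (::pipe(_fds) == 0) {
           int flags = ::fcntl(_fds[1], F_GETFL);
           ::fcntl(_fds[1], F_SETFL, flags | O_NONBLOCK);
         }
       }
       ~PipeEventSketch() { ::close(_fds[0]); ::close(_fds[1]); }
       void park()   { char c; (void) ::read(_fds[0], &c, 1); }
       void unpark() { char c = 0; (void) ::write(_fds[1], &c, 1); }
     };
     #endif // ILLUSTRATIVE_SKETCHES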
5408 
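     #ifdef ILLUSTRATIVE_SKETCHES
     // Illustrative sketch only, guarded by the same hypothetical macro as the
     // sketch above.  It shows the caller-side pattern implied by the litmus
     // test in the comment block above: because park() may return spuriously,
     // the wait condition is always re-checked in a loop around the park()
     // call.  The event and flag here are hypothetical caller state.
     static void wait_until_ready(os::PlatformEvent* ev, volatile bool* ready) {
       while (!*ready) {
         ev->park();                    // may return spuriously; the loop re-checks *ready
       }
     }
     #endif // ILLUSTRATIVE_SKETCHES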
5409 
5410 // value determined through experimentation
5411 #define ROUNDINGFIX 11
5412 
5413 // utility to compute the abstime argument to timedwait.
5414 // TODO-FIXME: switch from compute_abstime() to unpackTime().
5415 
5416 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5417   // millis is the relative timeout time
5418   // abstime will be the absolute timeout time
5419   if (millis < 0)  millis = 0;
5420   struct timeval now;
5421   int status = gettimeofday(&now, NULL);
5422   assert(status == 0, "gettimeofday");
5423   jlong seconds = millis / 1000;
5424   jlong max_wait_period;
5425 
5426   if (UseLWPSynchronization) {
5427     // forward port of fix for 4275818 (not sleeping long enough)
5428     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5429     // _lwp_cond_timedwait() used a round_down algorithm rather
5430     // than a round_up. For millis less than our roundfactor
5431     // it rounded down to 0 which doesn't meet the spec.
5432     // For millis > roundfactor we may return a bit sooner, but
5433     // since we can not accurately identify the patch level and
5434     // this has already been fixed in Solaris 9 and 8 we will
5435     // leave it alone rather than always rounding down.
5436 
5437     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
5438     // It appears that when we go directly through Solaris _lwp_cond_timedwait()
5439     // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
5440     max_wait_period = 21000000;
5441   } else {
5442     max_wait_period = 50000000;
5443   }
5444   millis %= 1000;
5445   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5446     seconds = max_wait_period;
5447   }
5448   abstime->tv_sec = now.tv_sec  + seconds;
5449   long       usec = now.tv_usec + millis * 1000;
5450   if (usec >= 1000000) {
5451     abstime->tv_sec += 1;
5452     usec -= 1000000;
5453   }
5454   abstime->tv_nsec = usec * 1000;
5455   return abstime;
5456 }
5457 
5458 void os::PlatformEvent::park() {           // AKA: down()
5459   // Invariant: Only the thread associated with the Event/PlatformEvent
5460   // may call park().
5461   assert(_nParked == 0, "invariant");
5462 
5463   int v;
5464   for (;;) {
5465     v = _Event;
5466     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5467   }
5468   guarantee(v >= 0, "invariant");
5469   if (v == 0) {
5470     // Do this the hard way by blocking ...
5471     // See http://monaco.sfbay/detail.jsf?cr=5094058.
5472     // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5473     // Only for SPARC >= V8PlusA
5474 #if defined(__sparc) && defined(COMPILER2)
5475     if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5476 #endif
5477     int status = os::Solaris::mutex_lock(_mutex);
5478     assert_status(status == 0, status, "mutex_lock");
5479     guarantee(_nParked == 0, "invariant");
5480     ++_nParked;
5481     while (_Event < 0) {
5482       // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5483       // Treat this the same as if the wait was interrupted
5484       // With usr/lib/lwp going to kernel, always handle ETIME
5485       status = os::Solaris::cond_wait(_cond, _mutex);
5486       if (status == ETIME) status = EINTR;
5487       assert_status(status == 0 || status == EINTR, status, "cond_wait");
5488     }
5489     --_nParked;
5490     _Event = 0;
5491     status = os::Solaris::mutex_unlock(_mutex);
5492     assert_status(status == 0, status, "mutex_unlock");
5493     // Paranoia to ensure our locked and lock-free paths interact
5494     // correctly with each other.
5495     OrderAccess::fence();
5496   }
5497 }
5498 
5499 int os::PlatformEvent::park(jlong millis) {
5500   guarantee(_nParked == 0, "invariant");
5501   int v;
5502   for (;;) {
5503     v = _Event;
5504     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5505   }
5506   guarantee(v >= 0, "invariant");
5507   if (v != 0) return OS_OK;
5508 
5509   int ret = OS_TIMEOUT;
5510   timestruc_t abst;
5511   compute_abstime(&abst, millis);
5512 
5513   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5514   // For Solaris SPARC set fprs.FEF=0 prior to parking.
5515   // Only for SPARC >= V8PlusA
5516 #if defined(__sparc) && defined(COMPILER2)
5517   if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5518 #endif
5519   int status = os::Solaris::mutex_lock(_mutex);
5520   assert_status(status == 0, status, "mutex_lock");
5521   guarantee(_nParked == 0, "invariant");
5522   ++_nParked;
5523   while (_Event < 0) {
5524     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5525     assert_status(status == 0 || status == EINTR ||
5526                   status == ETIME || status == ETIMEDOUT,
5527                   status, "cond_timedwait");
5528     if (!FilterSpuriousWakeups) break;                // previous semantics
5529     if (status == ETIME || status == ETIMEDOUT) break;
5530     // We consume and ignore EINTR and spurious wakeups.
5531   }
5532   --_nParked;
5533   if (_Event >= 0) ret = OS_OK;
5534   _Event = 0;
5535   status = os::Solaris::mutex_unlock(_mutex);
5536   assert_status(status == 0, status, "mutex_unlock");
5537   // Paranoia to ensure our locked and lock-free paths interact
5538   // correctly with each other.
5539   OrderAccess::fence();
5540   return ret;
5541 }
5542 
5543 void os::PlatformEvent::unpark() {
5544   // Transitions for _Event:
5545   //    0 :=> 1
5546   //    1 :=> 1
5547   //   -1 :=> either 0 or 1; must signal target thread
5548   //          That is, we can safely transition _Event from -1 to either
5549   //          0 or 1.
5550   // See also: "Semaphores in Plan 9" by Mullender & Cox
5551   //
5552   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5553   // that it will take two back-to-back park() calls for the owning
5554   // thread to block. This has the benefit of forcing a spurious return
5555   // from the first park() call after an unpark() call which will help
5556   // shake out uses of park() and unpark() without condition variables.
5557 
5558   if (Atomic::xchg(1, &_Event) >= 0) return;
5559 
5560   // If the thread associated with the event was parked, wake it.
5561   // Wait for the thread associated with the PlatformEvent to vacate.
5562   int status = os::Solaris::mutex_lock(_mutex);
5563   assert_status(status == 0, status, "mutex_lock");
5564   int AnyWaiters = _nParked;
5565   status = os::Solaris::mutex_unlock(_mutex);
5566   assert_status(status == 0, status, "mutex_unlock");
5567   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5568   if (AnyWaiters != 0) {
5569     // We intentionally signal *after* dropping the lock
5570     // to avoid a common class of futile wakeups.
5571     status = os::Solaris::cond_signal(_cond);
5572     assert_status(status == 0, status, "cond_signal");
5573   }
5574 }
5575 
5576 // JSR166
5577 // -------------------------------------------------------
5578 
5579 // The Solaris and Linux implementations of park/unpark are fairly
5580 // conservative for now, but can be improved. They currently use a
5581 // mutex/condvar pair, plus _counter.
5582 // Park decrements _counter if > 0, else does a condvar wait.  Unpark
5583 // sets _counter to 1 and signals the condvar.  Only one thread ever waits
5584 // on the condvar. Contention seen when trying to park implies that someone
5585 // is unparking you, so don't wait. And spurious returns are fine, so there
5586 // is no need to track notifications.
5587 
5588 #define MAX_SECS 100000000
5589 
5590 // This code is common to Linux and Solaris and will be moved to a
5591 // common place in Dolphin.
5592 //
5593 // The passed in time value is either a relative time in nanoseconds
5594 // or an absolute time in milliseconds. Either way it has to be unpacked
5595 // into suitable seconds and nanoseconds components and stored in the
5596 // given timespec structure.
5597 // Given time is a 64-bit value and the time_t used in the timespec is only
5598 // a signed-32-bit value (except on 64-bit Linux), we have to watch for
5599 // overflow if times far in the future are given. Further, on Solaris versions
5600 // prior to 10 there is a restriction (see cond_timedwait) that the specified
5601 // number of seconds, in abstime, must be less than current_time + 100,000,000.
5602 // As it will be 28 years before "now + 100000000" will overflow we can
5603 // ignore overflow and just impose a hard-limit on seconds using the value
5604 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
5605 // years from "now".
5606 //
5607 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5608   assert(time > 0, "convertTime");
5609 
5610   struct timeval now;
5611   int status = gettimeofday(&now, NULL);
5612   assert(status == 0, "gettimeofday");
5613 
5614   time_t max_secs = now.tv_sec + MAX_SECS;
5615 
5616   if (isAbsolute) {
5617     jlong secs = time / 1000;
5618     if (secs > max_secs) {
5619       absTime->tv_sec = max_secs;
5620     } else {
5621       absTime->tv_sec = secs;
5622     }
5623     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5624   } else {
5625     jlong secs = time / NANOSECS_PER_SEC;
5626     if (secs >= MAX_SECS) {
5627       absTime->tv_sec = max_secs;
5628       absTime->tv_nsec = 0;
5629     } else {
5630       absTime->tv_sec = now.tv_sec + secs;
5631       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5632       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5633         absTime->tv_nsec -= NANOSECS_PER_SEC;
5634         ++absTime->tv_sec; // note: this must be <= max_secs
5635       }
5636     }
5637   }
5638   assert(absTime->tv_sec >= 0, "tv_sec < 0");
5639   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5640   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5641   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5642 }
5643 
5644 void Parker::park(bool isAbsolute, jlong time) {
5645   // Ideally we'd do something useful while spinning, such
5646   // as calling unpackTime().
5647 
5648   // Optional fast-path check:
5649   // Return immediately if a permit is available.
5650   // We depend on Atomic::xchg() having full barrier semantics
5651   // since we are doing a lock-free update to _counter.
5652   if (Atomic::xchg(0, &_counter) > 0) return;
5653 
5654   // Optional fast-exit: Check interrupt before trying to wait
5655   Thread* thread = Thread::current();
5656   assert(thread->is_Java_thread(), "Must be JavaThread");
5657   JavaThread *jt = (JavaThread *)thread;
5658   if (Thread::is_interrupted(thread, false)) {
5659     return;
5660   }
5661 
5662   // First, demultiplex/decode time arguments
5663   timespec absTime;
5664   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
5665     return;
5666   }
5667   if (time > 0) {
5668     // Warning: this code might be exposed to the old Solaris time
5669     // round-down bugs.  Grep "ROUNDINGFIX" for details.
5670     unpackTime(&absTime, isAbsolute, time);
5671   }
5672 
5673   // Enter safepoint region
5674   // Beware of deadlocks such as 6317397.
5675   // The per-thread Parker:: _mutex is a classic leaf-lock.
5676   // In particular a thread must never block on the Threads_lock while
5677   // holding the Parker:: mutex.  If safepoints are pending, both the
5678   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
5679   ThreadBlockInVM tbivm(jt);
5680 
5681   // Don't wait if we cannot get the lock, since interference arises from
5682   // unblocking.  Also, check for interrupt before trying to wait.
5683   if (Thread::is_interrupted(thread, false) ||
5684       os::Solaris::mutex_trylock(_mutex) != 0) {
5685     return;
5686   }
5687 
5688   int status;
5689 
5690   if (_counter > 0)  { // no wait needed
5691     _counter = 0;
5692     status = os::Solaris::mutex_unlock(_mutex);
5693     assert(status == 0, "invariant");
5694     // Paranoia to ensure our locked and lock-free paths interact
5695     // correctly with each other and Java-level accesses.
5696     OrderAccess::fence();
5697     return;
5698   }
5699 
5700 #ifdef ASSERT
5701   // Don't catch signals while blocked; let the running threads have the signals.
5702   // (This allows a debugger to break into the running thread.)
5703   sigset_t oldsigs;
5704   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
5705   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
5706 #endif
5707 
5708   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5709   jt->set_suspend_equivalent();
5710   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5711 
5712   // Do this the hard way by blocking ...
5713   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5714   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5715   // Only for SPARC >= V8PlusA
5716 #if defined(__sparc) && defined(COMPILER2)
5717   if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5718 #endif
5719 
5720   if (time == 0) {
5721     status = os::Solaris::cond_wait(_cond, _mutex);
5722   } else {
5723     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
5724   }
5725   // Note that an untimed cond_wait() can sometimes return ETIME on older
5726   // versions of Solaris.
5727   assert_status(status == 0 || status == EINTR ||
5728                 status == ETIME || status == ETIMEDOUT,
5729                 status, "cond_timedwait");
5730 
5731 #ifdef ASSERT
5732   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
5733 #endif
5734   _counter = 0;
5735   status = os::Solaris::mutex_unlock(_mutex);
5736   assert_status(status == 0, status, "mutex_unlock");
5737   // Paranoia to ensure our locked and lock-free paths interact
5738   // correctly with each other and Java-level accesses.
5739   OrderAccess::fence();
5740 
5741   // If externally suspended while waiting, re-suspend
5742   if (jt->handle_special_suspend_equivalent_condition()) {
5743     jt->java_suspend_self();
5744   }
5745 }
5746 
5747 void Parker::unpark() {
5748   int status = os::Solaris::mutex_lock(_mutex);
5749   assert(status == 0, "invariant");
5750   const int s = _counter;
5751   _counter = 1;
5752   status = os::Solaris::mutex_unlock(_mutex);
5753   assert(status == 0, "invariant");
5754 
5755   if (s < 1) {
5756     status = os::Solaris::cond_signal(_cond);
5757     assert(status == 0, "invariant");
5758   }
5759 }
5760 
5761 extern char** environ;
5762 
5763 // Run the specified command in a separate process. Return its exit value,
5764 // or -1 on failure (e.g. can't fork a new process).
5765 // Unlike system(), this function can be called from a signal handler. It
5766 // doesn't block SIGINT et al.
5767 int os::fork_and_exec(char* cmd) {
5768   char * argv[4];
5769   argv[0] = (char *)"sh";
5770   argv[1] = (char *)"-c";
5771   argv[2] = cmd;
5772   argv[3] = NULL;
5773 
5774   // fork() is async-signal-safe; fork1() is not, so it can't be used in a signal handler
5775   pid_t pid;
5776   Thread* t = ThreadLocalStorage::get_thread_slow();
5777   if (t != NULL && t->is_inside_signal_handler()) {
5778     pid = fork();
5779   } else {
5780     pid = fork1();
5781   }
5782 
5783   if (pid < 0) {
5784     // fork failed
5785     warning("fork failed: %s", strerror(errno));
5786     return -1;
5787 
5788   } else if (pid == 0) {
5789     // child process
5790 
5791     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
5792     execve("/usr/bin/sh", argv, environ);
5793 
5794     // execve failed
5795     _exit(-1);
5796 
5797   } else  {
5798     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
5799     // care about the actual exit code, for now.
5800 
5801     int status;
5802 
5803     // Wait for the child process to exit.  This returns immediately if
5804     // the child has already exited.
5805     while (waitpid(pid, &status, 0) < 0) {
5806       switch (errno) {
5807       case ECHILD: return 0;
5808       case EINTR: break;
5809       default: return -1;
5810       }
5811     }
5812 
5813     if (WIFEXITED(status)) {
5814       // The child exited normally; get its exit code.
5815       return WEXITSTATUS(status);
5816     } else if (WIFSIGNALED(status)) {
5817       // The child exited because of a signal
5818       // The best value to return is 0x80 + signal number,
5819       // because that is what all Unix shells do, and because
5820       // it allows callers to distinguish between process exit and
5821       // process death by signal.
5822       return 0x80 + WTERMSIG(status);
5823     } else {
5824       // Unknown exit code; pass it through
5825       return status;
5826     }
5827   }
5828 }
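
// Illustrative usage sketch (not part of this file): because fork_and_exec()
// may be called from a signal handler, it is suited to running external
// commands during error reporting.  A hypothetical caller could interpret
// the return value as follows:
//
//   char cmd[] = "echo hello";
//   int ret = os::fork_and_exec(cmd);
//   if (ret < 0) {
//     // fork failed or waitpid() failed
//   } else if (ret >= 0x80) {
//     // the child was killed by signal number (ret - 0x80), see above
//   } else {
//     // normal termination; ret is the command's exit status
//   }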
5829 
5830 // is_headless_jre()
5831 //
5832 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
5833 // in order to report whether we are running in a headless JRE.
5834 //
5835 // Since JDK 8, xawt/libmawt.so has been moved into the same directory
5836 // as libawt.so and renamed to libawt_xawt.so.
5837 //
5838 bool os::is_headless_jre() {
5839   struct stat statbuf;
5840   char buf[MAXPATHLEN];
5841   char libmawtpath[MAXPATHLEN];
5842   const char *xawtstr  = "/xawt/libmawt.so";
5843   const char *new_xawtstr = "/libawt_xawt.so";
5844   char *p;
5845 
5846   // Get path to libjvm.so
5847   os::jvm_path(buf, sizeof(buf));
5848 
5849   // Get rid of libjvm.so
5850   p = strrchr(buf, '/');
5851   if (p == NULL) {
5852     return false;
5853   } else {
5854     *p = '\0';
5855   }
5856 
5857   // Get rid of client or server
5858   p = strrchr(buf, '/');
5859   if (p == NULL) {
5860     return false;
5861   } else {
5862     *p = '\0';
5863   }
5864 
5865   // check xawt/libmawt.so
5866   strcpy(libmawtpath, buf);
5867   strcat(libmawtpath, xawtstr);
5868   if (::stat(libmawtpath, &statbuf) == 0) return false;
5869 
5870   // check libawt_xawt.so
5871   strcpy(libmawtpath, buf);
5872   strcat(libmawtpath, new_xawtstr);
5873   if (::stat(libmawtpath, &statbuf) == 0) return false;
5874 
5875   return true;
5876 }
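
// For reference, the logic above turns a typical libjvm.so location such as
//   <jre>/lib/sparcv9/server/libjvm.so
// into <jre>/lib/sparcv9 by stripping the last two path components, and then
// probes for
//   <jre>/lib/sparcv9/xawt/libmawt.so    (pre-JDK 8 layout)
//   <jre>/lib/sparcv9/libawt_xawt.so     (JDK 8 layout)
// The directory names shown are illustrative; only the two strrchr()
// truncations and the two stat() probes above matter.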
5877 
5878 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
5879   size_t res;
5880   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5881          "Assumed _thread_in_native");
5882   RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
5883   return res;
5884 }
5885 
5886 int os::close(int fd) {
5887   return ::close(fd);
5888 }
5889 
5890 int os::socket_close(int fd) {
5891   return ::close(fd);
5892 }
5893 
5894 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5895   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5896          "Assumed _thread_in_native");
5897   RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
5898 }
5899 
5900 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5901   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5902          "Assumed _thread_in_native");
5903   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5904 }
5905 
5906 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5907   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5908 }
5909 
5910 // Both poll() and select() can be interrupted by signals, so we have to be
5911 // prepared to restart the system call with the timeout reduced by the time
5912 // already spent waiting.  The exception is a poll() with timeout == -1, in
5913 // which case we simply repeat with the same "wait forever" value.
5914 
5915 int os::timeout(int fd, long timeout) {
5916   int res;
5917   struct timeval t;
5918   julong prevtime, newtime;
5919   static const char* aNull = 0;
5920   struct pollfd pfd;
5921   pfd.fd = fd;
5922   pfd.events = POLLIN;
5923 
5924   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5925          "Assumed _thread_in_native");
5926 
5927   gettimeofday(&t, &aNull);
5928   prevtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;
5929 
5930   for (;;) {
5931     res = ::poll(&pfd, 1, timeout);
5932     if (res == OS_ERR && errno == EINTR) {
5933       if (timeout != -1) {
5934         gettimeofday(&t, &aNull);
5935         newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;
5936         timeout -= newtime - prevtime;
5937         if (timeout <= 0) {
5938           return OS_OK;
5939         }
5940         prevtime = newtime;
5941       }
5942     } else return res;
5943   }
5944 }
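
// Illustrative sketch (not part of this file) of the restart arithmetic used
// above: if poll() is interrupted by a signal after roughly 300 ms of a
// 1000 ms timeout, the loop retries with the remaining budget:
//
//   timeout  = 1000;                   // caller asked for 1000 ms
//   prevtime = now();                  // taken before the first poll()
//   ... poll() returns OS_ERR with errno == EINTR after ~300 ms ...
//   newtime  = now();                  // newtime - prevtime == ~300
//   timeout -= newtime - prevtime;     // ~700 ms left
//   ... poll() is retried with the reduced timeout ...
//
// With timeout == -1 the subtraction is skipped and poll() simply waits
// forever again; once the budget reaches zero the function returns OS_OK.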
5945 
5946 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
5947   int _result;
5948   _result = ::connect(fd, him, len);
5949 
5950   // On Solaris, when a connect() call is interrupted, the connection
5951   // can be established asynchronously (see 6343810). Subsequent calls
5952   // to connect() must check the errno value which has the semantic
5953   // described below (copied from the connect() man page). Handling
5954   // of asynchronously established connections is required for both
5955   // blocking and non-blocking sockets.
5956   //     EINTR            The  connection  attempt  was   interrupted
5957   //                      before  any data arrived by the delivery of
5958   //                      a signal. The connection, however, will  be
5959   //                      established asynchronously.
5960   //
5961   //     EINPROGRESS      The socket is non-blocking, and the connec-
5962   //                      tion  cannot  be completed immediately.
5963   //
5964   //     EALREADY         The socket is non-blocking,  and a previous
5965   //                      connection  attempt  has  not yet been com-
5966   //                      pleted.
5967   //
5968   //     EISCONN          The socket is already connected.
5969   if (_result == OS_ERR && errno == EINTR) {
5970     // restarting a connect() changes its errno semantics
5971     RESTARTABLE(::connect(fd, him, len), _result);
5972     // undo these changes
5973     if (_result == OS_ERR) {
5974       if (errno == EALREADY) {
5975         errno = EINPROGRESS; // fall through
5976       } else if (errno == EISCONN) {
5977         errno = 0;
5978         return OS_OK;
5979       }
5980     }
5981   }
5982   return _result;
5983 }
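
// Illustrative sketch (not part of this file) of what a caller observes
// after the errno adjustments above:
//
//   int ret = os::connect(fd, (struct sockaddr*)&addr, sizeof(addr));
//   if (ret == OS_OK) {
//     // connected; this includes the EISCONN case seen after an EINTR restart
//   } else if (errno == EINPROGRESS) {
//     // the connection is being established asynchronously; EALREADY is
//     // mapped to EINPROGRESS so callers handle a single "in progress" case
//   } else {
//     // genuine failure; errno describes it
//   }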
5984 
5985 int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
5986   if (fd < 0) {
5987     return OS_ERR;
5988   }
5989   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5990          "Assumed _thread_in_native");
5991   RESTARTABLE_RETURN_INT((int)::accept(fd, him, len));
5992 }
5993 
5994 int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
5995                  sockaddr* from, socklen_t* fromlen) {
5996   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5997          "Assumed _thread_in_native");
5998   RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
5999 }
6000 
6001 int os::sendto(int fd, char* buf, size_t len, uint flags,
6002                struct sockaddr* to, socklen_t tolen) {
6003   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
6004          "Assumed _thread_in_native");
6005   RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
6006 }
6007 
6008 int os::socket_available(int fd, jint *pbytes) {
6009   if (fd < 0) {
6010     return OS_OK;
6011   }
6012   int ret;
6013   RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
6014   // Note: ioctl() can return 0 when successful; JVM_SocketAvailable
6015   // is expected to return 0 on failure and 1 on success to the JDK.
6016   return (ret == OS_ERR) ? 0 : 1;
6017 }
6018 
6019 int os::bind(int fd, struct sockaddr* him, socklen_t len) {
6020   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
6021          "Assumed _thread_in_native");
6022   return ::bind(fd, him, len);
6023 }
6024 
6025 // Get the default path to the core file
6026 // Returns the length of the string
6027 int os::get_core_path(char* buffer, size_t bufferSize) {
6028   const char* p = get_current_directory(buffer, bufferSize);
6029 
6030   if (p == NULL) {
6031     assert(p != NULL, "failed to get current directory");
6032     return 0;
6033   }
6034 
6035   return strlen(buffer);
6036 }
6037 
6038 #ifndef PRODUCT
6039 void TestReserveMemorySpecial_test() {
6040   // No tests available for this platform
6041 }
6042 #endif