1 /*
   2  * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "jvm_solaris.h"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/filemap.hpp"
  37 #include "mutex_solaris.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "os_share_solaris.hpp"
  40 #include "os_solaris.inline.hpp"
  41 #include "prims/jniFastGetField.hpp"
  42 #include "prims/jvm.h"
  43 #include "prims/jvm_misc.hpp"
  44 #include "runtime/arguments.hpp"
  45 #include "runtime/atomic.inline.hpp"
  46 #include "runtime/extendedPC.hpp"
  47 #include "runtime/globals.hpp"
  48 #include "runtime/interfaceSupport.hpp"
  49 #include "runtime/java.hpp"
  50 #include "runtime/javaCalls.hpp"
  51 #include "runtime/mutexLocker.hpp"
  52 #include "runtime/objectMonitor.hpp"
  53 #include "runtime/orderAccess.inline.hpp"
  54 #include "runtime/osThread.hpp"
  55 #include "runtime/perfMemory.hpp"
  56 #include "runtime/sharedRuntime.hpp"
  57 #include "runtime/statSampler.hpp"
  58 #include "runtime/stubRoutines.hpp"
  59 #include "runtime/thread.inline.hpp"
  60 #include "runtime/threadCritical.hpp"
  61 #include "runtime/timer.hpp"
  62 #include "runtime/vm_version.hpp"
  63 #include "services/attachListener.hpp"
  64 #include "services/memTracker.hpp"
  65 #include "services/runtimeService.hpp"
  66 #include "utilities/decoder.hpp"
  67 #include "utilities/defaultStream.hpp"
  68 #include "utilities/events.hpp"
  69 #include "utilities/growableArray.hpp"
  70 #include "utilities/vmError.hpp"
  71 
  72 // put OS-includes here
  73 # include <dlfcn.h>
  74 # include <errno.h>
  75 # include <exception>
  76 # include <link.h>
  77 # include <poll.h>
  78 # include <pthread.h>
  79 # include <pwd.h>
  80 # include <schedctl.h>
  81 # include <setjmp.h>
  82 # include <signal.h>
  83 # include <stdio.h>
  84 # include <alloca.h>
  85 # include <sys/filio.h>
  86 # include <sys/ipc.h>
  87 # include <sys/lwp.h>
  88 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
  89 # include <sys/mman.h>
  90 # include <sys/processor.h>
  91 # include <sys/procset.h>
  92 # include <sys/pset.h>
  93 # include <sys/resource.h>
  94 # include <sys/shm.h>
  95 # include <sys/socket.h>
  96 # include <sys/stat.h>
  97 # include <sys/systeminfo.h>
  98 # include <sys/time.h>
  99 # include <sys/times.h>
 100 # include <sys/types.h>
 101 # include <sys/wait.h>
 102 # include <sys/utsname.h>
 103 # include <thread.h>
 104 # include <unistd.h>
 105 # include <sys/priocntl.h>
 106 # include <sys/rtpriocntl.h>
 107 # include <sys/tspriocntl.h>
 108 # include <sys/iapriocntl.h>
 109 # include <sys/fxpriocntl.h>
 110 # include <sys/loadavg.h>
 111 # include <string.h>
 113 
 114 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
 115 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
 116 
 117 #define MAX_PATH (2 * K)
 118 
 119 // for timer info max values which include all bits
 120 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 121 
 122 
 123 // Here are some madvise() and liblgrp constants from sys/mman.h and
 124 // sys/lgrp_user.h, to allow compiling on older systems whose headers lack them.
 125 
 126 #ifndef MADV_ACCESS_LWP
 127   #define  MADV_ACCESS_LWP   7       /* next LWP to access heavily */
 128 #endif
 129 #ifndef MADV_ACCESS_MANY
 130   #define  MADV_ACCESS_MANY  8       /* many processes to access heavily */
 131 #endif
 132 
 133 #ifndef LGRP_RSRC_CPU
 134   #define LGRP_RSRC_CPU      0       /* CPU resources */
 135 #endif
 136 #ifndef LGRP_RSRC_MEM
 137   #define LGRP_RSRC_MEM      1       /* memory resources */
 138 #endif
 139 
 140 // see thr_setprio(3T) for the basis of these numbers
 141 #define MinimumPriority 0
 142 #define NormalPriority  64
 143 #define MaximumPriority 127
 144 
 145 // Values for ThreadPriorityPolicy == 1
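     // Index 0 is an unused sentinel (Java priorities start at 1); the final entry maps CriticalPriority.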
 146 int prio_policy1[CriticalPriority+1] = {
 147   -99999,  0, 16,  32,  48,  64,
 148           80, 96, 112, 124, 127, 127 };
 149 
 150 // System parameters used internally
 151 static clock_t clock_tics_per_sec = 100;
 152 
 153 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
 154 static bool enabled_extended_FILE_stdio = false;
 155 
 156 // For diagnostics that should print a message only once; see run_periodic_checks.
 157 static bool check_addr0_done = false;
 158 static sigset_t check_signal_done;
 159 static bool check_signals = true;
 160 
 161 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
 162 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
 163 
 164 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
 165 
 166 
 167 // "default" initializers for missing libc APIs
 168 extern "C" {
 169   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 170   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
 171 
 172   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 173   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
 174 }
 175 
 176 // "default" initializers for pthread-based synchronization
 177 extern "C" {
 178   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 179   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 180 }
 181 
 182 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 183 
 184 // Thread Local Storage
 185 // This is common to all Solaris platforms so it is defined here,
 186 // in this common file.
 187 // The declarations are in the os_cpu threadLS*.hpp files.
 188 //
 189 // Static member initialization for TLS
 190 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
 191 
 192 #ifndef PRODUCT
 193   #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
 194 
 195 int ThreadLocalStorage::_tcacheHit = 0;
 196 int ThreadLocalStorage::_tcacheMiss = 0;
 197 
 198 void ThreadLocalStorage::print_statistics() {
 199   int total = _tcacheMiss+_tcacheHit;
 200   tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
 201                 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
 202 }
 203   #undef _PCT
 204 #endif // PRODUCT
 205 
 206 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
 207                                                         int index) {
 208   Thread *thread = get_thread_slow();
 209   if (thread != NULL) {
 210     address sp = os::current_stack_pointer();
 211     guarantee(thread->_stack_base == NULL ||
 212               (sp <= thread->_stack_base &&
 213               sp >= thread->_stack_base - thread->_stack_size) ||
 214               is_error_reported(),
 215               "sp must be inside of selected thread stack");
 216 
 217     thread->set_self_raw_id(raw_id);  // mark for quick retrieval
 218     _get_thread_cache[index] = thread;
 219   }
 220   return thread;
 221 }
 222 
 223 
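     // A zero-filled block at least as large as a Thread; NO_CACHED_THREAD points at it
     // so an empty cache slot is a distinct, dereferenceable, non-NULL sentinel value.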
 224 static const double all_zero[sizeof(Thread) / sizeof(double) + 1] = {0};
 225 #define NO_CACHED_THREAD ((Thread*)all_zero)
 226 
 227 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
 228 
 229   // Store the new value before updating the cache to prevent a race
 230   // between get_thread_via_cache_slowly() and this store operation.
 231   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
 232 
 233   // Update thread cache with new thread if setting on thread create,
 234   // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
 235   uintptr_t raw = pd_raw_thread_id();
 236   int ix = pd_cache_index(raw);
 237   _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
 238 }
 239 
 240 void ThreadLocalStorage::pd_init() {
 241   for (int i = 0; i < _pd_cache_size; i++) {
 242     _get_thread_cache[i] = NO_CACHED_THREAD;
 243   }
 244 }
 245 
 246 // Invalidate all the caches (happens to be the same as pd_init).
 247 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
 248 
 249 #undef NO_CACHED_THREAD
 250 
 251 // END Thread Local Storage
 252 
 253 static inline size_t adjust_stack_size(address base, size_t size) {
 254   if ((ssize_t)size < 0) {
 255     // 4759953: Compensate for ridiculous stack size.
 256     size = max_intx;
 257   }
 258   if (size > (size_t)base) {
 259     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
 260     size = (size_t)base;
 261   }
 262   return size;
 263 }
 264 
 265 static inline stack_t get_stack_info() {
 266   stack_t st;
 267   int retval = thr_stksegment(&st);
 268   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
 269   assert(retval == 0, "incorrect return value from thr_stksegment");
 270   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
 271   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
 272   return st;
 273 }
 274 
 275 address os::current_stack_base() {
 276   int r = thr_main();
 277   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
 278   bool is_primordial_thread = r;
 279 
 280   // Workaround for 4352906: avoid calling thr_stksegment for the
 281   // primordial thread after the first call (it looks like we trash
 282   // some data, causing later values for ss_sp to be incorrect).
 283   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
 284     stack_t st = get_stack_info();
 285     if (is_primordial_thread) {
 286       // cache initial value of stack base
 287       os::Solaris::_main_stack_base = (address)st.ss_sp;
 288     }
 289     return (address)st.ss_sp;
 290   } else {
 291     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
 292     return os::Solaris::_main_stack_base;
 293   }
 294 }
 295 
 296 size_t os::current_stack_size() {
 297   size_t size;
 298 
 299   int r = thr_main();
 300   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
 301   if (!r) {
 302     size = get_stack_info().ss_size;
 303   } else {
 304     struct rlimit limits;
 305     getrlimit(RLIMIT_STACK, &limits);
 306     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
 307   }
 308   // base may not be page aligned
 309   address base = current_stack_base();
 310   address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
 311   return (size_t)(base - bottom);
 312 }
 313 
 314 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
 315   return localtime_r(clock, res);
 316 }
 317 
 318 void os::Solaris::try_enable_extended_io() {
 319   typedef int (*enable_extended_FILE_stdio_t)(int, int);
 320 
 321   if (!UseExtendedFileIO) {
 322     return;
 323   }
 324 
 325   enable_extended_FILE_stdio_t enabler =
 326     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
 327                                          "enable_extended_FILE_stdio");
 328   if (enabler) {
 329     enabler(-1, -1);
 330   }
 331 }
 332 
 333 static int _processors_online = 0;
 334 
 335 jint os::Solaris::_os_thread_limit = 0;
 336 volatile jint os::Solaris::_os_thread_count = 0;
 337 
 338 julong os::available_memory() {
 339   return Solaris::available_memory();
 340 }
 341 
 342 julong os::Solaris::available_memory() {
 343   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
 344 }
 345 
 346 julong os::Solaris::_physical_memory = 0;
 347 
 348 julong os::physical_memory() {
 349   return Solaris::physical_memory();
 350 }
 351 
 352 static hrtime_t first_hrtime = 0;
 353 static const hrtime_t hrtime_hz = 1000*1000*1000;
 354 static volatile hrtime_t max_hrtime = 0;
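     // first_hrtime is the base for elapsedTime()/elapsed_counter(); max_hrtime tracks
     // the largest value returned by getTimeNanos() so reported time never goes backwards.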
 355 
 356 
 357 void os::Solaris::initialize_system_info() {
 358   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
 359   _processors_online = sysconf(_SC_NPROCESSORS_ONLN);
 360   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) *
 361                                      (julong)sysconf(_SC_PAGESIZE);
 362 }
 363 
 364 int os::active_processor_count() {
 365   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 366   pid_t pid = getpid();
 367   psetid_t pset = PS_NONE;
 368   // Are we running in a processor set or is there any processor set around?
 369   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
 370     uint_t pset_cpus;
 371     // Query the number of cpus available to us.
 372     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
 373       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
 374       _processors_online = pset_cpus;
 375       return pset_cpus;
 376     }
 377   }
 378   // Otherwise return number of online cpus
 379   return online_cpus;
 380 }
 381 
 382 static bool find_processors_in_pset(psetid_t        pset,
 383                                     processorid_t** id_array,
 384                                     uint_t*         id_length) {
 385   bool result = false;
 386   // Find the number of processors in the processor set.
 387   if (pset_info(pset, NULL, id_length, NULL) == 0) {
 388     // Make up an array to hold their ids.
 389     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 390     // Fill in the array with their processor ids.
 391     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
 392       result = true;
 393     }
 394   }
 395   return result;
 396 }
 397 
 398 // Callers of find_processors_online() must tolerate imprecise results --
 399 // the system configuration can change asynchronously because of DR
 400 // or explicit psradm operations.
 401 //
 402 // We also need to take care that the loop (below) terminates as the
 403 // number of processors online can change between the _SC_NPROCESSORS_ONLN
 404 // request and the loop that builds the list of processor ids.   Unfortunately
 405 // there's no reliable way to determine the maximum valid processor id,
 406 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
 407 // man pages, which claim the processor id set is "sparse, but
 408 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
 409 // exit the loop.
 410 //
 411 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
 412 // not available on S8.0.
 413 
 414 static bool find_processors_online(processorid_t** id_array,
 415                                    uint*           id_length) {
 416   const processorid_t MAX_PROCESSOR_ID = 100000;
 417   // Find the number of processors online.
 418   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
 419   // Make up an array to hold their ids.
 420   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 421   // Processors need not be numbered consecutively.
 422   long found = 0;
 423   processorid_t next = 0;
 424   while (found < *id_length && next < MAX_PROCESSOR_ID) {
 425     processor_info_t info;
 426     if (processor_info(next, &info) == 0) {
 427       // NB, PI_NOINTR processors are effectively online ...
 428       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
 429         (*id_array)[found] = next;
 430         found += 1;
 431       }
 432     }
 433     next += 1;
 434   }
 435   if (found < *id_length) {
 436     // The loop above didn't identify the expected number of processors.
 437     // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
 438     // and re-running the loop, above, but there's no guarantee of progress
 439     // if the system configuration is in flux.  Instead, we just return what
 440     // we've got.  Note that in the worst case find_processors_online() could
 441     // return an empty set.  (As a fall-back in the case of the empty set we
 442     // could just return the ID of the current processor).
 443     *id_length = found;
 444   }
 445 
 446   return true;
 447 }
 448 
 449 static bool assign_distribution(processorid_t* id_array,
 450                                 uint           id_length,
 451                                 uint*          distribution,
 452                                 uint           distribution_length) {
 453   // We assume we can assign processorid_t's to uint's.
 454   assert(sizeof(processorid_t) == sizeof(uint),
 455          "can't convert processorid_t to uint");
 456   // Quick check to see if we won't succeed.
 457   if (id_length < distribution_length) {
 458     return false;
 459   }
 460   // Assign processor ids to the distribution.
 461   // Try to shuffle processors to distribute work across boards,
 462   // assuming 4 processors per board.
 463   const uint processors_per_board = ProcessDistributionStride;
 464   // Find the maximum processor id.
 465   processorid_t max_id = 0;
 466   for (uint m = 0; m < id_length; m += 1) {
 467     max_id = MAX2(max_id, id_array[m]);
 468   }
 469   // The next id, to limit loops.
 470   const processorid_t limit_id = max_id + 1;
 471   // Make up markers for available processors.
 472   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
 473   for (uint c = 0; c < limit_id; c += 1) {
 474     available_id[c] = false;
 475   }
 476   for (uint a = 0; a < id_length; a += 1) {
 477     available_id[id_array[a]] = true;
 478   }
 479   // Step by "boards", then by "slot", copying to "assigned".
 480   // NEEDS_CLEANUP: The assignment of processors should be stateful,
 481   //                remembering which processors have been assigned by
 482   //                previous calls, etc., so as to distribute several
 483   //                independent calls of this method.  It would also be
 484   //                nice to have an API that lets us ask how many
 485   //                processes are bound to a processor, but we don't
 486   //                have that, either.
 487   //                In the short term, "board" is static so that
 488   //                subsequent distributions don't all start at board 0.
 489   static uint board = 0;
 490   uint assigned = 0;
 491   // Until we've found enough processors ....
 492   while (assigned < distribution_length) {
 493     // ... find the next available processor in the board.
 494     for (uint slot = 0; slot < processors_per_board; slot += 1) {
 495       uint try_id = board * processors_per_board + slot;
 496       if ((try_id < limit_id) && (available_id[try_id] == true)) {
 497         distribution[assigned] = try_id;
 498         available_id[try_id] = false;
 499         assigned += 1;
 500         break;
 501       }
 502     }
 503     board += 1;
 504     if (board * processors_per_board + 0 >= limit_id) {
 505       board = 0;
 506     }
 507   }
 508   if (available_id != NULL) {
 509     FREE_C_HEAP_ARRAY(bool, available_id);
 510   }
 511   return true;
 512 }
 513 
 514 void os::set_native_thread_name(const char *name) {
 515   // Not yet implemented.
 516   return;
 517 }
 518 
 519 bool os::distribute_processes(uint length, uint* distribution) {
 520   bool result = false;
 521   // Find the processor id's of all the available CPUs.
 522   processorid_t* id_array  = NULL;
 523   uint           id_length = 0;
 524   // There are some races between querying information and using it,
 525   // since processor sets can change dynamically.
 526   psetid_t pset = PS_NONE;
 527   // Are we running in a processor set?
 528   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
 529     result = find_processors_in_pset(pset, &id_array, &id_length);
 530   } else {
 531     result = find_processors_online(&id_array, &id_length);
 532   }
 533   if (result == true) {
 534     if (id_length >= length) {
 535       result = assign_distribution(id_array, id_length, distribution, length);
 536     } else {
 537       result = false;
 538     }
 539   }
 540   if (id_array != NULL) {
 541     FREE_C_HEAP_ARRAY(processorid_t, id_array);
 542   }
 543   return result;
 544 }
 545 
 546 bool os::bind_to_processor(uint processor_id) {
 547   // We assume that a processorid_t can be stored in a uint.
 548   assert(sizeof(uint) == sizeof(processorid_t),
 549          "can't convert uint to processorid_t");
 550   int bind_result =
 551     processor_bind(P_LWPID,                       // bind LWP.
 552                    P_MYID,                        // bind current LWP.
 553                    (processorid_t) processor_id,  // id.
 554                    NULL);                         // don't return old binding.
 555   return (bind_result == 0);
 556 }
 557 
 558 bool os::getenv(const char* name, char* buffer, int len) {
 559   char* val = ::getenv(name);
 560   if (val == NULL || strlen(val) + 1 > len) {
 561     if (len > 0) buffer[0] = 0; // return a null string
 562     return false;
 563   }
 564   strcpy(buffer, val);
 565   return true;
 566 }
 567 
 568 
 569 // Return true if the process has special privileges (e.g. it is running setuid/setgid).
 570 
 571 bool os::have_special_privileges() {
 572   static bool init = false;
 573   static bool privileges = false;
 574   if (!init) {
 575     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 576     init = true;
 577   }
 578   return privileges;
 579 }
 580 
 581 
 582 void os::init_system_properties_values() {
 583   // The next steps are taken in the product version:
 584   //
 585   // Obtain the JAVA_HOME value from the location of libjvm.so.
 586   // This library should be located at:
 587   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
 588   //
 589   // If "/jre/lib/" appears at the right place in the path, then we
 590   // assume libjvm.so is installed in a JDK and we use this path.
 591   //
 592   // Otherwise exit with message: "Could not create the Java virtual machine."
 593   //
 594   // The following extra steps are taken in the debugging version:
 595   //
 596   // If "/jre/lib/" does NOT appear at the right place in the path
 597   // instead of exit check for $JAVA_HOME environment variable.
 598   //
 599   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
 600   // then we append a fake suffix "hotspot/libjvm.so" to this path so
 601   // it looks like libjvm.so is installed there
 602   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
 603   //
 604   // Otherwise exit.
 605   //
 606   // Important note: if the location of libjvm.so changes this
 607   // code needs to be changed accordingly.
 608 
 609 // Base path of extensions installed on the system.
 610 #define SYS_EXT_DIR     "/usr/jdk/packages"
 611 #define EXTENSIONS_DIR  "/lib/ext"
 612 
 613   char cpu_arch[12];
 614   // Buffer that fits several sprintfs.
 615   // Note that the space for the colon and the trailing null are provided
 616   // by the nulls included by the sizeof operator.
 617   const size_t bufsize =
 618     MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
 619          sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(cpu_arch), // invariant ld_library_path (cpu_arch is filled in later)
 620          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir
 621   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 622 
 623   // sysclasspath, java_home, dll_dir
 624   {
 625     char *pslash;
 626     os::jvm_path(buf, bufsize);
 627 
 628     // Found the full path to libjvm.so.
 629     // Now cut the path to <java_home>/jre if we can.
 630     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 631     pslash = strrchr(buf, '/');
 632     if (pslash != NULL) {
 633       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 634     }
 635     Arguments::set_dll_dir(buf);
 636 
 637     if (pslash != NULL) {
 638       pslash = strrchr(buf, '/');
 639       if (pslash != NULL) {
 640         *pslash = '\0';          // Get rid of /<arch>.
 641         pslash = strrchr(buf, '/');
 642         if (pslash != NULL) {
 643           *pslash = '\0';        // Get rid of /lib.
 644         }
 645       }
 646     }
 647     Arguments::set_java_home(buf);
 648     set_boot_path('/', ':');
 649   }
 650 
 651   // Where to look for native libraries.
 652   {
 653     // Use dlinfo() to determine the correct java.library.path.
 654     //
 655     // If we're launched by the Java launcher, and the user
 656     // does not set java.library.path explicitly on the commandline,
 657     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
 658     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
 659     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
 660     // /usr/lib), which is exactly what we want.
 661     //
 662     // If the user does set java.library.path, it completely
 663     // overwrites this setting, and always has.
 664     //
 665     // If we're not launched by the Java launcher, we may
 666     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
 667     // settings.  Again, dlinfo does exactly what we want.
 668 
 669     Dl_serinfo     info_sz, *info = &info_sz;
 670     Dl_serpath     *path;
 671     char           *library_path;
 672     char           *common_path = buf;
 673 
 674     // Determine search path count and required buffer size.
 675     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
 676       FREE_C_HEAP_ARRAY(char, buf);
 677       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
 678     }
 679 
 680     // Allocate new buffer and initialize.
 681     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
 682     info->dls_size = info_sz.dls_size;
 683     info->dls_cnt = info_sz.dls_cnt;
 684 
 685     // Obtain search path information.
 686     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
 687       FREE_C_HEAP_ARRAY(char, buf);
 688       FREE_C_HEAP_ARRAY(char, info);
 689       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
 690     }
 691 
 692     path = &info->dls_serpath[0];
 693 
 694     // Note: Due to a legacy implementation, most of the library path
 695     // is set in the launcher. This was to accommodate linking restrictions
 696     // on legacy Solaris implementations (which are no longer supported).
 697     // Eventually, all the library path setting will be done here.
 698     //
 699     // However, to prevent the proliferation of improperly built native
 700     // libraries, the new path component /usr/jdk/packages is added here.
 701 
 702     // Determine the actual CPU architecture.
 703     sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
 704 #ifdef _LP64
 705     // If we are a 64-bit vm, perform the following translations:
 706     //   sparc   -> sparcv9
 707     //   i386    -> amd64
 708     if (strcmp(cpu_arch, "sparc") == 0) {
 709       strcat(cpu_arch, "v9");
 710     } else if (strcmp(cpu_arch, "i386") == 0) {
 711       strcpy(cpu_arch, "amd64");
 712     }
 713 #endif
 714 
 715     // Construct the invariant part of ld_library_path.
 716     sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
 717 
 718     // Struct size is more than sufficient for the path components obtained
 719     // through the dlinfo() call, so only add additional space for the path
 720     // components explicitly added here.
 721     size_t library_path_size = info->dls_size + strlen(common_path);
 722     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
 723     library_path[0] = '\0';
 724 
 725     // Construct the desired Java library path from the linker's library
 726     // search path.
 727     //
 728     // For compatibility, it is optimal that we insert the additional path
 729     // components specific to the Java VM after those components specified
 730     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
 731     // infrastructure.
 732     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
 733       strcpy(library_path, common_path);
 734     } else {
 735       int inserted = 0;
 736       int i;
 737       for (i = 0; i < info->dls_cnt; i++, path++) {
 738         uint_t flags = path->dls_flags & LA_SER_MASK;
 739         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
 740           strcat(library_path, common_path);
 741           strcat(library_path, os::path_separator());
 742           inserted = 1;
 743         }
 744         strcat(library_path, path->dls_name);
 745         strcat(library_path, os::path_separator());
 746       }
 747       // Eliminate trailing path separator.
 748       library_path[strlen(library_path)-1] = '\0';
 749     }
 750 
 751     // happens before argument parsing - can't use a trace flag
 752     // tty->print_raw("init_system_properties_values: native lib path: ");
 753     // tty->print_raw_cr(library_path);
 754 
 755     // Callee copies into its own buffer.
 756     Arguments::set_library_path(library_path);
 757 
 758     FREE_C_HEAP_ARRAY(char, library_path);
 759     FREE_C_HEAP_ARRAY(char, info);
 760   }
 761 
 762   // Extensions directories.
 763   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
 764   Arguments::set_ext_dirs(buf);
 765 
 766   FREE_C_HEAP_ARRAY(char, buf);
 767 
 768 #undef SYS_EXT_DIR
 769 #undef EXTENSIONS_DIR
 770 }
 771 
 772 void os::breakpoint() {
 773   BREAKPOINT;
 774 }
 775 
 776 bool os::obsolete_option(const JavaVMOption *option) {
 777   if (!strncmp(option->optionString, "-Xt", 3)) {
 778     return true;
 779   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
 780     return true;
 781   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
 782     return true;
 783   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
 784     return true;
 785   }
 786   return false;
 787 }
 788 
 789 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
 790   address  stackStart  = (address)thread->stack_base();
 791   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
 792   if (sp < stackStart && sp >= stackEnd) return true;
 793   return false;
 794 }
 795 
 796 extern "C" void breakpoint() {
 797   // use debugger to set breakpoint here
 798 }
 799 
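     // Thread id of the primordial thread (presumably recorded during VM startup);
     // used by os::create_main_thread() below.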
 800 static thread_t main_thread;
 801 
 802 // Thread start routine for all new Java threads
 803 extern "C" void* java_start(void* thread_addr) {
 804   // Try to randomize the cache line index of hot stack frames.
 805   // This helps when threads of the same stack traces evict each other's
 806   // cache lines. The threads can be either from the same JVM instance, or
 807   // from different JVM instances. The benefit is especially true for
 808   // processors with hyperthreading technology.
 809   static int counter = 0;
 810   int pid = os::current_process_id();
 811   alloca(((pid ^ counter++) & 7) * 128);
 812 
 814   Thread* thread = (Thread*)thread_addr;
 815   OSThread* osthr = thread->osthread();
 816 
 817   osthr->set_lwp_id(_lwp_self());  // Store lwp in case we are bound
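       // schedctl_init() returns a per-LWP scheduling control block (see schedctl_init(3C)).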
 818   thread->_schedctl = (void *) schedctl_init();
 819 
 820   if (UseNUMA) {
 821     int lgrp_id = os::numa_get_group_id();
 822     if (lgrp_id != -1) {
 823       thread->set_lgrp_id(lgrp_id);
 824     }
 825   }
 826 
 827   // If the creator called set priority before we started,
 828   // we need to call set_native_priority now that we have an lwp.
 829   // We used to get the priority from thr_getprio (we called
 830   // thr_setprio way back in create_thread) and pass it to
 831   // set_native_priority, but Solaris scales the priority
 832   // in java_to_os_priority, so when we read it back here,
 833   // we pass trash to set_native_priority instead of what's
 834   // in java_to_os_priority. So we save the native priority
 835   // in the osThread and recall it here.
 836 
 837   if (osthr->thread_id() != -1) {
 838     if (UseThreadPriorities) {
 839       int prio = osthr->native_priority();
 840       if (ThreadPriorityVerbose) {
 841         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
 842                       INTPTR_FORMAT ", setting priority: %d\n",
 843                       osthr->thread_id(), osthr->lwp_id(), prio);
 844       }
 845       os::set_native_priority(thread, prio);
 846     }
 847   } else if (ThreadPriorityVerbose) {
 848     warning("Can't set priority in _start routine, thread id hasn't been set\n");
 849   }
 850 
 851   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 852 
 853   // initialize signal mask for this thread
 854   os::Solaris::hotspot_sigmask(thread);
 855 
 856   thread->run();
 857 
 858   // One less thread is executing
 859   // When the VMThread gets here, the main thread may have already exited
 860   // which frees the CodeHeap containing the Atomic::dec code
 861   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 862     Atomic::dec(&os::Solaris::_os_thread_count);
 863   }
 864 
 865   if (UseDetachedThreads) {
 866     thr_exit(NULL);
 867     ShouldNotReachHere();
 868   }
 869   return NULL;
 870 }
 871 
 872 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
 873   // Allocate the OSThread object
 874   OSThread* osthread = new OSThread(NULL, NULL);
 875   if (osthread == NULL) return NULL;
 876 
 877   // Store info on the Solaris thread into the OSThread
 878   osthread->set_thread_id(thread_id);
 879   osthread->set_lwp_id(_lwp_self());
 880   thread->_schedctl = (void *) schedctl_init();
 881 
 882   if (UseNUMA) {
 883     int lgrp_id = os::numa_get_group_id();
 884     if (lgrp_id != -1) {
 885       thread->set_lgrp_id(lgrp_id);
 886     }
 887   }
 888 
 889   if (ThreadPriorityVerbose) {
 890     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
 891                   osthread->thread_id(), osthread->lwp_id());
 892   }
 893 
 894   // Initial thread state is INITIALIZED, not SUSPENDED
 895   osthread->set_state(INITIALIZED);
 896 
 897   return osthread;
 898 }
 899 
 900 void os::Solaris::hotspot_sigmask(Thread* thread) {
 901   // Save caller's signal mask
 902   sigset_t sigmask;
 903   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
 904   OSThread *osthread = thread->osthread();
 905   osthread->set_caller_sigmask(sigmask);
 906 
 907   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
 908   if (!ReduceSignalUsage) {
 909     if (thread->is_VM_thread()) {
 910       // Only the VM thread handles BREAK_SIGNAL ...
 911       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
 912     } else {
 913       // ... all other threads block BREAK_SIGNAL
 914       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
 915       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
 916     }
 917   }
 918 }
 919 
 920 bool os::create_attached_thread(JavaThread* thread) {
 921 #ifdef ASSERT
 922   thread->verify_not_published();
 923 #endif
 924   OSThread* osthread = create_os_thread(thread, thr_self());
 925   if (osthread == NULL) {
 926     return false;
 927   }
 928 
 929   // Initial thread state is RUNNABLE
 930   osthread->set_state(RUNNABLE);
 931   thread->set_osthread(osthread);
 932 
 933   // initialize signal mask for this thread
 934   // and save the caller's signal mask
 935   os::Solaris::hotspot_sigmask(thread);
 936 
 937   return true;
 938 }
 939 
 940 bool os::create_main_thread(JavaThread* thread) {
 941 #ifdef ASSERT
 942   thread->verify_not_published();
 943 #endif
 944   if (_starting_thread == NULL) {
 945     _starting_thread = create_os_thread(thread, main_thread);
 946     if (_starting_thread == NULL) {
 947       return false;
 948     }
 949   }
 950 
 951   // The primordial thread is runnable from the start
 952   _starting_thread->set_state(RUNNABLE);
 953 
 954   thread->set_osthread(_starting_thread);
 955 
 956   // initialize signal mask for this thread
 957   // and save the caller's signal mask
 958   os::Solaris::hotspot_sigmask(thread);
 959 
 960   return true;
 961 }
 962 
 963 
 964 bool os::create_thread(Thread* thread, ThreadType thr_type,
 965                        size_t stack_size) {
 966   // Allocate the OSThread object
 967   OSThread* osthread = new OSThread(NULL, NULL);
 968   if (osthread == NULL) {
 969     return false;
 970   }
 971 
 972   if (ThreadPriorityVerbose) {
 973     char *thrtyp;
 974     switch (thr_type) {
 975     case vm_thread:
 976       thrtyp = (char *)"vm";
 977       break;
 978     case cgc_thread:
 979       thrtyp = (char *)"cgc";
 980       break;
 981     case pgc_thread:
 982       thrtyp = (char *)"pgc";
 983       break;
 984     case java_thread:
 985       thrtyp = (char *)"java";
 986       break;
 987     case compiler_thread:
 988       thrtyp = (char *)"compiler";
 989       break;
 990     case watcher_thread:
 991       thrtyp = (char *)"watcher";
 992       break;
 993     default:
 994       thrtyp = (char *)"unknown";
 995       break;
 996     }
 997     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
 998   }
 999 
1000   // Calculate stack size if it's not specified by caller.
1001   if (stack_size == 0) {
1002     // The default stack size is 1M (2M for LP64).
1003     stack_size = (BytesPerWord >> 2) * K * K;
1004 
1005     switch (thr_type) {
1006     case os::java_thread:
1007       // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
1008       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
1009       break;
1010     case os::compiler_thread:
1011       if (CompilerThreadStackSize > 0) {
1012         stack_size = (size_t)(CompilerThreadStackSize * K);
1013         break;
1014       } // else fall through:
1015         // use VMThreadStackSize if CompilerThreadStackSize is not defined
1016     case os::vm_thread:
1017     case os::pgc_thread:
1018     case os::cgc_thread:
1019     case os::watcher_thread:
1020       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1021       break;
1022     }
1023   }
1024   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1025 
1026   // Initial state is ALLOCATED but not INITIALIZED
1027   osthread->set_state(ALLOCATED);
1028 
1029   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
1030     // We got lots of threads. Check if we still have some address space left.
1031     // There needs to be some unreserved address space left (at least
1032     // VirtualMemoryBangSize, i.e. 20 MB); we check by trying to reserve that much.
1033     const size_t VirtualMemoryBangSize = 20*K*K;
1034     char* mem = os::reserve_memory(VirtualMemoryBangSize);
1035     if (mem == NULL) {
1036       delete osthread;
1037       return false;
1038     } else {
1039       // Release the memory again
1040       os::release_memory(mem, VirtualMemoryBangSize);
1041     }
1042   }
1043 
1044   // Setup osthread because the child thread may need it.
1045   thread->set_osthread(osthread);
1046 
1047   // Create the Solaris thread
1048   thread_t tid = 0;
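       // THR_SUSPENDED: the new thread stays suspended until os::pd_start_thread() calls thr_continue().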
1049   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
1050   int      status;
1051 
1052   // Mark that we don't have an lwp or thread id yet, in case we
1053   // attempt to set the priority before the thread starts.
1054   osthread->set_lwp_id(-1);
1055   osthread->set_thread_id(-1);
1056 
1057   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1058   if (status != 0) {
1059     if (PrintMiscellaneous && (Verbose || WizardMode)) {
1060       perror("os::create_thread");
1061     }
1062     thread->set_osthread(NULL);
1063     // Need to clean up stuff we've allocated so far
1064     delete osthread;
1065     return false;
1066   }
1067 
1068   Atomic::inc(&os::Solaris::_os_thread_count);
1069 
1070   // Store info on the Solaris thread into the OSThread
1071   osthread->set_thread_id(tid);
1072 
1073   // Remember that we created this thread so we can set priority on it
1074   osthread->set_vm_created();
1075 
1076   // Initial thread state is INITIALIZED, not SUSPENDED
1077   osthread->set_state(INITIALIZED);
1078 
1079   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1080   return true;
1081 }
1082 
1083 // SIGJVM1 and SIGJVM2 are defined only on Solaris 10 and later. Defining them
1084 // here lets builds on earlier Solaris versions use the newly reserved JVM
1085 // signals. With SIGJVM1/SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is
1086 // SIGJVM2, and -XX:+UseAltSigs does nothing since these signals do not conflict.
1087 //
1088 #if !defined(SIGJVM1)
1089   #define SIGJVM1 39
1090   #define SIGJVM2 40
1091 #endif
1092 
1093 debug_only(static bool signal_sets_initialized = false);
1094 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1095 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1096 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1097 
1098 bool os::Solaris::is_sig_ignored(int sig) {
1099   struct sigaction oact;
1100   sigaction(sig, (struct sigaction*)NULL, &oact);
1101   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
1102                                  : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
1103   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
1104     return true;
1105   } else {
1106     return false;
1107   }
1108 }
1109 
1110 // Note: SIGRTMIN is a macro that calls sysconf(), so it detects the system's
1111 // SIGRTMIN value dynamically at runtime rather than at build time.
1112 static bool isJVM1available() {
1113   return SIGJVM1 < SIGRTMIN;
1114 }
1115 
1116 void os::Solaris::signal_sets_init() {
1117   // Should also have an assertion stating we are still single-threaded.
1118   assert(!signal_sets_initialized, "Already initialized");
1119   // Fill in signals that are necessarily unblocked for all threads in
1120   // the VM. Currently, we unblock the following signals:
1121   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1122   //                         by -Xrs (=ReduceSignalUsage));
1123   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1124   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1125   // the dispositions or masks wrt these signals.
1126   // Programs embedding the VM that want to use the above signals for their
1127   // own purposes must, at this time, use the "-Xrs" option to prevent
1128   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1129   // (See bug 4345157, and other related bugs).
1130   // In reality, though, unblocking these signals is really a nop, since
1131   // these signals are not blocked by default.
1132   sigemptyset(&unblocked_sigs);
1133   sigemptyset(&allowdebug_blocked_sigs);
1134   sigaddset(&unblocked_sigs, SIGILL);
1135   sigaddset(&unblocked_sigs, SIGSEGV);
1136   sigaddset(&unblocked_sigs, SIGBUS);
1137   sigaddset(&unblocked_sigs, SIGFPE);
1138 
1139   if (isJVM1available()) {
1140     os::Solaris::set_SIGinterrupt(SIGJVM1);
1141     os::Solaris::set_SIGasync(SIGJVM2);
1142   } else if (UseAltSigs) {
1143     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1144     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1145   } else {
1146     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1147     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1148   }
1149 
1150   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1151   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1152 
1153   if (!ReduceSignalUsage) {
1154     if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1155       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1156       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1157     }
1158     if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1159       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1160       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1161     }
1162     if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1163       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1164       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1165     }
1166   }
1167   // Fill in signals that are blocked by all but the VM thread.
1168   sigemptyset(&vm_sigs);
1169   if (!ReduceSignalUsage) {
1170     sigaddset(&vm_sigs, BREAK_SIGNAL);
1171   }
1172   debug_only(signal_sets_initialized = true);
1173 
1174   // For diagnostics only used in run_periodic_checks
1175   sigemptyset(&check_signal_done);
1176 }
1177 
1178 // These are signals that are unblocked while a thread is running Java.
1179 // (For some reason, they get blocked by default.)
1180 sigset_t* os::Solaris::unblocked_signals() {
1181   assert(signal_sets_initialized, "Not initialized");
1182   return &unblocked_sigs;
1183 }
1184 
1185 // These are the signals that are blocked while a (non-VM) thread is
1186 // running Java. Only the VM thread handles these signals.
1187 sigset_t* os::Solaris::vm_signals() {
1188   assert(signal_sets_initialized, "Not initialized");
1189   return &vm_sigs;
1190 }
1191 
1192 // These are signals that are blocked during cond_wait to allow debugger in
1193 sigset_t* os::Solaris::allowdebug_blocked_signals() {
1194   assert(signal_sets_initialized, "Not initialized");
1195   return &allowdebug_blocked_sigs;
1196 }
1197 
1198 
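     // Installed via std::set_terminate() in os::initialize_thread(); turns an
     // uncaught C++ exception into a HotSpot fatal error report.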
1199 void _handle_uncaught_cxx_exception() {
1200   VMError err("An uncaught C++ exception");
1201   err.report_and_die();
1202 }
1203 
1204 
1205 // First crack at OS-specific initialization, from inside the new thread.
1206 void os::initialize_thread(Thread* thr) {
1207   int r = thr_main();
1208   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
1209   if (r) {
1210     JavaThread* jt = (JavaThread *)thr;
1211     assert(jt != NULL, "Sanity check");
1212     size_t stack_size;
1213     address base = jt->stack_base();
1214     if (Arguments::created_by_java_launcher()) {
1215       // Use 2MB to allow for Solaris 7 64 bit mode.
1216       stack_size = JavaThread::stack_size_at_create() == 0
1217         ? 2048*K : JavaThread::stack_size_at_create();
1218 
1219       // There are rare cases when we may have already used more than
1220       // the basic stack size allotment before this method is invoked.
1221       // Attempt to allow for a normally sized java_stack.
1222       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1223       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1224     } else {
1225       // 6269555: If we were not created by a Java launcher, i.e. if we are
1226       // running embedded in a native application, treat the primordial thread
1227       // as much like a native attached thread as possible.  This means using
1228       // the current stack size from thr_stksegment(), unless it is too large
1229       // to reliably setup guard pages.  A reasonable max size is 8MB.
1230       size_t current_size = current_stack_size();
1231       // This should never happen, but just in case....
1232       if (current_size == 0) current_size = 2 * K * K;
1233       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1234     }
1235     address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
1236     stack_size = (size_t)(base - bottom);
1237 
1238     assert(stack_size > 0, "Stack size calculation problem");
1239 
1240     if (stack_size > jt->stack_size()) {
1241 #ifndef PRODUCT
1242       struct rlimit limits;
1243       getrlimit(RLIMIT_STACK, &limits);
1244       size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1245       assert(size >= jt->stack_size(), "Stack size problem in main thread");
1246 #endif
1247       tty->print_cr("Stack size of %d Kb exceeds current limit of %d Kb.\n"
1248                     "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1249                     "See limit(1) to increase the stack size limit.",
1250                     stack_size / K, jt->stack_size() / K);
1251       vm_exit(1);
1252     }
1253     assert(jt->stack_size() >= stack_size,
1254            "Attempt to map more stack than was allocated");
1255     jt->set_stack_size(stack_size);
1256   }
1257 
1258   // With the T2 libthread (T1 is no longer supported) threads are always bound
1259   // and we use stackbanging in all cases.
1260 
1261   os::Solaris::init_thread_fpu_state();
1262   std::set_terminate(_handle_uncaught_cxx_exception);
1263 }
1264 
1265 
1266 
1267 // Free Solaris resources related to the OSThread
1268 void os::free_thread(OSThread* osthread) {
1269   assert(osthread != NULL, "os::free_thread but osthread not set");
1270 
1271 
1272   // We are told to free resources of the argument thread,
1273   // but we can only really operate on the current thread.
1274   // The main thread must take the VMThread down synchronously
1275   // before the main thread exits and frees up CodeHeap
1276   guarantee((Thread::current()->osthread() == osthread
1277              || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1278   if (Thread::current()->osthread() == osthread) {
1279     // Restore caller's signal mask
1280     sigset_t sigmask = osthread->caller_sigmask();
1281     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1282   }
1283   delete osthread;
1284 }
1285 
1286 void os::pd_start_thread(Thread* thread) {
1287   int status = thr_continue(thread->osthread()->thread_id());
1288   assert_status(status == 0, status, "thr_continue failed");
1289 }
1290 
1291 
1292 intx os::current_thread_id() {
1293   return (intx)thr_self();
1294 }
1295 
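     // If non-zero, the process id (presumably recorded during VM initialization);
     // otherwise current_process_id() falls back to getpid().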
1296 static pid_t _initial_pid = 0;
1297 
1298 int os::current_process_id() {
1299   return (int)(_initial_pid ? _initial_pid : getpid());
1300 }
1301 
1302 int os::allocate_thread_local_storage() {
1303   // %%%       in Win32 this allocates a memory segment pointed to by a
1304   //           register.  Dan Stein can implement a similar feature in
1305   //           Solaris.  Alternatively, the VM can do the same thing
1306   //           explicitly: malloc some storage and keep the pointer in a
1307   //           register (which is part of the thread's context) (or keep it
1308   //           in TLS).
1309   // %%%       In current versions of Solaris, thr_self and TSD can
1310   //           be accessed via short sequences of displaced indirections.
1311   //           The value of thr_self is available as %g7(36).
1312   //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
1313   //           assuming that the current thread already has a value bound to k.
1314   //           It may be worth experimenting with such access patterns,
1315   //           and later having the parameters formally exported from a Solaris
1316   //           interface.  I think, however, that it will be faster to
1317   //           maintain the invariant that %g2 always contains the
1318   //           JavaThread in Java code, and have stubs simply
1319   //           treat %g2 as a caller-save register, preserving it in a %lN.
1320   thread_key_t tk;
1321   if (thr_keycreate(&tk, NULL)) {
1322     fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
1323                   "(%s)", strerror(errno)));
1324   }
1325   return int(tk);
1326 }
1327 
1328 void os::free_thread_local_storage(int index) {
1329   // %%% don't think we need anything here
1330   // if (pthread_key_delete((pthread_key_t) tk)) {
1331   //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
1332   // }
1333 }
1334 
1335 // libthread allocate for tsd_common is a version specific
1336 // small number - point is NO swap space available
1337 #define SMALLINT 32
1338 void os::thread_local_storage_at_put(int index, void* value) {
1339   // %%% this is used only in threadLocalStorage.cpp
1340   if (thr_setspecific((thread_key_t)index, value)) {
1341     if (errno == ENOMEM) {
1342       vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
1343                             "thr_setspecific: out of swap space");
1344     } else {
1345       fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1346                     "(%s)", strerror(errno)));
1347     }
1348   } else {
1349     ThreadLocalStorage::set_thread_in_slot((Thread *) value);
1350   }
1351 }
1352 
1353 // This function could be called before TLS is initialized, for example, when
1354 // VM receives an async signal or when VM causes a fatal error during
1355 // initialization. Return NULL if thr_getspecific() fails.
1356 void* os::thread_local_storage_at(int index) {
1357   // %%% this is used only in threadLocalStorage.cpp
1358   void* r = NULL;
1359   return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1360 }
1361 
1362 
1363 // gethrtime() should be monotonic according to the documentation,
1364 // but some virtualized platforms are known to break this guarantee.
1365 // getTimeNanos() must be guaranteed not to move backwards, so we
1366 // are forced to add a check here.
1367 inline hrtime_t getTimeNanos() {
1368   const hrtime_t now = gethrtime();
1369   const hrtime_t prev = max_hrtime;
1370   if (now <= prev) {
1371     return prev;   // same or retrograde time;
1372   }
1373   const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1374   assert(obsv >= prev, "invariant");   // Monotonicity
1375   // If the CAS succeeded then we're done and return "now".
1376   // If the CAS failed and the observed value "obsv" is >= now then
1377   // we should return "obsv".  If the CAS failed and now > obsv > prev then
1378   // some other thread raced this thread and installed a new value, in which case
1379   // we could either (a) retry the entire operation, (b) retry trying to install now
1380   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1381   // we might discard a higher "now" value in deference to a slightly lower but freshly
1382   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1383   // to (a) or (b) -- and greatly reduces coherence traffic.
1384   // We might also condition (c) on the magnitude of the delta between obsv and now.
1385   // Avoiding excessive CAS operations to hot RW locations is critical.
1386   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1387   return (prev == obsv) ? now : obsv;
1388 }
1389 
1390 // Time since start-up in seconds to a fine granularity.
1391 // Used by VMSelfDestructTimer and the MemProfiler.
1392 double os::elapsedTime() {
1393   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1394 }
1395 
1396 jlong os::elapsed_counter() {
1397   return (jlong)(getTimeNanos() - first_hrtime);
1398 }
1399 
1400 jlong os::elapsed_frequency() {
1401   return hrtime_hz;
1402 }
1403 
1404 // Return the real, user, and system times in seconds from an
1405 // arbitrary fixed point in the past.
1406 bool os::getTimesSecs(double* process_real_time,
1407                       double* process_user_time,
1408                       double* process_system_time) {
1409   struct tms ticks;
1410   clock_t real_ticks = times(&ticks);
1411 
1412   if (real_ticks == (clock_t) (-1)) {
1413     return false;
1414   } else {
1415     double ticks_per_second = (double) clock_tics_per_sec;
1416     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1417     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1418     // For consistency return the real time from getTimeNanos()
1419     // converted to seconds.
1420     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1421 
1422     return true;
1423   }
1424 }
1425 
1426 bool os::supports_vtime() { return true; }
1427 
1428 bool os::enable_vtime() {
1429   int fd = ::open("/proc/self/ctl", O_WRONLY);
1430   if (fd == -1) {
1431     return false;
1432   }
1433 
1434   long cmd[] = { PCSET, PR_MSACCT };
1435   int res = ::write(fd, cmd, sizeof(long) * 2);
1436   ::close(fd);
1437   if (res != sizeof(long) * 2) {
1438     return false;
1439   }
1440   return true;
1441 }
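     // Note: writing { PCSET, PR_MSACCT } to /proc/self/ctl (above) asks the
     // kernel to enable micro-state accounting for this process; gethrvtime(),
     // and therefore os::elapsedVTime() below, depends on that accounting for
     // accurate per-LWP virtual time.  vtime_enabled() checks the matching
     // PR_MSACCT bit in the pstatus_t read from /proc/self/status.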
1442 
1443 bool os::vtime_enabled() {
1444   int fd = ::open("/proc/self/status", O_RDONLY);
1445   if (fd == -1) {
1446     return false;
1447   }
1448 
1449   pstatus_t status;
1450   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1451   ::close(fd);
1452   if (res != sizeof(pstatus_t)) {
1453     return false;
1454   }
1455   return status.pr_flags & PR_MSACCT;
1456 }
1457 
1458 double os::elapsedVTime() {
1459   return (double)gethrvtime() / (double)hrtime_hz;
1460 }
1461 
1462 // Used internally for comparisons only
1463 // getTimeMillis is guaranteed not to move backwards on Solaris
1464 jlong getTimeMillis() {
1465   jlong nanotime = getTimeNanos();
1466   return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1467 }
1468 
1469 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1470 jlong os::javaTimeMillis() {
1471   timeval t;
1472   if (gettimeofday(&t, NULL) == -1) {
1473     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1474   }
1475   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1476 }
1477 
1478 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1479   timeval t;
1480   if (gettimeofday(&t, NULL) == -1) {
1481     fatal(err_msg("os::javaTimeSystemUTC: gettimeofday (%s)", strerror(errno)));
1482   }
1483   seconds = jlong(t.tv_sec);
1484   nanos = jlong(t.tv_usec) * 1000;
1485 }
1486 
1487 
1488 jlong os::javaTimeNanos() {
1489   return (jlong)getTimeNanos();
1490 }
1491 
1492 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1493   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1494   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1495   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1496   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1497 }
1498 
1499 char * os::local_time_string(char *buf, size_t buflen) {
1500   struct tm t;
1501   time_t long_time;
1502   time(&long_time);
1503   localtime_r(&long_time, &t);
1504   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1505                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1506                t.tm_hour, t.tm_min, t.tm_sec);
1507   return buf;
1508 }
1509 
1510 // Note: os::shutdown() might be called very early during initialization, or
1511 // called from signal handler. Before adding something to os::shutdown(), make
1512 // sure it is async-safe and can handle partially initialized VM.
1513 void os::shutdown() {
1514 
1515   // allow PerfMemory to attempt cleanup of any persistent resources
1516   perfMemory_exit();
1517 
1518   // needs to remove object in file system
1519   AttachListener::abort();
1520 
1521   // flush buffered output, finish log files
1522   ostream_abort();
1523 
1524   // Check for abort hook
1525   abort_hook_t abort_hook = Arguments::abort_hook();
1526   if (abort_hook != NULL) {
1527     abort_hook();
1528   }
1529 }
1530 
1531 // Note: os::abort() might be called very early during initialization, or
1532 // called from signal handler. Before adding something to os::abort(), make
1533 // sure it is async-safe and can handle partially initialized VM.
1534 void os::abort(bool dump_core) {
1535   os::shutdown();
1536   if (dump_core) {
1537 #ifndef PRODUCT
1538     fdStream out(defaultStream::output_fd());
1539     out.print_raw("Current thread is ");
1540     char buf[16];
1541     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1542     out.print_raw_cr(buf);
1543     out.print_raw_cr("Dumping core ...");
1544 #endif
1545     ::abort(); // dump core (for debugging)
1546   }
1547 
1548   ::exit(1);
1549 }
1550 
1551 // Die immediately, no exit hook, no abort hook, no cleanup.
1552 void os::die() {
1553   ::abort(); // dump core (for debugging)
1554 }
1555 
1556 // DLL functions
1557 
1558 const char* os::dll_file_extension() { return ".so"; }
1559 
1560 // This must be hard coded because it's the system's temporary
1561 // directory, not the java application's temp directory, a la java.io.tmpdir.
1562 const char* os::get_temp_directory() { return "/tmp"; }
1563 
1564 static bool file_exists(const char* filename) {
1565   struct stat statbuf;
1566   if (filename == NULL || strlen(filename) == 0) {
1567     return false;
1568   }
1569   return os::stat(filename, &statbuf) == 0;
1570 }
1571 
1572 bool os::dll_build_name(char* buffer, size_t buflen,
1573                         const char* pname, const char* fname) {
1574   bool retval = false;
1575   const size_t pnamelen = pname ? strlen(pname) : 0;
1576 
1577   // Return error on buffer overflow.
1578   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1579     return retval;
1580   }
1581 
1582   if (pnamelen == 0) {
1583     snprintf(buffer, buflen, "lib%s.so", fname);
1584     retval = true;
1585   } else if (strchr(pname, *os::path_separator()) != NULL) {
1586     int n;
1587     char** pelements = split_path(pname, &n);
1588     if (pelements == NULL) {
1589       return false;
1590     }
1591     for (int i = 0; i < n; i++) {
1592       // really shouldn't be NULL but what the heck, check can't hurt
1593       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1594         continue; // skip the empty path values
1595       }
1596       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1597       if (file_exists(buffer)) {
1598         retval = true;
1599         break;
1600       }
1601     }
1602     // release the storage
1603     for (int i = 0; i < n; i++) {
1604       if (pelements[i] != NULL) {
1605         FREE_C_HEAP_ARRAY(char, pelements[i]);
1606       }
1607     }
1608     if (pelements != NULL) {
1609       FREE_C_HEAP_ARRAY(char*, pelements);
1610     }
1611   } else {
1612     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1613     retval = true;
1614   }
1615   return retval;
1616 }
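     // Usage sketch with hypothetical arguments: for pname == "/usr/lib:/opt/lib"
     // and fname == "net", the path-separator branch above probes
     // "/usr/lib/libnet.so" and then "/opt/lib/libnet.so" and returns true for
     // the first one that exists; with an empty pname the result is simply
     // "lib<fname>.so".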
1617 
1618 // check if addr is inside libjvm.so
1619 bool os::address_is_in_vm(address addr) {
1620   static address libjvm_base_addr;
1621   Dl_info dlinfo;
1622 
1623   if (libjvm_base_addr == NULL) {
1624     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1625       libjvm_base_addr = (address)dlinfo.dli_fbase;
1626     }
1627     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1628   }
1629 
1630   if (dladdr((void *)addr, &dlinfo) != 0) {
1631     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1632   }
1633 
1634   return false;
1635 }
1636 
1637 typedef int (*dladdr1_func_type)(void *, Dl_info *, void **, int);
1638 static dladdr1_func_type dladdr1_func = NULL;
1639 
1640 bool os::dll_address_to_function_name(address addr, char *buf,
1641                                       int buflen, int * offset) {
1642   // buf is not optional, but offset is optional
1643   assert(buf != NULL, "sanity check");
1644 
1645   Dl_info dlinfo;
1646 
1647   // dladdr1_func was initialized in os::init()
1648   if (dladdr1_func != NULL) {
1649     // yes, we have dladdr1
1650 
1651     // Support for dladdr1 is checked at runtime; it may be
1652     // available even if the vm is built on a machine that does
1653     // not have dladdr1 support.  Make sure there is a value for
1654     // RTLD_DL_SYMENT.
1655 #ifndef RTLD_DL_SYMENT
1656   #define RTLD_DL_SYMENT 1
1657 #endif
1658 #ifdef _LP64
1659     Elf64_Sym * info;
1660 #else
1661     Elf32_Sym * info;
1662 #endif
1663     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1664                      RTLD_DL_SYMENT) != 0) {
1665       // see if we have a matching symbol that covers our address
1666       if (dlinfo.dli_saddr != NULL &&
1667           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1668         if (dlinfo.dli_sname != NULL) {
1669           if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1670             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1671           }
1672           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1673           return true;
1674         }
1675       }
1676       // no matching symbol so try for just file info
1677       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1678         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1679                             buf, buflen, offset, dlinfo.dli_fname)) {
1680           return true;
1681         }
1682       }
1683     }
1684     buf[0] = '\0';
1685     if (offset != NULL) *offset  = -1;
1686     return false;
1687   }
1688 
1689   // no, only dladdr is available
1690   if (dladdr((void *)addr, &dlinfo) != 0) {
1691     // see if we have a matching symbol
1692     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1693       if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1694         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1695       }
1696       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1697       return true;
1698     }
1699     // no matching symbol so try for just file info
1700     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1701       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1702                           buf, buflen, offset, dlinfo.dli_fname)) {
1703         return true;
1704       }
1705     }
1706   }
1707   buf[0] = '\0';
1708   if (offset != NULL) *offset  = -1;
1709   return false;
1710 }
1711 
1712 bool os::dll_address_to_library_name(address addr, char* buf,
1713                                      int buflen, int* offset) {
1714   // buf is not optional, but offset is optional
1715   assert(buf != NULL, "sanity check");
1716 
1717   Dl_info dlinfo;
1718 
1719   if (dladdr((void*)addr, &dlinfo) != 0) {
1720     if (dlinfo.dli_fname != NULL) {
1721       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1722     }
1723     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1724       *offset = addr - (address)dlinfo.dli_fbase;
1725     }
1726     return true;
1727   }
1728 
1729   buf[0] = '\0';
1730   if (offset) *offset = -1;
1731   return false;
1732 }
1733 
1734 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1735   Dl_info dli;
1736   // Sanity check?
1737   if (dladdr(CAST_FROM_FN_PTR(void *, os::get_loaded_modules_info), &dli) == 0 ||
1738       dli.dli_fname == NULL) {
1739     return 1;
1740   }
1741 
1742   void * handle = dlopen(dli.dli_fname, RTLD_LAZY);
1743   if (handle == NULL) {
1744     return 1;
1745   }
1746 
1747   Link_map *map;
1748   dlinfo(handle, RTLD_DI_LINKMAP, &map);
1749   if (map == NULL) {
1750     dlclose(handle);
1751     return 1;
1752   }
1753 
1754   while (map->l_prev != NULL) {
1755     map = map->l_prev;
1756   }
1757 
1758   while (map != NULL) {
1759     // Iterate through all map entries and call callback with fields of interest
1760     if(callback(map->l_name, (address)map->l_addr, (address)0, param)) {
1761       dlclose(handle);
1762       return 1;
1763     }
1764     map = map->l_next;
1765   }
1766 
1767   dlclose(handle);
1768   return 0;
1769 }
1770 
1771 int _print_dll_info_cb(const char * name, address base_address, address top_address, void * param) {
1772   outputStream * out = (outputStream *) param;
1773   out->print_cr(PTR_FORMAT " \t%s", base_address, name);
1774   return 0;
1775 }
1776 
1777 void os::print_dll_info(outputStream * st) {
1778   st->print_cr("Dynamic libraries:"); st->flush();
1779   if (get_loaded_modules_info(_print_dll_info_cb, (void *)st)) {
1780     st->print_cr("Error: Cannot print dynamic libraries.");
1781   }
1782 }
1783 
1784 // Loads the .dll/.so and, in case of error, checks whether the
1785 // .dll/.so was built for the same architecture that HotSpot is
1786 // running on.
1787 
1788 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1789   void * result= ::dlopen(filename, RTLD_LAZY);
1790   if (result != NULL) {
1791     // Successful loading
1792     return result;
1793   }
1794 
1795   Elf32_Ehdr elf_head;
1796 
1797   // Read system error message into ebuf
1798   // It may or may not be overwritten below
1799   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1800   ebuf[ebuflen-1]='\0';
1801   int diag_msg_max_length=ebuflen-strlen(ebuf);
1802   char* diag_msg_buf=ebuf+strlen(ebuf);
1803 
1804   if (diag_msg_max_length==0) {
1805     // No more space in ebuf for additional diagnostics message
1806     return NULL;
1807   }
1808 
1809 
1810   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1811 
1812   if (file_descriptor < 0) {
1813     // Can't open library, report dlerror() message
1814     return NULL;
1815   }
1816 
1817   bool failed_to_read_elf_head=
1818     (sizeof(elf_head)!=
1819      (::read(file_descriptor, &elf_head,sizeof(elf_head))));
1820 
1821   ::close(file_descriptor);
1822   if (failed_to_read_elf_head) {
1823     // file i/o error - report dlerror() msg
1824     return NULL;
1825   }
1826 
1827   typedef struct {
1828     Elf32_Half  code;         // Actual value as defined in elf.h
1829     Elf32_Half  compat_class; // Compatibility class of the arch, from the VM's point of view
1830     char        elf_class;    // 32 or 64 bit
1831     char        endianess;    // MSB or LSB
1832     char*       name;         // String representation
1833   } arch_t;
1834 
1835   static const arch_t arch_array[]={
1836     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1837     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1838     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1839     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1840     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1841     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1842     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1843     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1844     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1845     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1846   };
1847 
1848 #if  (defined IA32)
1849   static  Elf32_Half running_arch_code=EM_386;
1850 #elif   (defined AMD64)
1851   static  Elf32_Half running_arch_code=EM_X86_64;
1852 #elif  (defined IA64)
1853   static  Elf32_Half running_arch_code=EM_IA_64;
1854 #elif  (defined __sparc) && (defined _LP64)
1855   static  Elf32_Half running_arch_code=EM_SPARCV9;
1856 #elif  (defined __sparc) && (!defined _LP64)
1857   static  Elf32_Half running_arch_code=EM_SPARC;
1858 #elif  (defined __powerpc64__)
1859   static  Elf32_Half running_arch_code=EM_PPC64;
1860 #elif  (defined __powerpc__)
1861   static  Elf32_Half running_arch_code=EM_PPC;
1862 #elif (defined ARM)
1863   static  Elf32_Half running_arch_code=EM_ARM;
1864 #else
1865   #error Method os::dll_load requires that one of the following is defined:\
1866        IA32, AMD64, IA64, __sparc, __powerpc__, ARM
1867 #endif
1868 
1869   // Identify compatibility class for VM's architecture and library's architecture
1870   // Obtain string descriptions for architectures
1871 
1872   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1873   int running_arch_index=-1;
1874 
1875   for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) {
1876     if (running_arch_code == arch_array[i].code) {
1877       running_arch_index    = i;
1878     }
1879     if (lib_arch.code == arch_array[i].code) {
1880       lib_arch.compat_class = arch_array[i].compat_class;
1881       lib_arch.name         = arch_array[i].name;
1882     }
1883   }
1884 
1885   assert(running_arch_index != -1,
1886          "Didn't find running architecture code (running_arch_code) in arch_array");
1887   if (running_arch_index == -1) {
1888     // Even though running architecture detection failed
1889     // we may still continue with reporting dlerror() message
1890     return NULL;
1891   }
1892 
1893   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1894     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1895     return NULL;
1896   }
1897 
1898   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1899     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1900     return NULL;
1901   }
1902 
1903   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1904     if (lib_arch.name!=NULL) {
1905       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1906                  " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1907                  lib_arch.name, arch_array[running_arch_index].name);
1908     } else {
1909       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1910                  " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1911                  lib_arch.code,
1912                  arch_array[running_arch_index].name);
1913     }
1914   }
1915 
1916   return NULL;
1917 }
1918 
1919 void* os::dll_lookup(void* handle, const char* name) {
1920   return dlsym(handle, name);
1921 }
1922 
1923 void* os::get_default_process_handle() {
1924   return (void*)::dlopen(NULL, RTLD_LAZY);
1925 }
1926 
1927 int os::stat(const char *path, struct stat *sbuf) {
1928   char pathbuf[MAX_PATH];
1929   if (strlen(path) > MAX_PATH - 1) {
1930     errno = ENAMETOOLONG;
1931     return -1;
1932   }
1933   os::native_path(strcpy(pathbuf, path));
1934   return ::stat(pathbuf, sbuf);
1935 }
1936 
1937 static bool _print_ascii_file(const char* filename, outputStream* st) {
1938   int fd = ::open(filename, O_RDONLY);
1939   if (fd == -1) {
1940     return false;
1941   }
1942 
1943   char buf[32];
1944   int bytes;
1945   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
1946     st->print_raw(buf, bytes);
1947   }
1948 
1949   ::close(fd);
1950 
1951   return true;
1952 }
1953 
1954 void os::print_os_info_brief(outputStream* st) {
1955   os::Solaris::print_distro_info(st);
1956 
1957   os::Posix::print_uname_info(st);
1958 
1959   os::Solaris::print_libversion_info(st);
1960 }
1961 
1962 void os::print_os_info(outputStream* st) {
1963   st->print("OS:");
1964 
1965   os::Solaris::print_distro_info(st);
1966 
1967   os::Posix::print_uname_info(st);
1968 
1969   os::Solaris::print_libversion_info(st);
1970 
1971   os::Posix::print_rlimit_info(st);
1972 
1973   os::Posix::print_load_average(st);
1974 }
1975 
1976 void os::Solaris::print_distro_info(outputStream* st) {
1977   if (!_print_ascii_file("/etc/release", st)) {
1978     st->print("Solaris");
1979   }
1980   st->cr();
1981 }
1982 
1983 void os::Solaris::print_libversion_info(outputStream* st) {
1984   st->print("  (T2 libthread)");
1985   st->cr();
1986 }
1987 
1988 static bool check_addr0(outputStream* st) {
1989   jboolean status = false;
1990   int fd = ::open("/proc/self/map",O_RDONLY);
1991   if (fd >= 0) {
1992     prmap_t p;
1993     while (::read(fd, &p, sizeof(p)) > 0) {
1994       if (p.pr_vaddr == 0x0) {
1995         st->print("Warning: Address: 0x%x, Size: %dK, ", p.pr_vaddr, p.pr_size/1024);
1996         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
1997         st->print("Access:");
1998         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
1999         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
2000         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
2001         st->cr();
2002         status = true;
2003       }
2004     }
2005     ::close(fd);
2006   }
2007   return status;
2008 }
2009 
2010 void os::pd_print_cpu_info(outputStream* st) {
2011   // Nothing to do for now.
2012 }
2013 
2014 void os::print_memory_info(outputStream* st) {
2015   st->print("Memory:");
2016   st->print(" %dk page", os::vm_page_size()>>10);
2017   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
2018   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
2019   st->cr();
2020   (void) check_addr0(st);
2021 }
2022 
2023 void os::print_siginfo(outputStream* st, void* siginfo) {
2024   const siginfo_t* si = (const siginfo_t*)siginfo;
2025 
2026   os::Posix::print_siginfo_brief(st, si);
2027 
2028   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2029       UseSharedSpaces) {
2030     FileMapInfo* mapinfo = FileMapInfo::current_info();
2031     if (mapinfo->is_in_shared_space(si->si_addr)) {
2032       st->print("\n\nError accessing class data sharing archive."   \
2033                 " Mapped file inaccessible during execution, "      \
2034                 " possible disk/network problem.");
2035     }
2036   }
2037   st->cr();
2038 }
2039 
2040 // Moved up from the signal handling group below, because we need them
2041 // here for diagnostic prints.
2042 #define OLDMAXSIGNUM 32
2043 static int Maxsignum = 0;
2044 static int *ourSigFlags = NULL;
2045 
2046 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2047 
2048 int os::Solaris::get_our_sigflags(int sig) {
2049   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2050   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2051   return ourSigFlags[sig];
2052 }
2053 
2054 void os::Solaris::set_our_sigflags(int sig, int flags) {
2055   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2056   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2057   ourSigFlags[sig] = flags;
2058 }
2059 
2060 
2061 static const char* get_signal_handler_name(address handler,
2062                                            char* buf, int buflen) {
2063   int offset;
2064   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2065   if (found) {
2066     // skip directory names
2067     const char *p1, *p2;
2068     p1 = buf;
2069     size_t len = strlen(os::file_separator());
2070     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2071     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2072   } else {
2073     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2074   }
2075   return buf;
2076 }
2077 
2078 static void print_signal_handler(outputStream* st, int sig,
2079                                  char* buf, size_t buflen) {
2080   struct sigaction sa;
2081 
2082   sigaction(sig, NULL, &sa);
2083 
2084   st->print("%s: ", os::exception_name(sig, buf, buflen));
2085 
2086   address handler = (sa.sa_flags & SA_SIGINFO)
2087                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2088                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
2089 
2090   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2091     st->print("SIG_DFL");
2092   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2093     st->print("SIG_IGN");
2094   } else {
2095     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2096   }
2097 
2098   st->print(", sa_mask[0]=");
2099   os::Posix::print_signal_set_short(st, &sa.sa_mask);
2100 
2101   address rh = VMError::get_resetted_sighandler(sig);
2102   // The handler may have been reset by VMError.
2103   if (rh != NULL) {
2104     handler = rh;
2105     sa.sa_flags = VMError::get_resetted_sigflags(sig);
2106   }
2107 
2108   st->print(", sa_flags=");
2109   os::Posix::print_sa_flags(st, sa.sa_flags);
2110 
2111   // Check: is it our handler?
2112   if (handler == CAST_FROM_FN_PTR(address, signalHandler) ||
2113       handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
2114     // It is our signal handler
2115     // check for flags
2116     if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
2117       st->print(
2118                 ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
2119                 os::Solaris::get_our_sigflags(sig));
2120     }
2121   }
2122   st->cr();
2123 }
2124 
2125 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2126   st->print_cr("Signal Handlers:");
2127   print_signal_handler(st, SIGSEGV, buf, buflen);
2128   print_signal_handler(st, SIGBUS , buf, buflen);
2129   print_signal_handler(st, SIGFPE , buf, buflen);
2130   print_signal_handler(st, SIGPIPE, buf, buflen);
2131   print_signal_handler(st, SIGXFSZ, buf, buflen);
2132   print_signal_handler(st, SIGILL , buf, buflen);
2133   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2134   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2135   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2136   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2137   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2138   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2139   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2140   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2141 }
2142 
2143 static char saved_jvm_path[MAXPATHLEN] = { 0 };
2144 
2145 // Find the full path to the current module, libjvm.so
2146 void os::jvm_path(char *buf, jint buflen) {
2147   // Error checking.
2148   if (buflen < MAXPATHLEN) {
2149     assert(false, "must use a large-enough buffer");
2150     buf[0] = '\0';
2151     return;
2152   }
2153   // Lazy resolve the path to current module.
2154   if (saved_jvm_path[0] != 0) {
2155     strcpy(buf, saved_jvm_path);
2156     return;
2157   }
2158 
2159   Dl_info dlinfo;
2160   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2161   assert(ret != 0, "cannot locate libjvm");
2162   if (ret != 0 && dlinfo.dli_fname != NULL) {
2163     realpath((char *)dlinfo.dli_fname, buf);
2164   } else {
2165     buf[0] = '\0';
2166     return;
2167   }
2168 
2169   if (Arguments::sun_java_launcher_is_altjvm()) {
2170     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
2171     // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
2172     // If "/jre/lib/" appears at the right place in the string, then
2173     // assume we are installed in a JDK and we're done.  Otherwise, check
2174     // for a JAVA_HOME environment variable and fix up the path so it
2175     // looks like libjvm.so is installed there (append a fake suffix
2176     // hotspot/libjvm.so).
2177     const char *p = buf + strlen(buf) - 1;
2178     for (int count = 0; p > buf && count < 5; ++count) {
2179       for (--p; p > buf && *p != '/'; --p)
2180         /* empty */ ;
2181     }
2182 
2183     if (strncmp(p, "/jre/lib/", 9) != 0) {
2184       // Look for JAVA_HOME in the environment.
2185       char* java_home_var = ::getenv("JAVA_HOME");
2186       if (java_home_var != NULL && java_home_var[0] != 0) {
2187         char cpu_arch[12];
2188         char* jrelib_p;
2189         int   len;
2190         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2191 #ifdef _LP64
2192         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2193         if (strcmp(cpu_arch, "sparc") == 0) {
2194           strcat(cpu_arch, "v9");
2195         } else if (strcmp(cpu_arch, "i386") == 0) {
2196           strcpy(cpu_arch, "amd64");
2197         }
2198 #endif
2199         // Check the current module name "libjvm.so".
2200         p = strrchr(buf, '/');
2201         assert(strstr(p, "/libjvm") == p, "invalid library name");
2202 
2203         realpath(java_home_var, buf);
2204         // determine if this is a legacy image or modules image
2205         // modules image doesn't have "jre" subdirectory
2206         len = strlen(buf);
2207         assert(len < buflen, "Ran out of buffer space");
2208         jrelib_p = buf + len;
2209         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2210         if (0 != access(buf, F_OK)) {
2211           snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2212         }
2213 
2214         if (0 == access(buf, F_OK)) {
2215           // Use current module name "libjvm.so"
2216           len = strlen(buf);
2217           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2218         } else {
2219           // Go back to path of .so
2220           realpath((char *)dlinfo.dli_fname, buf);
2221         }
2222       }
2223     }
2224   }
2225 
2226   strncpy(saved_jvm_path, buf, MAXPATHLEN);
2227   saved_jvm_path[MAXPATHLEN - 1] = '\0';
2228 }
2229 
2230 
2231 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2232   // no prefix required, not even "_"
2233 }
2234 
2235 
2236 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2237   // no suffix required
2238 }
2239 
2240 // This method is a copy of JDK's sysGetLastErrorString
2241 // from src/solaris/hpi/src/system_md.c
2242 
2243 size_t os::lasterror(char *buf, size_t len) {
2244   if (errno == 0)  return 0;
2245 
2246   const char *s = ::strerror(errno);
2247   size_t n = ::strlen(s);
2248   if (n >= len) {
2249     n = len - 1;
2250   }
2251   ::strncpy(buf, s, n);
2252   buf[n] = '\0';
2253   return n;
2254 }
2255 
2256 
2257 // sun.misc.Signal
2258 
2259 extern "C" {
2260   static void UserHandler(int sig, void *siginfo, void *context) {
2261     // Ctrl-C is pressed during error reporting, likely because the error
2262     // handler fails to abort. Let VM die immediately.
2263     if (sig == SIGINT && is_error_reported()) {
2264       os::die();
2265     }
2266 
2267     os::signal_notify(sig);
2268     // We do not need to reinstate the signal handler each time...
2269   }
2270 }
2271 
2272 void* os::user_handler() {
2273   return CAST_FROM_FN_PTR(void*, UserHandler);
2274 }
2275 
2276 class Semaphore : public StackObj {
2277  public:
2278   Semaphore();
2279   ~Semaphore();
2280   void signal();
2281   void wait();
2282   bool trywait();
2283   bool timedwait(unsigned int sec, int nsec);
2284  private:
2285   sema_t _semaphore;
2286 };
2287 
2288 
2289 Semaphore::Semaphore() {
2290   sema_init(&_semaphore, 0, NULL, NULL);
2291 }
2292 
2293 Semaphore::~Semaphore() {
2294   sema_destroy(&_semaphore);
2295 }
2296 
2297 void Semaphore::signal() {
2298   sema_post(&_semaphore);
2299 }
2300 
2301 void Semaphore::wait() {
2302   sema_wait(&_semaphore);
2303 }
2304 
2305 bool Semaphore::trywait() {
2306   return sema_trywait(&_semaphore) == 0;
2307 }
2308 
2309 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2310   struct timespec ts;
2311   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2312 
2313   while (1) {
2314     int result = sema_timedwait(&_semaphore, &ts);
2315     if (result == 0) {
2316       return true;
2317     } else if (errno == EINTR) {
2318       continue;
2319     } else if (errno == ETIME) {
2320       return false;
2321     } else {
2322       return false;
2323     }
2324   }
2325 }
2326 
2327 extern "C" {
2328   typedef void (*sa_handler_t)(int);
2329   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2330 }
2331 
2332 void* os::signal(int signal_number, void* handler) {
2333   struct sigaction sigAct, oldSigAct;
2334   sigfillset(&(sigAct.sa_mask));
2335   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
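       // Since SA_RESTART and SA_RESETHAND are distinct flag bits, the
       // expression above reduces to plain SA_RESTART: interrupted syscalls
       // are restarted and the handler is not reset to SIG_DFL on delivery.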
2336   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2337 
2338   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
2339     // -1 means registration failed
2340     return (void *)-1;
2341   }
2342 
2343   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2344 }
2345 
2346 void os::signal_raise(int signal_number) {
2347   raise(signal_number);
2348 }
2349 
2350 // The following code was moved here from os.cpp to make it
2351 // platform specific, which it is by its very nature.
2352 
2353 // a counter for each possible signal value
2354 static int Sigexit = 0;
2355 static int Maxlibjsigsigs;
2356 static jint *pending_signals = NULL;
2357 static int *preinstalled_sigs = NULL;
2358 static struct sigaction *chainedsigactions = NULL;
2359 static sema_t sig_sem;
2360 typedef int (*version_getting_t)();
2361 version_getting_t os::Solaris::get_libjsig_version = NULL;
2362 static int libjsigversion = 0;
2363 
2364 int os::sigexitnum_pd() {
2365   assert(Sigexit > 0, "signal memory not yet initialized");
2366   return Sigexit;
2367 }
2368 
2369 void os::Solaris::init_signal_mem() {
2370   // Initialize signal structures
2371   Maxsignum = SIGRTMAX;
2372   Sigexit = Maxsignum+1;
2373   assert(Maxsignum >0, "Unable to obtain max signal number");
2374 
2375   Maxlibjsigsigs = Maxsignum;
2376 
2377   // pending_signals has one int per signal
2378   // The additional signal is for SIGEXIT - exit signal to signal_thread
2379   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2380   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2381 
2382   if (UseSignalChaining) {
2383     chainedsigactions = (struct sigaction *)os::malloc(sizeof(struct sigaction)
2384                                                         * (Maxsignum + 1), mtInternal);
2385     memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2386     preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2387     memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2388   }
2389   ourSigFlags = (int*)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2390   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2391 }
2392 
2393 void os::signal_init_pd() {
2394   int ret;
2395 
2396   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2397   assert(ret == 0, "sema_init() failed");
2398 }
2399 
2400 void os::signal_notify(int signal_number) {
2401   int ret;
2402 
2403   Atomic::inc(&pending_signals[signal_number]);
2404   ret = ::sema_post(&sig_sem);
2405   assert(ret == 0, "sema_post() failed");
2406 }
2407 
2408 static int check_pending_signals(bool wait_for_signal) {
2409   int ret;
2410   while (true) {
2411     for (int i = 0; i < Sigexit + 1; i++) {
2412       jint n = pending_signals[i];
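           // Atomically claim one pending signal: the cmpxchg below only
           // decrements the counter if it still holds the value n read above,
           // so two threads can never consume the same pending occurrence.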
2413       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2414         return i;
2415       }
2416     }
2417     if (!wait_for_signal) {
2418       return -1;
2419     }
2420     JavaThread *thread = JavaThread::current();
2421     ThreadBlockInVM tbivm(thread);
2422 
2423     bool threadIsSuspended;
2424     do {
2425       thread->set_suspend_equivalent();
2426       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2427       while ((ret = ::sema_wait(&sig_sem)) == EINTR)
2428         ;
2429       assert(ret == 0, "sema_wait() failed");
2430 
2431       // were we externally suspended while we were waiting?
2432       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2433       if (threadIsSuspended) {
2434         // The semaphore has been incremented, but while we were waiting
2435         // another thread suspended us. We don't want to continue running
2436         // while suspended because that would surprise the thread that
2437         // suspended us.
2438         ret = ::sema_post(&sig_sem);
2439         assert(ret == 0, "sema_post() failed");
2440 
2441         thread->java_suspend_self();
2442       }
2443     } while (threadIsSuspended);
2444   }
2445 }
2446 
2447 int os::signal_lookup() {
2448   return check_pending_signals(false);
2449 }
2450 
2451 int os::signal_wait() {
2452   return check_pending_signals(true);
2453 }
2454 
2455 ////////////////////////////////////////////////////////////////////////////////
2456 // Virtual Memory
2457 
2458 static int page_size = -1;
2459 
2460 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2461 // clear this var if support is not available.
2462 static bool has_map_align = true;
2463 
2464 int os::vm_page_size() {
2465   assert(page_size != -1, "must call os::init");
2466   return page_size;
2467 }
2468 
2469 // Solaris allocates memory by pages.
2470 int os::vm_allocation_granularity() {
2471   assert(page_size != -1, "must call os::init");
2472   return page_size;
2473 }
2474 
2475 static bool recoverable_mmap_error(int err) {
2476   // See if the error is one we can let the caller handle. This
2477   // list of errno values comes from the Solaris mmap(2) man page.
2478   switch (err) {
2479   case EBADF:
2480   case EINVAL:
2481   case ENOTSUP:
2482     // let the caller deal with these errors
2483     return true;
2484 
2485   default:
2486     // Any remaining errors on this OS can cause our reserved mapping
2487     // to be lost. That can cause confusion where different data
2488     // structures think they have the same memory mapped. The worst
2489     // scenario is if both the VM and a library think they have the
2490     // same memory mapped.
2491     return false;
2492   }
2493 }
2494 
2495 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2496                                     int err) {
2497   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2498           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2499           strerror(err), err);
2500 }
2501 
2502 static void warn_fail_commit_memory(char* addr, size_t bytes,
2503                                     size_t alignment_hint, bool exec,
2504                                     int err) {
2505   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2506           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2507           alignment_hint, exec, strerror(err), err);
2508 }
2509 
2510 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2511   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2512   size_t size = bytes;
2513   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2514   if (res != NULL) {
2515     if (UseNUMAInterleaving) {
2516       numa_make_global(addr, bytes);
2517     }
2518     return 0;
2519   }
2520 
2521   int err = errno;  // save errno from mmap() call in mmap_chunk()
2522 
2523   if (!recoverable_mmap_error(err)) {
2524     warn_fail_commit_memory(addr, bytes, exec, err);
2525     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2526   }
2527 
2528   return err;
2529 }
2530 
2531 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2532   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2533 }
2534 
2535 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2536                                   const char* mesg) {
2537   assert(mesg != NULL, "mesg must be specified");
2538   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2539   if (err != 0) {
2540     // the caller wants all commit errors to exit with the specified mesg:
2541     warn_fail_commit_memory(addr, bytes, exec, err);
2542     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2543   }
2544 }
2545 
2546 size_t os::Solaris::page_size_for_alignment(size_t alignment) {
2547   assert(is_size_aligned(alignment, (size_t) vm_page_size()),
2548          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
2549                  alignment, (size_t) vm_page_size()));
2550 
2551   for (int i = 0; _page_sizes[i] != 0; i++) {
2552     if (is_size_aligned(alignment, _page_sizes[i])) {
2553       return _page_sizes[i];
2554     }
2555   }
2556 
2557   return (size_t) vm_page_size();
2558 }
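     // Example with a hypothetical descending page size list {4M, 64K, 8K, 0}:
     // an alignment of 4M returns 4M, while an alignment of 512K returns 64K,
     // the largest listed page size that evenly divides it.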
2559 
2560 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2561                                     size_t alignment_hint, bool exec) {
2562   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2563   if (err == 0 && UseLargePages && alignment_hint > 0) {
2564     assert(is_size_aligned(bytes, alignment_hint),
2565            err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint));
2566 
2567     // The syscall memcntl requires an exact page size (see man memcntl for details).
2568     size_t page_size = page_size_for_alignment(alignment_hint);
2569     if (page_size > (size_t) vm_page_size()) {
2570       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2571     }
2572   }
2573   return err;
2574 }
2575 
2576 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2577                           bool exec) {
2578   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2579 }
2580 
2581 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2582                                   size_t alignment_hint, bool exec,
2583                                   const char* mesg) {
2584   assert(mesg != NULL, "mesg must be specified");
2585   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2586   if (err != 0) {
2587     // the caller wants all commit errors to exit with the specified mesg:
2588     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2589     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2590   }
2591 }
2592 
2593 // Uncommit the pages in a specified region.
2594 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2595   if (madvise(addr, bytes, MADV_FREE) < 0) {
2596     debug_only(warning("MADV_FREE failed."));
2597     return;
2598   }
2599 }
2600 
2601 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2602   return os::commit_memory(addr, size, !ExecMem);
2603 }
2604 
2605 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2606   return os::uncommit_memory(addr, size);
2607 }
2608 
2609 // Change the page size in a given range.
2610 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2611   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2612   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2613   if (UseLargePages) {
2614     size_t page_size = Solaris::page_size_for_alignment(alignment_hint);
2615     if (page_size > (size_t) vm_page_size()) {
2616       Solaris::setup_large_pages(addr, bytes, page_size);
2617     }
2618   }
2619 }
2620 
2621 // Tell the OS to make the range local to the first-touching LWP
2622 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2623   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2624   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2625     debug_only(warning("MADV_ACCESS_LWP failed."));
2626   }
2627 }
2628 
2629 // Tell the OS that this range would be accessed from different LWPs.
2630 void os::numa_make_global(char *addr, size_t bytes) {
2631   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2632   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2633     debug_only(warning("MADV_ACCESS_MANY failed."));
2634   }
2635 }
2636 
2637 // Get the number of the locality groups.
2638 size_t os::numa_get_groups_num() {
2639   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2640   return n != -1 ? n : 1;
2641 }
2642 
2643 // Get a list of leaf locality groups. A leaf lgroup is a group that
2644 // doesn't have any children. Typical leaf group is a CPU or a CPU/memory
2645 // board. An LWP is assigned to one of these groups upon creation.
2646 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2647   if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2648     ids[0] = 0;
2649     return 1;
2650   }
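       // ids[] does double duty: entries cur..top-1 form a breadth-first work
       // queue of lgroups still to be expanded, while leaf lgroups that have
       // memory are compacted into entries 0..bottom-1 as the result.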
2651   int result_size = 0, top = 1, bottom = 0, cur = 0;
2652   for (int k = 0; k < size; k++) {
2653     int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2654                                    (Solaris::lgrp_id_t*)&ids[top], size - top);
2655     if (r == -1) {
2656       ids[0] = 0;
2657       return 1;
2658     }
2659     if (!r) {
2660       // That's a leaf node.
2661       assert(bottom <= cur, "Sanity check");
2662       // Check if the node has memory
2663       if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2664                                   NULL, 0, LGRP_RSRC_MEM) > 0) {
2665         ids[bottom++] = ids[cur];
2666       }
2667     }
2668     top += r;
2669     cur++;
2670   }
2671   if (bottom == 0) {
2672     // Handle the situation when the OS reports no memory available.
2673     // Assume UMA architecture.
2674     ids[0] = 0;
2675     return 1;
2676   }
2677   return bottom;
2678 }
2679 
2680 // Detect the topology change. Typically happens during CPU plugging-unplugging.
2681 bool os::numa_topology_changed() {
2682   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2683   if (is_stale != -1 && is_stale) {
2684     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2685     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2686     assert(c != 0, "Failure to initialize LGRP API");
2687     Solaris::set_lgrp_cookie(c);
2688     return true;
2689   }
2690   return false;
2691 }
2692 
2693 // Get the group id of the current LWP.
2694 int os::numa_get_group_id() {
2695   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2696   if (lgrp_id == -1) {
2697     return 0;
2698   }
2699   const int size = os::numa_get_groups_num();
2700   int *ids = (int*)alloca(size * sizeof(int));
2701 
2702   // Get the ids of all lgroups with memory; r is the count.
2703   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2704                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2705   if (r <= 0) {
2706     return 0;
2707   }
2708   return ids[os::random() % r];
2709 }
2710 
2711 // Request information about the page.
2712 bool os::get_page_info(char *start, page_info* info) {
2713   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2714   uint64_t addr = (uintptr_t)start;
2715   uint64_t outdata[2];
2716   uint_t validity = 0;
2717 
2718   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2719     return false;
2720   }
2721 
2722   info->size = 0;
2723   info->lgrp_id = -1;
2724 
2725   if ((validity & 1) != 0) {
2726     if ((validity & 2) != 0) {
2727       info->lgrp_id = outdata[0];
2728     }
2729     if ((validity & 4) != 0) {
2730       info->size = outdata[1];
2731     }
2732     return true;
2733   }
2734   return false;
2735 }
2736 
2737 // Scan the pages from start to end until a page different than
2738 // the one described in the info parameter is encountered.
2739 char *os::scan_pages(char *start, char* end, page_info* page_expected,
2740                      page_info* page_found) {
2741   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2742   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2743   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2744   uint_t validity[MAX_MEMINFO_CNT];
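       // For each queried address i, meminfo() writes `types` values into
       // outdata: outdata[types * i] is the lgroup id (valid when
       // (validity[i] & 2) != 0) and outdata[types * i + 1] is the page size
       // (valid when (validity[i] & 4) != 0).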
2745 
2746   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2747   uint64_t p = (uint64_t)start;
2748   while (p < (uint64_t)end) {
2749     addrs[0] = p;
2750     size_t addrs_count = 1;
2751     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2752       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2753       addrs_count++;
2754     }
2755 
2756     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2757       return NULL;
2758     }
2759 
2760     size_t i = 0;
2761     for (; i < addrs_count; i++) {
2762       if ((validity[i] & 1) != 0) {
2763         if ((validity[i] & 4) != 0) {
2764           if (outdata[types * i + 1] != page_expected->size) {
2765             break;
2766           }
2767         } else if (page_expected->size != 0) {
2768           break;
2769         }
2770 
2771         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2772           if (outdata[types * i] != page_expected->lgrp_id) {
2773             break;
2774           }
2775         }
2776       } else {
2777         return NULL;
2778       }
2779     }
2780 
2781     if (i < addrs_count) {
2782       if ((validity[i] & 2) != 0) {
2783         page_found->lgrp_id = outdata[types * i];
2784       } else {
2785         page_found->lgrp_id = -1;
2786       }
2787       if ((validity[i] & 4) != 0) {
2788         page_found->size = outdata[types * i + 1];
2789       } else {
2790         page_found->size = 0;
2791       }
2792       return (char*)addrs[i];
2793     }
2794 
2795     p = addrs[addrs_count - 1] + page_size;
2796   }
2797   return end;
2798 }
2799 
2800 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2801   size_t size = bytes;
2802   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2803   // uncommitted page. Otherwise, the read/write might succeed if we
2804   // have enough swap space to back the physical page.
2805   return
2806     NULL != Solaris::mmap_chunk(addr, size,
2807                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2808                                 PROT_NONE);
2809 }
2810 
2811 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
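       // Anonymous memory is obtained the traditional Solaris way: by mapping
       // the cached /dev/zero descriptor (os::Solaris::_dev_zero_fd) rather
       // than by using MAP_ANON.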
2812   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2813 
2814   if (b == MAP_FAILED) {
2815     return NULL;
2816   }
2817   return b;
2818 }
2819 
2820 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes,
2821                              size_t alignment_hint, bool fixed) {
2822   char* addr = requested_addr;
2823   int flags = MAP_PRIVATE | MAP_NORESERVE;
2824 
2825   assert(!(fixed && (alignment_hint > 0)),
2826          "alignment hint meaningless with fixed mmap");
2827 
2828   if (fixed) {
2829     flags |= MAP_FIXED;
2830   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
2831     flags |= MAP_ALIGN;
2832     addr = (char*) alignment_hint;
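         // With MAP_ALIGN, mmap() interprets the address argument as the
         // required alignment of the mapping rather than as a placement hint.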
2833   }
2834 
2835   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2836   // uncommitted page. Otherwise, the read/write might succeed if we
2837   // have enough swap space to back the physical page.
2838   return mmap_chunk(addr, bytes, flags, PROT_NONE);
2839 }
2840 
2841 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
2842                             size_t alignment_hint) {
2843   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint,
2844                                   (requested_addr != NULL));
2845 
2846   guarantee(requested_addr == NULL || requested_addr == addr,
2847             "OS failed to return requested mmap address.");
2848   return addr;
2849 }
2850 
2851 // Reserve memory at an arbitrary address, only if that area is
2852 // available (and not reserved for something else).
2853 
2854 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2855   const int max_tries = 10;
2856   char* base[max_tries];
2857   size_t size[max_tries];
2858 
2859   // Solaris adds a gap between mmap'ed regions.  The size of the gap
2860   // is dependent on the requested size and the MMU.  Our initial gap
2861   // value here is just a guess and will be corrected later.
2862   bool had_top_overlap = false;
2863   bool have_adjusted_gap = false;
2864   size_t gap = 0x400000;
2865 
2866   // Assert only that the size is a multiple of the page size, since
2867   // that's all that mmap requires, and since that's all we really know
2868   // about at this low abstraction level.  If we need higher alignment,
2869   // we can either pass an alignment to this method or verify alignment
2870   // in one of the methods further up the call chain.  See bug 5044738.
2871   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2872 
2873   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2874   // Give it a try, if the kernel honors the hint we can return immediately.
2875   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2876 
2877   volatile int err = errno;
2878   if (addr == requested_addr) {
2879     return addr;
2880   } else if (addr != NULL) {
2881     pd_unmap_memory(addr, bytes);
2882   }
2883 
2884   if (PrintMiscellaneous && Verbose) {
2885     char buf[256];
2886     buf[0] = '\0';
2887     if (addr == NULL) {
2888       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
2889     }
2890     warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2891             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2892             "%s", bytes, requested_addr, addr, buf);
2893   }
2894 
2895   // Address hint method didn't work.  Fall back to the old method.
2896   // In theory, once SNV becomes our oldest supported platform, this
2897   // code will no longer be needed.
2898   //
2899   // Repeatedly allocate blocks until the block is allocated at the
2900   // right spot. Give up after max_tries.
2901   int i;
2902   for (i = 0; i < max_tries; ++i) {
2903     base[i] = reserve_memory(bytes);
2904 
2905     if (base[i] != NULL) {
2906       // Is this the block we wanted?
2907       if (base[i] == requested_addr) {
2908         size[i] = bytes;
2909         break;
2910       }
2911 
2912       // check that the gap value is right
2913       if (had_top_overlap && !have_adjusted_gap) {
2914         size_t actual_gap = base[i-1] - base[i] - bytes;
2915         if (gap != actual_gap) {
2916           // adjust the gap value and retry the last 2 allocations
2917           assert(i > 0, "gap adjustment code problem");
2918           have_adjusted_gap = true;  // adjust the gap only once, just in case
2919           gap = actual_gap;
2920           if (PrintMiscellaneous && Verbose) {
2921             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2922           }
2923           unmap_memory(base[i], bytes);
2924           unmap_memory(base[i-1], size[i-1]);
2925           i-=2;
2926           continue;
2927         }
2928       }
2929 
2930       // Does this overlap the block we wanted? Give back the overlapped
2931       // parts and try again.
2932       //
2933       // There is still a bug in this code: if top_overlap == bytes,
2934       // the overlap is offset from requested region by the value of gap.
2935       // In this case giving back the overlapped part will not work,
2936       // because we'll give back the entire block at base[i] and
2937       // therefore the subsequent allocation will not generate a new gap.
2938       // This could be fixed with a new algorithm that used larger
2939       // or variable size chunks to find the requested region -
2940       // but such a change would introduce additional complications.
2941       // It's rare enough that the planets align for this bug,
2942       // so we'll just wait for a fix for 6204603/5003415 which
2943       // will provide a mmap flag to allow us to avoid this business.
2944 
2945       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2946       if (top_overlap >= 0 && top_overlap < bytes) {
2947         had_top_overlap = true;
2948         unmap_memory(base[i], top_overlap);
2949         base[i] += top_overlap;
2950         size[i] = bytes - top_overlap;
2951       } else {
2952         size_t bottom_overlap = base[i] + bytes - requested_addr;
2953         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2954           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
2955             warning("attempt_reserve_memory_at: possible alignment bug");
2956           }
2957           unmap_memory(requested_addr, bottom_overlap);
2958           size[i] = bytes - bottom_overlap;
2959         } else {
2960           size[i] = bytes;
2961         }
2962       }
2963     }
2964   }
2965 
2966   // Give back the unused reserved pieces.
2967 
2968   for (int j = 0; j < i; ++j) {
2969     if (base[j] != NULL) {
2970       unmap_memory(base[j], size[j]);
2971     }
2972   }
2973 
2974   return (i < max_tries) ? requested_addr : NULL;
2975 }
2976 
2977 bool os::pd_release_memory(char* addr, size_t bytes) {
2978   size_t size = bytes;
2979   return munmap(addr, size) == 0;
2980 }
2981 
2982 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
2983   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
2984          "addr must be page aligned");
2985   int retVal = mprotect(addr, bytes, prot);
2986   return retVal == 0;
2987 }
2988 
2989 // Protect memory (Used to pass readonly pages through
2990 // JNI GetArray<type>Elements with empty arrays.)
2991 // Also, used for serialization page and for compressed oops null pointer
2992 // checking.
2993 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2994                         bool is_committed) {
2995   unsigned int p = 0;
2996   switch (prot) {
2997   case MEM_PROT_NONE: p = PROT_NONE; break;
2998   case MEM_PROT_READ: p = PROT_READ; break;
2999   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
3000   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3001   default:
3002     ShouldNotReachHere();
3003   }
3004   // is_committed is unused.
3005   return solaris_mprotect(addr, bytes, p);
3006 }
3007 
3008 // guard_memory and unguard_memory only happen within stack guard pages.
3009 // Since ISM pertains only to the heap, guard and unguard memory should not
3010 // happen within an ISM region.
3011 bool os::guard_memory(char* addr, size_t bytes) {
3012   return solaris_mprotect(addr, bytes, PROT_NONE);
3013 }
3014 
3015 bool os::unguard_memory(char* addr, size_t bytes) {
3016   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
3017 }
3018 
3019 // Large page support
3020 static size_t _large_page_size = 0;
3021 
3022 // Insertion sort for small arrays (descending order).
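// For example (illustrative): an input of {8K, 4M, 64K} is reordered in place
// to {4M, 64K, 8K}.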
3023 static void insertion_sort_descending(size_t* array, int len) {
3024   for (int i = 0; i < len; i++) {
3025     size_t val = array[i];
3026     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
3027       size_t tmp = array[key];
3028       array[key] = array[key - 1];
3029       array[key - 1] = tmp;
3030     }
3031   }
3032 }
3033 
3034 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
3035   const unsigned int usable_count = VM_Version::page_size_count();
3036   if (usable_count == 1) {
3037     return false;
3038   }
3039 
3040   // Find the right getpagesizes interface.  When Solaris 11 is the minimum
3041   // build platform, getpagesizes() (without the '2') can be called directly.
3042   typedef int (*gps_t)(size_t[], int);
3043   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3044   if (gps_func == NULL) {
3045     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3046     if (gps_func == NULL) {
3047       if (warn) {
3048         warning("MPSS is not supported by the operating system.");
3049       }
3050       return false;
3051     }
3052   }
3053 
3054   // Fill the array of page sizes.
3055   int n = (*gps_func)(_page_sizes, page_sizes_max);
3056   assert(n > 0, "Solaris bug?");
3057 
3058   if (n == page_sizes_max) {
3059     // The array was completely filled, so overwrite the last slot with a 0
3060     // sentinel (an unfilled static array is already zeroed at initialization).
3061     _page_sizes[--n] = 0;
3062     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3063   }
3064   assert(_page_sizes[n] == 0, "missing sentinel");
3065   trace_page_sizes("available page sizes", _page_sizes, n);
3066 
3067   if (n == 1) return false;     // Only one page size available.
3068 
3069   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3070   // select up to usable_count elements.  First sort the array, find the first
3071   // acceptable value, then copy the usable sizes to the top of the array and
3072   // trim the rest.  Make sure to include the default page size :-).
3073   //
3074   // A better policy could get rid of the 4M limit by taking the sizes of the
3075   // important VM memory regions (java heap and possibly the code cache) into
3076   // account.
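  //
  // Worked example (illustrative, assuming an 8K base page and a usable_count
  // of 3): available sizes {256M, 4M, 64K, 8K} with the default 4M limit give
  // a usable list of {4M, 64K, 8K}, followed by the 0 sentinel.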
3077   insertion_sort_descending(_page_sizes, n);
3078   const size_t size_limit =
3079     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3080   int beg;
3081   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */;
3082   const int end = MIN2((int)usable_count, n) - 1;
3083   for (int cur = 0; cur < end; ++cur, ++beg) {
3084     _page_sizes[cur] = _page_sizes[beg];
3085   }
3086   _page_sizes[end] = vm_page_size();
3087   _page_sizes[end + 1] = 0;
3088 
3089   if (_page_sizes[end] > _page_sizes[end - 1]) {
3090     // Default page size is not the smallest; sort again.
3091     insertion_sort_descending(_page_sizes, end + 1);
3092   }
3093   *page_size = _page_sizes[0];
3094 
3095   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3096   return true;
3097 }
3098 
3099 void os::large_page_init() {
3100   if (UseLargePages) {
3101     // print a warning if any large page related flag is specified on command line
3102     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
3103                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3104 
3105     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3106   }
3107 }
3108 
3109 bool os::Solaris::is_valid_page_size(size_t bytes) {
3110   for (int i = 0; _page_sizes[i] != 0; i++) {
3111     if (_page_sizes[i] == bytes) {
3112       return true;
3113     }
3114   }
3115   return false;
3116 }
3117 
3118 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3119   assert(is_valid_page_size(align), err_msg(SIZE_FORMAT " is not a valid page size", align));
3120   assert(is_ptr_aligned((void*) start, align),
3121          err_msg(PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align));
3122   assert(is_size_aligned(bytes, align),
3123          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align));
3124 
3125   // Signal to OS that we want large pages for addresses
3126   // from addr, addr + bytes
3127   struct memcntl_mha mpss_struct;
3128   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3129   mpss_struct.mha_pagesize = align;
3130   mpss_struct.mha_flags = 0;
3131   // Upon successful completion, memcntl() returns 0
3132   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3133     debug_only(warning("Attempt to use MPSS failed."));
3134     return false;
3135   }
3136   return true;
3137 }
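
// Illustrative usage (hypothetical values): a caller that reserved a
// 4M-aligned, 512M region at heap_base could request 4M pages for it with
//   os::Solaris::setup_large_pages((caddr_t)heap_base, 512*M, 4*M);
// provided 4M is one of the sizes reported by is_valid_page_size() above.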
3138 
3139 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3140   fatal("os::reserve_memory_special should not be called on Solaris.");
3141   return NULL;
3142 }
3143 
3144 bool os::release_memory_special(char* base, size_t bytes) {
3145   fatal("os::release_memory_special should not be called on Solaris.");
3146   return false;
3147 }
3148 
3149 size_t os::large_page_size() {
3150   return _large_page_size;
3151 }
3152 
3153 // MPSS allows the application to commit large page memory on demand; with ISM
3154 // the entire memory region must be allocated as shared memory.
3155 bool os::can_commit_large_page_memory() {
3156   return true;
3157 }
3158 
3159 bool os::can_execute_large_page_memory() {
3160   return true;
3161 }
3162 
3163 // Read calls from inside the vm need to perform state transitions
3164 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3165   size_t res;
3166   JavaThread* thread = (JavaThread*)Thread::current();
3167   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
3168   ThreadBlockInVM tbiv(thread);
3169   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3170   return res;
3171 }
3172 
3173 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
3174   size_t res;
3175   JavaThread* thread = (JavaThread*)Thread::current();
3176   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
3177   ThreadBlockInVM tbiv(thread);
3178   RESTARTABLE(::pread(fd, buf, (size_t) nBytes, offset), res);
3179   return res;
3180 }
3181 
3182 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3183   size_t res;
3184   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
3185          "Assumed _thread_in_native");
3186   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3187   return res;
3188 }
3189 
3190 void os::naked_short_sleep(jlong ms) {
3191   assert(ms < 1000, "Un-interruptable sleep, short time use only");
3192 
3193   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3194   // Solaris requires -lrt for this.
3195   usleep((ms * 1000));
3196 
3197   return;
3198 }
3199 
3200 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3201 void os::infinite_sleep() {
3202   while (true) {    // sleep forever ...
3203     ::sleep(100);   // ... 100 seconds at a time
3204   }
3205 }
3206 
3207 // Used to convert frequent JVM_Yield() to nops
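// E.g., if DontYieldALotInterval is 10 (ms), at most one yield per 10ms
// window is passed on to the OS; the rest are reported as nops.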
3208 bool os::dont_yield() {
3209   if (DontYieldALot) {
3210     static hrtime_t last_time = 0;
3211     hrtime_t diff = getTimeNanos() - last_time;
3212 
3213     if (diff < DontYieldALotInterval * 1000000) {
3214       return true;
3215     }
3216 
3217     last_time += diff;
3218 
3219     return false;
3220   } else {
3221     return false;
3222   }
3223 }
3224 
3225 // Note that yield semantics are defined by the scheduling class to which
3226 // the thread currently belongs.  Typically, yield will _not_ yield to
3227 // other equal or higher priority threads that reside on the dispatch queues
3228 // of other CPUs.
3229 
3230 void os::naked_yield() {
3231   thr_yield();
3232 }
3233 
3234 // Interface for setting lwp priorities.  If we are using the T2 libthread,
3235 // which forces the use of bound threads, or we manually set UseBoundThreads,
3236 // all of our threads will be assigned to real lwp's.  Using the thr_setprio
3237 // function is meaningless in this mode, so we must adjust the real lwp's priority.
3238 // The routines below implement the getting and setting of lwp priorities.
3239 //
3240 // Note: T2 is now the only supported libthread. UseBoundThreads flag is
3241 //       being deprecated and all threads are now BoundThreads
3242 //
3243 // Note: There are three priority scales used on Solaris.  Java priorities,
3244 //       which range from 1 to 10; the libthread "thr_setprio" scale, which
3245 //       ranges from 0 to 127; and the scale of the current scheduling class
3246 //       of the process we are running in, which is typically -60 to +60.
3247 //       The setting of the lwp priorities is done after a call to thr_setprio,
3248 //       so Java priorities are mapped to libthread priorities and we map from
3249 //       the latter to lwp priorities.  We don't keep priorities stored as
3250 //       Java priorities since some of our worker threads want to set priorities
3251 //       higher than all Java threads.
3252 //
3253 // For related information:
3254 // (1)  man -s 2 priocntl
3255 // (2)  man -s 4 priocntl
3256 // (3)  man dispadmin
3257 // =    librt.so
3258 // =    libthread/common/rtsched.c - thrp_setlwpprio().
3259 // =    ps -cL <pid> ... to validate priority.
3260 // =    sched_get_priority_min and _max
3261 //              pthread_create
3262 //              sched_setparam
3263 //              pthread_setschedparam
3264 //
3265 // Assumptions:
3266 // +    We assume that all threads in the process belong to the same
3267 //              scheduling class.   I.e., a homogeneous process.
3268 // +    Must be root or in the IA group to change the "interactive" attribute.
3269 //              Priocntl() will fail silently.  The only indication of failure is when
3270 //              we read back the value and notice that it hasn't changed.
3271 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3272 // +    For RT, change timeslice as well.  Invariant:
3273 //              constant "priority integral"
3274 //              Konst == TimeSlice * (60-Priority)
3275 //              Given a priority, compute appropriate timeslice.
3276 // +    Higher numerical values have higher priority.
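// +    Illustration of the invariant (hypothetical numbers): with Konst == 1000
//              and Priority == 40, TimeSlice = Konst / (60 - Priority)
//              = 1000 / 20 = 50.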
3277 
3278 // sched class attributes
3279 typedef struct {
3280   int   schedPolicy;              // classID
3281   int   maxPrio;
3282   int   minPrio;
3283 } SchedInfo;
3284 
3285 
3286 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3287 
3288 #ifdef ASSERT
3289 static int  ReadBackValidate = 1;
3290 #endif
3291 static int  myClass     = 0;
3292 static int  myMin       = 0;
3293 static int  myMax       = 0;
3294 static int  myCur       = 0;
3295 static bool priocntl_enable = false;
3296 
3297 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
3298 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3299 
3300 
3301 // lwp_priocntl_init
3302 //
3303 // Try to determine the priority scale for our process.
3304 //
3305 // Return errno or 0 if OK.
3306 //
3307 static int lwp_priocntl_init() {
3308   int rslt;
3309   pcinfo_t ClassInfo;
3310   pcparms_t ParmInfo;
3311   int i;
3312 
3313   if (!UseThreadPriorities) return 0;
3314 
3315   // If ThreadPriorityPolicy is 1, switch tables
3316   if (ThreadPriorityPolicy == 1) {
3317     for (i = 0; i < CriticalPriority+1; i++)
3318       os::java_to_os_priority[i] = prio_policy1[i];
3319   }
3320   if (UseCriticalJavaThreadPriority) {
3321     // MaxPriority always maps to the FX scheduling class and criticalPrio.
3322     // See set_native_priority() and set_lwp_class_and_priority().
3323     // Save original MaxPriority mapping in case attempt to
3324     // use critical priority fails.
3325     java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3326     // Set negative to distinguish from other priorities
3327     os::java_to_os_priority[MaxPriority] = -criticalPrio;
3328   }
3329 
3330   // Get IDs for a set of well-known scheduling classes.
3331   // TODO-FIXME: GETCLINFO returns the current # of classes in
3332   // the system.  We should have a loop that iterates over the
3333   // classID values, which are known to be "small" integers.
3334 
3335   strcpy(ClassInfo.pc_clname, "TS");
3336   ClassInfo.pc_cid = -1;
3337   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3338   if (rslt < 0) return errno;
3339   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3340   tsLimits.schedPolicy = ClassInfo.pc_cid;
3341   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3342   tsLimits.minPrio = -tsLimits.maxPrio;
3343 
3344   strcpy(ClassInfo.pc_clname, "IA");
3345   ClassInfo.pc_cid = -1;
3346   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3347   if (rslt < 0) return errno;
3348   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3349   iaLimits.schedPolicy = ClassInfo.pc_cid;
3350   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3351   iaLimits.minPrio = -iaLimits.maxPrio;
3352 
3353   strcpy(ClassInfo.pc_clname, "RT");
3354   ClassInfo.pc_cid = -1;
3355   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3356   if (rslt < 0) return errno;
3357   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3358   rtLimits.schedPolicy = ClassInfo.pc_cid;
3359   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3360   rtLimits.minPrio = 0;
3361 
3362   strcpy(ClassInfo.pc_clname, "FX");
3363   ClassInfo.pc_cid = -1;
3364   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3365   if (rslt < 0) return errno;
3366   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3367   fxLimits.schedPolicy = ClassInfo.pc_cid;
3368   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3369   fxLimits.minPrio = 0;
3370 
3371   // Query our "current" scheduling class.
3372   // This will normally be IA, TS or, rarely, FX or RT.
3373   memset(&ParmInfo, 0, sizeof(ParmInfo));
3374   ParmInfo.pc_cid = PC_CLNULL;
3375   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3376   if (rslt < 0) return errno;
3377   myClass = ParmInfo.pc_cid;
3378 
3379   // We now know our scheduling classId, get specific information
3380   // about the class.
3381   ClassInfo.pc_cid = myClass;
3382   ClassInfo.pc_clname[0] = 0;
3383   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3384   if (rslt < 0) return errno;
3385 
3386   if (ThreadPriorityVerbose) {
3387     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3388   }
3389 
3390   memset(&ParmInfo, 0, sizeof(pcparms_t));
3391   ParmInfo.pc_cid = PC_CLNULL;
3392   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3393   if (rslt < 0) return errno;
3394 
3395   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3396     myMin = rtLimits.minPrio;
3397     myMax = rtLimits.maxPrio;
3398   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3399     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3400     myMin = iaLimits.minPrio;
3401     myMax = iaLimits.maxPrio;
3402     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3403   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3404     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3405     myMin = tsLimits.minPrio;
3406     myMax = tsLimits.maxPrio;
3407     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3408   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3409     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3410     myMin = fxLimits.minPrio;
3411     myMax = fxLimits.maxPrio;
3412     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3413   } else {
3414     // No clue - punt
3415     if (ThreadPriorityVerbose) {
3416       tty->print_cr("Unknown scheduling class: %s ... \n",
3417                     ClassInfo.pc_clname);
3418     }
3419     return EINVAL;      // no clue, punt
3420   }
3421 
3422   if (ThreadPriorityVerbose) {
3423     tty->print_cr("Thread priority Range: [%d..%d]\n", myMin, myMax);
3424   }
3425 
3426   priocntl_enable = true;  // Enable changing priorities
3427   return 0;
3428 }
3429 
3430 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3431 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3432 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3433 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3434 
3435 
3436 // scale_to_lwp_priority
3437 //
3438 // Convert from the libthread "thr_setprio" scale to our current
3439 // lwp scheduling class scale.
3440 //
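// For example (illustrative limits): with a TS range of [-60..60], a libthread
// priority of 64 maps to ((64 * 120) / 128) + (-60) == 0, while 127 is mapped
// directly to the class maximum of 60 to avoid the round-down.
//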
3441 static int scale_to_lwp_priority(int rMin, int rMax, int x) {
3442   int v;
3443 
3444   if (x == 127) return rMax;            // avoid round-down
3445   v = (((x*(rMax-rMin)))/128)+rMin;
3446   return v;
3447 }
3448 
3449 
3450 // set_lwp_class_and_priority
3451 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3452                                int newPrio, int new_class, bool scale) {
3453   int rslt;
3454   int Actual, Expected, prv;
3455   pcparms_t ParmInfo;                   // for GET-SET
3456 #ifdef ASSERT
3457   pcparms_t ReadBack;                   // for readback
3458 #endif
3459 
3460   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3461   // Query current values.
3462   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3463   // Cache "pcparms_t" in global ParmCache.
3464   // TODO: elide set-to-same-value
3465 
3466   // If something went wrong on init, don't change priorities.
3467   if (!priocntl_enable) {
3468     if (ThreadPriorityVerbose) {
3469       tty->print_cr("Trying to set priority but init failed, ignoring");
3470     }
3471     return EINVAL;
3472   }
3473 
3474   // If the lwp hasn't started yet, just return;
3475   // the _start routine will call us again.
3476   if (lwpid <= 0) {
3477     if (ThreadPriorityVerbose) {
3478       tty->print_cr("deferring the set_lwp_class_and_priority of thread "
3479                     INTPTR_FORMAT " to %d, lwpid not set",
3480                     ThreadID, newPrio);
3481     }
3482     return 0;
3483   }
3484 
3485   if (ThreadPriorityVerbose) {
3486     tty->print_cr ("set_lwp_class_and_priority("
3487                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3488                    ThreadID, lwpid, newPrio);
3489   }
3490 
3491   memset(&ParmInfo, 0, sizeof(pcparms_t));
3492   ParmInfo.pc_cid = PC_CLNULL;
3493   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3494   if (rslt < 0) return errno;
3495 
3496   int cur_class = ParmInfo.pc_cid;
3497   ParmInfo.pc_cid = (id_t)new_class;
3498 
3499   if (new_class == rtLimits.schedPolicy) {
3500     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3501     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3502                                                        rtLimits.maxPrio, newPrio)
3503                                : newPrio;
3504     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3505     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3506     if (ThreadPriorityVerbose) {
3507       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3508     }
3509   } else if (new_class == iaLimits.schedPolicy) {
3510     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3511     int maxClamped     = MIN2(iaLimits.maxPrio,
3512                               cur_class == new_class
3513                               ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3514     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3515                                                        maxClamped, newPrio)
3516                                : newPrio;
3517     iaInfo->ia_uprilim = cur_class == new_class
3518                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3519     iaInfo->ia_mode    = IA_NOCHANGE;
3520     if (ThreadPriorityVerbose) {
3521       tty->print_cr("IA: [%d...%d] %d->%d\n",
3522                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3523     }
3524   } else if (new_class == tsLimits.schedPolicy) {
3525     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3526     int maxClamped     = MIN2(tsLimits.maxPrio,
3527                               cur_class == new_class
3528                               ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3529     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3530                                                        maxClamped, newPrio)
3531                                : newPrio;
3532     tsInfo->ts_uprilim = cur_class == new_class
3533                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3534     if (ThreadPriorityVerbose) {
3535       tty->print_cr("TS: [%d...%d] %d->%d\n",
3536                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3537     }
3538   } else if (new_class == fxLimits.schedPolicy) {
3539     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3540     int maxClamped     = MIN2(fxLimits.maxPrio,
3541                               cur_class == new_class
3542                               ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3543     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3544                                                        maxClamped, newPrio)
3545                                : newPrio;
3546     fxInfo->fx_uprilim = cur_class == new_class
3547                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3548     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3549     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3550     if (ThreadPriorityVerbose) {
3551       tty->print_cr("FX: [%d...%d] %d->%d\n",
3552                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3553     }
3554   } else {
3555     if (ThreadPriorityVerbose) {
3556       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3557     }
3558     return EINVAL;    // no clue, punt
3559   }
3560 
3561   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3562   if (ThreadPriorityVerbose && rslt) {
3563     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3564   }
3565   if (rslt < 0) return errno;
3566 
3567 #ifdef ASSERT
3568   // Sanity check: read back what we just attempted to set.
3569   // In theory it could have changed in the interim ...
3570   //
3571   // The priocntl system call is tricky.
3572   // Sometimes it'll validate the priority value argument and
3573   // return EINVAL if unhappy.  At other times it fails silently.
3574   // Readbacks are prudent.
3575 
3576   if (!ReadBackValidate) return 0;
3577 
3578   memset(&ReadBack, 0, sizeof(pcparms_t));
3579   ReadBack.pc_cid = PC_CLNULL;
3580   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3581   assert(rslt >= 0, "priocntl failed");
3582   Actual = Expected = 0xBAD;
3583   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3584   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3585     Actual   = RTPRI(ReadBack)->rt_pri;
3586     Expected = RTPRI(ParmInfo)->rt_pri;
3587   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3588     Actual   = IAPRI(ReadBack)->ia_upri;
3589     Expected = IAPRI(ParmInfo)->ia_upri;
3590   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3591     Actual   = TSPRI(ReadBack)->ts_upri;
3592     Expected = TSPRI(ParmInfo)->ts_upri;
3593   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3594     Actual   = FXPRI(ReadBack)->fx_upri;
3595     Expected = FXPRI(ParmInfo)->fx_upri;
3596   } else {
3597     if (ThreadPriorityVerbose) {
3598       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3599                     ParmInfo.pc_cid);
3600     }
3601   }
3602 
3603   if (Actual != Expected) {
3604     if (ThreadPriorityVerbose) {
3605       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3606                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3607     }
3608   }
3609 #endif
3610 
3611   return 0;
3612 }
3613 
3614 // Solaris only gives access to 128 real priorities at a time,
3615 // so we expand Java's ten to fill this range.  This would be better
3616 // if we dynamically adjusted relative priorities.
3617 //
3618 // The ThreadPriorityPolicy option allows us to select 2 different
3619 // priority scales.
3620 //
3621 // ThreadPriorityPolicy=0
3622 // Since Solaris' default priority is MaximumPriority, we do not
3623 // set a priority lower than Max unless a priority lower than
3624 // NormPriority is requested.
3625 //
3626 // ThreadPriorityPolicy=1
3627 // This mode causes the priority table to get filled with
3628 // linear values.  NormPriority gets mapped to 50% of the
3629 // maximum priority and so on.  This will cause VM threads
3630 // to receive unfair treatment relative to other Solaris processes
3631 // which do not explicitly alter their thread priorities.
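//
// For example (illustrative): under ThreadPriorityPolicy=1, NormPriority (5)
// would map to roughly 64 on the 0..127 libthread scale, rather than the 127
// used by the default table below.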
3632 
3633 int os::java_to_os_priority[CriticalPriority + 1] = {
3634   -99999,         // 0 Entry should never be used
3635 
3636   0,              // 1 MinPriority
3637   32,             // 2
3638   64,             // 3
3639 
3640   96,             // 4
3641   127,            // 5 NormPriority
3642   127,            // 6
3643 
3644   127,            // 7
3645   127,            // 8
3646   127,            // 9 NearMaxPriority
3647 
3648   127,            // 10 MaxPriority
3649 
3650   -criticalPrio   // 11 CriticalPriority
3651 };
3652 
3653 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3654   OSThread* osthread = thread->osthread();
3655 
3656   // Save requested priority in case the thread hasn't been started
3657   osthread->set_native_priority(newpri);
3658 
3659   // Check for critical priority request
3660   bool fxcritical = false;
3661   if (newpri == -criticalPrio) {
3662     fxcritical = true;
3663     newpri = criticalPrio;
3664   }
3665 
3666   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3667   if (!UseThreadPriorities) return OS_OK;
3668 
3669   int status = 0;
3670 
3671   if (!fxcritical) {
3672     // Use thr_setprio only if we have a priority that thr_setprio understands
3673     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3674   }
3675 
3676   int lwp_status =
3677           set_lwp_class_and_priority(osthread->thread_id(),
3678                                      osthread->lwp_id(),
3679                                      newpri,
3680                                      fxcritical ? fxLimits.schedPolicy : myClass,
3681                                      !fxcritical);
3682   if (lwp_status != 0 && fxcritical) {
3683     // Try again, this time without changing the scheduling class
3684     newpri = java_MaxPriority_to_os_priority;
3685     lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3686                                             osthread->lwp_id(),
3687                                             newpri, myClass, false);
3688   }
3689   status |= lwp_status;
3690   return (status == 0) ? OS_OK : OS_ERR;
3691 }
3692 
3693 
3694 OSReturn os::get_native_priority(const Thread* const thread,
3695                                  int *priority_ptr) {
3696   int p;
3697   if (!UseThreadPriorities) {
3698     *priority_ptr = NormalPriority;
3699     return OS_OK;
3700   }
3701   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3702   if (status != 0) {
3703     return OS_ERR;
3704   }
3705   *priority_ptr = p;
3706   return OS_OK;
3707 }
3708 
3709 
3710 // Hint to the underlying OS that a task switch would not be good.
3711 // Void return because it's a hint and can fail.
3712 void os::hint_no_preempt() {
3713   schedctl_start(schedctl_init());
3714 }
3715 
3716 static void resume_clear_context(OSThread *osthread) {
3717   osthread->set_ucontext(NULL);
3718 }
3719 
3720 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3721   osthread->set_ucontext(context);
3722 }
3723 
3724 static Semaphore sr_semaphore;
3725 
3726 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
3727   // Save and restore errno to avoid confusing native code with EINTR
3728   // after sigsuspend.
3729   int old_errno = errno;
3730 
3731   OSThread* osthread = thread->osthread();
3732   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3733 
3734   os::SuspendResume::State current = osthread->sr.state();
3735   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3736     suspend_save_context(osthread, uc);
3737 
3738     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3739     os::SuspendResume::State state = osthread->sr.suspended();
3740     if (state == os::SuspendResume::SR_SUSPENDED) {
3741       sigset_t suspend_set;  // signals for sigsuspend()
3742 
3743       // get current set of blocked signals and unblock resume signal
3744       thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
3745       sigdelset(&suspend_set, os::Solaris::SIGasync());
3746 
3747       sr_semaphore.signal();
3748       // wait here until we are resumed
3749       while (1) {
3750         sigsuspend(&suspend_set);
3751 
3752         os::SuspendResume::State result = osthread->sr.running();
3753         if (result == os::SuspendResume::SR_RUNNING) {
3754           sr_semaphore.signal();
3755           break;
3756         }
3757       }
3758 
3759     } else if (state == os::SuspendResume::SR_RUNNING) {
3760       // request was cancelled, continue
3761     } else {
3762       ShouldNotReachHere();
3763     }
3764 
3765     resume_clear_context(osthread);
3766   } else if (current == os::SuspendResume::SR_RUNNING) {
3767     // request was cancelled, continue
3768   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3769     // ignore
3770   } else {
3771     // ignore
3772   }
3773 
3774   errno = old_errno;
3775 }
3776 
3777 void os::print_statistics() {
3778 }
3779 
3780 int os::message_box(const char* title, const char* message) {
3781   int i;
3782   fdStream err(defaultStream::error_fd());
3783   for (i = 0; i < 78; i++) err.print_raw("=");
3784   err.cr();
3785   err.print_raw_cr(title);
3786   for (i = 0; i < 78; i++) err.print_raw("-");
3787   err.cr();
3788   err.print_raw_cr(message);
3789   for (i = 0; i < 78; i++) err.print_raw("=");
3790   err.cr();
3791 
3792   char buf[16];
3793   // Prevent process from exiting upon "read error" without consuming all CPU
3794   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3795 
3796   return buf[0] == 'y' || buf[0] == 'Y';
3797 }
3798 
3799 static int sr_notify(OSThread* osthread) {
3800   int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
3801   assert_status(status == 0, status, "thr_kill");
3802   return status;
3803 }
3804 
3805 // "Randomly" selected value for how long we want to spin
3806 // before bailing out on suspending a thread, also how often
3807 // we send a signal to a thread we want to resume
3808 static const int RANDOMLY_LARGE_INTEGER = 1000000;
3809 static const int RANDOMLY_LARGE_INTEGER2 = 100;
3810 
3811 static bool do_suspend(OSThread* osthread) {
3812   assert(osthread->sr.is_running(), "thread should be running");
3813   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
3814 
3815   // mark as suspended and send signal
3816   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3817     // failed to switch, state wasn't running?
3818     ShouldNotReachHere();
3819     return false;
3820   }
3821 
3822   if (sr_notify(osthread) != 0) {
3823     ShouldNotReachHere();
3824   }
3825 
3826   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3827   while (true) {
3828     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
3829       break;
3830     } else {
3831       // timeout
3832       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3833       if (cancelled == os::SuspendResume::SR_RUNNING) {
3834         return false;
3835       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3836         // make sure that we consume the signal on the semaphore as well
3837         sr_semaphore.wait();
3838         break;
3839       } else {
3840         ShouldNotReachHere();
3841         return false;
3842       }
3843     }
3844   }
3845 
3846   guarantee(osthread->sr.is_suspended(), "Must be suspended");
3847   return true;
3848 }
3849 
3850 static void do_resume(OSThread* osthread) {
3851   assert(osthread->sr.is_suspended(), "thread should be suspended");
3852   assert(!sr_semaphore.trywait(), "invalid semaphore state");
3853 
3854   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3855     // failed to switch to WAKEUP_REQUEST
3856     ShouldNotReachHere();
3857     return;
3858   }
3859 
3860   while (true) {
3861     if (sr_notify(osthread) == 0) {
3862       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
3863         if (osthread->sr.is_running()) {
3864           return;
3865         }
3866       }
3867     } else {
3868       ShouldNotReachHere();
3869     }
3870   }
3871 
3872   guarantee(osthread->sr.is_running(), "Must be running!");
3873 }
3874 
3875 void os::SuspendedThreadTask::internal_do_task() {
3876   if (do_suspend(_thread->osthread())) {
3877     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3878     do_task(context);
3879     do_resume(_thread->osthread());
3880   }
3881 }
3882 
3883 class PcFetcher : public os::SuspendedThreadTask {
3884  public:
3885   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3886   ExtendedPC result();
3887  protected:
3888   void do_task(const os::SuspendedThreadTaskContext& context);
3889  private:
3890   ExtendedPC _epc;
3891 };
3892 
3893 ExtendedPC PcFetcher::result() {
3894   guarantee(is_done(), "task is not done yet.");
3895   return _epc;
3896 }
3897 
3898 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3899   Thread* thread = context.thread();
3900   OSThread* osthread = thread->osthread();
3901   if (osthread->ucontext() != NULL) {
3902     _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
3903   } else {
3904     // NULL context is unexpected, double-check this is the VMThread
3905     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3906   }
3907 }
3908 
3909 // A lightweight implementation that does not suspend the target thread and
3910 // thus returns only a hint. Used for profiling only!
3911 ExtendedPC os::get_thread_pc(Thread* thread) {
3912   // Make sure that it is called by the watcher and the Threads lock is owned.
3913   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
3914   // For now, is only used to profile the VM Thread
3915   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3916   PcFetcher fetcher(thread);
3917   fetcher.run();
3918   return fetcher.result();
3919 }
3920 
3921 
3922 // This does not do anything on Solaris. This is basically a hook for being
3923 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
3924 void os::os_exception_wrapper(java_call_t f, JavaValue* value,
3925                               methodHandle* method, JavaCallArguments* args,
3926                               Thread* thread) {
3927   f(value, method, args, thread);
3928 }
3929 
3930 // This routine may be used by user applications as a "hook" to catch signals.
3931 // The user-defined signal handler must pass unrecognized signals to this
3932 // routine, and if it returns true (non-zero), then the signal handler must
3933 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
3934 // routine will never return false (zero), but will instead execute a VM panic
3935 // routine to kill the process.
3936 //
3937 // If this routine returns false, it is OK to call it again.  This allows
3938 // the user-defined signal handler to perform checks either before or after
3939 // the VM performs its own checks.  Naturally, the user code would be making
3940 // a serious error if it tried to handle an exception (such as a null check
3941 // or breakpoint) that the VM was generating for its own correct operation.
3942 //
3943 // This routine may recognize any of the following kinds of signals:
3944 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
3945 // os::Solaris::SIGasync
3946 // It should be consulted by handlers for any of those signals.
3947 // It explicitly does not recognize os::Solaris::SIGinterrupt
3948 //
3949 // The caller of this routine must pass in the three arguments supplied
3950 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
3951 // field of the structure passed to sigaction().  This routine assumes that
3952 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3953 //
3954 // Note that the VM will print warnings if it detects conflicting signal
3955 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3956 //
3957 extern "C" JNIEXPORT int JVM_handle_solaris_signal(int signo,
3958                                                    siginfo_t* siginfo,
3959                                                    void* ucontext,
3960                                                    int abort_if_unrecognized);
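
// Illustrative sketch only (application code, not part of the VM): using the
// declaration above, a user-defined handler installed with SA_SIGINFO could
// forward unrecognized signals like this, returning as soon as the VM claims
// the signal:
//
//   extern "C" void my_app_handler(int sig, siginfo_t* info, void* uc) {
//     if (JVM_handle_solaris_signal(sig, info, uc, 0 /* !abort_if_unrecognized */)) {
//       return;   // the VM recognized and handled the signal
//     }
//     // ... application-specific handling of the application's own signals ...
//   }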
3961 
3962 
3963 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
3964   int orig_errno = errno;  // Preserve errno value over signal handler.
3965   JVM_handle_solaris_signal(sig, info, ucVoid, true);
3966   errno = orig_errno;
3967 }
3968 
3969 // Do not delete - if guarantee is ever removed,  a signal handler (even empty)
3970 // is needed to provoke threads blocked on IO to return an EINTR
3971 // Note: this explicitly does NOT call JVM_handle_solaris_signal and
3972 // does NOT participate in signal chaining due to requirement for
3973 // NOT setting SA_RESTART to make EINTR work.
3974 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
3975   if (UseSignalChaining) {
3976     struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
3977     if (actp && actp->sa_handler) {
3978       vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
3979     }
3980   }
3981 }
3982 
3983 // This boolean allows users to forward their own non-matching signals
3984 // to JVM_handle_solaris_signal, harmlessly.
3985 bool os::Solaris::signal_handlers_are_installed = false;
3986 
3987 // For signal-chaining
3988 bool os::Solaris::libjsig_is_loaded = false;
3989 typedef struct sigaction *(*get_signal_t)(int);
3990 get_signal_t os::Solaris::get_signal_action = NULL;
3991 
3992 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
3993   struct sigaction *actp = NULL;
3994 
3995   if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
3996     // Retrieve the old signal handler from libjsig
3997     actp = (*get_signal_action)(sig);
3998   }
3999   if (actp == NULL) {
4000     // Retrieve the preinstalled signal handler from jvm
4001     actp = get_preinstalled_handler(sig);
4002   }
4003 
4004   return actp;
4005 }
4006 
4007 static bool call_chained_handler(struct sigaction *actp, int sig,
4008                                  siginfo_t *siginfo, void *context) {
4009   // Call the old signal handler
4010   if (actp->sa_handler == SIG_DFL) {
4011     // It's more reasonable to let jvm treat it as an unexpected exception
4012     // instead of taking the default action.
4013     return false;
4014   } else if (actp->sa_handler != SIG_IGN) {
4015     if ((actp->sa_flags & SA_NODEFER) == 0) {
4016       // automatically block the signal
4017       sigaddset(&(actp->sa_mask), sig);
4018     }
4019 
4020     sa_handler_t hand;
4021     sa_sigaction_t sa;
4022     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
4023     // retrieve the chained handler
4024     if (siginfo_flag_set) {
4025       sa = actp->sa_sigaction;
4026     } else {
4027       hand = actp->sa_handler;
4028     }
4029 
4030     if ((actp->sa_flags & SA_RESETHAND) != 0) {
4031       actp->sa_handler = SIG_DFL;
4032     }
4033 
4034     // try to honor the signal mask
4035     sigset_t oset;
4036     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
4037 
4038     // call into the chained handler
4039     if (siginfo_flag_set) {
4040       (*sa)(sig, siginfo, context);
4041     } else {
4042       (*hand)(sig);
4043     }
4044 
4045     // restore the signal mask
4046     thr_sigsetmask(SIG_SETMASK, &oset, 0);
4047   }
4048   // Tell jvm's signal handler the signal is taken care of.
4049   return true;
4050 }
4051 
4052 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4053   bool chained = false;
4054   // signal-chaining
4055   if (UseSignalChaining) {
4056     struct sigaction *actp = get_chained_signal_action(sig);
4057     if (actp != NULL) {
4058       chained = call_chained_handler(actp, sig, siginfo, context);
4059     }
4060   }
4061   return chained;
4062 }
4063 
4064 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4065   assert((chainedsigactions != (struct sigaction *)NULL) &&
4066          (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
4067   if (preinstalled_sigs[sig] != 0) {
4068     return &chainedsigactions[sig];
4069   }
4070   return NULL;
4071 }
4072 
4073 void os::Solaris::save_preinstalled_handler(int sig,
4074                                             struct sigaction& oldAct) {
4075   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4076   assert((chainedsigactions != (struct sigaction *)NULL) &&
4077          (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
4078   chainedsigactions[sig] = oldAct;
4079   preinstalled_sigs[sig] = 1;
4080 }
4081 
4082 void os::Solaris::set_signal_handler(int sig, bool set_installed,
4083                                      bool oktochain) {
4084   // Check for overwrite.
4085   struct sigaction oldAct;
4086   sigaction(sig, (struct sigaction*)NULL, &oldAct);
4087   void* oldhand =
4088       oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
4089                           : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
4090   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4091       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4092       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4093     if (AllowUserSignalHandlers || !set_installed) {
4094       // Do not overwrite; user takes responsibility to forward to us.
4095       return;
4096     } else if (UseSignalChaining) {
4097       if (oktochain) {
4098         // save the old handler in jvm
4099         save_preinstalled_handler(sig, oldAct);
4100       } else {
4101         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4102       }
4103       // libjsig also interposes the sigaction() call below and saves the
4104       // old sigaction on its own.
4105     } else {
4106       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4107                     "%#lx for signal %d.", (long)oldhand, sig));
4108     }
4109   }
4110 
4111   struct sigaction sigAct;
4112   sigfillset(&(sigAct.sa_mask));
4113   sigAct.sa_handler = SIG_DFL;
4114 
4115   sigAct.sa_sigaction = signalHandler;
4116   // Handle SIGSEGV on alternate signal stack if
4117   // not using stack banging
4118   if (!UseStackBanging && sig == SIGSEGV) {
4119     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4120   } else if (sig == os::Solaris::SIGinterrupt()) {
4121     // Interruptible i/o requires SA_RESTART cleared so EINTR
4122     // is returned instead of restarting system calls
4123     sigemptyset(&sigAct.sa_mask);
4124     sigAct.sa_handler = NULL;
4125     sigAct.sa_flags = SA_SIGINFO;
4126     sigAct.sa_sigaction = sigINTRHandler;
4127   } else {
4128     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4129   }
4130   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4131 
4132   sigaction(sig, &sigAct, &oldAct);
4133 
4134   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4135                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4136   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4137 }
4138 
4139 
4140 #define DO_SIGNAL_CHECK(sig)                      \
4141   do {                                            \
4142     if (!sigismember(&check_signal_done, sig)) {  \
4143       os::Solaris::check_signal_handler(sig);     \
4144     }                                             \
4145   } while (0)
4146 
4147 // This method is a periodic task to check for misbehaving JNI applications
4148 // under CheckJNI; we can add any other periodic checks here.
4149 
4150 void os::run_periodic_checks() {
4151   // A big source of grief is hijacking virt. addr 0x0 on Solaris,
4152   // thereby preventing NULL checks.
4153   if (!check_addr0_done) check_addr0_done = check_addr0(tty);
4154 
4155   if (check_signals == false) return;
4156 
4157   // If the SEGV or BUS handlers are overridden, they could potentially prevent
4158   // generation of the hs*.log file in the event of a crash; debugging
4159   // such a case can be very challenging, so we absolutely
4160   // check the following for good measure:
4161   DO_SIGNAL_CHECK(SIGSEGV);
4162   DO_SIGNAL_CHECK(SIGILL);
4163   DO_SIGNAL_CHECK(SIGFPE);
4164   DO_SIGNAL_CHECK(SIGBUS);
4165   DO_SIGNAL_CHECK(SIGPIPE);
4166   DO_SIGNAL_CHECK(SIGXFSZ);
4167 
4168   // ReduceSignalUsage allows the user to override these handlers
4169   // see comments at the very top and jvm_solaris.h
4170   if (!ReduceSignalUsage) {
4171     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4172     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4173     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4174     DO_SIGNAL_CHECK(BREAK_SIGNAL);
4175   }
4176 
4177   // See comments above for using JVM1/JVM2 and UseAltSigs
4178   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4179   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4180 
4181 }
4182 
4183 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4184 
4185 static os_sigaction_t os_sigaction = NULL;
4186 
4187 void os::Solaris::check_signal_handler(int sig) {
4188   char buf[O_BUFLEN];
4189   address jvmHandler = NULL;
4190 
4191   struct sigaction act;
4192   if (os_sigaction == NULL) {
4193     // only trust the default sigaction, in case it has been interposed
4194     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4195     if (os_sigaction == NULL) return;
4196   }
4197 
4198   os_sigaction(sig, (struct sigaction*)NULL, &act);
4199 
4200   address thisHandler = (act.sa_flags & SA_SIGINFO)
4201     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4202     : CAST_FROM_FN_PTR(address, act.sa_handler);
4203 
4204 
4205   switch (sig) {
4206   case SIGSEGV:
4207   case SIGBUS:
4208   case SIGFPE:
4209   case SIGPIPE:
4210   case SIGXFSZ:
4211   case SIGILL:
4212     jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4213     break;
4214 
4215   case SHUTDOWN1_SIGNAL:
4216   case SHUTDOWN2_SIGNAL:
4217   case SHUTDOWN3_SIGNAL:
4218   case BREAK_SIGNAL:
4219     jvmHandler = (address)user_handler();
4220     break;
4221 
4222   default:
4223     int intrsig = os::Solaris::SIGinterrupt();
4224     int asynsig = os::Solaris::SIGasync();
4225 
4226     if (sig == intrsig) {
4227       jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4228     } else if (sig == asynsig) {
4229       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4230     } else {
4231       return;
4232     }
4233     break;
4234   }
4235 
4236 
4237   if (thisHandler != jvmHandler) {
4238     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4239     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4240     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4241     // No need to check this sig any longer
4242     sigaddset(&check_signal_done, sig);
4243     // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
4244     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
4245       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
4246                     exception_name(sig, buf, O_BUFLEN));
4247     }
4248   } else if (os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4249     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4250     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4251     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
4252     // No need to check this sig any longer
4253     sigaddset(&check_signal_done, sig);
4254   }
4255 
4256   // Print all the signal handler state
4257   if (sigismember(&check_signal_done, sig)) {
4258     print_signal_handlers(tty, buf, O_BUFLEN);
4259   }
4260 
4261 }
4262 
4263 void os::Solaris::install_signal_handlers() {
4264   bool libjsigdone = false;
4265   signal_handlers_are_installed = true;
4266 
4267   // signal-chaining
4268   typedef void (*signal_setting_t)();
4269   signal_setting_t begin_signal_setting = NULL;
4270   signal_setting_t end_signal_setting = NULL;
4271   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4272                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4273   if (begin_signal_setting != NULL) {
4274     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4275                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4276     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4277                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4278     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4279                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4280     libjsig_is_loaded = true;
4281     if (os::Solaris::get_libjsig_version != NULL) {
4282       libjsigversion =  (*os::Solaris::get_libjsig_version)();
4283     }
4284     assert(UseSignalChaining, "should enable signal-chaining");
4285   }
4286   if (libjsig_is_loaded) {
4287     // Tell libjsig jvm is setting signal handlers
4288     (*begin_signal_setting)();
4289   }
4290 
4291   set_signal_handler(SIGSEGV, true, true);
4292   set_signal_handler(SIGPIPE, true, true);
4293   set_signal_handler(SIGXFSZ, true, true);
4294   set_signal_handler(SIGBUS, true, true);
4295   set_signal_handler(SIGILL, true, true);
4296   set_signal_handler(SIGFPE, true, true);
4297 
4298 
4299   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4300 
4301     // Pre-1.4.1 libjsig limited signal chaining to signals <= 32, so it
4302     // cannot register overridable signals which might be > 32.
4303     if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
4304       // Tell libjsig jvm has finished setting signal handlers
4305       (*end_signal_setting)();
4306       libjsigdone = true;
4307     }
4308   }
4309 
4310   // Never ok to chain our SIGinterrupt
4311   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4312   set_signal_handler(os::Solaris::SIGasync(), true, true);
4313 
4314   if (libjsig_is_loaded && !libjsigdone) {
4315     // Tell libjsig jvm has finished setting signal handlers
4316     (*end_signal_setting)();
4317   }
4318 
4319   // We don't activate the signal checker if libjsig is in place; we trust ourselves,
4320   // and if a user signal handler is installed all bets are off.
4321   // Log that signal checking is off only if -verbose:jni is specified.
4322   if (CheckJNICalls) {
4323     if (libjsig_is_loaded) {
4324       if (PrintJNIResolving) {
4325         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4326       }
4327       check_signals = false;
4328     }
4329     if (AllowUserSignalHandlers) {
4330       if (PrintJNIResolving) {
4331         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4332       }
4333       check_signals = false;
4334     }
4335   }
4336 }
4337 
4338 
4339 void report_error(const char* file_name, int line_no, const char* title,
4340                   const char* format, ...);
4341 
4342 const char * signames[] = {
4343   "SIG0",
4344   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4345   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4346   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4347   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4348   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4349   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4350   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4351   "SIGCANCEL", "SIGLOST"
4352 };
4353 
4354 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4355   if (0 < exception_code && exception_code <= SIGRTMAX) {
4356     // signal
4357     if (exception_code < sizeof(signames)/sizeof(const char*)) {
4358       jio_snprintf(buf, size, "%s", signames[exception_code]);
4359     } else {
4360       jio_snprintf(buf, size, "SIG%d", exception_code);
4361     }
4362     return buf;
4363   } else {
4364     return NULL;
4365   }
4366 }
4367 
4368 // (Static) wrapper for getisax(2) call.
4369 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4370 
4371 // (Static) wrappers for the liblgrp API
4372 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4373 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4374 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4375 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4376 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4377 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4378 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4379 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4380 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4381 
4382 // (Static) wrapper for meminfo() call.
4383 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4384 
4385 static address resolve_symbol_lazy(const char* name) {
4386   address addr = (address) dlsym(RTLD_DEFAULT, name);
4387   if (addr == NULL) {
4388     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4389     addr = (address) dlsym(RTLD_NEXT, name);
4390   }
4391   return addr;
4392 }
4393 
4394 static address resolve_symbol(const char* name) {
4395   address addr = resolve_symbol_lazy(name);
4396   if (addr == NULL) {
4397     fatal(dlerror());
4398   }
4399   return addr;
4400 }
4401 
4402 void os::Solaris::libthread_init() {
4403   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4404 
4405   lwp_priocntl_init();
4406 
4407   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4408   if (func == NULL) {
4409     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on a new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
4412     guarantee(func != NULL, "libthread.so is too old.");
4413   }
4414 
4415   int size;
4416   void (*handler_info_func)(address *, int *);
4417   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4418   handler_info_func(&handler_start, &size);
4419   handler_end = handler_start + size;
4420 }
4421 
4422 
4423 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4424 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4425 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4426 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4427 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4428 int os::Solaris::_mutex_scope = USYNC_THREAD;
4429 
4430 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4431 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4432 int_fnP_cond_tP os::Solaris::_cond_signal;
4433 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4434 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4435 int_fnP_cond_tP os::Solaris::_cond_destroy;
4436 int os::Solaris::_cond_scope = USYNC_THREAD;
4437 
4438 void os::Solaris::synchronization_init() {
4439   if (UseLWPSynchronization) {
4440     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4441     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4442     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4443     os::Solaris::set_mutex_init(lwp_mutex_init);
4444     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4445     os::Solaris::set_mutex_scope(USYNC_THREAD);
4446 
4447     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4448     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4449     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4450     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4451     os::Solaris::set_cond_init(lwp_cond_init);
4452     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4453     os::Solaris::set_cond_scope(USYNC_THREAD);
4454   } else {
4455     os::Solaris::set_mutex_scope(USYNC_THREAD);
4456     os::Solaris::set_cond_scope(USYNC_THREAD);
4457 
4458     if (UsePthreads) {
4459       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4460       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4461       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4462       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4463       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4464 
4465       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4466       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4467       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4468       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4469       os::Solaris::set_cond_init(pthread_cond_default_init);
4470       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4471     } else {
4472       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4473       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4474       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4475       os::Solaris::set_mutex_init(::mutex_init);
4476       os::Solaris::set_mutex_destroy(::mutex_destroy);
4477 
4478       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4479       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4480       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4481       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4482       os::Solaris::set_cond_init(::cond_init);
4483       os::Solaris::set_cond_destroy(::cond_destroy);
4484     }
4485   }
4486 }
4487 
4488 bool os::Solaris::liblgrp_init() {
4489   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4490   if (handle != NULL) {
4491     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4492     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4493     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4494     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4495     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4496     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4497     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4498     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4499                                                       dlsym(handle, "lgrp_cookie_stale")));
4500 
4501     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4502     set_lgrp_cookie(c);
4503     return true;
4504   }
4505   return false;
4506 }
4507 
4508 void os::Solaris::misc_sym_init() {
4509   address func;
4510 
4511   // getisax
4512   func = resolve_symbol_lazy("getisax");
4513   if (func != NULL) {
4514     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4515   }
4516 
4517   // meminfo
4518   func = resolve_symbol_lazy("meminfo");
4519   if (func != NULL) {
4520     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4521   }
4522 }
4523 
4524 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4525   assert(_getisax != NULL, "_getisax not set");
4526   return _getisax(array, n);
4527 }
4528 
4529 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4530 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4531 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4532 
4533 void init_pset_getloadavg_ptr(void) {
4534   pset_getloadavg_ptr =
4535     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4536   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4537     warning("pset_getloadavg function not found");
4538   }
4539 }
4540 
4541 int os::Solaris::_dev_zero_fd = -1;
4542 
4543 // this is called _before_ the global arguments have been parsed
4544 void os::init(void) {
4545   _initial_pid = getpid();
4546 
4547   max_hrtime = first_hrtime = gethrtime();
4548 
4549   init_random(1234567);
4550 
4551   page_size = sysconf(_SC_PAGESIZE);
4552   if (page_size == -1) {
4553     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4554                   strerror(errno)));
4555   }
4556   init_page_sizes((size_t) page_size);
4557 
4558   Solaris::initialize_system_info();
4559 
4560   // Initialize misc. symbols as soon as possible, so we can use them
4561   // if we need them.
4562   Solaris::misc_sym_init();
4563 
4564   int fd = ::open("/dev/zero", O_RDWR);
4565   if (fd < 0) {
4566     fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4567   } else {
4568     Solaris::set_dev_zero_fd(fd);
4569 
4570     // Close on exec, child won't inherit.
4571     fcntl(fd, F_SETFD, FD_CLOEXEC);
4572   }
4573 
4574   clock_tics_per_sec = CLK_TCK;
4575 
4576   // check if dladdr1() exists; dladdr1 can provide more information than
4577   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4578   // and is available on linker patches for 5.7 and 5.8.
4579   // libdl.so must have been loaded, this call is just an entry lookup
4580   void * hdl = dlopen("libdl.so", RTLD_NOW);
4581   if (hdl) {
4582     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4583   }
4584 
4585   // (Solaris only) this switches to calls that actually do locking.
4586   ThreadCritical::initialize();
4587 
4588   main_thread = thr_self();
4589 
4590   // Constant minimum stack size allowed. It must be at least
4591   // the minimum of what the OS supports (thr_min_stack()), and
4592   // enough to allow the thread to get to user bytecode execution.
4593   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4594   // If the pagesize of the VM is greater than 8K determine the appropriate
4595   // number of initial guard pages.  The user can change this with the
4596   // command line arguments, if needed.
4597   if (vm_page_size() > 8*K) {
4598     StackYellowPages = 1;
4599     StackRedPages = 1;
4600     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4601   }
4602 }
4603 
// To install functions for atexit processing
4605 extern "C" {
4606   static void perfMemory_exit_helper() {
4607     perfMemory_exit();
4608   }
4609 }
4610 
4611 // this is called _after_ the global arguments have been parsed
4612 jint os::init_2(void) {
4613   // try to enable extended file IO ASAP, see 6431278
4614   os::Solaris::try_enable_extended_io();
4615 
4616   // Allocate a single page and mark it as readable for safepoint polling.  Also
4617   // use this first mmap call to check support for MAP_ALIGN.
4618   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4619                                                       page_size,
4620                                                       MAP_PRIVATE | MAP_ALIGN,
4621                                                       PROT_READ);
4622   if (polling_page == NULL) {
4623     has_map_align = false;
4624     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
4625                                                 PROT_READ);
4626   }
4627 
4628   os::set_polling_page(polling_page);
4629 
4630 #ifndef PRODUCT
4631   if (Verbose && PrintMiscellaneous) {
4632     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n",
4633                (intptr_t)polling_page);
4634   }
4635 #endif
4636 
4637   if (!UseMembar) {
4638     address mem_serialize_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE);
4639     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
4640     os::set_memory_serialize_page(mem_serialize_page);
4641 
4642 #ifndef PRODUCT
4643     if (Verbose && PrintMiscellaneous) {
4644       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n",
4645                  (intptr_t)mem_serialize_page);
4646     }
4647 #endif
4648   }
4649 
4650   // Check minimum allowable stack size for thread creation and to initialize
4651   // the java system classes, including StackOverflowError - depends on page
4652   // size.  Add a page for compiler2 recursion in main thread.
4653   // Add in 2*BytesPerWord times page size to account for VM stack during
4654   // class initialization depending on 32 or 64 bit VM.
4655   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
4656                                         (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
4657                                         2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
4658 
4659   size_t threadStackSizeInBytes = ThreadStackSize * K;
4660   if (threadStackSizeInBytes != 0 &&
4661       threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
4662     tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
4663                   os::Solaris::min_stack_allowed/K);
4664     return JNI_ERR;
4665   }
4666 
  // For 64kb pages the usable default stack size is quite a bit less.
  // Increase the stack for 64kb (or any larger than 8kb) pages; this
  // increases virtual memory fragmentation (since we're not creating the
  // stack on a power of 2 boundary).  The real fix for this
  // should be to fix the guard page mechanism.
4673 
4674   if (vm_page_size() > 8*K) {
4675     threadStackSizeInBytes = (threadStackSizeInBytes != 0)
4676        ? threadStackSizeInBytes +
4677          ((StackYellowPages + StackRedPages) * vm_page_size())
4678        : 0;
4679     ThreadStackSize = threadStackSizeInBytes/K;
4680   }
4681 
4682   // Make the stack size a multiple of the page size so that
4683   // the yellow/red zones can be guarded.
4684   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
4685                                                 vm_page_size()));
4686 
4687   Solaris::libthread_init();
4688 
4689   if (UseNUMA) {
4690     if (!Solaris::liblgrp_init()) {
4691       UseNUMA = false;
4692     } else {
4693       size_t lgrp_limit = os::numa_get_groups_num();
4694       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
4695       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
4696       FREE_C_HEAP_ARRAY(int, lgrp_ids);
4697       if (lgrp_num < 2) {
4698         // There's only one locality group, disable NUMA.
4699         UseNUMA = false;
4700       }
4701     }
4702     if (!UseNUMA && ForceNUMA) {
4703       UseNUMA = true;
4704     }
4705   }
4706 
4707   Solaris::signal_sets_init();
4708   Solaris::init_signal_mem();
4709   Solaris::install_signal_handlers();
4710 
4711   if (libjsigversion < JSIG_VERSION_1_4_1) {
4712     Maxlibjsigsigs = OLDMAXSIGNUM;
4713   }
4714 
4715   // initialize synchronization primitives to use either thread or
4716   // lwp synchronization (controlled by UseLWPSynchronization)
4717   Solaris::synchronization_init();
4718 
4719   if (MaxFDLimit) {
    // Set the number of file descriptors to the maximum.  Print an error
    // if getrlimit/setrlimit fails, but continue regardless.
4722     struct rlimit nbr_files;
4723     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4724     if (status != 0) {
4725       if (PrintMiscellaneous && (Verbose || WizardMode)) {
4726         perror("os::init_2 getrlimit failed");
4727       }
4728     } else {
4729       nbr_files.rlim_cur = nbr_files.rlim_max;
4730       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4731       if (status != 0) {
4732         if (PrintMiscellaneous && (Verbose || WizardMode)) {
4733           perror("os::init_2 setrlimit failed");
4734         }
4735       }
4736     }
4737   }
4738 
  // Calculate the theoretical max. size of Threads to guard against
  // artificial out-of-memory situations, where all available address
  // space has been reserved by thread stacks. Default stack size is 1Mb.
4742   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
4743     JavaThread::stack_size_at_create() : (1*K*K);
4744   assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris limits user programs to a maximum of 4Gb of address space. Calculate
  // the thread limit at which we should start doing virtual memory banging;
  // currently that is when the threads will have used all but 200Mb of space.
4748   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
4749   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
4750 
4751   // at-exit methods are called in the reverse order of their registration.
4752   // In Solaris 7 and earlier, atexit functions are called on return from
4753   // main or as a result of a call to exit(3C). There can be only 32 of
4754   // these functions registered and atexit() does not set errno. In Solaris
4755   // 8 and later, there is no limit to the number of functions registered
4756   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
4757   // functions are called upon dlclose(3DL) in addition to return from main
4758   // and exit(3C).
4759 
4760   if (PerfAllowAtExitRegistration) {
4761     // only register atexit functions if PerfAllowAtExitRegistration is set.
4762     // atexit functions can be delayed until process exit time, which
4763     // can be problematic for embedded VM situations. Embedded VMs should
4764     // call DestroyJavaVM() to assure that VM resources are released.
4765 
4766     // note: perfMemory_exit_helper atexit function may be removed in
4767     // the future if the appropriate cleanup code can be added to the
4768     // VM_Exit VMOperation's doit method.
4769     if (atexit(perfMemory_exit_helper) != 0) {
4770       warning("os::init2 atexit(perfMemory_exit_helper) failed");
4771     }
4772   }
4773 
4774   // Init pset_loadavg function pointer
4775   init_pset_getloadavg_ptr();
4776 
4777   return JNI_OK;
4778 }
4779 
4780 // Mark the polling page as unreadable
4781 void os::make_polling_page_unreadable(void) {
4782   if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0) {
4783     fatal("Could not disable polling page");
4784   }
4785 }
4786 
4787 // Mark the polling page as readable
4788 void os::make_polling_page_readable(void) {
4789   if (mprotect((char *)_polling_page, page_size, PROT_READ) != 0) {
4790     fatal("Could not enable polling page");
4791   }
4792 }
4793 
4794 // OS interface.
4795 
4796 bool os::check_heap(bool force) { return true; }
4797 
4798 // Is a (classpath) directory empty?
4799 bool os::dir_is_empty(const char* path) {
4800   DIR *dir = NULL;
4801   struct dirent *ptr;
4802 
4803   dir = opendir(path);
4804   if (dir == NULL) return true;
4805 
4806   // Scan the directory
4807   bool result = true;
4808   char buf[sizeof(struct dirent) + MAX_PATH];
4809   struct dirent *dbuf = (struct dirent *) buf;
4810   while (result && (ptr = readdir(dir, dbuf)) != NULL) {
4811     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4812       result = false;
4813     }
4814   }
4815   closedir(dir);
4816   return result;
4817 }
4818 
4819 // This code originates from JDK's sysOpen and open64_w
4820 // from src/solaris/hpi/src/system_md.c
4821 
4822 int os::open(const char *path, int oflag, int mode) {
4823   if (strlen(path) > MAX_PATH - 1) {
4824     errno = ENAMETOOLONG;
4825     return -1;
4826   }
4827   int fd;
4828 
4829   fd = ::open64(path, oflag, mode);
4830   if (fd == -1) return -1;
4831 
4832   // If the open succeeded, the file might still be a directory
4833   {
4834     struct stat64 buf64;
4835     int ret = ::fstat64(fd, &buf64);
4836     int st_mode = buf64.st_mode;
4837 
4838     if (ret != -1) {
4839       if ((st_mode & S_IFMT) == S_IFDIR) {
4840         errno = EISDIR;
4841         ::close(fd);
4842         return -1;
4843       }
4844     } else {
4845       ::close(fd);
4846       return -1;
4847     }
4848   }
4849 
4850   // 32-bit Solaris systems suffer from:
4851   //
4852   // - an historical default soft limit of 256 per-process file
4853   //   descriptors that is too low for many Java programs.
4854   //
4855   // - a design flaw where file descriptors created using stdio
4856   //   fopen must be less than 256, _even_ when the first limit above
4857   //   has been raised.  This can cause calls to fopen (but not calls to
4858   //   open, for example) to fail mysteriously, perhaps in 3rd party
4859   //   native code (although the JDK itself uses fopen).  One can hardly
4860   //   criticize them for using this most standard of all functions.
4861   //
4862   // We attempt to make everything work anyways by:
4863   //
4864   // - raising the soft limit on per-process file descriptors beyond
4865   //   256
4866   //
4867   // - As of Solaris 10u4, we can request that Solaris raise the 256
4868   //   stdio fopen limit by calling function enable_extended_FILE_stdio.
4869   //   This is done in init_2 and recorded in enabled_extended_FILE_stdio
4870   //
4871   // - If we are stuck on an old (pre 10u4) Solaris system, we can
4872   //   workaround the bug by remapping non-stdio file descriptors below
4873   //   256 to ones beyond 256, which is done below.
4874   //
4875   // See:
4876   // 1085341: 32-bit stdio routines should support file descriptors >255
4877   // 6533291: Work around 32-bit Solaris stdio limit of 256 open files
4878   // 6431278: Netbeans crash on 32 bit Solaris: need to call
4879   //          enable_extended_FILE_stdio() in VM initialisation
4880   // Giri Mandalika's blog
4881   // http://technopark02.blogspot.com/2005_05_01_archive.html
4882   //
4883 #ifndef  _LP64
4884   if ((!enabled_extended_FILE_stdio) && fd < 256) {
4885     int newfd = ::fcntl(fd, F_DUPFD, 256);
4886     if (newfd != -1) {
4887       ::close(fd);
4888       fd = newfd;
4889     }
4890   }
4891 #endif // 32-bit Solaris
4892 
4893   // All file descriptors that are opened in the JVM and not
4894   // specifically destined for a subprocess should have the
4895   // close-on-exec flag set.  If we don't set it, then careless 3rd
4896   // party native code might fork and exec without closing all
4897   // appropriate file descriptors (e.g. as we do in closeDescriptors in
4898   // UNIXProcess.c), and this in turn might:
4899   //
4900   // - cause end-of-file to fail to be detected on some file
4901   //   descriptors, resulting in mysterious hangs, or
4902   //
4903   // - might cause an fopen in the subprocess to fail on a system
4904   //   suffering from bug 1085341.
4905   //
4906   // (Yes, the default setting of the close-on-exec flag is a Unix
4907   // design flaw)
4908   //
4909   // See:
4910   // 1085341: 32-bit stdio routines should support file descriptors >255
4911   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4912   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4913   //
4914 #ifdef FD_CLOEXEC
4915   {
4916     int flags = ::fcntl(fd, F_GETFD);
4917     if (flags != -1) {
4918       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4919     }
4920   }
4921 #endif
4922 
4923   return fd;
4924 }
4925 
4926 // create binary file, rewriting existing file if required
4927 int os::create_binary_file(const char* path, bool rewrite_existing) {
4928   int oflags = O_WRONLY | O_CREAT;
4929   if (!rewrite_existing) {
4930     oflags |= O_EXCL;
4931   }
4932   return ::open64(path, oflags, S_IREAD | S_IWRITE);
4933 }
4934 
4935 // return current position of file pointer
4936 jlong os::current_file_offset(int fd) {
4937   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4938 }
4939 
4940 // move file pointer to the specified offset
4941 jlong os::seek_to_file_offset(int fd, jlong offset) {
4942   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4943 }
4944 
4945 jlong os::lseek(int fd, jlong offset, int whence) {
4946   return (jlong) ::lseek64(fd, offset, whence);
4947 }
4948 
4949 char * os::native_path(char *path) {
4950   return path;
4951 }
4952 
4953 int os::ftruncate(int fd, jlong length) {
4954   return ::ftruncate64(fd, length);
4955 }
4956 
4957 int os::fsync(int fd)  {
4958   RESTARTABLE_RETURN_INT(::fsync(fd));
4959 }
4960 
4961 int os::available(int fd, jlong *bytes) {
4962   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
4963          "Assumed _thread_in_native");
4964   jlong cur, end;
4965   int mode;
4966   struct stat64 buf64;
4967 
4968   if (::fstat64(fd, &buf64) >= 0) {
4969     mode = buf64.st_mode;
4970     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      int n, ioctl_return;

      RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
      if (ioctl_return >= 0) {
4975         *bytes = n;
4976         return 1;
4977       }
4978     }
4979   }
4980   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4981     return 0;
4982   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4983     return 0;
4984   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4985     return 0;
4986   }
4987   *bytes = end - cur;
4988   return 1;
4989 }
4990 
4991 // Map a block of memory.
4992 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4993                         char *addr, size_t bytes, bool read_only,
4994                         bool allow_exec) {
4995   int prot;
4996   int flags;
4997 
4998   if (read_only) {
4999     prot = PROT_READ;
5000     flags = MAP_SHARED;
5001   } else {
5002     prot = PROT_READ | PROT_WRITE;
5003     flags = MAP_PRIVATE;
5004   }
5005 
5006   if (allow_exec) {
5007     prot |= PROT_EXEC;
5008   }
5009 
5010   if (addr != NULL) {
5011     flags |= MAP_FIXED;
5012   }
5013 
5014   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5015                                      fd, file_offset);
5016   if (mapped_address == MAP_FAILED) {
5017     return NULL;
5018   }
5019   return mapped_address;
5020 }
5021 
5022 
5023 // Remap a block of memory.
5024 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5025                           char *addr, size_t bytes, bool read_only,
5026                           bool allow_exec) {
5027   // same as map_memory() on this OS
5028   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5029                         allow_exec);
5030 }
5031 
5032 
5033 // Unmap a block of memory.
5034 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5035   return munmap(addr, bytes) == 0;
5036 }
5037 
5038 void os::pause() {
5039   char filename[MAX_PATH];
5040   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5042   } else {
5043     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5044   }
5045 
5046   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5047   if (fd != -1) {
5048     struct stat buf;
5049     ::close(fd);
5050     while (::stat(filename, &buf) == 0) {
5051       (void)::poll(NULL, 0, 100);
5052     }
5053   } else {
5054     jio_fprintf(stderr,
5055                 "Could not open pause file '%s', continuing immediately.\n", filename);
5056   }
5057 }
5058 
5059 #ifndef PRODUCT
5060 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5061 // Turn this on if you need to trace synch operations.
5062 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5063 // and call record_synch_enable and record_synch_disable
5064 // around the computation of interest.
5065 
5066 void record_synch(char* name, bool returning);  // defined below
5067 
5068 class RecordSynch {
5069   char* _name;
5070  public:
5071   RecordSynch(char* name) :_name(name) { record_synch(_name, false); }
5072   ~RecordSynch()                       { record_synch(_name, true); }
5073 };
5074 
5075 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
5076 extern "C" ret name params {                                    \
5077   typedef ret name##_t params;                                  \
5078   static name##_t* implem = NULL;                               \
5079   static int callcount = 0;                                     \
5080   if (implem == NULL) {                                         \
5081     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
5082     if (implem == NULL)  fatal(dlerror());                      \
5083   }                                                             \
5084   ++callcount;                                                  \
5085   RecordSynch _rs(#name);                                       \
5086   inner;                                                        \
5087   return implem args;                                           \
5088 }
5089 // in dbx, examine callcounts this way:
5090 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
5091 
5092 #define CHECK_POINTER_OK(p) \
5093   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
5094 #define CHECK_MU \
5095   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5096 #define CHECK_CV \
5097   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5098 #define CHECK_P(p) \
5099   if (!CHECK_POINTER_OK(p))  fatal(false,  "Pointer must be in C heap only.");
5100 
5101 #define CHECK_MUTEX(mutex_op) \
5102   CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5103 
5104 CHECK_MUTEX(   mutex_lock)
5105 CHECK_MUTEX(  _mutex_lock)
5106 CHECK_MUTEX( mutex_unlock)
5107 CHECK_MUTEX(_mutex_unlock)
5108 CHECK_MUTEX( mutex_trylock)
5109 CHECK_MUTEX(_mutex_trylock)
5110 
5111 #define CHECK_COND(cond_op) \
5112   CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU; CHECK_CV);
5113 
5114 CHECK_COND( cond_wait);
5115 CHECK_COND(_cond_wait);
5116 CHECK_COND(_cond_wait_cancel);
5117 
5118 #define CHECK_COND2(cond_op) \
5119   CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU; CHECK_CV);
5120 
5121 CHECK_COND2( cond_timedwait);
5122 CHECK_COND2(_cond_timedwait);
5123 CHECK_COND2(_cond_timedwait_cancel);
5124 
5125 // do the _lwp_* versions too
5126 #define mutex_t lwp_mutex_t
5127 #define cond_t  lwp_cond_t
5128 CHECK_MUTEX(  _lwp_mutex_lock)
5129 CHECK_MUTEX(  _lwp_mutex_unlock)
5130 CHECK_MUTEX(  _lwp_mutex_trylock)
5131 CHECK_MUTEX( __lwp_mutex_lock)
5132 CHECK_MUTEX( __lwp_mutex_unlock)
5133 CHECK_MUTEX( __lwp_mutex_trylock)
5134 CHECK_MUTEX(___lwp_mutex_lock)
5135 CHECK_MUTEX(___lwp_mutex_unlock)
5136 
5137 CHECK_COND(  _lwp_cond_wait);
5138 CHECK_COND( __lwp_cond_wait);
5139 CHECK_COND(___lwp_cond_wait);
5140 
5141 CHECK_COND2(  _lwp_cond_timedwait);
5142 CHECK_COND2( __lwp_cond_timedwait);
5143 #undef mutex_t
5144 #undef cond_t
5145 
5146 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5147 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5148 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5149 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5150 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5151 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5152 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5153 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5154 
5155 
5156 // recording machinery:
5157 
5158 enum { RECORD_SYNCH_LIMIT = 200 };
5159 char* record_synch_name[RECORD_SYNCH_LIMIT];
5160 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5161 bool record_synch_returning[RECORD_SYNCH_LIMIT];
5162 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5163 int record_synch_count = 0;
5164 bool record_synch_enabled = false;
5165 
5166 // in dbx, examine recorded data this way:
5167 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5168 
5169 void record_synch(char* name, bool returning) {
5170   if (record_synch_enabled) {
5171     if (record_synch_count < RECORD_SYNCH_LIMIT) {
5172       record_synch_name[record_synch_count] = name;
5173       record_synch_returning[record_synch_count] = returning;
5174       record_synch_thread[record_synch_count] = thr_self();
5175       record_synch_arg0ptr[record_synch_count] = &name;
5176       record_synch_count++;
5177     }
5178     // put more checking code here:
5179     // ...
5180   }
5181 }
5182 
5183 void record_synch_enable() {
5184   // start collecting trace data, if not already doing so
5185   if (!record_synch_enabled)  record_synch_count = 0;
5186   record_synch_enabled = true;
5187 }
5188 
5189 void record_synch_disable() {
5190   // stop collecting trace data
5191   record_synch_enabled = false;
5192 }
5193 
5194 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5195 #endif // PRODUCT
5196 
5197 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5198 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5199                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5200 
5201 
5202 // JVMTI & JVM monitoring and management support
5203 // The thread_cpu_time() and current_thread_cpu_time() are only
5204 // supported if is_thread_cpu_time_supported() returns true.
5205 // They are not supported on Solaris T1.
5206 
5207 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5208 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5209 // of a thread.
5210 //
5211 // current_thread_cpu_time() and thread_cpu_time(Thread *)
// return the fast estimate available on the platform.
5213 
5214 // hrtime_t gethrvtime() return value includes
5215 // user time but does not include system time
5216 jlong os::current_thread_cpu_time() {
5217   return (jlong) gethrvtime();
5218 }
5219 
5220 jlong os::thread_cpu_time(Thread *thread) {
5221   // return user level CPU time only to be consistent with
5222   // what current_thread_cpu_time returns.
5223   // thread_cpu_time_info() must be changed if this changes
5224   return os::thread_cpu_time(thread, false /* user time only */);
5225 }
5226 
5227 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5228   if (user_sys_cpu_time) {
5229     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5230   } else {
5231     return os::current_thread_cpu_time();
5232   }
5233 }
5234 
5235 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5236   char proc_name[64];
5237   int count;
5238   prusage_t prusage;
5239   jlong lwp_time;
5240   int fd;
5241 
5242   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5243           getpid(),
5244           thread->osthread()->lwp_id());
5245   fd = ::open(proc_name, O_RDONLY);
5246   if (fd == -1) return -1;
5247 
5248   do {
5249     count = ::pread(fd,
5250                     (void *)&prusage.pr_utime,
5251                     thr_time_size,
5252                     thr_time_off);
5253   } while (count < 0 && errno == EINTR);
5254   ::close(fd);
5255   if (count < 0) return -1;
5256 
5257   if (user_sys_cpu_time) {
5258     // user + system CPU time
5259     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5260                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5261                  (jlong)prusage.pr_stime.tv_nsec +
5262                  (jlong)prusage.pr_utime.tv_nsec;
5263   } else {
5264     // user level CPU time only
5265     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5266                 (jlong)prusage.pr_utime.tv_nsec;
5267   }
5268 
5269   return (lwp_time);
5270 }
5271 
5272 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5273   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5274   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5275   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5276   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5277 }
5278 
5279 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5280   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5281   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5282   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5283   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5284 }
5285 
5286 bool os::is_thread_cpu_time_supported() {
5287   return true;
5288 }
5289 
5290 // System loadavg support.  Returns -1 if load average cannot be obtained.
5291 // Return the load average for our processor set if the primitive exists
5292 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
5293 int os::loadavg(double loadavg[], int nelem) {
5294   if (pset_getloadavg_ptr != NULL) {
5295     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5296   } else {
5297     return ::getloadavg(loadavg, nelem);
5298   }
5299 }
5300 
5301 //---------------------------------------------------------------------------------
5302 
5303 bool os::find(address addr, outputStream* st) {
5304   Dl_info dlinfo;
5305   memset(&dlinfo, 0, sizeof(dlinfo));
5306   if (dladdr(addr, &dlinfo) != 0) {
5307     st->print(PTR_FORMAT ": ", addr);
5308     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5309       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5310     } else if (dlinfo.dli_fbase != NULL) {
5311       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5312     } else {
5313       st->print("<absolute address>");
5314     }
5315     if (dlinfo.dli_fname != NULL) {
5316       st->print(" in %s", dlinfo.dli_fname);
5317     }
5318     if (dlinfo.dli_fbase != NULL) {
5319       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5320     }
5321     st->cr();
5322 
5323     if (Verbose) {
5324       // decode some bytes around the PC
5325       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5326       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5327       address       lowest = (address) dlinfo.dli_sname;
5328       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
5329       if (begin < lowest)  begin = lowest;
5330       Dl_info dlinfo2;
5331       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5332           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
5333         end = (address) dlinfo2.dli_saddr;
5334       }
5335       Disassembler::decode(begin, end, st);
5336     }
5337     return true;
5338   }
5339   return false;
5340 }
5341 
// The following function has been added to support HotSparc's libjvm.so running
5343 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
5344 // src/solaris/hpi/native_threads in the EVM codebase.
5345 //
5346 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5347 // libraries and should thus be removed. We will leave it behind for a while
// until we no longer want to be able to run on top of 1.3.0 Solaris production
5349 // JDK. See 4341971.
5350 
5351 #define STACK_SLACK 0x800
5352 
5353 extern "C" {
5354   intptr_t sysThreadAvailableStackWithSlack() {
5355     stack_t st;
5356     intptr_t retval, stack_top;
5357     retval = thr_stksegment(&st);
5358     assert(retval == 0, "incorrect return value from thr_stksegment");
5359     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5360     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
5361     stack_top=(intptr_t)st.ss_sp-st.ss_size;
5362     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5363   }
5364 }
5365 
5366 // ObjectMonitor park-unpark infrastructure ...
5367 //
5368 // We implement Solaris and Linux PlatformEvents with the
5369 // obvious condvar-mutex-flag triple.
5370 // Another alternative that works quite well is pipes:
5371 // Each PlatformEvent consists of a pipe-pair.
5372 // The thread associated with the PlatformEvent
5373 // calls park(), which reads from the input end of the pipe.
5374 // Unpark() writes into the other end of the pipe.
5375 // The write-side of the pipe must be set NDELAY.
5376 // Unfortunately pipes consume a large # of handles.
5377 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5378 // Using pipes for the 1st few threads might be workable, however.
5379 //
5380 // park() is permitted to return spuriously.
5381 // Callers of park() should wrap the call to park() in
5382 // an appropriate loop.  A litmus test for the correct
5383 // usage of park is the following: if park() were modified
5384 // to immediately return 0 your code should still work,
5385 // albeit degenerating to a spin loop.
5386 //
5387 // In a sense, park()-unpark() just provides more polite spinning
5388 // and polling with the key difference over naive spinning being
5389 // that a parked thread needs to be explicitly unparked() in order
5390 // to wake up and to poll the underlying condition.
5391 //
5392 // Assumption:
5393 //    Only one parker can exist on an event, which is why we allocate
5394 //    them per-thread. Multiple unparkers can coexist.
5395 //
5396 // _Event transitions in park()
5397 //   -1 => -1 : illegal
5398 //    1 =>  0 : pass - return immediately
5399 //    0 => -1 : block; then set _Event to 0 before returning
5400 //
5401 // _Event transitions in unpark()
5402 //    0 => 1 : just return
5403 //    1 => 1 : just return
5404 //   -1 => either 0 or 1; must signal target thread
5405 //         That is, we can safely transition _Event from -1 to either
5406 //         0 or 1.
5407 //
5408 // _Event serves as a restricted-range semaphore.
5409 //   -1 : thread is blocked, i.e. there is a waiter
5410 //    0 : neutral: thread is running or ready,
5411 //        could have been signaled after a wait started
5412 //    1 : signaled - thread is running or ready
5413 //
5414 // Another possible encoding of _Event would be with
5415 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5416 //
5417 // TODO-FIXME: add DTRACE probes for:
5418 // 1.   Tx parks
5419 // 2.   Ty unparks Tx
5420 // 3.   Tx resumes from park
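//
// A minimal caller-side sketch of that contract (condition_of_interest()
// and ev are hypothetical):
//
//   while (!condition_of_interest()) {
//     ev->park();          // may return spuriously; re-test the condition
//   }
//
// with the waker establishing the condition and then calling ev->unpark().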
5421 
5422 
5423 // value determined through experimentation
5424 #define ROUNDINGFIX 11
5425 
5426 // utility to compute the abstime argument to timedwait.
5427 // TODO-FIXME: switch from compute_abstime() to unpackTime().
5428 
5429 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5430   // millis is the relative timeout time
5431   // abstime will be the absolute timeout time
5432   if (millis < 0)  millis = 0;
5433   struct timeval now;
5434   int status = gettimeofday(&now, NULL);
5435   assert(status == 0, "gettimeofday");
5436   jlong seconds = millis / 1000;
5437   jlong max_wait_period;
5438 
5439   if (UseLWPSynchronization) {
5440     // forward port of fix for 4275818 (not sleeping long enough)
5441     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5442     // _lwp_cond_timedwait() used a round_down algorithm rather
5443     // than a round_up. For millis less than our roundfactor
5444     // it rounded down to 0 which doesn't meet the spec.
5445     // For millis > roundfactor we may return a bit sooner, but
5446     // since we can not accurately identify the patch level and
5447     // this has already been fixed in Solaris 9 and 8 we will
5448     // leave it alone rather than always rounding down.
5449 
5450     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
5451     // It appears that when we go directly through Solaris _lwp_cond_timedwait()
5452     // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
5453     max_wait_period = 21000000;
5454   } else {
5455     max_wait_period = 50000000;
5456   }
5457   millis %= 1000;
5458   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5459     seconds = max_wait_period;
5460   }
5461   abstime->tv_sec = now.tv_sec  + seconds;
5462   long       usec = now.tv_usec + millis * 1000;
5463   if (usec >= 1000000) {
5464     abstime->tv_sec += 1;
5465     usec -= 1000000;
5466   }
5467   abstime->tv_nsec = usec * 1000;
5468   return abstime;
5469 }
5470 
5471 void os::PlatformEvent::park() {           // AKA: down()
5472   // Transitions for _Event:
5473   //   -1 => -1 : illegal
5474   //    1 =>  0 : pass - return immediately
5475   //    0 => -1 : block; then set _Event to 0 before returning
5476 
5477   // Invariant: Only the thread associated with the Event/PlatformEvent
5478   // may call park().
5479   assert(_nParked == 0, "invariant");
5480 
5481   int v;
5482   for (;;) {
5483     v = _Event;
5484     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5485   }
5486   guarantee(v >= 0, "invariant");
5487   if (v == 0) {
5488     // Do this the hard way by blocking ...
5489     // See http://monaco.sfbay/detail.jsf?cr=5094058.
5490     // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5491     // Only for SPARC >= V8PlusA
5492 #if defined(__sparc) && defined(COMPILER2)
5493     if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5494 #endif
5495     int status = os::Solaris::mutex_lock(_mutex);
5496     assert_status(status == 0, status, "mutex_lock");
5497     guarantee(_nParked == 0, "invariant");
5498     ++_nParked;
5499     while (_Event < 0) {
5500       // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5501       // Treat this the same as if the wait was interrupted
5502       // With usr/lib/lwp going to kernel, always handle ETIME
5503       status = os::Solaris::cond_wait(_cond, _mutex);
5504       if (status == ETIME) status = EINTR;
5505       assert_status(status == 0 || status == EINTR, status, "cond_wait");
5506     }
5507     --_nParked;
5508     _Event = 0;
5509     status = os::Solaris::mutex_unlock(_mutex);
5510     assert_status(status == 0, status, "mutex_unlock");
5511     // Paranoia to ensure our locked and lock-free paths interact
5512     // correctly with each other.
5513     OrderAccess::fence();
5514   }
5515 }
5516 
5517 int os::PlatformEvent::park(jlong millis) {
5518   // Transitions for _Event:
5519   //   -1 => -1 : illegal
5520   //    1 =>  0 : pass - return immediately
5521   //    0 => -1 : block; then set _Event to 0 before returning
5522 
5523   guarantee(_nParked == 0, "invariant");
5524   int v;
5525   for (;;) {
5526     v = _Event;
5527     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5528   }
5529   guarantee(v >= 0, "invariant");
5530   if (v != 0) return OS_OK;
5531 
5532   int ret = OS_TIMEOUT;
5533   timestruc_t abst;
5534   compute_abstime(&abst, millis);
5535 
5536   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5537   // For Solaris SPARC set fprs.FEF=0 prior to parking.
5538   // Only for SPARC >= V8PlusA
5539 #if defined(__sparc) && defined(COMPILER2)
5540   if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5541 #endif
5542   int status = os::Solaris::mutex_lock(_mutex);
5543   assert_status(status == 0, status, "mutex_lock");
5544   guarantee(_nParked == 0, "invariant");
5545   ++_nParked;
5546   while (_Event < 0) {
5547     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5548     assert_status(status == 0 || status == EINTR ||
5549                   status == ETIME || status == ETIMEDOUT,
5550                   status, "cond_timedwait");
5551     if (!FilterSpuriousWakeups) break;                // previous semantics
5552     if (status == ETIME || status == ETIMEDOUT) break;
5553     // We consume and ignore EINTR and spurious wakeups.
5554   }
5555   --_nParked;
5556   if (_Event >= 0) ret = OS_OK;
5557   _Event = 0;
5558   status = os::Solaris::mutex_unlock(_mutex);
5559   assert_status(status == 0, status, "mutex_unlock");
5560   // Paranoia to ensure our locked and lock-free paths interact
5561   // correctly with each other.
5562   OrderAccess::fence();
5563   return ret;
5564 }
5565 
5566 void os::PlatformEvent::unpark() {
5567   // Transitions for _Event:
5568   //    0 => 1 : just return
5569   //    1 => 1 : just return
5570   //   -1 => either 0 or 1; must signal target thread
5571   //         That is, we can safely transition _Event from -1 to either
5572   //         0 or 1.
5573   // See also: "Semaphores in Plan 9" by Mullender & Cox
5574   //
5575   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5576   // that it will take two back-to-back park() calls for the owning
5577   // thread to block. This has the benefit of forcing a spurious return
5578   // from the first park() call after an unpark() call which will help
5579   // shake out uses of park() and unpark() without condition variables.
5580 
5581   if (Atomic::xchg(1, &_Event) >= 0) return;
5582 
5583   // If the thread associated with the event was parked, wake it.
5584   // Wait for the thread assoc with the PlatformEvent to vacate.
5585   int status = os::Solaris::mutex_lock(_mutex);
5586   assert_status(status == 0, status, "mutex_lock");
5587   int AnyWaiters = _nParked;
5588   status = os::Solaris::mutex_unlock(_mutex);
5589   assert_status(status == 0, status, "mutex_unlock");
5590   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5591   if (AnyWaiters != 0) {
5592     // Note that we signal() *after* dropping the lock for "immortal" Events.
5593     // This is safe and avoids a common class of  futile wakeups.  In rare
5594     // circumstances this can cause a thread to return prematurely from
5595     // cond_{timed}wait() but the spurious wakeup is benign and the victim
5596     // will simply re-test the condition and re-park itself.
5597     // This provides particular benefit if the underlying platform does not
5598     // provide wait morphing.
5599     status = os::Solaris::cond_signal(_cond);
5600     assert_status(status == 0, status, "cond_signal");
5601   }
5602 }
5603 
5604 // JSR166
5605 // -------------------------------------------------------
5606 
5607 // The solaris and linux implementations of park/unpark are fairly
5608 // conservative for now, but can be improved. They currently use a
5609 // mutex/condvar pair, plus _counter.
5610 // Park decrements _counter if > 0, else does a condvar wait.  Unpark
5611 // sets count to 1 and signals condvar.  Only one thread ever waits
5612 // on the condvar. Contention seen when trying to park implies that someone
5613 // is unparking you, so don't wait. And spurious returns are fine, so there
5614 // is no need to track notifications.
5615 
5616 #define MAX_SECS 100000000
5617 
5618 // This code is common to linux and solaris and will be moved to a
5619 // common place in dolphin.
5620 //
5621 // The passed in time value is either a relative time in nanoseconds
5622 // or an absolute time in milliseconds. Either way it has to be unpacked
5623 // into suitable seconds and nanoseconds components and stored in the
5624 // given timespec structure.
5625 // Given time is a 64-bit value and the time_t used in the timespec is only
5626 // a signed-32-bit value (except on 64-bit Linux) we have to watch for
5627 // overflow if times way in the future are given. Further on Solaris versions
5628 // prior to 10 there is a restriction (see cond_timedwait) that the specified
5629 // number of seconds, in abstime, is less than current_time  + 100,000,000.
5630 // As it will be 28 years before "now + 100000000" will overflow we can
5631 // ignore overflow and just impose a hard-limit on seconds using the value
5632 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
5633 // years from "now".
5634 //
5635 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5636   assert(time > 0, "convertTime");
5637 
5638   struct timeval now;
5639   int status = gettimeofday(&now, NULL);
5640   assert(status == 0, "gettimeofday");
5641 
5642   time_t max_secs = now.tv_sec + MAX_SECS;
5643 
5644   if (isAbsolute) {
5645     jlong secs = time / 1000;
5646     if (secs > max_secs) {
5647       absTime->tv_sec = max_secs;
5648     } else {
5649       absTime->tv_sec = secs;
5650     }
5651     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5652   } else {
5653     jlong secs = time / NANOSECS_PER_SEC;
5654     if (secs >= MAX_SECS) {
5655       absTime->tv_sec = max_secs;
5656       absTime->tv_nsec = 0;
5657     } else {
5658       absTime->tv_sec = now.tv_sec + secs;
5659       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5660       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5661         absTime->tv_nsec -= NANOSECS_PER_SEC;
5662         ++absTime->tv_sec; // note: this must be <= max_secs
5663       }
5664     }
5665   }
5666   assert(absTime->tv_sec >= 0, "tv_sec < 0");
5667   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5668   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5669   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5670 }
5671 
5672 void Parker::park(bool isAbsolute, jlong time) {
5673   // Ideally we'd do something useful while spinning, such
5674   // as calling unpackTime().
5675 
5676   // Optional fast-path check:
5677   // Return immediately if a permit is available.
5678   // We depend on Atomic::xchg() having full barrier semantics
5679   // since we are doing a lock-free update to _counter.
5680   if (Atomic::xchg(0, &_counter) > 0) return;
5681 
5682   // Optional fast-exit: Check interrupt before trying to wait
5683   Thread* thread = Thread::current();
5684   assert(thread->is_Java_thread(), "Must be JavaThread");
5685   JavaThread *jt = (JavaThread *)thread;
5686   if (Thread::is_interrupted(thread, false)) {
5687     return;
5688   }
5689 
5690   // First, demultiplex/decode time arguments
5691   timespec absTime;
5692   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
5693     return;
5694   }
5695   if (time > 0) {
5696     // Warning: this code might be exposed to the old Solaris time
    // round-down bugs.  Grep "ROUNDINGFIX" for details.
5698     unpackTime(&absTime, isAbsolute, time);
5699   }
5700 
5701   // Enter safepoint region
5702   // Beware of deadlocks such as 6317397.
5703   // The per-thread Parker:: _mutex is a classic leaf-lock.
5704   // In particular a thread must never block on the Threads_lock while
5705   // holding the Parker:: mutex.  If safepoints are pending both the
5706   // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
5707   ThreadBlockInVM tbivm(jt);
5708 
  // Don't wait if we cannot get the lock, since interference arises from
  // unblocking.  Also, check for interruption before trying to wait.
5711   if (Thread::is_interrupted(thread, false) ||
5712       os::Solaris::mutex_trylock(_mutex) != 0) {
5713     return;
5714   }
5715 
5716   int status;
5717 
5718   if (_counter > 0)  { // no wait needed
5719     _counter = 0;
5720     status = os::Solaris::mutex_unlock(_mutex);
5721     assert(status == 0, "invariant");
5722     // Paranoia to ensure our locked and lock-free paths interact
5723     // correctly with each other and Java-level accesses.
5724     OrderAccess::fence();
5725     return;
5726   }
5727 
5728 #ifdef ASSERT
5729   // Don't catch signals while blocked; let the running threads have the signals.
5730   // (This allows a debugger to break into the running thread.)
5731   sigset_t oldsigs;
5732   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
5733   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
5734 #endif
5735 
5736   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5737   jt->set_suspend_equivalent();
5738   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5739 
5740   // Do this the hard way by blocking ...
5741   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5742   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5743   // Only for SPARC >= V8PlusA
5744 #if defined(__sparc) && defined(COMPILER2)
5745   if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5746 #endif
5747 
5748   if (time == 0) {
5749     status = os::Solaris::cond_wait(_cond, _mutex);
5750   } else {
5751     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
5752   }
5753   // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of Solaris.
5755   assert_status(status == 0 || status == EINTR ||
5756                 status == ETIME || status == ETIMEDOUT,
5757                 status, "cond_timedwait");
5758 
5759 #ifdef ASSERT
5760   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
5761 #endif
5762   _counter = 0;
5763   status = os::Solaris::mutex_unlock(_mutex);
5764   assert_status(status == 0, status, "mutex_unlock");
5765   // Paranoia to ensure our locked and lock-free paths interact
5766   // correctly with each other and Java-level accesses.
5767   OrderAccess::fence();
5768 
5769   // If externally suspended while waiting, re-suspend
5770   if (jt->handle_special_suspend_equivalent_condition()) {
5771     jt->java_suspend_self();
5772   }
5773 }
5774 
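     // Parker::unpark() publishes the park "permit" and wakes at most one
     // parked thread: _counter is set to 1 under _mutex, and cond_signal()
     // is issued only when the previous value indicates that a thread may be
     // blocked in cond_wait()/cond_timedwait() above.  Signalling after the
     // mutex has been released is presumably intended to avoid waking the
     // waiter only to have it immediately block again on a mutex we still hold.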
5775 void Parker::unpark() {
5776   int status = os::Solaris::mutex_lock(_mutex);
5777   assert(status == 0, "invariant");
5778   const int s = _counter;
5779   _counter = 1;
5780   status = os::Solaris::mutex_unlock(_mutex);
5781   assert(status == 0, "invariant");
5782 
5783   if (s < 1) {
5784     status = os::Solaris::cond_signal(_cond);
5785     assert(status == 0, "invariant");
5786   }
5787 }
5788 
5789 extern char** environ;
5790 
5791 // Run the specified command in a separate process. Return its exit value,
5792 // or -1 on failure (e.g. can't fork a new process).
5793 // Unlike system(), this function can be called from a signal handler. It
5794 // doesn't block SIGINT et al.
5795 int os::fork_and_exec(char* cmd) {
5796   char * argv[4];
5797   argv[0] = (char *)"sh";
5798   argv[1] = (char *)"-c";
5799   argv[2] = cmd;
5800   argv[3] = NULL;
5801 
5802   // fork() is async-signal-safe; fork1() is not, so it can't be used in a signal handler
5803   pid_t pid;
5804   Thread* t = ThreadLocalStorage::get_thread_slow();
5805   if (t != NULL && t->is_inside_signal_handler()) {
5806     pid = fork();
5807   } else {
5808     pid = fork1();
5809   }
5810 
5811   if (pid < 0) {
5812     // fork failed
5813     warning("fork failed: %s", strerror(errno));
5814     return -1;
5815 
5816   } else if (pid == 0) {
5817     // child process
5818 
5819     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
5820     execve("/usr/bin/sh", argv, environ);
5821 
5822     // execve failed
5823     _exit(-1);
5824 
5825   } else  {
5826     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
5827     // care about the actual exit code, for now.
5828 
5829     int status;
5830 
5831     // Wait for the child process to exit.  This returns immediately if
5832     // the child has already exited.
5833     while (waitpid(pid, &status, 0) < 0) {
5834       switch (errno) {
5835       case ECHILD: return 0;
5836       case EINTR: break;
5837       default: return -1;
5838       }
5839     }
5840 
5841     if (WIFEXITED(status)) {
5842       // The child exited normally; get its exit code.
5843       return WEXITSTATUS(status);
5844     } else if (WIFSIGNALED(status)) {
5845       // The child exited because of a signal
5846       // The best value to return is 0x80 + signal number,
5847       // because that is what all Unix shells do, and because
5848       // it allows callers to distinguish between process exit and
5849       // process death by signal.
5850       return 0x80 + WTERMSIG(status);
5851     } else {
5852       // Unknown exit code; pass it through
5853       return status;
5854     }
5855   }
5856 }
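
     // Illustrative only (an assumption, not something this file defines): the
     // error-reporting code is expected to use os::fork_and_exec() to run
     // user-supplied commands such as those given via -XX:OnError, roughly:
     //
     //   char cmd[] = "/usr/bin/pstack 1234";        // hypothetical command
     //   int exit_code = os::fork_and_exec(cmd);
     //   if (exit_code == -1) { /* fork or exec failed */ }
     //
     // Note that the command buffer is passed as a mutable char*, matching the
     // argv[2] assignment above.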
5857 
5858 // is_headless_jre()
5859 //
5860 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
5861 // in order to report whether we are running in a headless JRE.
5862 //
5863 // Since JDK 8, xawt/libmawt.so has been moved into the same directory
5864 // as libawt.so and renamed libawt_xawt.so.
5865 //
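     // For example (hypothetical layout): if libjvm.so lives at
     //   <jre>/lib/sparcv9/server/libjvm.so
     // the code below strips the last two path components, leaving
     //   <jre>/lib/sparcv9
     // and then probes for
     //   <jre>/lib/sparcv9/xawt/libmawt.so   (pre-JDK 8 name)
     //   <jre>/lib/sparcv9/libawt_xawt.so    (JDK 8+ name)
     // returning true (headless) only if neither file exists.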
5866 bool os::is_headless_jre() {
5867   struct stat statbuf;
5868   char buf[MAXPATHLEN];
5869   char libmawtpath[MAXPATHLEN];
5870   const char *xawtstr  = "/xawt/libmawt.so";
5871   const char *new_xawtstr = "/libawt_xawt.so";
5872   char *p;
5873 
5874   // Get path to libjvm.so
5875   os::jvm_path(buf, sizeof(buf));
5876 
5877   // Get rid of libjvm.so
5878   p = strrchr(buf, '/');
5879   if (p == NULL) {
5880     return false;
5881   } else {
5882     *p = '\0';
5883   }
5884 
5885   // Get rid of client or server
5886   p = strrchr(buf, '/');
5887   if (p == NULL) {
5888     return false;
5889   } else {
5890     *p = '\0';
5891   }
5892 
5893   // check xawt/libmawt.so
5894   strcpy(libmawtpath, buf);
5895   strcat(libmawtpath, xawtstr);
5896   if (::stat(libmawtpath, &statbuf) == 0) return false;
5897 
5898   // check libawt_xawt.so
5899   strcpy(libmawtpath, buf);
5900   strcat(libmawtpath, new_xawtstr);
5901   if (::stat(libmawtpath, &statbuf) == 0) return false;
5902 
5903   return true;
5904 }
5905 
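     // The RESTARTABLE / RESTARTABLE_RETURN_INT macros used below are assumed
     // to come from the platform headers (e.g. os_solaris.inline.hpp) and to
     // re-issue the underlying system call while it fails with EINTR, so the
     // wrappers never surface a spurious EINTR to their callers.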
5906 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
5907   size_t res;
5908   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5909          "Assumed _thread_in_native");
5910   RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
5911   return res;
5912 }
5913 
5914 int os::close(int fd) {
5915   return ::close(fd);
5916 }
5917 
5918 int os::socket_close(int fd) {
5919   return ::close(fd);
5920 }
5921 
5922 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5923   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5924          "Assumed _thread_in_native");
5925   RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
5926 }
5927 
5928 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5929   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5930          "Assumed _thread_in_native");
5931   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5932 }
5933 
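     // raw_send() is identical to send() except that it omits the
     // _thread_in_native assertion, so it can presumably be called from
     // contexts that are not in the native thread state (for example, early
     // startup or error-reporting paths).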
5934 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5935   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5936 }
5937 
5938 // As both poll() and select() can be interrupted by signals, we have to be
5939 // prepared to restart the system call after updating the timeout, unless
5940 // poll() was called with a timeout of -1, in which case we simply repeat
5941 // with the same "wait forever" value.
5942 
5943 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
5944   int _result;
5945   _result = ::connect(fd, him, len);
5946 
5947   // On Solaris, when a connect() call is interrupted, the connection
5948   // can be established asynchronously (see 6343810). Subsequent calls
5949   // to connect() must check the errno value which has the semantic
5950   // described below (copied from the connect() man page). Handling
5951   // of asynchronously established connections is required for both
5952   // blocking and non-blocking sockets.
5953   //     EINTR            The  connection  attempt  was   interrupted
5954   //                      before  any data arrived by the delivery of
5955   //                      a signal. The connection, however, will  be
5956   //                      established asynchronously.
5957   //
5958   //     EINPROGRESS      The socket is non-blocking, and the connec-
5959   //                      tion  cannot  be completed immediately.
5960   //
5961   //     EALREADY         The socket is non-blocking,  and a previous
5962   //                      connection  attempt  has  not yet been com-
5963   //                      pleted.
5964   //
5965   //     EISCONN          The socket is already connected.
5966   if (_result == OS_ERR && errno == EINTR) {
5967     // restarting a connect() changes its errno semantics
5968     RESTARTABLE(::connect(fd, him, len), _result);
5969     // undo these changes
5970     if (_result == OS_ERR) {
5971       if (errno == EALREADY) {
5972         errno = EINPROGRESS; // fall through
5973       } else if (errno == EISCONN) {
5974         errno = 0;
5975         return OS_OK;
5976       }
5977     }
5978   }
5979   return _result;
5980 }
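
     // Illustrative only (hypothetical caller code, not part of this file):
     // a caller that sees EINPROGRESS after the EINTR handling above would
     // typically wait for writability and then read SO_ERROR to learn the
     // final outcome of the asynchronous connect, roughly:
     //
     //   struct pollfd pfd = { fd, POLLOUT, 0 };
     //   if (::poll(&pfd, 1, timeout_ms) > 0) {
     //     int err = 0;
     //     socklen_t len = sizeof(err);
     //     ::getsockopt(fd, SOL_SOCKET, SO_ERROR, (char*)&err, &len);
     //     // err == 0 means the connection was established successfully
     //   }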
5981 
5982 // Get the default path to the core file
5983 // Returns the length of the string
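     // For example, with a current directory of /var/tmp and a process id of
     // 1234 the buffer will end up containing "/var/tmp/core or core.1234".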
5984 int os::get_core_path(char* buffer, size_t bufferSize) {
5985   const char* p = get_current_directory(buffer, bufferSize);
5986 
5987   if (p == NULL) {
5988     assert(p != NULL, "failed to get current directory");
5989     return 0;
5990   }
5991 
5992   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
5993                p, current_process_id());
5994 
5995   return strlen(buffer);
5996 }
5997 
5998 #ifndef PRODUCT
5999 void TestReserveMemorySpecial_test() {
6000   // No tests available for this platform
6001 }
6002 #endif