1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "jvm_solaris.h"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/filemap.hpp"
  37 #include "mutex_solaris.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "os_share_solaris.hpp"
  40 #include "os_solaris.inline.hpp"
  41 #include "prims/jniFastGetField.hpp"
  42 #include "prims/jvm.h"
  43 #include "prims/jvm_misc.hpp"
  44 #include "runtime/arguments.hpp"
  45 #include "runtime/atomic.inline.hpp"
  46 #include "runtime/extendedPC.hpp"
  47 #include "runtime/globals.hpp"
  48 #include "runtime/interfaceSupport.hpp"
  49 #include "runtime/java.hpp"
  50 #include "runtime/javaCalls.hpp"
  51 #include "runtime/mutexLocker.hpp"
  52 #include "runtime/objectMonitor.hpp"
  53 #include "runtime/orderAccess.inline.hpp"
  54 #include "runtime/osThread.hpp"
  55 #include "runtime/perfMemory.hpp"
  56 #include "runtime/sharedRuntime.hpp"
  57 #include "runtime/statSampler.hpp"
  58 #include "runtime/stubRoutines.hpp"
  59 #include "runtime/thread.inline.hpp"
  60 #include "runtime/threadCritical.hpp"
  61 #include "runtime/timer.hpp"
  62 #include "runtime/vm_version.hpp"
  63 #include "services/attachListener.hpp"
  64 #include "services/memTracker.hpp"
  65 #include "services/runtimeService.hpp"
  66 #include "utilities/decoder.hpp"
  67 #include "utilities/defaultStream.hpp"
  68 #include "utilities/events.hpp"
  69 #include "utilities/growableArray.hpp"
  70 #include "utilities/vmError.hpp"
  71 
  72 // put OS-includes here
  73 # include <dlfcn.h>
  74 # include <errno.h>
  75 # include <exception>
  76 # include <link.h>
  77 # include <poll.h>
  78 # include <pthread.h>
  79 # include <pwd.h>
  80 # include <schedctl.h>
  81 # include <setjmp.h>
  82 # include <signal.h>
  83 # include <stdio.h>
  84 # include <alloca.h>
  85 # include <sys/filio.h>
  86 # include <sys/ipc.h>
  87 # include <sys/lwp.h>
  88 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
  89 # include <sys/mman.h>
  90 # include <sys/processor.h>
  91 # include <sys/procset.h>
  92 # include <sys/pset.h>
  93 # include <sys/resource.h>
  94 # include <sys/shm.h>
  95 # include <sys/socket.h>
  96 # include <sys/stat.h>
  97 # include <sys/systeminfo.h>
  98 # include <sys/time.h>
  99 # include <sys/times.h>
 100 # include <sys/types.h>
 101 # include <sys/wait.h>
 102 # include <sys/utsname.h>
 103 # include <thread.h>
 104 # include <unistd.h>
 105 # include <sys/priocntl.h>
 106 # include <sys/rtpriocntl.h>
 107 # include <sys/tspriocntl.h>
 108 # include <sys/iapriocntl.h>
 109 # include <sys/fxpriocntl.h>
 110 # include <sys/loadavg.h>
 111 # include <string.h>
 113 
 114 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
 115 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
 116 
 117 #define MAX_PATH (2 * K)
 118 
 119 // for timer info max values which include all bits
 120 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 121 
 122 
  123 // Define some liblgrp constants from sys/lgrp_user.h so that we can
  124 // compile on older systems that do not provide this header file.
 125 
 126 #ifndef MADV_ACCESS_LWP
 127   #define  MADV_ACCESS_LWP   7       /* next LWP to access heavily */
 128 #endif
 129 #ifndef MADV_ACCESS_MANY
 130   #define  MADV_ACCESS_MANY  8       /* many processes to access heavily */
 131 #endif
 132 
 133 #ifndef LGRP_RSRC_CPU
 134   #define LGRP_RSRC_CPU      0       /* CPU resources */
 135 #endif
 136 #ifndef LGRP_RSRC_MEM
 137   #define LGRP_RSRC_MEM      1       /* memory resources */
 138 #endif
 139 
 140 // see thr_setprio(3T) for the basis of these numbers
 141 #define MinimumPriority 0
 142 #define NormalPriority  64
 143 #define MaximumPriority 127
 144 
 145 // Values for ThreadPriorityPolicy == 1
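      // Maps Java thread priorities (1..10, plus the critical priority) onto the
      // 0..127 range accepted by thr_setprio(); entry 0 is an unused sentinel,
      // since Java priorities start at 1.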
 146 int prio_policy1[CriticalPriority+1] = {
 147   -99999,  0, 16,  32,  48,  64,
 148           80, 96, 112, 124, 127, 127 };
 149 
 150 // System parameters used internally
 151 static clock_t clock_tics_per_sec = 100;
 152 
 153 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
 154 static bool enabled_extended_FILE_stdio = false;
 155 
 156 // For diagnostics to print a message once. see run_periodic_checks
 157 static bool check_addr0_done = false;
 158 static sigset_t check_signal_done;
 159 static bool check_signals = true;
 160 
 161 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
 162 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
 163 
 164 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
 165 
 166 
 167 // "default" initializers for missing libc APIs
 168 extern "C" {
 169   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 170   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
 171 
 172   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 173   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
 174 }
 175 
 176 // "default" initializers for pthread-based synchronization
 177 extern "C" {
 178   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 179   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 180 }
 181 
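      // Forward declaration; unpackTime() is defined later in this file, where it
      // converts a wait/park timeout into an absolute or relative timespec.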
 182 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 183 
 184 // Thread Local Storage
 185 // This is common to all Solaris platforms so it is defined here,
 186 // in this common file.
 187 // The declarations are in the os_cpu threadLS*.hpp files.
 188 //
 189 // Static member initialization for TLS
 190 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
 191 
 192 #ifndef PRODUCT
 193   #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
 194 
 195 int ThreadLocalStorage::_tcacheHit = 0;
 196 int ThreadLocalStorage::_tcacheMiss = 0;
 197 
 198 void ThreadLocalStorage::print_statistics() {
 199   int total = _tcacheMiss+_tcacheHit;
 200   tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
 201                 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
 202 }
 203   #undef _PCT
 204 #endif // PRODUCT
 205 
 206 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
 207                                                         int index) {
 208   Thread *thread = get_thread_slow();
 209   if (thread != NULL) {
 210     address sp = os::current_stack_pointer();
 211     guarantee(thread->_stack_base == NULL ||
 212               (sp <= thread->_stack_base &&
 213               sp >= thread->_stack_base - thread->_stack_size) ||
 214               is_error_reported(),
 215               "sp must be inside of selected thread stack");
 216 
 217     thread->set_self_raw_id(raw_id);  // mark for quick retrieval
 218     _get_thread_cache[index] = thread;
 219   }
 220   return thread;
 221 }
 222 
 223 
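      // A zero-filled block at least as large as a Thread, used as a sentinel
      // meaning "no cached thread".  Because it reads as all zeroes, the TLS
      // cache lookup can compare its raw id without a NULL check and simply miss.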
 224 static const double all_zero[sizeof(Thread) / sizeof(double) + 1] = {0};
 225 #define NO_CACHED_THREAD ((Thread*)all_zero)
 226 
 227 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
 228 
 229   // Store the new value before updating the cache to prevent a race
 230   // between get_thread_via_cache_slowly() and this store operation.
 231   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
 232 
 233   // Update thread cache with new thread if setting on thread create,
 234   // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
 235   uintptr_t raw = pd_raw_thread_id();
 236   int ix = pd_cache_index(raw);
 237   _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
 238 }
 239 
 240 void ThreadLocalStorage::pd_init() {
 241   for (int i = 0; i < _pd_cache_size; i++) {
 242     _get_thread_cache[i] = NO_CACHED_THREAD;
 243   }
 244 }
 245 
 246 // Invalidate all the caches (happens to be the same as pd_init).
 247 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
 248 
 249 #undef NO_CACHED_THREAD
 250 
 251 // END Thread Local Storage
 252 
 253 static inline size_t adjust_stack_size(address base, size_t size) {
 254   if ((ssize_t)size < 0) {
 255     // 4759953: Compensate for ridiculous stack size.
 256     size = max_intx;
 257   }
 258   if (size > (size_t)base) {
 259     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
 260     size = (size_t)base;
 261   }
 262   return size;
 263 }
 264 
 265 static inline stack_t get_stack_info() {
 266   stack_t st;
 267   int retval = thr_stksegment(&st);
 268   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
 269   assert(retval == 0, "incorrect return value from thr_stksegment");
 270   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
 271   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
 272   return st;
 273 }
 274 
 275 address os::current_stack_base() {
 276   int r = thr_main();
 277   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
 278   bool is_primordial_thread = r;
 279 
  280   // Workaround for 4352906: avoid calls to thr_stksegment from the
  281   // primordial thread after the first one (it looks like we trash
  282   // some data, causing the value returned for ss_sp to be incorrect).
 283   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
 284     stack_t st = get_stack_info();
 285     if (is_primordial_thread) {
 286       // cache initial value of stack base
 287       os::Solaris::_main_stack_base = (address)st.ss_sp;
 288     }
 289     return (address)st.ss_sp;
 290   } else {
 291     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
 292     return os::Solaris::_main_stack_base;
 293   }
 294 }
 295 
 296 size_t os::current_stack_size() {
 297   size_t size;
 298 
 299   int r = thr_main();
 300   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
 301   if (!r) {
 302     size = get_stack_info().ss_size;
 303   } else {
 304     struct rlimit limits;
 305     getrlimit(RLIMIT_STACK, &limits);
 306     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
 307   }
 308   // base may not be page aligned
 309   address base = current_stack_base();
  310   address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
 311   return (size_t)(base - bottom);
 312 }
 313 
 314 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
 315   return localtime_r(clock, res);
 316 }
 317 
 318 void os::Solaris::try_enable_extended_io() {
 319   typedef int (*enable_extended_FILE_stdio_t)(int, int);
 320 
 321   if (!UseExtendedFileIO) {
 322     return;
 323   }
 324 
 325   enable_extended_FILE_stdio_t enabler =
 326     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
 327                                          "enable_extended_FILE_stdio");
 328   if (enabler) {
 329     enabler(-1, -1);
 330   }
 331 }
 332 
 333 static int _processors_online = 0;
 334 
 335 jint os::Solaris::_os_thread_limit = 0;
 336 volatile jint os::Solaris::_os_thread_count = 0;
 337 
 338 julong os::available_memory() {
 339   return Solaris::available_memory();
 340 }
 341 
 342 julong os::Solaris::available_memory() {
 343   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
 344 }
 345 
 346 julong os::Solaris::_physical_memory = 0;
 347 
 348 julong os::physical_memory() {
 349   return Solaris::physical_memory();
 350 }
 351 
 352 static hrtime_t first_hrtime = 0;
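      // gethrtime() reports nanoseconds, so the elapsed-time frequency is
      // 10^9 ticks per second.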
 353 static const hrtime_t hrtime_hz = 1000*1000*1000;
 354 static volatile hrtime_t max_hrtime = 0;
 355 
 356 
 357 void os::Solaris::initialize_system_info() {
 358   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
 359   _processors_online = sysconf(_SC_NPROCESSORS_ONLN);
 360   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) *
 361                                      (julong)sysconf(_SC_PAGESIZE);
 362 }
 363 
 364 int os::active_processor_count() {
 365   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 366   pid_t pid = getpid();
 367   psetid_t pset = PS_NONE;
 368   // Are we running in a processor set or is there any processor set around?
 369   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
 370     uint_t pset_cpus;
 371     // Query the number of cpus available to us.
 372     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
 373       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
 374       _processors_online = pset_cpus;
 375       return pset_cpus;
 376     }
 377   }
 378   // Otherwise return number of online cpus
 379   return online_cpus;
 380 }
 381 
 382 static bool find_processors_in_pset(psetid_t        pset,
 383                                     processorid_t** id_array,
 384                                     uint_t*         id_length) {
 385   bool result = false;
 386   // Find the number of processors in the processor set.
 387   if (pset_info(pset, NULL, id_length, NULL) == 0) {
 388     // Make up an array to hold their ids.
 389     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 390     // Fill in the array with their processor ids.
 391     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
 392       result = true;
 393     }
 394   }
 395   return result;
 396 }
 397 
 398 // Callers of find_processors_online() must tolerate imprecise results --
 399 // the system configuration can change asynchronously because of DR
 400 // or explicit psradm operations.
 401 //
 402 // We also need to take care that the loop (below) terminates as the
 403 // number of processors online can change between the _SC_NPROCESSORS_ONLN
 404 // request and the loop that builds the list of processor ids.   Unfortunately
 405 // there's no reliable way to determine the maximum valid processor id,
 406 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
 407 // man pages, which claim the processor id set is "sparse, but
 408 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
 409 // exit the loop.
 410 //
 411 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
 412 // not available on S8.0.
 413 
 414 static bool find_processors_online(processorid_t** id_array,
 415                                    uint*           id_length) {
 416   const processorid_t MAX_PROCESSOR_ID = 100000;
 417   // Find the number of processors online.
 418   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
 419   // Make up an array to hold their ids.
 420   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 421   // Processors need not be numbered consecutively.
 422   long found = 0;
 423   processorid_t next = 0;
 424   while (found < *id_length && next < MAX_PROCESSOR_ID) {
 425     processor_info_t info;
 426     if (processor_info(next, &info) == 0) {
 427       // NB, PI_NOINTR processors are effectively online ...
 428       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
 429         (*id_array)[found] = next;
 430         found += 1;
 431       }
 432     }
 433     next += 1;
 434   }
 435   if (found < *id_length) {
 436     // The loop above didn't identify the expected number of processors.
 437     // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
 438     // and re-running the loop, above, but there's no guarantee of progress
 439     // if the system configuration is in flux.  Instead, we just return what
 440     // we've got.  Note that in the worst case find_processors_online() could
 441     // return an empty set.  (As a fall-back in the case of the empty set we
 442     // could just return the ID of the current processor).
 443     *id_length = found;
 444   }
 445 
 446   return true;
 447 }
 448 
 449 static bool assign_distribution(processorid_t* id_array,
 450                                 uint           id_length,
 451                                 uint*          distribution,
 452                                 uint           distribution_length) {
 453   // We assume we can assign processorid_t's to uint's.
 454   assert(sizeof(processorid_t) == sizeof(uint),
 455          "can't convert processorid_t to uint");
 456   // Quick check to see if we won't succeed.
 457   if (id_length < distribution_length) {
 458     return false;
 459   }
 460   // Assign processor ids to the distribution.
 461   // Try to shuffle processors to distribute work across boards,
 462   // assuming 4 processors per board.
 463   const uint processors_per_board = ProcessDistributionStride;
 464   // Find the maximum processor id.
 465   processorid_t max_id = 0;
 466   for (uint m = 0; m < id_length; m += 1) {
 467     max_id = MAX2(max_id, id_array[m]);
 468   }
 469   // The next id, to limit loops.
 470   const processorid_t limit_id = max_id + 1;
 471   // Make up markers for available processors.
 472   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
 473   for (uint c = 0; c < limit_id; c += 1) {
 474     available_id[c] = false;
 475   }
 476   for (uint a = 0; a < id_length; a += 1) {
 477     available_id[id_array[a]] = true;
 478   }
 479   // Step by "boards", then by "slot", copying to "assigned".
 480   // NEEDS_CLEANUP: The assignment of processors should be stateful,
 481   //                remembering which processors have been assigned by
 482   //                previous calls, etc., so as to distribute several
  483   //                independent calls of this method.  It would be nice
  484   //                to have an API that lets us ask how many processes
  485   //                are bound to a processor, but we don't have that,
  486   //                either.
 487   //                In the short term, "board" is static so that
 488   //                subsequent distributions don't all start at board 0.
 489   static uint board = 0;
 490   uint assigned = 0;
 491   // Until we've found enough processors ....
 492   while (assigned < distribution_length) {
 493     // ... find the next available processor in the board.
 494     for (uint slot = 0; slot < processors_per_board; slot += 1) {
 495       uint try_id = board * processors_per_board + slot;
 496       if ((try_id < limit_id) && (available_id[try_id] == true)) {
 497         distribution[assigned] = try_id;
 498         available_id[try_id] = false;
 499         assigned += 1;
 500         break;
 501       }
 502     }
 503     board += 1;
 504     if (board * processors_per_board + 0 >= limit_id) {
 505       board = 0;
 506     }
 507   }
 508   if (available_id != NULL) {
 509     FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
 510   }
 511   return true;
 512 }
 513 
 514 void os::set_native_thread_name(const char *name) {
 515   // Not yet implemented.
 516   return;
 517 }
 518 
 519 bool os::distribute_processes(uint length, uint* distribution) {
 520   bool result = false;
 521   // Find the processor id's of all the available CPUs.
 522   processorid_t* id_array  = NULL;
 523   uint           id_length = 0;
 524   // There are some races between querying information and using it,
 525   // since processor sets can change dynamically.
 526   psetid_t pset = PS_NONE;
 527   // Are we running in a processor set?
 528   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
 529     result = find_processors_in_pset(pset, &id_array, &id_length);
 530   } else {
 531     result = find_processors_online(&id_array, &id_length);
 532   }
 533   if (result == true) {
 534     if (id_length >= length) {
 535       result = assign_distribution(id_array, id_length, distribution, length);
 536     } else {
 537       result = false;
 538     }
 539   }
 540   if (id_array != NULL) {
 541     FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
 542   }
 543   return result;
 544 }
 545 
 546 bool os::bind_to_processor(uint processor_id) {
 547   // We assume that a processorid_t can be stored in a uint.
 548   assert(sizeof(uint) == sizeof(processorid_t),
 549          "can't convert uint to processorid_t");
 550   int bind_result =
 551     processor_bind(P_LWPID,                       // bind LWP.
 552                    P_MYID,                        // bind current LWP.
 553                    (processorid_t) processor_id,  // id.
 554                    NULL);                         // don't return old binding.
 555   return (bind_result == 0);
 556 }
 557 
 558 bool os::getenv(const char* name, char* buffer, int len) {
 559   char* val = ::getenv(name);
 560   if (val == NULL || strlen(val) + 1 > len) {
 561     if (len > 0) buffer[0] = 0; // return a null string
 562     return false;
 563   }
 564   strcpy(buffer, val);
 565   return true;
 566 }
 567 
 568 
  569 // Return true if this process has special privileges, e.g. is running set-uid or set-gid.
 570 
 571 bool os::have_special_privileges() {
 572   static bool init = false;
 573   static bool privileges = false;
 574   if (!init) {
 575     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 576     init = true;
 577   }
 578   return privileges;
 579 }
 580 
 581 
 582 void os::init_system_properties_values() {
 583   // The next steps are taken in the product version:
 584   //
 585   // Obtain the JAVA_HOME value from the location of libjvm.so.
 586   // This library should be located at:
 587   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
 588   //
 589   // If "/jre/lib/" appears at the right place in the path, then we
 590   // assume libjvm.so is installed in a JDK and we use this path.
 591   //
 592   // Otherwise exit with message: "Could not create the Java virtual machine."
 593   //
 594   // The following extra steps are taken in the debugging version:
 595   //
  596   // If "/jre/lib/" does NOT appear at the right place in the path,
  597   // then instead of exiting we check the $JAVA_HOME environment variable.
 598   //
 599   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
 600   // then we append a fake suffix "hotspot/libjvm.so" to this path so
 601   // it looks like libjvm.so is installed there
 602   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
 603   //
 604   // Otherwise exit.
 605   //
 606   // Important note: if the location of libjvm.so changes this
 607   // code needs to be changed accordingly.
 608 
 609 // Base path of extensions installed on the system.
 610 #define SYS_EXT_DIR     "/usr/jdk/packages"
 611 #define EXTENSIONS_DIR  "/lib/ext"
 612 #define ENDORSED_DIR    "/lib/endorsed"
 613 
 614   char cpu_arch[12];
 615   // Buffer that fits several sprintfs.
 616   // Note that the space for the colon and the trailing null are provided
 617   // by the nulls included by the sizeof operator.
 618   const size_t bufsize =
 619     MAX4((size_t)MAXPATHLEN,  // For dll_dir & friends.
  620          sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(cpu_arch), // invariant ld_library_path
 621          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
 622          (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
 623   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 624 
 625   // sysclasspath, java_home, dll_dir
 626   {
 627     char *pslash;
 628     os::jvm_path(buf, bufsize);
 629 
 630     // Found the full path to libjvm.so.
 631     // Now cut the path to <java_home>/jre if we can.
 632     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 633     pslash = strrchr(buf, '/');
 634     if (pslash != NULL) {
 635       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 636     }
 637     Arguments::set_dll_dir(buf);
 638 
 639     if (pslash != NULL) {
 640       pslash = strrchr(buf, '/');
 641       if (pslash != NULL) {
 642         *pslash = '\0';          // Get rid of /<arch>.
 643         pslash = strrchr(buf, '/');
 644         if (pslash != NULL) {
 645           *pslash = '\0';        // Get rid of /lib.
 646         }
 647       }
 648     }
 649     Arguments::set_java_home(buf);
 650     set_boot_path('/', ':');
 651   }
 652 
 653   // Where to look for native libraries.
 654   {
 655     // Use dlinfo() to determine the correct java.library.path.
 656     //
 657     // If we're launched by the Java launcher, and the user
 658     // does not set java.library.path explicitly on the commandline,
 659     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
 660     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
 661     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
 662     // /usr/lib), which is exactly what we want.
 663     //
 664     // If the user does set java.library.path, it completely
 665     // overwrites this setting, and always has.
 666     //
 667     // If we're not launched by the Java launcher, we may
 668     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
 669     // settings.  Again, dlinfo does exactly what we want.
 670 
 671     Dl_serinfo     info_sz, *info = &info_sz;
 672     Dl_serpath     *path;
 673     char           *library_path;
 674     char           *common_path = buf;
 675 
 676     // Determine search path count and required buffer size.
 677     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
 678       FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 679       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
 680     }
 681 
 682     // Allocate new buffer and initialize.
 683     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
 684     info->dls_size = info_sz.dls_size;
 685     info->dls_cnt = info_sz.dls_cnt;
 686 
 687     // Obtain search path information.
 688     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
 689       FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 690       FREE_C_HEAP_ARRAY(char, info, mtInternal);
 691       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
 692     }
 693 
 694     path = &info->dls_serpath[0];
 695 
 696     // Note: Due to a legacy implementation, most of the library path
  697     // is set in the launcher. This was to accommodate linking restrictions
 698     // on legacy Solaris implementations (which are no longer supported).
 699     // Eventually, all the library path setting will be done here.
 700     //
 701     // However, to prevent the proliferation of improperly built native
 702     // libraries, the new path component /usr/jdk/packages is added here.
 703 
 704     // Determine the actual CPU architecture.
 705     sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
 706 #ifdef _LP64
 707     // If we are a 64-bit vm, perform the following translations:
 708     //   sparc   -> sparcv9
 709     //   i386    -> amd64
 710     if (strcmp(cpu_arch, "sparc") == 0) {
 711       strcat(cpu_arch, "v9");
 712     } else if (strcmp(cpu_arch, "i386") == 0) {
 713       strcpy(cpu_arch, "amd64");
 714     }
 715 #endif
 716 
 717     // Construct the invariant part of ld_library_path.
 718     sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
 719 
 720     // Struct size is more than sufficient for the path components obtained
 721     // through the dlinfo() call, so only add additional space for the path
 722     // components explicitly added here.
 723     size_t library_path_size = info->dls_size + strlen(common_path);
 724     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
 725     library_path[0] = '\0';
 726 
 727     // Construct the desired Java library path from the linker's library
 728     // search path.
 729     //
 730     // For compatibility, it is optimal that we insert the additional path
 731     // components specific to the Java VM after those components specified
 732     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
 733     // infrastructure.
 734     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
 735       strcpy(library_path, common_path);
 736     } else {
 737       int inserted = 0;
 738       int i;
 739       for (i = 0; i < info->dls_cnt; i++, path++) {
 740         uint_t flags = path->dls_flags & LA_SER_MASK;
 741         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
 742           strcat(library_path, common_path);
 743           strcat(library_path, os::path_separator());
 744           inserted = 1;
 745         }
 746         strcat(library_path, path->dls_name);
 747         strcat(library_path, os::path_separator());
 748       }
 749       // Eliminate trailing path separator.
 750       library_path[strlen(library_path)-1] = '\0';
 751     }
 752 
 753     // happens before argument parsing - can't use a trace flag
 754     // tty->print_raw("init_system_properties_values: native lib path: ");
 755     // tty->print_raw_cr(library_path);
 756 
 757     // Callee copies into its own buffer.
 758     Arguments::set_library_path(library_path);
 759 
 760     FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
 761     FREE_C_HEAP_ARRAY(char, info, mtInternal);
 762   }
 763 
 764   // Extensions directories.
 765   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
 766   Arguments::set_ext_dirs(buf);
 767 
 768   // Endorsed standards default directory.
 769   sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
 770   Arguments::set_endorsed_dirs(buf);
 771 
 772   FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 773 
 774 #undef SYS_EXT_DIR
 775 #undef EXTENSIONS_DIR
 776 #undef ENDORSED_DIR
 777 }
 778 
 779 void os::breakpoint() {
 780   BREAKPOINT;
 781 }
 782 
 783 bool os::obsolete_option(const JavaVMOption *option) {
 784   if (!strncmp(option->optionString, "-Xt", 3)) {
 785     return true;
 786   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
 787     return true;
 788   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
 789     return true;
 790   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
 791     return true;
 792   }
 793   return false;
 794 }
 795 
 796 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
 797   address  stackStart  = (address)thread->stack_base();
 798   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
 799   if (sp < stackStart && sp >= stackEnd) return true;
 800   return false;
 801 }
 802 
 803 extern "C" void breakpoint() {
 804   // use debugger to set breakpoint here
 805 }
 806 
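      // Thread id of the primordial thread, recorded during VM startup and used
      // by create_main_thread() below.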
 807 static thread_t main_thread;
 808 
 809 // Thread start routine for all new Java threads
 810 extern "C" void* java_start(void* thread_addr) {
 811   // Try to randomize the cache line index of hot stack frames.
 812   // This helps when threads of the same stack traces evict each other's
 813   // cache lines. The threads can be either from the same JVM instance, or
 814   // from different JVM instances. The benefit is especially true for
 815   // processors with hyperthreading technology.
 816   static int counter = 0;
 817   int pid = os::current_process_id();
 818   alloca(((pid ^ counter++) & 7) * 128);
 819 
 820   int prio;
 821   Thread* thread = (Thread*)thread_addr;
 822   OSThread* osthr = thread->osthread();
 823 
 824   osthr->set_lwp_id(_lwp_self());  // Store lwp in case we are bound
 825   thread->_schedctl = (void *) schedctl_init();
 826 
 827   if (UseNUMA) {
 828     int lgrp_id = os::numa_get_group_id();
 829     if (lgrp_id != -1) {
 830       thread->set_lgrp_id(lgrp_id);
 831     }
 832   }
 833 
 834   // If the creator called set priority before we started,
 835   // we need to call set_native_priority now that we have an lwp.
 836   // We used to get the priority from thr_getprio (we called
 837   // thr_setprio way back in create_thread) and pass it to
 838   // set_native_priority, but Solaris scales the priority
 839   // in java_to_os_priority, so when we read it back here,
 840   // we pass trash to set_native_priority instead of what's
 841   // in java_to_os_priority. So we save the native priority
 842   // in the osThread and recall it here.
 843 
 844   if (osthr->thread_id() != -1) {
 845     if (UseThreadPriorities) {
 846       int prio = osthr->native_priority();
 847       if (ThreadPriorityVerbose) {
 848         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
 849                       INTPTR_FORMAT ", setting priority: %d\n",
 850                       osthr->thread_id(), osthr->lwp_id(), prio);
 851       }
 852       os::set_native_priority(thread, prio);
 853     }
 854   } else if (ThreadPriorityVerbose) {
 855     warning("Can't set priority in _start routine, thread id hasn't been set\n");
 856   }
 857 
 858   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 859 
 860   // initialize signal mask for this thread
 861   os::Solaris::hotspot_sigmask(thread);
 862 
 863   thread->run();
 864 
 865   // One less thread is executing
 866   // When the VMThread gets here, the main thread may have already exited
 867   // which frees the CodeHeap containing the Atomic::dec code
 868   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 869     Atomic::dec(&os::Solaris::_os_thread_count);
 870   }
 871 
 872   if (UseDetachedThreads) {
 873     thr_exit(NULL);
 874     ShouldNotReachHere();
 875   }
 876   return NULL;
 877 }
 878 
 879 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
 880   // Allocate the OSThread object
 881   OSThread* osthread = new OSThread(NULL, NULL);
 882   if (osthread == NULL) return NULL;
 883 
 884   // Store info on the Solaris thread into the OSThread
 885   osthread->set_thread_id(thread_id);
 886   osthread->set_lwp_id(_lwp_self());
 887   thread->_schedctl = (void *) schedctl_init();
 888 
 889   if (UseNUMA) {
 890     int lgrp_id = os::numa_get_group_id();
 891     if (lgrp_id != -1) {
 892       thread->set_lgrp_id(lgrp_id);
 893     }
 894   }
 895 
 896   if (ThreadPriorityVerbose) {
 897     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
 898                   osthread->thread_id(), osthread->lwp_id());
 899   }
 900 
 901   // Initial thread state is INITIALIZED, not SUSPENDED
 902   osthread->set_state(INITIALIZED);
 903 
 904   return osthread;
 905 }
 906 
 907 void os::Solaris::hotspot_sigmask(Thread* thread) {
  908   // Save the caller's signal mask.
 909   sigset_t sigmask;
 910   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
 911   OSThread *osthread = thread->osthread();
 912   osthread->set_caller_sigmask(sigmask);
 913 
 914   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
 915   if (!ReduceSignalUsage) {
 916     if (thread->is_VM_thread()) {
 917       // Only the VM thread handles BREAK_SIGNAL ...
 918       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
 919     } else {
 920       // ... all other threads block BREAK_SIGNAL
 921       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
 922       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
 923     }
 924   }
 925 }
 926 
 927 bool os::create_attached_thread(JavaThread* thread) {
 928 #ifdef ASSERT
 929   thread->verify_not_published();
 930 #endif
 931   OSThread* osthread = create_os_thread(thread, thr_self());
 932   if (osthread == NULL) {
 933     return false;
 934   }
 935 
 936   // Initial thread state is RUNNABLE
 937   osthread->set_state(RUNNABLE);
 938   thread->set_osthread(osthread);
 939 
 940   // initialize signal mask for this thread
 941   // and save the caller's signal mask
 942   os::Solaris::hotspot_sigmask(thread);
 943 
 944   return true;
 945 }
 946 
 947 bool os::create_main_thread(JavaThread* thread) {
 948 #ifdef ASSERT
 949   thread->verify_not_published();
 950 #endif
 951   if (_starting_thread == NULL) {
 952     _starting_thread = create_os_thread(thread, main_thread);
 953     if (_starting_thread == NULL) {
 954       return false;
 955     }
 956   }
 957 
  958   // The primordial thread is runnable from the start
 959   _starting_thread->set_state(RUNNABLE);
 960 
 961   thread->set_osthread(_starting_thread);
 962 
 963   // initialize signal mask for this thread
 964   // and save the caller's signal mask
 965   os::Solaris::hotspot_sigmask(thread);
 966 
 967   return true;
 968 }
 969 
 970 
 971 bool os::create_thread(Thread* thread, ThreadType thr_type,
 972                        size_t stack_size) {
 973   // Allocate the OSThread object
 974   OSThread* osthread = new OSThread(NULL, NULL);
 975   if (osthread == NULL) {
 976     return false;
 977   }
 978 
 979   if (ThreadPriorityVerbose) {
 980     char *thrtyp;
 981     switch (thr_type) {
 982     case vm_thread:
 983       thrtyp = (char *)"vm";
 984       break;
 985     case cgc_thread:
 986       thrtyp = (char *)"cgc";
 987       break;
 988     case pgc_thread:
 989       thrtyp = (char *)"pgc";
 990       break;
 991     case java_thread:
 992       thrtyp = (char *)"java";
 993       break;
 994     case compiler_thread:
 995       thrtyp = (char *)"compiler";
 996       break;
 997     case watcher_thread:
 998       thrtyp = (char *)"watcher";
 999       break;
1000     default:
1001       thrtyp = (char *)"unknown";
1002       break;
1003     }
1004     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
1005   }
1006 
1007   // Calculate stack size if it's not specified by caller.
1008   if (stack_size == 0) {
 1009     // The default stack size is 1M (2M for LP64).
1010     stack_size = (BytesPerWord >> 2) * K * K;
1011 
1012     switch (thr_type) {
1013     case os::java_thread:
1014       // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
1015       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
1016       break;
1017     case os::compiler_thread:
1018       if (CompilerThreadStackSize > 0) {
1019         stack_size = (size_t)(CompilerThreadStackSize * K);
1020         break;
1021       } // else fall through:
1022         // use VMThreadStackSize if CompilerThreadStackSize is not defined
1023     case os::vm_thread:
1024     case os::pgc_thread:
1025     case os::cgc_thread:
1026     case os::watcher_thread:
1027       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1028       break;
1029     }
1030   }
1031   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1032 
1033   // Initial state is ALLOCATED but not INITIALIZED
1034   osthread->set_state(ALLOCATED);
1035 
1036   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
1037     // We got lots of threads. Check if we still have some address space left.
 1038     // We need at least VirtualMemoryBangSize (20 MB) of unreserved address
 1039     // space left. We check by trying to reserve that much.
1040     const size_t VirtualMemoryBangSize = 20*K*K;
1041     char* mem = os::reserve_memory(VirtualMemoryBangSize);
1042     if (mem == NULL) {
1043       delete osthread;
1044       return false;
1045     } else {
1046       // Release the memory again
1047       os::release_memory(mem, VirtualMemoryBangSize);
1048     }
1049   }
1050 
1051   // Setup osthread because the child thread may need it.
1052   thread->set_osthread(osthread);
1053 
1054   // Create the Solaris thread
1055   thread_t tid = 0;
1056   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
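          // THR_SUSPENDED: the thread is created suspended and only starts running
          // when os::pd_start_thread() below calls thr_continue() on it.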
1057   int      status;
1058 
1059   // Mark that we don't have an lwp or thread id yet.
1060   // In case we attempt to set the priority before the thread starts.
1061   osthread->set_lwp_id(-1);
1062   osthread->set_thread_id(-1);
1063 
1064   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1065   if (status != 0) {
1066     if (PrintMiscellaneous && (Verbose || WizardMode)) {
1067       perror("os::create_thread");
1068     }
1069     thread->set_osthread(NULL);
1070     // Need to clean up stuff we've allocated so far
1071     delete osthread;
1072     return false;
1073   }
1074 
1075   Atomic::inc(&os::Solaris::_os_thread_count);
1076 
1077   // Store info on the Solaris thread into the OSThread
1078   osthread->set_thread_id(tid);
1079 
1080   // Remember that we created this thread so we can set priority on it
1081   osthread->set_vm_created();
1082 
1083   // Initial thread state is INITIALIZED, not SUSPENDED
1084   osthread->set_state(INITIALIZED);
1085 
1086   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1087   return true;
1088 }
1089 
 1090 // SIGJVM1 and SIGJVM2 are defined for Solaris 10 and later. Defining them
 1091 // here allows builds on earlier versions of Solaris to take advantage of
 1092 // the newly reserved Solaris JVM signals. With SIGJVM1 and SIGJVM2,
 1093 // INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2, and -XX:+UseAltSigs
 1094 // does nothing since these signals should have no conflict.
1095 #if !defined(SIGJVM1)
1096   #define SIGJVM1 39
1097   #define SIGJVM2 40
1098 #endif
1099 
1100 debug_only(static bool signal_sets_initialized = false);
1101 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1102 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1103 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
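      // These default to the classic interrupt/async signals; signal_sets_init()
      // below may switch them to SIGJVM1/SIGJVM2 (Solaris 10 and later) or to the
      // -XX:+UseAltSigs alternatives.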
1104 
1105 bool os::Solaris::is_sig_ignored(int sig) {
1106   struct sigaction oact;
1107   sigaction(sig, (struct sigaction*)NULL, &oact);
1108   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
1109                                  : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
1110   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
1111     return true;
1112   } else {
1113     return false;
1114   }
1115 }
1116 
 1117 // Note: SIGRTMIN is a macro that calls sysconf(), so it dynamically detects
 1118 // the SIGRTMIN value for the system at runtime rather than at build time.
1119 static bool isJVM1available() {
1120   return SIGJVM1 < SIGRTMIN;
1121 }
1122 
1123 void os::Solaris::signal_sets_init() {
1124   // Should also have an assertion stating we are still single-threaded.
1125   assert(!signal_sets_initialized, "Already initialized");
1126   // Fill in signals that are necessarily unblocked for all threads in
1127   // the VM. Currently, we unblock the following signals:
1128   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1129   //                         by -Xrs (=ReduceSignalUsage));
1130   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1131   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1132   // the dispositions or masks wrt these signals.
1133   // Programs embedding the VM that want to use the above signals for their
1134   // own purposes must, at this time, use the "-Xrs" option to prevent
1135   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1136   // (See bug 4345157, and other related bugs).
1137   // In reality, though, unblocking these signals is really a nop, since
1138   // these signals are not blocked by default.
1139   sigemptyset(&unblocked_sigs);
1140   sigemptyset(&allowdebug_blocked_sigs);
1141   sigaddset(&unblocked_sigs, SIGILL);
1142   sigaddset(&unblocked_sigs, SIGSEGV);
1143   sigaddset(&unblocked_sigs, SIGBUS);
1144   sigaddset(&unblocked_sigs, SIGFPE);
1145 
 1146   if (isJVM1available()) {
1147     os::Solaris::set_SIGinterrupt(SIGJVM1);
1148     os::Solaris::set_SIGasync(SIGJVM2);
1149   } else if (UseAltSigs) {
1150     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1151     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1152   } else {
1153     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1154     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1155   }
1156 
1157   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1158   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1159 
1160   if (!ReduceSignalUsage) {
1161     if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1162       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1163       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1164     }
1165     if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1166       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1167       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1168     }
1169     if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1170       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1171       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1172     }
1173   }
1174   // Fill in signals that are blocked by all but the VM thread.
1175   sigemptyset(&vm_sigs);
1176   if (!ReduceSignalUsage) {
1177     sigaddset(&vm_sigs, BREAK_SIGNAL);
1178   }
1179   debug_only(signal_sets_initialized = true);
1180 
1181   // For diagnostics only used in run_periodic_checks
1182   sigemptyset(&check_signal_done);
1183 }
1184 
1185 // These are signals that are unblocked while a thread is running Java.
1186 // (For some reason, they get blocked by default.)
1187 sigset_t* os::Solaris::unblocked_signals() {
1188   assert(signal_sets_initialized, "Not initialized");
1189   return &unblocked_sigs;
1190 }
1191 
1192 // These are the signals that are blocked while a (non-VM) thread is
1193 // running Java. Only the VM thread handles these signals.
1194 sigset_t* os::Solaris::vm_signals() {
1195   assert(signal_sets_initialized, "Not initialized");
1196   return &vm_sigs;
1197 }
1198 
1199 // These are signals that are blocked during cond_wait to allow debugger in
1200 sigset_t* os::Solaris::allowdebug_blocked_signals() {
1201   assert(signal_sets_initialized, "Not initialized");
1202   return &allowdebug_blocked_sigs;
1203 }
1204 
1205 
1206 void _handle_uncaught_cxx_exception() {
1207   VMError err("An uncaught C++ exception");
1208   err.report_and_die();
1209 }
1210 
1211 
1212 // First crack at OS-specific initialization, from inside the new thread.
1213 void os::initialize_thread(Thread* thr) {
1214   int r = thr_main();
1215   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
1216   if (r) {
1217     JavaThread* jt = (JavaThread *)thr;
1218     assert(jt != NULL, "Sanity check");
1219     size_t stack_size;
1220     address base = jt->stack_base();
1221     if (Arguments::created_by_java_launcher()) {
1222       // Use 2MB to allow for Solaris 7 64 bit mode.
1223       stack_size = JavaThread::stack_size_at_create() == 0
1224         ? 2048*K : JavaThread::stack_size_at_create();
1225 
1226       // There are rare cases when we may have already used more than
1227       // the basic stack size allotment before this method is invoked.
1228       // Attempt to allow for a normally sized java_stack.
1229       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1230       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1231     } else {
1232       // 6269555: If we were not created by a Java launcher, i.e. if we are
1233       // running embedded in a native application, treat the primordial thread
1234       // as much like a native attached thread as possible.  This means using
1235       // the current stack size from thr_stksegment(), unless it is too large
1236       // to reliably setup guard pages.  A reasonable max size is 8MB.
1237       size_t current_size = current_stack_size();
1238       // This should never happen, but just in case....
1239       if (current_size == 0) current_size = 2 * K * K;
1240       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1241     }
 1242     address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
1243     stack_size = (size_t)(base - bottom);
1244 
1245     assert(stack_size > 0, "Stack size calculation problem");
1246 
1247     if (stack_size > jt->stack_size()) {
1248 #ifndef PRODUCT
1249       struct rlimit limits;
1250       getrlimit(RLIMIT_STACK, &limits);
1251       size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1252       assert(size >= jt->stack_size(), "Stack size problem in main thread");
1253 #endif
1254       tty->print_cr("Stack size of %d Kb exceeds current limit of %d Kb.\n"
1255                     "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1256                     "See limit(1) to increase the stack size limit.",
1257                     stack_size / K, jt->stack_size() / K);
1258       vm_exit(1);
1259     }
1260     assert(jt->stack_size() >= stack_size,
1261            "Attempt to map more stack than was allocated");
1262     jt->set_stack_size(stack_size);
1263   }
1264 
1265   // With the T2 libthread (T1 is no longer supported) threads are always bound
1266   // and we use stackbanging in all cases.
1267 
1268   os::Solaris::init_thread_fpu_state();
1269   std::set_terminate(_handle_uncaught_cxx_exception);
1270 }
1271 
1272 
1273 
1274 // Free Solaris resources related to the OSThread
1275 void os::free_thread(OSThread* osthread) {
1276   assert(osthread != NULL, "os::free_thread but osthread not set");
1277 
1278 
1279   // We are told to free resources of the argument thread,
1280   // but we can only really operate on the current thread.
1281   // The main thread must take the VMThread down synchronously
1282   // before the main thread exits and frees up CodeHeap
1283   guarantee((Thread::current()->osthread() == osthread
1284              || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1285   if (Thread::current()->osthread() == osthread) {
1286     // Restore caller's signal mask
1287     sigset_t sigmask = osthread->caller_sigmask();
1288     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1289   }
1290   delete osthread;
1291 }
1292 
1293 void os::pd_start_thread(Thread* thread) {
1294   int status = thr_continue(thread->osthread()->thread_id());
1295   assert_status(status == 0, status, "thr_continue failed");
1296 }
1297 
1298 
1299 intx os::current_thread_id() {
1300   return (intx)thr_self();
1301 }
1302 
1303 static pid_t _initial_pid = 0;
1304 
1305 int os::current_process_id() {
1306   return (int)(_initial_pid ? _initial_pid : getpid());
1307 }
1308 
1309 int os::allocate_thread_local_storage() {
1310   // %%%       in Win32 this allocates a memory segment pointed to by a
1311   //           register.  Dan Stein can implement a similar feature in
1312   //           Solaris.  Alternatively, the VM can do the same thing
1313   //           explicitly: malloc some storage and keep the pointer in a
1314   //           register (which is part of the thread's context) (or keep it
1315   //           in TLS).
1316   // %%%       In current versions of Solaris, thr_self and TSD can
1317   //           be accessed via short sequences of displaced indirections.
1318   //           The value of thr_self is available as %g7(36).
1319   //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
1320   //           assuming that the current thread already has a value bound to k.
1321   //           It may be worth experimenting with such access patterns,
1322   //           and later having the parameters formally exported from a Solaris
1323   //           interface.  I think, however, that it will be faster to
1324   //           maintain the invariant that %g2 always contains the
1325   //           JavaThread in Java code, and have stubs simply
1326   //           treat %g2 as a caller-save register, preserving it in a %lN.
1327   thread_key_t tk;
1328   if (thr_keycreate(&tk, NULL)) {
1329     fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
1330                   "(%s)", strerror(errno)));
1331   }
1332   return int(tk);
1333 }
1334 
1335 void os::free_thread_local_storage(int index) {
1336   // %%% don't think we need anything here
1337   // if (pthread_key_delete((pthread_key_t) tk)) {
1338   //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
1339   // }
1340 }
1341 
 1342 // The allocation libthread makes for tsd_common is a small, version-specific
 1343 // number of bytes - the point is that NO swap space is available when it fails.
1344 #define SMALLINT 32
1345 void os::thread_local_storage_at_put(int index, void* value) {
1346   // %%% this is used only in threadLocalStorage.cpp
1347   if (thr_setspecific((thread_key_t)index, value)) {
1348     if (errno == ENOMEM) {
1349       vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
1350                             "thr_setspecific: out of swap space");
1351     } else {
1352       fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1353                     "(%s)", strerror(errno)));
1354     }
1355   } else {
1356     ThreadLocalStorage::set_thread_in_slot((Thread *) value);
1357   }
1358 }
1359 
1360 // This function could be called before TLS is initialized, for example, when
1361 // VM receives an async signal or when VM causes a fatal error during
1362 // initialization. Return NULL if thr_getspecific() fails.
1363 void* os::thread_local_storage_at(int index) {
1364   // %%% this is used only in threadLocalStorage.cpp
1365   void* r = NULL;
1366   return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1367 }
1368 
1369 
1370 // gethrtime() should be monotonic according to the documentation,
1371 // but some virtualized platforms are known to break this guarantee.
1372 // getTimeNanos() must be guaranteed not to move backwards, so we
1373 // are forced to add a check here.
1374 inline hrtime_t getTimeNanos() {
1375   const hrtime_t now = gethrtime();
1376   const hrtime_t prev = max_hrtime;
1377   if (now <= prev) {
1378     return prev;   // same or retrograde time;
1379   }
1380   const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1381   assert(obsv >= prev, "invariant");   // Monotonicity
1382   // If the CAS succeeded then we're done and return "now".
1383   // If the CAS failed and the observed value "obsv" is >= now then
1384   // we should return "obsv".  If the CAS failed and now > obsv > prv then
1385   // some other thread raced this thread and installed a new value, in which case
1386   // we could either (a) retry the entire operation, (b) retry trying to install now
1387   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1388   // we might discard a higher "now" value in deference to a slightly lower but freshly
1389   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1390   // to (a) or (b) -- and greatly reduces coherence traffic.
1391   // We might also condition (c) on the magnitude of the delta between obsv and now.
1392   // Avoiding excessive CAS operations to hot RW locations is critical.
1393   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1394   return (prev == obsv) ? now : obsv;
1395 }
1396 
1397 // Time since start-up in seconds to a fine granularity.
1398 // Used by VMSelfDestructTimer and the MemProfiler.
1399 double os::elapsedTime() {
1400   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1401 }
1402 
1403 jlong os::elapsed_counter() {
1404   return (jlong)(getTimeNanos() - first_hrtime);
1405 }
1406 
1407 jlong os::elapsed_frequency() {
1408   return hrtime_hz;
1409 }
1410 
1411 // Return the real, user, and system times in seconds from an
1412 // arbitrary fixed point in the past.
1413 bool os::getTimesSecs(double* process_real_time,
1414                       double* process_user_time,
1415                       double* process_system_time) {
1416   struct tms ticks;
1417   clock_t real_ticks = times(&ticks);
1418 
1419   if (real_ticks == (clock_t) (-1)) {
1420     return false;
1421   } else {
1422     double ticks_per_second = (double) clock_tics_per_sec;
1423     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1424     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1425     // For consistency return the real time from getTimeNanos()
1426     // converted to seconds.
1427     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1428 
1429     return true;
1430   }
1431 }
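
// Illustrative usage sketch (not compiled): sampling getTimesSecs() twice to
// estimate the process CPU load over an interval.
//
//   double r0, u0, s0;
//   double r1, u1, s1;
//   if (os::getTimesSecs(&r0, &u0, &s0)) {
//     // ... run the work being measured ...
//     if (os::getTimesSecs(&r1, &u1, &s1) && r1 > r0) {
//       double cpu_load = ((u1 + s1) - (u0 + s0)) / (r1 - r0);  // fraction of one CPU
//     }
//   }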
1432 
1433 bool os::supports_vtime() { return true; }
1434 
1435 bool os::enable_vtime() {
1436   int fd = ::open("/proc/self/ctl", O_WRONLY);
1437   if (fd == -1) {
1438     return false;
1439   }
1440 
1441   long cmd[] = { PCSET, PR_MSACCT };
1442   int res = ::write(fd, cmd, sizeof(long) * 2);
1443   ::close(fd);
1444   if (res != sizeof(long) * 2) {
1445     return false;
1446   }
1447   return true;
1448 }
1449 
1450 bool os::vtime_enabled() {
1451   int fd = ::open("/proc/self/status", O_RDONLY);
1452   if (fd == -1) {
1453     return false;
1454   }
1455 
1456   pstatus_t status;
1457   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1458   ::close(fd);
1459   if (res != sizeof(pstatus_t)) {
1460     return false;
1461   }
1462   return status.pr_flags & PR_MSACCT;
1463 }
1464 
1465 double os::elapsedVTime() {
1466   return (double)gethrvtime() / (double)hrtime_hz;
1467 }
1468 
1469 // Used internally for comparisons only
1470 // getTimeMillis guaranteed to not move backwards on Solaris
1471 jlong getTimeMillis() {
1472   jlong nanotime = getTimeNanos();
1473   return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1474 }
1475 
1476 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1477 jlong os::javaTimeMillis() {
1478   timeval t;
1479   if (gettimeofday(&t, NULL) == -1) {
1480     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1481   }
1482   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1483 }
1484 
1485 jlong os::javaTimeNanos() {
1486   return (jlong)getTimeNanos();
1487 }
1488 
1489 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1490   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1491   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1492   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1493   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1494 }
1495 
1496 char * os::local_time_string(char *buf, size_t buflen) {
1497   struct tm t;
1498   time_t long_time;
1499   time(&long_time);
1500   localtime_r(&long_time, &t);
1501   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1502                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1503                t.tm_hour, t.tm_min, t.tm_sec);
1504   return buf;
1505 }
1506 
1507 // Note: os::shutdown() might be called very early during initialization, or
1508 // called from signal handler. Before adding something to os::shutdown(), make
1509 // sure it is async-safe and can handle partially initialized VM.
1510 void os::shutdown() {
1511 
1512   // allow PerfMemory to attempt cleanup of any persistent resources
1513   perfMemory_exit();
1514 
1515   // needs to remove object in file system
1516   AttachListener::abort();
1517 
1518   // flush buffered output, finish log files
1519   ostream_abort();
1520 
1521   // Check for abort hook
1522   abort_hook_t abort_hook = Arguments::abort_hook();
1523   if (abort_hook != NULL) {
1524     abort_hook();
1525   }
1526 }
1527 
1528 // Note: os::abort() might be called very early during initialization, or
1529 // called from signal handler. Before adding something to os::abort(), make
1530 // sure it is async-safe and can handle partially initialized VM.
1531 void os::abort(bool dump_core) {
1532   os::shutdown();
1533   if (dump_core) {
1534 #ifndef PRODUCT
1535     fdStream out(defaultStream::output_fd());
1536     out.print_raw("Current thread is ");
1537     char buf[16];
1538     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1539     out.print_raw_cr(buf);
1540     out.print_raw_cr("Dumping core ...");
1541 #endif
1542     ::abort(); // dump core (for debugging)
1543   }
1544 
1545   ::exit(1);
1546 }
1547 
1548 // Die immediately, no exit hook, no abort hook, no cleanup.
1549 void os::die() {
1550   ::abort(); // dump core (for debugging)
1551 }
1552 
1553 // DLL functions
1554 
1555 const char* os::dll_file_extension() { return ".so"; }
1556 
1557 // This must be hard coded because it's the system's temporary
1558 // directory, not the java application's temp directory (a la java.io.tmpdir).
1559 const char* os::get_temp_directory() { return "/tmp"; }
1560 
1561 static bool file_exists(const char* filename) {
1562   struct stat statbuf;
1563   if (filename == NULL || strlen(filename) == 0) {
1564     return false;
1565   }
1566   return os::stat(filename, &statbuf) == 0;
1567 }
1568 
1569 bool os::dll_build_name(char* buffer, size_t buflen,
1570                         const char* pname, const char* fname) {
1571   bool retval = false;
1572   const size_t pnamelen = pname ? strlen(pname) : 0;
1573 
1574   // Return error on buffer overflow.
1575   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1576     return retval;
1577   }
1578 
1579   if (pnamelen == 0) {
1580     snprintf(buffer, buflen, "lib%s.so", fname);
1581     retval = true;
1582   } else if (strchr(pname, *os::path_separator()) != NULL) {
1583     int n;
1584     char** pelements = split_path(pname, &n);
1585     if (pelements == NULL) {
1586       return false;
1587     }
1588     for (int i = 0; i < n; i++) {
1589       // really shouldn't be NULL but what the heck, check can't hurt
1590       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1591         continue; // skip the empty path values
1592       }
1593       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1594       if (file_exists(buffer)) {
1595         retval = true;
1596         break;
1597       }
1598     }
1599     // release the storage
1600     for (int i = 0; i < n; i++) {
1601       if (pelements[i] != NULL) {
1602         FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1603       }
1604     }
1605     if (pelements != NULL) {
1606       FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1607     }
1608   } else {
1609     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1610     retval = true;
1611   }
1612   return retval;
1613 }
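
// Illustrative usage sketch (not compiled): resolving a bare library name
// against a ':'-separated search path.  The directories and library name below
// are examples only.
//
//   char buffer[MAXPATHLEN];
//   if (os::dll_build_name(buffer, sizeof(buffer),
//                          "/usr/jdk/lib:/opt/lib",  // pname: search path
//                          "verify")) {              // fname: bare name
//     // buffer now holds the first existing candidate,
//     // e.g. "/usr/jdk/lib/libverify.so"
//   }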
1614 
1615 // check if addr is inside libjvm.so
1616 bool os::address_is_in_vm(address addr) {
1617   static address libjvm_base_addr;
1618   Dl_info dlinfo;
1619 
1620   if (libjvm_base_addr == NULL) {
1621     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1622       libjvm_base_addr = (address)dlinfo.dli_fbase;
1623     }
1624     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1625   }
1626 
1627   if (dladdr((void *)addr, &dlinfo) != 0) {
1628     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1629   }
1630 
1631   return false;
1632 }
1633 
1634 typedef int (*dladdr1_func_type)(void *, Dl_info *, void **, int);
1635 static dladdr1_func_type dladdr1_func = NULL;
1636 
1637 bool os::dll_address_to_function_name(address addr, char *buf,
1638                                       int buflen, int * offset) {
1639   // buf is not optional, but offset is optional
1640   assert(buf != NULL, "sanity check");
1641 
1642   Dl_info dlinfo;
1643 
1644   // dladdr1_func was initialized in os::init()
1645   if (dladdr1_func != NULL) {
1646     // yes, we have dladdr1
1647 
1648     // Support for dladdr1 is checked at runtime; it may be
1649     // available even if the vm is built on a machine that does
1650     // not have dladdr1 support.  Make sure there is a value for
1651     // RTLD_DL_SYMENT.
1652 #ifndef RTLD_DL_SYMENT
1653   #define RTLD_DL_SYMENT 1
1654 #endif
1655 #ifdef _LP64
1656     Elf64_Sym * info;
1657 #else
1658     Elf32_Sym * info;
1659 #endif
1660     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1661                      RTLD_DL_SYMENT) != 0) {
1662       // see if we have a matching symbol that covers our address
1663       if (dlinfo.dli_saddr != NULL &&
1664           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1665         if (dlinfo.dli_sname != NULL) {
1666           if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1667             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1668           }
1669           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1670           return true;
1671         }
1672       }
1673       // no matching symbol so try for just file info
1674       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1675         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1676                             buf, buflen, offset, dlinfo.dli_fname)) {
1677           return true;
1678         }
1679       }
1680     }
1681     buf[0] = '\0';
1682     if (offset != NULL) *offset  = -1;
1683     return false;
1684   }
1685 
1686   // no, only dladdr is available
1687   if (dladdr((void *)addr, &dlinfo) != 0) {
1688     // see if we have a matching symbol
1689     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1690       if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1691         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1692       }
1693       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1694       return true;
1695     }
1696     // no matching symbol so try for just file info
1697     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1698       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1699                           buf, buflen, offset, dlinfo.dli_fname)) {
1700         return true;
1701       }
1702     }
1703   }
1704   buf[0] = '\0';
1705   if (offset != NULL) *offset  = -1;
1706   return false;
1707 }
1708 
1709 bool os::dll_address_to_library_name(address addr, char* buf,
1710                                      int buflen, int* offset) {
1711   // buf is not optional, but offset is optional
1712   assert(buf != NULL, "sanity check");
1713 
1714   Dl_info dlinfo;
1715 
1716   if (dladdr((void*)addr, &dlinfo) != 0) {
1717     if (dlinfo.dli_fname != NULL) {
1718       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1719     }
1720     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1721       *offset = addr - (address)dlinfo.dli_fbase;
1722     }
1723     return true;
1724   }
1725 
1726   buf[0] = '\0';
1727   if (offset) *offset = -1;
1728   return false;
1729 }
1730 
1731 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1732   Dl_info dli;
1733   // Sanity check?
1734   if (dladdr(CAST_FROM_FN_PTR(void *, os::get_loaded_modules_info), &dli) == 0 ||
1735       dli.dli_fname == NULL) {
1736     return 1;
1737   }
1738 
1739   void * handle = dlopen(dli.dli_fname, RTLD_LAZY);
1740   if (handle == NULL) {
1741     return 1;
1742   }
1743 
1744   Link_map *map = NULL;  // stays NULL if dlinfo() below fails
1745   dlinfo(handle, RTLD_DI_LINKMAP, &map);
1746   if (map == NULL) {
1747     dlclose(handle);
1748     return 1;
1749   }
1750 
1751   while (map->l_prev != NULL) {
1752     map = map->l_prev;
1753   }
1754 
1755   while (map != NULL) {
1756     // Iterate through all map entries and call callback with fields of interest
1757     if (callback(map->l_name, (address)map->l_addr, (address)0, param)) {
1758       dlclose(handle);
1759       return 1;
1760     }
1761     map = map->l_next;
1762   }
1763 
1764   dlclose(handle);
1765   return 0;
1766 }
1767 
1768 int _print_dll_info_cb(const char * name, address base_address, address top_address, void * param) {
1769   outputStream * out = (outputStream *) param;
1770   out->print_cr(PTR_FORMAT " \t%s", base_address, name);
1771   return 0;
1772 }
1773 
1774 void os::print_dll_info(outputStream * st) {
1775   st->print_cr("Dynamic libraries:"); st->flush();
1776   if (get_loaded_modules_info(_print_dll_info_cb, (void *)st)) {
1777     st->print_cr("Error: Cannot print dynamic libraries.");
1778   }
1779 }
1780 
1781 // Loads a .dll/.so and,
1782 // in case of error, checks whether the .dll/.so was built for the
1783 // same architecture that Hotspot is running on.
1784 
1785 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1786   void * result= ::dlopen(filename, RTLD_LAZY);
1787   if (result != NULL) {
1788     // Successful loading
1789     return result;
1790   }
1791 
1792   Elf32_Ehdr elf_head;
1793 
1794   // Read system error message into ebuf
1795   // It may or may not be overwritten below
1796   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1797   ebuf[ebuflen-1]='\0';
1798   int diag_msg_max_length=ebuflen-strlen(ebuf);
1799   char* diag_msg_buf=ebuf+strlen(ebuf);
1800 
1801   if (diag_msg_max_length==0) {
1802     // No more space in ebuf for additional diagnostics message
1803     return NULL;
1804   }
1805 
1806 
1807   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1808 
1809   if (file_descriptor < 0) {
1810     // Can't open library, report dlerror() message
1811     return NULL;
1812   }
1813 
1814   bool failed_to_read_elf_head=
1815     (sizeof(elf_head)!=
1816      (::read(file_descriptor, &elf_head,sizeof(elf_head))));
1817 
1818   ::close(file_descriptor);
1819   if (failed_to_read_elf_head) {
1820     // file i/o error - report dlerror() msg
1821     return NULL;
1822   }
1823 
1824   typedef struct {
1825     Elf32_Half  code;         // Actual value as defined in elf.h
1826     Elf32_Half  compat_class; // Compatibility class of the arch as the VM sees it
1827     char        elf_class;    // 32 or 64 bit
1828     char        endianess;    // MSB or LSB
1829     char*       name;         // String representation
1830   } arch_t;
1831 
1832   static const arch_t arch_array[]={
1833     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1834     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1835     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1836     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1837     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1838     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1839     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1840     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1841     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1842     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1843   };
1844 
1845 #if  (defined IA32)
1846   static  Elf32_Half running_arch_code=EM_386;
1847 #elif   (defined AMD64)
1848   static  Elf32_Half running_arch_code=EM_X86_64;
1849 #elif  (defined IA64)
1850   static  Elf32_Half running_arch_code=EM_IA_64;
1851 #elif  (defined __sparc) && (defined _LP64)
1852   static  Elf32_Half running_arch_code=EM_SPARCV9;
1853 #elif  (defined __sparc) && (!defined _LP64)
1854   static  Elf32_Half running_arch_code=EM_SPARC;
1855 #elif  (defined __powerpc64__)
1856   static  Elf32_Half running_arch_code=EM_PPC64;
1857 #elif  (defined __powerpc__)
1858   static  Elf32_Half running_arch_code=EM_PPC;
1859 #elif (defined ARM)
1860   static  Elf32_Half running_arch_code=EM_ARM;
1861 #else
1862   #error Method os::dll_load requires that one of the following is defined:\
1863        IA32, AMD64, IA64, __sparc, __powerpc__, ARM
1864 #endif
1865 
1866   // Identify compatibility class for VM's architecture and library's architecture
1867   // Obtain string descriptions for architectures
1868 
1869   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1870   int running_arch_index=-1;
1871 
1872   for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) {
1873     if (running_arch_code == arch_array[i].code) {
1874       running_arch_index    = i;
1875     }
1876     if (lib_arch.code == arch_array[i].code) {
1877       lib_arch.compat_class = arch_array[i].compat_class;
1878       lib_arch.name         = arch_array[i].name;
1879     }
1880   }
1881 
1882   assert(running_arch_index != -1,
1883          "Didn't find running architecture code (running_arch_code) in arch_array");
1884   if (running_arch_index == -1) {
1885     // Even though running architecture detection failed
1886     // we may still continue with reporting dlerror() message
1887     return NULL;
1888   }
1889 
1890   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1891     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1892     return NULL;
1893   }
1894 
1895   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1896     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1897     return NULL;
1898   }
1899 
1900   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1901     if (lib_arch.name!=NULL) {
1902       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1903                  " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1904                  lib_arch.name, arch_array[running_arch_index].name);
1905     } else {
1906       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1907                  " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1908                  lib_arch.code,
1909                  arch_array[running_arch_index].name);
1910     }
1911   }
1912 
1913   return NULL;
1914 }
1915 
1916 void* os::dll_lookup(void* handle, const char* name) {
1917   return dlsym(handle, name);
1918 }
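
// Illustrative usage sketch (not compiled): loading a library and looking up a
// symbol, with the architecture diagnostics from os::dll_load appended to ebuf
// on failure.  The library path and symbol name are examples only.
//
//   char ebuf[1024];
//   void* handle = os::dll_load("/opt/lib/libexample.so", ebuf, sizeof(ebuf));
//   if (handle == NULL) {
//     warning("could not load library: %s", ebuf);  // dlerror() text plus diagnostics
//   } else {
//     typedef int (*example_init_t)(void);
//     example_init_t init =
//       CAST_TO_FN_PTR(example_init_t, os::dll_lookup(handle, "example_init"));
//   }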
1919 
1920 void* os::get_default_process_handle() {
1921   return (void*)::dlopen(NULL, RTLD_LAZY);
1922 }
1923 
1924 int os::stat(const char *path, struct stat *sbuf) {
1925   char pathbuf[MAX_PATH];
1926   if (strlen(path) > MAX_PATH - 1) {
1927     errno = ENAMETOOLONG;
1928     return -1;
1929   }
1930   os::native_path(strcpy(pathbuf, path));
1931   return ::stat(pathbuf, sbuf);
1932 }
1933 
1934 static bool _print_ascii_file(const char* filename, outputStream* st) {
1935   int fd = ::open(filename, O_RDONLY);
1936   if (fd == -1) {
1937     return false;
1938   }
1939 
1940   char buf[32];
1941   int bytes;
1942   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
1943     st->print_raw(buf, bytes);
1944   }
1945 
1946   ::close(fd);
1947 
1948   return true;
1949 }
1950 
1951 void os::print_os_info_brief(outputStream* st) {
1952   os::Solaris::print_distro_info(st);
1953 
1954   os::Posix::print_uname_info(st);
1955 
1956   os::Solaris::print_libversion_info(st);
1957 }
1958 
1959 void os::print_os_info(outputStream* st) {
1960   st->print("OS:");
1961 
1962   os::Solaris::print_distro_info(st);
1963 
1964   os::Posix::print_uname_info(st);
1965 
1966   os::Solaris::print_libversion_info(st);
1967 
1968   os::Posix::print_rlimit_info(st);
1969 
1970   os::Posix::print_load_average(st);
1971 }
1972 
1973 void os::Solaris::print_distro_info(outputStream* st) {
1974   if (!_print_ascii_file("/etc/release", st)) {
1975     st->print("Solaris");
1976   }
1977   st->cr();
1978 }
1979 
1980 void os::Solaris::print_libversion_info(outputStream* st) {
1981   st->print("  (T2 libthread)");
1982   st->cr();
1983 }
1984 
1985 static bool check_addr0(outputStream* st) {
1986   jboolean status = false;
1987   int fd = ::open("/proc/self/map",O_RDONLY);
1988   if (fd >= 0) {
1989     prmap_t p;
1990     while (::read(fd, &p, sizeof(p)) > 0) {
1991       if (p.pr_vaddr == 0x0) {
1992         st->print("Warning: Address: 0x%x, Size: %dK, ", p.pr_vaddr, p.pr_size/1024);
1993         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
1994         st->print("Access:");
1995         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
1996         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
1997         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
1998         st->cr();
1999         status = true;
2000       }
2001     }
2002     ::close(fd);
2003   }
2004   return status;
2005 }
2006 
2007 void os::pd_print_cpu_info(outputStream* st) {
2008   // Nothing to do for now.
2009 }
2010 
2011 void os::print_memory_info(outputStream* st) {
2012   st->print("Memory:");
2013   st->print(" %dk page", os::vm_page_size()>>10);
2014   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
2015   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
2016   st->cr();
2017   (void) check_addr0(st);
2018 }
2019 
2020 void os::print_siginfo(outputStream* st, void* siginfo) {
2021   const siginfo_t* si = (const siginfo_t*)siginfo;
2022 
2023   os::Posix::print_siginfo_brief(st, si);
2024 
2025   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2026       UseSharedSpaces) {
2027     FileMapInfo* mapinfo = FileMapInfo::current_info();
2028     if (mapinfo->is_in_shared_space(si->si_addr)) {
2029       st->print("\n\nError accessing class data sharing archive."   \
2030                 " Mapped file inaccessible during execution, "      \
2031                 "possible disk/network problem.");
2032     }
2033   }
2034   st->cr();
2035 }
2036 
2037 // Moved out of the signal handling group because they are needed here for
2038 // diagnostic prints.
2039 #define OLDMAXSIGNUM 32
2040 static int Maxsignum = 0;
2041 static int *ourSigFlags = NULL;
2042 
2043 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2044 
2045 int os::Solaris::get_our_sigflags(int sig) {
2046   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2047   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2048   return ourSigFlags[sig];
2049 }
2050 
2051 void os::Solaris::set_our_sigflags(int sig, int flags) {
2052   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2053   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2054   ourSigFlags[sig] = flags;
2055 }
2056 
2057 
2058 static const char* get_signal_handler_name(address handler,
2059                                            char* buf, int buflen) {
2060   int offset;
2061   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2062   if (found) {
2063     // skip directory names
2064     const char *p1, *p2;
2065     p1 = buf;
2066     size_t len = strlen(os::file_separator());
2067     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2068     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2069   } else {
2070     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2071   }
2072   return buf;
2073 }
2074 
2075 static void print_signal_handler(outputStream* st, int sig,
2076                                  char* buf, size_t buflen) {
2077   struct sigaction sa;
2078 
2079   sigaction(sig, NULL, &sa);
2080 
2081   st->print("%s: ", os::exception_name(sig, buf, buflen));
2082 
2083   address handler = (sa.sa_flags & SA_SIGINFO)
2084                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2085                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
2086 
2087   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2088     st->print("SIG_DFL");
2089   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2090     st->print("SIG_IGN");
2091   } else {
2092     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2093   }
2094 
2095   st->print(", sa_mask[0]=");
2096   os::Posix::print_signal_set_short(st, &sa.sa_mask);
2097 
2098   address rh = VMError::get_resetted_sighandler(sig);
2099   // Perhaps the handler was reset by VMError?
2100   if (rh != NULL) {
2101     handler = rh;
2102     sa.sa_flags = VMError::get_resetted_sigflags(sig);
2103   }
2104 
2105   st->print(", sa_flags=");
2106   os::Posix::print_sa_flags(st, sa.sa_flags);
2107 
2108   // Check: is it our handler?
2109   if (handler == CAST_FROM_FN_PTR(address, signalHandler) ||
2110       handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
2111     // It is our signal handler
2112     // check for flags
2113     if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
2114       st->print(
2115                 ", flags were changed from " PTR32_FORMAT ", consider using jsig library",
2116                 os::Solaris::get_our_sigflags(sig));
2117     }
2118   }
2119   st->cr();
2120 }
2121 
2122 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2123   st->print_cr("Signal Handlers:");
2124   print_signal_handler(st, SIGSEGV, buf, buflen);
2125   print_signal_handler(st, SIGBUS , buf, buflen);
2126   print_signal_handler(st, SIGFPE , buf, buflen);
2127   print_signal_handler(st, SIGPIPE, buf, buflen);
2128   print_signal_handler(st, SIGXFSZ, buf, buflen);
2129   print_signal_handler(st, SIGILL , buf, buflen);
2130   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2131   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2132   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2133   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2134   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2135   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2136   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2137   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2138 }
2139 
2140 static char saved_jvm_path[MAXPATHLEN] = { 0 };
2141 
2142 // Find the full path to the current module, libjvm.so
2143 void os::jvm_path(char *buf, jint buflen) {
2144   // Error checking.
2145   if (buflen < MAXPATHLEN) {
2146     assert(false, "must use a large-enough buffer");
2147     buf[0] = '\0';
2148     return;
2149   }
2150   // Lazy resolve the path to current module.
2151   if (saved_jvm_path[0] != 0) {
2152     strcpy(buf, saved_jvm_path);
2153     return;
2154   }
2155 
2156   Dl_info dlinfo;
2157   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2158   assert(ret != 0, "cannot locate libjvm");
2159   if (ret != 0 && dlinfo.dli_fname != NULL) {
2160     realpath((char *)dlinfo.dli_fname, buf);
2161   } else {
2162     buf[0] = '\0';
2163     return;
2164   }
2165 
2166   if (Arguments::sun_java_launcher_is_altjvm()) {
2167     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
2168     // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
2169     // If "/jre/lib/" appears at the right place in the string, then
2170     // assume we are installed in a JDK and we're done.  Otherwise, check
2171     // for a JAVA_HOME environment variable and fix up the path so it
2172     // looks like libjvm.so is installed there (append a fake suffix
2173     // hotspot/libjvm.so).
2174     const char *p = buf + strlen(buf) - 1;
2175     for (int count = 0; p > buf && count < 5; ++count) {
2176       for (--p; p > buf && *p != '/'; --p)
2177         /* empty */ ;
2178     }
2179 
2180     if (strncmp(p, "/jre/lib/", 9) != 0) {
2181       // Look for JAVA_HOME in the environment.
2182       char* java_home_var = ::getenv("JAVA_HOME");
2183       if (java_home_var != NULL && java_home_var[0] != 0) {
2184         char cpu_arch[12];
2185         char* jrelib_p;
2186         int   len;
2187         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2188 #ifdef _LP64
2189         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2190         if (strcmp(cpu_arch, "sparc") == 0) {
2191           strcat(cpu_arch, "v9");
2192         } else if (strcmp(cpu_arch, "i386") == 0) {
2193           strcpy(cpu_arch, "amd64");
2194         }
2195 #endif
2196         // Check the current module name "libjvm.so".
2197         p = strrchr(buf, '/');
2198         assert(strstr(p, "/libjvm") == p, "invalid library name");
2199 
2200         realpath(java_home_var, buf);
2201         // determine if this is a legacy image or modules image
2202         // modules image doesn't have "jre" subdirectory
2203         len = strlen(buf);
2204         assert(len < buflen, "Ran out of buffer space");
2205         jrelib_p = buf + len;
2206         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2207         if (0 != access(buf, F_OK)) {
2208           snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2209         }
2210 
2211         if (0 == access(buf, F_OK)) {
2212           // Use current module name "libjvm.so"
2213           len = strlen(buf);
2214           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2215         } else {
2216           // Go back to path of .so
2217           realpath((char *)dlinfo.dli_fname, buf);
2218         }
2219       }
2220     }
2221   }
2222 
2223   strncpy(saved_jvm_path, buf, MAXPATHLEN);
2224   saved_jvm_path[MAXPATHLEN - 1] = '\0';
2225 }
2226 
2227 
2228 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2229   // no prefix required, not even "_"
2230 }
2231 
2232 
2233 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2234   // no suffix required
2235 }
2236 
2237 // This method is a copy of JDK's sysGetLastErrorString
2238 // from src/solaris/hpi/src/system_md.c
2239 
2240 size_t os::lasterror(char *buf, size_t len) {
2241   if (errno == 0)  return 0;
2242 
2243   const char *s = ::strerror(errno);
2244   size_t n = ::strlen(s);
2245   if (n >= len) {
2246     n = len - 1;
2247   }
2248   ::strncpy(buf, s, n);
2249   buf[n] = '\0';
2250   return n;
2251 }
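
// Illustrative usage sketch (not compiled): turning the last errno into a
// message after a failed system call.  The file name is an example only.
//
//   if (::unlink("/tmp/example.tmp") == -1) {
//     char msg[256];
//     size_t n = os::lasterror(msg, sizeof(msg));  // returns 0 if errno == 0
//     if (n > 0) warning("unlink failed: %s", msg);
//   }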
2252 
2253 
2254 // sun.misc.Signal
2255 
2256 extern "C" {
2257   static void UserHandler(int sig, void *siginfo, void *context) {
2258     // Ctrl-C is pressed during error reporting, likely because the error
2259     // handler fails to abort. Let VM die immediately.
2260     if (sig == SIGINT && is_error_reported()) {
2261       os::die();
2262     }
2263 
2264     os::signal_notify(sig);
2265     // We do not need to reinstate the signal handler each time...
2266   }
2267 }
2268 
2269 void* os::user_handler() {
2270   return CAST_FROM_FN_PTR(void*, UserHandler);
2271 }
2272 
2273 class Semaphore : public StackObj {
2274  public:
2275   Semaphore();
2276   ~Semaphore();
2277   void signal();
2278   void wait();
2279   bool trywait();
2280   bool timedwait(unsigned int sec, int nsec);
2281  private:
2282   sema_t _semaphore;
2283 };
2284 
2285 
2286 Semaphore::Semaphore() {
2287   sema_init(&_semaphore, 0, NULL, NULL);
2288 }
2289 
2290 Semaphore::~Semaphore() {
2291   sema_destroy(&_semaphore);
2292 }
2293 
2294 void Semaphore::signal() {
2295   sema_post(&_semaphore);
2296 }
2297 
2298 void Semaphore::wait() {
2299   sema_wait(&_semaphore);
2300 }
2301 
2302 bool Semaphore::trywait() {
2303   return sema_trywait(&_semaphore) == 0;
2304 }
2305 
2306 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2307   struct timespec ts;
2308   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2309 
2310   while (1) {
2311     int result = sema_timedwait(&_semaphore, &ts);
2312     if (result == 0) {
2313       return true;
2314     } else if (errno == EINTR) {
2315       continue;
2316     } else if (errno == ETIME) {
2317       return false;
2318     } else {
2319       return false;
2320     }
2321   }
2322 }
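
// Illustrative usage sketch (not compiled) for the Semaphore above: a waiter
// blocks with a timeout while another thread posts.
//
//   Semaphore sem;
//   // waiting side:
//   if (!sem.timedwait(1 /*sec*/, 0 /*nsec*/)) {
//     // no post arrived within ~1 second (or the wait failed)
//   }
//   // posting side:
//   sem.signal();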
2323 
2324 extern "C" {
2325   typedef void (*sa_handler_t)(int);
2326   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2327 }
2328 
2329 void* os::signal(int signal_number, void* handler) {
2330   struct sigaction sigAct, oldSigAct;
2331   sigfillset(&(sigAct.sa_mask));
2332   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2333   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2334 
2335   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
2336     // -1 means registration failed
2337     return (void *)-1;
2338   }
2339 
2340   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2341 }
2342 
2343 void os::signal_raise(int signal_number) {
2344   raise(signal_number);
2345 }
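
// Illustrative usage sketch (not compiled): installing the UserHandler above
// for a sun.misc.Signal-style signal and keeping the previous handler.  The
// signal number is an example only.
//
//   void* old_handler = os::signal(SIGUSR2, os::user_handler());
//   if (old_handler == (void*)-1) {
//     // sigaction() failed; the handler was not installed
//   }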
2346 
2347 // The following code is moved from os.cpp for making this
2348 // code platform specific, which it is by its very nature.
2349 
2350 // a counter for each possible signal value
2351 static int Sigexit = 0;
2352 static int Maxlibjsigsigs;
2353 static jint *pending_signals = NULL;
2354 static int *preinstalled_sigs = NULL;
2355 static struct sigaction *chainedsigactions = NULL;
2356 static sema_t sig_sem;
2357 typedef int (*version_getting_t)();
2358 version_getting_t os::Solaris::get_libjsig_version = NULL;
2359 static int libjsigversion = 0;
2360 
2361 int os::sigexitnum_pd() {
2362   assert(Sigexit > 0, "signal memory not yet initialized");
2363   return Sigexit;
2364 }
2365 
2366 void os::Solaris::init_signal_mem() {
2367   // Initialize signal structures
2368   Maxsignum = SIGRTMAX;
2369   Sigexit = Maxsignum+1;
2370   assert(Maxsignum >0, "Unable to obtain max signal number");
2371 
2372   Maxlibjsigsigs = Maxsignum;
2373 
2374   // pending_signals has one int per signal
2375   // The additional signal is for SIGEXIT - exit signal to signal_thread
2376   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2377   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2378 
2379   if (UseSignalChaining) {
2380     chainedsigactions = (struct sigaction *)os::malloc(sizeof(struct sigaction)
2381                                                        * (Maxsignum + 1), mtInternal);
2382     memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2383     preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2384     memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2385   }
2386   ourSigFlags = (int*)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2387   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2388 }
2389 
2390 void os::signal_init_pd() {
2391   int ret;
2392 
2393   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2394   assert(ret == 0, "sema_init() failed");
2395 }
2396 
2397 void os::signal_notify(int signal_number) {
2398   int ret;
2399 
2400   Atomic::inc(&pending_signals[signal_number]);
2401   ret = ::sema_post(&sig_sem);
2402   assert(ret == 0, "sema_post() failed");
2403 }
2404 
2405 static int check_pending_signals(bool wait_for_signal) {
2406   int ret;
2407   while (true) {
2408     for (int i = 0; i < Sigexit + 1; i++) {
2409       jint n = pending_signals[i];
2410       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2411         return i;
2412       }
2413     }
2414     if (!wait_for_signal) {
2415       return -1;
2416     }
2417     JavaThread *thread = JavaThread::current();
2418     ThreadBlockInVM tbivm(thread);
2419 
2420     bool threadIsSuspended;
2421     do {
2422       thread->set_suspend_equivalent();
2423       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2424       while ((ret = ::sema_wait(&sig_sem)) == EINTR)
2425         ;
2426       assert(ret == 0, "sema_wait() failed");
2427 
2428       // were we externally suspended while we were waiting?
2429       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2430       if (threadIsSuspended) {
2431         // The semaphore has been incremented, but while we were waiting
2432         // another thread suspended us. We don't want to continue running
2433         // while suspended because that would surprise the thread that
2434         // suspended us.
2435         ret = ::sema_post(&sig_sem);
2436         assert(ret == 0, "sema_post() failed");
2437 
2438         thread->java_suspend_self();
2439       }
2440     } while (threadIsSuspended);
2441   }
2442 }
2443 
2444 int os::signal_lookup() {
2445   return check_pending_signals(false);
2446 }
2447 
2448 int os::signal_wait() {
2449   return check_pending_signals(true);
2450 }
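
// Illustrative sketch (not compiled) of a dispatcher loop that consumes the
// pending-signal counters maintained above.  dispatch_signal() is a
// hypothetical routine standing in for whatever forwards the signal to Java.
//
//   while (true) {
//     int sig = os::signal_wait();       // blocks on sig_sem until something is pending
//     if (sig == os::sigexitnum_pd()) {
//       break;                           // SIGEXIT: shut the dispatcher down
//     }
//     dispatch_signal(sig);
//   }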
2451 
2452 ////////////////////////////////////////////////////////////////////////////////
2453 // Virtual Memory
2454 
2455 static int page_size = -1;
2456 
2457 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2458 // clear this var if support is not available.
2459 static bool has_map_align = true;
2460 
2461 int os::vm_page_size() {
2462   assert(page_size != -1, "must call os::init");
2463   return page_size;
2464 }
2465 
2466 // Solaris allocates memory by pages.
2467 int os::vm_allocation_granularity() {
2468   assert(page_size != -1, "must call os::init");
2469   return page_size;
2470 }
2471 
2472 static bool recoverable_mmap_error(int err) {
2473   // See if the error is one we can let the caller handle. This
2474   // list of errno values comes from the Solaris mmap(2) man page.
2475   switch (err) {
2476   case EBADF:
2477   case EINVAL:
2478   case ENOTSUP:
2479     // let the caller deal with these errors
2480     return true;
2481 
2482   default:
2483     // Any remaining errors on this OS can cause our reserved mapping
2484     // to be lost. That can cause confusion where different data
2485     // structures think they have the same memory mapped. The worst
2486     // scenario is if both the VM and a library think they have the
2487     // same memory mapped.
2488     return false;
2489   }
2490 }
2491 
2492 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2493                                     int err) {
2494   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2495           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2496           strerror(err), err);
2497 }
2498 
2499 static void warn_fail_commit_memory(char* addr, size_t bytes,
2500                                     size_t alignment_hint, bool exec,
2501                                     int err) {
2502   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2503           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2504           alignment_hint, exec, strerror(err), err);
2505 }
2506 
2507 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2508   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2509   size_t size = bytes;
2510   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2511   if (res != NULL) {
2512     if (UseNUMAInterleaving) {
2513       numa_make_global(addr, bytes);
2514     }
2515     return 0;
2516   }
2517 
2518   int err = errno;  // save errno from mmap() call in mmap_chunk()
2519 
2520   if (!recoverable_mmap_error(err)) {
2521     warn_fail_commit_memory(addr, bytes, exec, err);
2522     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2523   }
2524 
2525   return err;
2526 }
2527 
2528 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2529   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2530 }
2531 
2532 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2533                                   const char* mesg) {
2534   assert(mesg != NULL, "mesg must be specified");
2535   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2536   if (err != 0) {
2537     // the caller wants all commit errors to exit with the specified mesg:
2538     warn_fail_commit_memory(addr, bytes, exec, err);
2539     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2540   }
2541 }
2542 
2543 size_t os::Solaris::page_size_for_alignment(size_t alignment) {
2544   assert(is_size_aligned(alignment, (size_t) vm_page_size()),
2545          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
2546                  alignment, (size_t) vm_page_size()));
2547 
2548   for (int i = 0; _page_sizes[i] != 0; i++) {
2549     if (is_size_aligned(alignment, _page_sizes[i])) {
2550       return _page_sizes[i];
2551     }
2552   }
2553 
2554   return (size_t) vm_page_size();
2555 }
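
// Worked example (page sizes are hypothetical): with _page_sizes sorted
// descending as { 4M, 512K, 64K, 8K, 0 }, an alignment of 1M is not a multiple
// of 4M but is a multiple of 512K, so 512K is returned; an alignment equal to
// vm_page_size() matches the smallest entry and simply yields the base page size.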
2556 
2557 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2558                                     size_t alignment_hint, bool exec) {
2559   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2560   if (err == 0 && UseLargePages && alignment_hint > 0) {
2561     assert(is_size_aligned(bytes, alignment_hint),
2562            err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint));
2563 
2564     // The syscall memcntl requires an exact page size (see man memcntl for details).
2565     size_t page_size = page_size_for_alignment(alignment_hint);
2566     if (page_size > (size_t) vm_page_size()) {
2567       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2568     }
2569   }
2570   return err;
2571 }
2572 
2573 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2574                           bool exec) {
2575   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2576 }
2577 
2578 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2579                                   size_t alignment_hint, bool exec,
2580                                   const char* mesg) {
2581   assert(mesg != NULL, "mesg must be specified");
2582   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2583   if (err != 0) {
2584     // the caller wants all commit errors to exit with the specified mesg:
2585     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2586     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2587   }
2588 }
2589 
2590 // Uncommit the pages in a specified region.
2591 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2592   if (madvise(addr, bytes, MADV_FREE) < 0) {
2593     debug_only(warning("MADV_FREE failed."));
2594     return;
2595   }
2596 }
2597 
2598 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2599   return os::commit_memory(addr, size, !ExecMem);
2600 }
2601 
2602 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2603   return os::uncommit_memory(addr, size);
2604 }
2605 
2606 // Change the page size in a given range.
2607 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2608   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2609   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2610   if (UseLargePages) {
2611     Solaris::setup_large_pages(addr, bytes, alignment_hint);
2612   }
2613 }
2614 
2615 // Tell the OS to make the range local to the first-touching LWP
2616 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2617   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2618   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2619     debug_only(warning("MADV_ACCESS_LWP failed."));
2620   }
2621 }
2622 
2623 // Tell the OS that this range would be accessed from different LWPs.
2624 void os::numa_make_global(char *addr, size_t bytes) {
2625   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2626   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2627     debug_only(warning("MADV_ACCESS_MANY failed."));
2628   }
2629 }
2630 
2631 // Get the number of the locality groups.
2632 size_t os::numa_get_groups_num() {
2633   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2634   return n != -1 ? n : 1;
2635 }
2636 
2637 // Get a list of leaf locality groups. A leaf lgroup is a group that
2638 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
2639 // board. An LWP is assigned to one of these groups upon creation.
2640 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2641   if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2642     ids[0] = 0;
2643     return 1;
2644   }
2645   int result_size = 0, top = 1, bottom = 0, cur = 0;
2646   for (int k = 0; k < size; k++) {
2647     int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2648                                    (Solaris::lgrp_id_t*)&ids[top], size - top);
2649     if (r == -1) {
2650       ids[0] = 0;
2651       return 1;
2652     }
2653     if (!r) {
2654       // That's a leaf node.
2655       assert(bottom <= cur, "Sanity check");
2656       // Check if the node has memory
2657       if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2658                                   NULL, 0, LGRP_RSRC_MEM) > 0) {
2659         ids[bottom++] = ids[cur];
2660       }
2661     }
2662     top += r;
2663     cur++;
2664   }
2665   if (bottom == 0) {
2666     // Handle the situation when the OS reports no memory available.
2667     // Assume UMA architecture.
2668     ids[0] = 0;
2669     return 1;
2670   }
2671   return bottom;
2672 }
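
// Illustrative usage sketch (not compiled): querying the lgroup topology the
// way a NUMA-aware caller might, mirroring the alloca pattern used in
// numa_get_group_id() below.
//
//   size_t num = os::numa_get_groups_num();
//   int* ids = (int*)alloca(num * sizeof(int));
//   size_t leaves = os::numa_get_leaf_groups(ids, num);  // ids[0..leaves-1] have memory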
2673 
2674 // Detect a topology change. This typically happens during CPU plugging/unplugging.
2675 bool os::numa_topology_changed() {
2676   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2677   if (is_stale != -1 && is_stale) {
2678     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2679     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2680     assert(c != 0, "Failure to initialize LGRP API");
2681     Solaris::set_lgrp_cookie(c);
2682     return true;
2683   }
2684   return false;
2685 }
2686 
2687 // Get the group id of the current LWP.
2688 int os::numa_get_group_id() {
2689   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2690   if (lgrp_id == -1) {
2691     return 0;
2692   }
2693   const int size = os::numa_get_groups_num();
2694   int *ids = (int*)alloca(size * sizeof(int));
2695 
2696   // Get the ids of all lgroups with memory; r is the count.
2697   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2698                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2699   if (r <= 0) {
2700     return 0;
2701   }
2702   return ids[os::random() % r];
2703 }
2704 
2705 // Request information about the page.
2706 bool os::get_page_info(char *start, page_info* info) {
2707   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2708   uint64_t addr = (uintptr_t)start;
2709   uint64_t outdata[2];
2710   uint_t validity = 0;
2711 
2712   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2713     return false;
2714   }
2715 
2716   info->size = 0;
2717   info->lgrp_id = -1;
2718 
2719   if ((validity & 1) != 0) {
2720     if ((validity & 2) != 0) {
2721       info->lgrp_id = outdata[0];
2722     }
2723     if ((validity & 4) != 0) {
2724       info->size = outdata[1];
2725     }
2726     return true;
2727   }
2728   return false;
2729 }
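
// Illustrative usage sketch (not compiled): asking which lgroup and page size
// back a given address (addr is any char* the caller already owns).
//
//   page_info info;
//   if (os::get_page_info(addr, &info)) {
//     // info.lgrp_id is -1 if the lgroup is unknown; info.size is 0 if the
//     // page size is unknown
//   }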
2730 
2731 // Scan the pages from start to end until a page different than
2732 // the one described in the info parameter is encountered.
2733 char *os::scan_pages(char *start, char* end, page_info* page_expected,
2734                      page_info* page_found) {
2735   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2736   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2737   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2738   uint_t validity[MAX_MEMINFO_CNT];
2739 
2740   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2741   uint64_t p = (uint64_t)start;
2742   while (p < (uint64_t)end) {
2743     addrs[0] = p;
2744     size_t addrs_count = 1;
2745     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2746       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2747       addrs_count++;
2748     }
2749 
2750     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2751       return NULL;
2752     }
2753 
2754     size_t i = 0;
2755     for (; i < addrs_count; i++) {
2756       if ((validity[i] & 1) != 0) {
2757         if ((validity[i] & 4) != 0) {
2758           if (outdata[types * i + 1] != page_expected->size) {
2759             break;
2760           }
2761         } else if (page_expected->size != 0) {
2762           break;
2763         }
2764 
2765         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2766           if (outdata[types * i] != page_expected->lgrp_id) {
2767             break;
2768           }
2769         }
2770       } else {
2771         return NULL;
2772       }
2773     }
2774 
2775     if (i < addrs_count) {
2776       if ((validity[i] & 2) != 0) {
2777         page_found->lgrp_id = outdata[types * i];
2778       } else {
2779         page_found->lgrp_id = -1;
2780       }
2781       if ((validity[i] & 4) != 0) {
2782         page_found->size = outdata[types * i + 1];
2783       } else {
2784         page_found->size = 0;
2785       }
2786       return (char*)addrs[i];
2787     }
2788 
2789     p = addrs[addrs_count - 1] + page_size;
2790   }
2791   return end;
2792 }
2793 
2794 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2795   size_t size = bytes;
2796   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2797   // uncommitted page. Otherwise, the read/write might succeed if we
2798   // have enough swap space to back the physical page.
2799   return
2800     NULL != Solaris::mmap_chunk(addr, size,
2801                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2802                                 PROT_NONE);
2803 }
2804 
2805 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2806   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2807 
2808   if (b == MAP_FAILED) {
2809     return NULL;
2810   }
2811   return b;
2812 }
2813 
2814 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes,
2815                              size_t alignment_hint, bool fixed) {
2816   char* addr = requested_addr;
2817   int flags = MAP_PRIVATE | MAP_NORESERVE;
2818 
2819   assert(!(fixed && (alignment_hint > 0)),
2820          "alignment hint meaningless with fixed mmap");
2821 
2822   if (fixed) {
2823     flags |= MAP_FIXED;
2824   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
2825     flags |= MAP_ALIGN;
2826     addr = (char*) alignment_hint;
2827   }
2828 
2829   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2830   // uncommitted page. Otherwise, the read/write might succeed if we
2831   // have enough swap space to back the physical page.
2832   return mmap_chunk(addr, bytes, flags, PROT_NONE);
2833 }
2834 
2835 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
2836                             size_t alignment_hint) {
2837   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint,
2838                                   (requested_addr != NULL));
2839 
2840   guarantee(requested_addr == NULL || requested_addr == addr,
2841             "OS failed to return requested mmap address.");
2842   return addr;
2843 }
2844 
2845 // Reserve memory at an arbitrary address, only if that area is
2846 // available (and not reserved for something else).
2847 
2848 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2849   const int max_tries = 10;
2850   char* base[max_tries];
2851   size_t size[max_tries];
2852 
2853   // Solaris adds a gap between mmap'ed regions.  The size of the gap
2854   // is dependent on the requested size and the MMU.  Our initial gap
2855   // value here is just a guess and will be corrected later.
2856   bool had_top_overlap = false;
2857   bool have_adjusted_gap = false;
2858   size_t gap = 0x400000;
2859 
2860   // Assert only that the size is a multiple of the page size, since
2861   // that's all that mmap requires, and since that's all we really know
2862   // about at this low abstraction level.  If we need higher alignment,
2863   // we can either pass an alignment to this method or verify alignment
2864   // in one of the methods further up the call chain.  See bug 5044738.
2865   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2866 
2867   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2868   // Give it a try, if the kernel honors the hint we can return immediately.
2869   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2870 
2871   volatile int err = errno;
2872   if (addr == requested_addr) {
2873     return addr;
2874   } else if (addr != NULL) {
2875     pd_unmap_memory(addr, bytes);
2876   }
2877 
2878   if (PrintMiscellaneous && Verbose) {
2879     char buf[256];
2880     buf[0] = '\0';
2881     if (addr == NULL) {
2882       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
2883     }
2884     warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2885             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2886             "%s", bytes, requested_addr, addr, buf);
2887   }
2888 
2889   // Address hint method didn't work.  Fall back to the old method.
2890   // In theory, once SNV becomes our oldest supported platform, this
2891   // code will no longer be needed.
2892   //
2893   // Repeatedly allocate blocks until the block is allocated at the
2894   // right spot. Give up after max_tries.
2895   int i;
2896   for (i = 0; i < max_tries; ++i) {
2897     base[i] = reserve_memory(bytes);
2898 
2899     if (base[i] != NULL) {
2900       // Is this the block we wanted?
2901       if (base[i] == requested_addr) {
2902         size[i] = bytes;
2903         break;
2904       }
2905 
2906       // check that the gap value is right
2907       if (had_top_overlap && !have_adjusted_gap) {
2908         size_t actual_gap = base[i-1] - base[i] - bytes;
2909         if (gap != actual_gap) {
2910           // adjust the gap value and retry the last 2 allocations
2911           assert(i > 0, "gap adjustment code problem");
2912           have_adjusted_gap = true;  // adjust the gap only once, just in case
2913           gap = actual_gap;
2914           if (PrintMiscellaneous && Verbose) {
2915             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2916           }
2917           unmap_memory(base[i], bytes);
2918           unmap_memory(base[i-1], size[i-1]);
2919           i-=2;
2920           continue;
2921         }
2922       }
2923 
2924       // Does this overlap the block we wanted? Give back the overlapped
2925       // parts and try again.
2926       //
2927       // There is still a bug in this code: if top_overlap == bytes,
2928       // the overlap is offset from requested region by the value of gap.
2929       // In this case giving back the overlapped part will not work,
2930       // because we'll give back the entire block at base[i] and
2931       // therefore the subsequent allocation will not generate a new gap.
2932       // This could be fixed with a new algorithm that used larger
2933       // or variable size chunks to find the requested region -
2934       // but such a change would introduce additional complications.
2935       // It's rare enough that the planets align for this bug,
2936       // so we'll just wait for a fix for 6204603/5003415 which
2937       // will provide a mmap flag to allow us to avoid this business.
2938 
2939       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2940       if (top_overlap >= 0 && top_overlap < bytes) {
2941         had_top_overlap = true;
2942         unmap_memory(base[i], top_overlap);
2943         base[i] += top_overlap;
2944         size[i] = bytes - top_overlap;
2945       } else {
2946         size_t bottom_overlap = base[i] + bytes - requested_addr;
2947         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2948           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
2949             warning("attempt_reserve_memory_at: possible alignment bug");
2950           }
2951           unmap_memory(requested_addr, bottom_overlap);
2952           size[i] = bytes - bottom_overlap;
2953         } else {
2954           size[i] = bytes;
2955         }
2956       }
2957     }
2958   }
2959 
2960   // Give back the unused reserved pieces.
2961 
2962   for (int j = 0; j < i; ++j) {
2963     if (base[j] != NULL) {
2964       unmap_memory(base[j], size[j]);
2965     }
2966   }
2967 
2968   return (i < max_tries) ? requested_addr : NULL;
2969 }
2970 
2971 bool os::pd_release_memory(char* addr, size_t bytes) {
2972   size_t size = bytes;
2973   return munmap(addr, size) == 0;
2974 }
2975 
2976 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
2977   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
2978          "addr must be page aligned");
2979   int retVal = mprotect(addr, bytes, prot);
2980   return retVal == 0;
2981 }
2982 
2983 // Protect memory (Used to pass readonly pages through
2984 // JNI GetArray<type>Elements with empty arrays.)
2985 // Also, used for serialization page and for compressed oops null pointer
2986 // checking.
2987 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2988                         bool is_committed) {
2989   unsigned int p = 0;
2990   switch (prot) {
2991   case MEM_PROT_NONE: p = PROT_NONE; break;
2992   case MEM_PROT_READ: p = PROT_READ; break;
2993   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2994   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2995   default:
2996     ShouldNotReachHere();
2997   }
2998   // is_committed is unused.
2999   return solaris_mprotect(addr, bytes, p);
3000 }
3001 
3002 // guard_memory and unguard_memory only happen within stack guard pages.
3003 // Since ISM pertains only to the heap, guard and unguard memory should not
3004 // happen with an ISM region.
3005 bool os::guard_memory(char* addr, size_t bytes) {
3006   return solaris_mprotect(addr, bytes, PROT_NONE);
3007 }
3008 
3009 bool os::unguard_memory(char* addr, size_t bytes) {
3010   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
3011 }
3012 
3013 // Large page support
3014 static size_t _large_page_size = 0;
3015 
3016 // Insertion sort for small arrays (descending order).
3017 static void insertion_sort_descending(size_t* array, int len) {
3018   for (int i = 0; i < len; i++) {
3019     size_t val = array[i];
3020     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
3021       size_t tmp = array[key];
3022       array[key] = array[key - 1];
3023       array[key - 1] = tmp;
3024     }
3025   }
3026 }
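// Illustrative example (hypothetical input): given an array { 8K, 4M, 64K, 512K },
// insertion_sort_descending() reorders it in place to { 4M, 512K, 64K, 8K }.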
3027 
3028 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
3029   const unsigned int usable_count = VM_Version::page_size_count();
3030   if (usable_count == 1) {
3031     return false;
3032   }
3033 
3034   // Find the right getpagesizes interface.  When Solaris 11 is the minimum
3035   // build platform, getpagesizes() (without the '2') can be called directly.
3036   typedef int (*gps_t)(size_t[], int);
3037   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3038   if (gps_func == NULL) {
3039     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3040     if (gps_func == NULL) {
3041       if (warn) {
3042         warning("MPSS is not supported by the operating system.");
3043       }
3044       return false;
3045     }
3046   }
3047 
3048   // Fill the array of page sizes.
3049   int n = (*gps_func)(_page_sizes, page_sizes_max);
3050   assert(n > 0, "Solaris bug?");
3051 
3052   if (n == page_sizes_max) {
3053     // Add a sentinel value (necessary only if the array was completely filled
3054     // since it is static (zeroed at initialization)).
3055     _page_sizes[--n] = 0;
3056     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3057   }
3058   assert(_page_sizes[n] == 0, "missing sentinel");
3059   trace_page_sizes("available page sizes", _page_sizes, n);
3060 
3061   if (n == 1) return false;     // Only one page size available.
3062 
3063   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3064   // select up to usable_count elements.  First sort the array, find the first
3065   // acceptable value, then copy the usable sizes to the top of the array and
3066   // trim the rest.  Make sure to include the default page size :-).
3067   //
3068   // A better policy could get rid of the 4M limit by taking the sizes of the
3069   // important VM memory regions (java heap and possibly the code cache) into
3070   // account.
3071   insertion_sort_descending(_page_sizes, n);
3072   const size_t size_limit =
3073     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3074   int beg;
3075   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */;
3076   const int end = MIN2((int)usable_count, n) - 1;
3077   for (int cur = 0; cur < end; ++cur, ++beg) {
3078     _page_sizes[cur] = _page_sizes[beg];
3079   }
3080   _page_sizes[end] = vm_page_size();
3081   _page_sizes[end + 1] = 0;
3082 
3083   if (_page_sizes[end] > _page_sizes[end - 1]) {
3084     // Default page size is not the smallest; sort again.
3085     insertion_sort_descending(_page_sizes, end + 1);
3086   }
3087   *page_size = _page_sizes[0];
3088 
3089   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3090   return true;
3091 }
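// Illustrative walk-through of the filtering above, with hypothetical values: if
// getpagesizes() returns { 8K, 64K, 512K, 4M, 32M, 256M } (n = 6), usable_count is 4,
// and LargePageSizeInBytes is left at its default, then after the descending sort the
// sizes above the 4M limit (256M, 32M) are skipped, { 4M, 512K, 64K } are copied to
// the front of _page_sizes, vm_page_size() (8K here) plus a 0 sentinel are appended,
// and *page_size is set to 4M.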
3092 
3093 void os::large_page_init() {
3094   if (UseLargePages) {
3095     // print a warning if any large page related flag is specified on command line
3096     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
3097                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3098 
3099     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3100   }
3101 }
3102 
3103 bool os::Solaris::is_valid_page_size(size_t bytes) {
3104   for (int i = 0; _page_sizes[i] != 0; i++) {
3105     if (_page_sizes[i] == bytes) {
3106       return true;
3107     }
3108   }
3109   return false;
3110 }
3111 
3112 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3113   assert(is_valid_page_size(align), err_msg(SIZE_FORMAT " is not a valid page size", align));
3114   assert(is_ptr_aligned((void*) start, align),
3115          err_msg(PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align));
3116   assert(is_size_aligned(bytes, align),
3117          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align));
3118 
3119   // Signal to the OS that we want large pages for addresses
3120   // in the range [start, start + bytes)
3121   struct memcntl_mha mpss_struct;
3122   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3123   mpss_struct.mha_pagesize = align;
3124   mpss_struct.mha_flags = 0;
3125   // Upon successful completion, memcntl() returns 0
3126   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3127     debug_only(warning("Attempt to use MPSS failed."));
3128     return false;
3129   }
3130   return true;
3131 }
3132 
3133 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3134   fatal("os::reserve_memory_special should not be called on Solaris.");
3135   return NULL;
3136 }
3137 
3138 bool os::release_memory_special(char* base, size_t bytes) {
3139   fatal("os::release_memory_special should not be called on Solaris.");
3140   return false;
3141 }
3142 
3143 size_t os::large_page_size() {
3144   return _large_page_size;
3145 }
3146 
3147 // MPSS allows application to commit large page memory on demand; with ISM
3148 // the entire memory region must be allocated as shared memory.
3149 bool os::can_commit_large_page_memory() {
3150   return true;
3151 }
3152 
3153 bool os::can_execute_large_page_memory() {
3154   return true;
3155 }
3156 
3157 // Read calls from inside the vm need to perform state transitions
3158 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3159   size_t res;
3160   JavaThread* thread = (JavaThread*)Thread::current();
3161   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
3162   ThreadBlockInVM tbiv(thread);
3163   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3164   return res;
3165 }
3166 
3167 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3168   size_t res;
3169   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
3170          "Assumed _thread_in_native");
3171   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3172   return res;
3173 }
3174 
3175 void os::naked_short_sleep(jlong ms) {
3176   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3177 
3178   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3179   // Solaris requires -lrt for this.
3180   usleep((ms * 1000));
3181 
3182   return;
3183 }
3184 
3185 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3186 void os::infinite_sleep() {
3187   while (true) {    // sleep forever ...
3188     ::sleep(100);   // ... 100 seconds at a time
3189   }
3190 }
3191 
3192 // Used to convert frequent JVM_Yield() to nops
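// DontYieldALotInterval is in milliseconds while getTimeNanos() is in nanoseconds,
// hence the * 1000000 conversion in the comparison below.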
3193 bool os::dont_yield() {
3194   if (DontYieldALot) {
3195     static hrtime_t last_time = 0;
3196     hrtime_t diff = getTimeNanos() - last_time;
3197 
3198     if (diff < DontYieldALotInterval * 1000000) {
3199       return true;
3200     }
3201 
3202     last_time += diff;
3203 
3204     return false;
3205   } else {
3206     return false;
3207   }
3208 }
3209 
3210 // Note that yield semantics are defined by the scheduling class to which
3211 // the thread currently belongs.  Typically, yield will _not_ yield to
3212 // other equal or higher priority threads that reside on the dispatch queues
3213 // of other CPUs.
3214 
3215 void os::naked_yield() {
3216   thr_yield();
3217 }
3218 
3219 // Interface for setting lwp priorities.  If we are using T2 libthread,
3220 // which forces the use of BoundThreads or we manually set UseBoundThreads,
3221 // all of our threads will be assigned to real lwp's.  Using the thr_setprio
3222 // function is meaningless in this mode, so we must adjust the real lwp's priority.
3223 // The routines below implement the getting and setting of lwp priorities.
3224 //
3225 // Note: T2 is now the only supported libthread. The UseBoundThreads flag is
3226 //       being deprecated and all threads are now bound threads.
3227 //
3228 // Note: There are three priority scales used on Solaris: Java priorities,
3229 //       which range from 1 to 10; the libthread "thr_setprio" scale, which ranges
3230 //       from 0 to 127; and the current scheduling class of the process we
3231 //       are running in, which typically ranges from -60 to +60.
3232 //       The setting of the lwp priorities is done after a call to thr_setprio,
3233 //       so Java priorities are mapped to libthread priorities and we map from
3234 //       the latter to lwp priorities.  We don't keep priorities stored as
3235 //       Java priorities since some of our worker threads want to set priorities
3236 //       higher than any Java thread.
3237 //
3238 // For related information:
3239 // (1)  man -s 2 priocntl
3240 // (2)  man -s 4 priocntl
3241 // (3)  man dispadmin
3242 // =    librt.so
3243 // =    libthread/common/rtsched.c - thrp_setlwpprio().
3244 // =    ps -cL <pid> ... to validate priority.
3245 // =    sched_get_priority_min and _max
3246 //              pthread_create
3247 //              sched_setparam
3248 //              pthread_setschedparam
3249 //
3250 // Assumptions:
3251 // +    We assume that all threads in the process belong to the same
3252 //              scheduling class.  I.e., a homogeneous process.
3253 // +    Must be root or in the IA group to change the "interactive" attribute.
3254 //              priocntl() will fail silently.  The only indication of failure is when
3255 //              we read back the value and notice that it hasn't changed.
3256 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3257 // +    For RT, change timeslice as well.  Invariant:
3258 //              constant "priority integral"
3259 //              Konst == TimeSlice * (60-Priority)
3260 //              Given a priority, compute appropriate timeslice.
3261 // +    Higher numerical values have higher priority.
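//
// Illustrative end-to-end mapping (using the java_to_os_priority table below and a
// TS class range of [-60..60] as the example): Java MinPriority (1) -> thr_setprio 0
// -> scale_to_lwp_priority(-60, 60, 0) == -60; Java NormPriority (5) -> 127 -> the
// x == 127 shortcut in scale_to_lwp_priority returns the class maximum, +60.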
3262 
3263 // sched class attributes
3264 typedef struct {
3265   int   schedPolicy;              // classID
3266   int   maxPrio;
3267   int   minPrio;
3268 } SchedInfo;
3269 
3270 
3271 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3272 
3273 #ifdef ASSERT
3274 static int  ReadBackValidate = 1;
3275 #endif
3276 static int  myClass     = 0;
3277 static int  myMin       = 0;
3278 static int  myMax       = 0;
3279 static int  myCur       = 0;
3280 static bool priocntl_enable = false;
3281 
3282 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
3283 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3284 
3285 
3286 // lwp_priocntl_init
3287 //
3288 // Try to determine the priority scale for our process.
3289 //
3290 // Return errno or 0 if OK.
3291 //
3292 static int lwp_priocntl_init() {
3293   int rslt;
3294   pcinfo_t ClassInfo;
3295   pcparms_t ParmInfo;
3296   int i;
3297 
3298   if (!UseThreadPriorities) return 0;
3299 
3300   // If ThreadPriorityPolicy is 1, switch tables
3301   if (ThreadPriorityPolicy == 1) {
3302     for (i = 0; i < CriticalPriority+1; i++)
3303       os::java_to_os_priority[i] = prio_policy1[i];
3304   }
3305   if (UseCriticalJavaThreadPriority) {
3306     // MaxPriority always maps to the FX scheduling class and criticalPrio.
3307     // See set_native_priority() and set_lwp_class_and_priority().
3308     // Save original MaxPriority mapping in case attempt to
3309     // use critical priority fails.
3310     java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3311     // Set negative to distinguish from other priorities
3312     os::java_to_os_priority[MaxPriority] = -criticalPrio;
3313   }
3314 
3315   // Get IDs for a set of well-known scheduling classes.
3316   // TODO-FIXME: GETCLINFO returns the current # of classes in
3317   // the system.  We should have a loop that iterates over the
3318   // classID values, which are known to be "small" integers.
3319 
3320   strcpy(ClassInfo.pc_clname, "TS");
3321   ClassInfo.pc_cid = -1;
3322   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3323   if (rslt < 0) return errno;
3324   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3325   tsLimits.schedPolicy = ClassInfo.pc_cid;
3326   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3327   tsLimits.minPrio = -tsLimits.maxPrio;
3328 
3329   strcpy(ClassInfo.pc_clname, "IA");
3330   ClassInfo.pc_cid = -1;
3331   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3332   if (rslt < 0) return errno;
3333   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3334   iaLimits.schedPolicy = ClassInfo.pc_cid;
3335   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3336   iaLimits.minPrio = -iaLimits.maxPrio;
3337 
3338   strcpy(ClassInfo.pc_clname, "RT");
3339   ClassInfo.pc_cid = -1;
3340   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3341   if (rslt < 0) return errno;
3342   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3343   rtLimits.schedPolicy = ClassInfo.pc_cid;
3344   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3345   rtLimits.minPrio = 0;
3346 
3347   strcpy(ClassInfo.pc_clname, "FX");
3348   ClassInfo.pc_cid = -1;
3349   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3350   if (rslt < 0) return errno;
3351   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3352   fxLimits.schedPolicy = ClassInfo.pc_cid;
3353   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3354   fxLimits.minPrio = 0;
3355 
3356   // Query our "current" scheduling class.
3357   // This will normally be IA, TS or, rarely, FX or RT.
3358   memset(&ParmInfo, 0, sizeof(ParmInfo));
3359   ParmInfo.pc_cid = PC_CLNULL;
3360   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3361   if (rslt < 0) return errno;
3362   myClass = ParmInfo.pc_cid;
3363 
3364   // We now know our scheduling classId, get specific information
3365   // about the class.
3366   ClassInfo.pc_cid = myClass;
3367   ClassInfo.pc_clname[0] = 0;
3368   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3369   if (rslt < 0) return errno;
3370 
3371   if (ThreadPriorityVerbose) {
3372     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3373   }
3374 
3375   memset(&ParmInfo, 0, sizeof(pcparms_t));
3376   ParmInfo.pc_cid = PC_CLNULL;
3377   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3378   if (rslt < 0) return errno;
3379 
3380   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3381     myMin = rtLimits.minPrio;
3382     myMax = rtLimits.maxPrio;
3383   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3384     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3385     myMin = iaLimits.minPrio;
3386     myMax = iaLimits.maxPrio;
3387     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3388   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3389     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3390     myMin = tsLimits.minPrio;
3391     myMax = tsLimits.maxPrio;
3392     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3393   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3394     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3395     myMin = fxLimits.minPrio;
3396     myMax = fxLimits.maxPrio;
3397     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3398   } else {
3399     // No clue - punt
3400     if (ThreadPriorityVerbose) {
3401       tty->print_cr("Unknown scheduling class: %s ... \n",
3402                     ClassInfo.pc_clname);
3403     }
3404     return EINVAL;      // no clue, punt
3405   }
3406 
3407   if (ThreadPriorityVerbose) {
3408     tty->print_cr("Thread priority Range: [%d..%d]\n", myMin, myMax);
3409   }
3410 
3411   priocntl_enable = true;  // Enable changing priorities
3412   return 0;
3413 }
3414 
3415 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3416 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3417 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3418 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3419 
3420 
3421 // scale_to_lwp_priority
3422 //
3423 // Convert from the libthread "thr_setprio" scale to our current
3424 // lwp scheduling class scale.
3425 //
3426 static int scale_to_lwp_priority(int rMin, int rMax, int x) {
3427   int v;
3428 
3429   if (x == 127) return rMax;            // avoid round-down
3430   v = (((x*(rMax-rMin)))/128)+rMin;
3431   return v;
3432 }
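// For example, with rMin = -60 and rMax = 60 (a typical TS range): x = 64 yields
// v = (64 * 120) / 128 - 60 = 0, while x = 127 takes the shortcut above and returns
// rMax (60) instead of the rounded-down value 59.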
3433 
3434 
3435 // set_lwp_class_and_priority
3436 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3437                                int newPrio, int new_class, bool scale) {
3438   int rslt;
3439   int Actual, Expected, prv;
3440   pcparms_t ParmInfo;                   // for GET-SET
3441 #ifdef ASSERT
3442   pcparms_t ReadBack;                   // for readback
3443 #endif
3444 
3445   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3446   // Query current values.
3447   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3448   // Cache "pcparms_t" in global ParmCache.
3449   // TODO: elide set-to-same-value
3450 
3451   // If something went wrong on init, don't change priorities.
3452   if (!priocntl_enable) {
3453     if (ThreadPriorityVerbose) {
3454       tty->print_cr("Trying to set priority but init failed, ignoring");
3455     }
3456     return EINVAL;
3457   }
3458 
3459   // If the lwp hasn't started yet, just return;
3460   // the _start routine will call us again.
3461   if (lwpid <= 0) {
3462     if (ThreadPriorityVerbose) {
3463       tty->print_cr("deferring the set_lwp_class_and_priority of thread "
3464                     INTPTR_FORMAT " to %d, lwpid not set",
3465                     ThreadID, newPrio);
3466     }
3467     return 0;
3468   }
3469 
3470   if (ThreadPriorityVerbose) {
3471     tty->print_cr ("set_lwp_class_and_priority("
3472                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3473                    ThreadID, lwpid, newPrio);
3474   }
3475 
3476   memset(&ParmInfo, 0, sizeof(pcparms_t));
3477   ParmInfo.pc_cid = PC_CLNULL;
3478   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3479   if (rslt < 0) return errno;
3480 
3481   int cur_class = ParmInfo.pc_cid;
3482   ParmInfo.pc_cid = (id_t)new_class;
3483 
3484   if (new_class == rtLimits.schedPolicy) {
3485     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3486     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3487                                                        rtLimits.maxPrio, newPrio)
3488                                : newPrio;
3489     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3490     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3491     if (ThreadPriorityVerbose) {
3492       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3493     }
3494   } else if (new_class == iaLimits.schedPolicy) {
3495     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3496     int maxClamped     = MIN2(iaLimits.maxPrio,
3497                               cur_class == new_class
3498                               ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3499     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3500                                                        maxClamped, newPrio)
3501                                : newPrio;
3502     iaInfo->ia_uprilim = cur_class == new_class
3503                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3504     iaInfo->ia_mode    = IA_NOCHANGE;
3505     if (ThreadPriorityVerbose) {
3506       tty->print_cr("IA: [%d...%d] %d->%d\n",
3507                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3508     }
3509   } else if (new_class == tsLimits.schedPolicy) {
3510     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3511     int maxClamped     = MIN2(tsLimits.maxPrio,
3512                               cur_class == new_class
3513                               ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3514     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3515                                                        maxClamped, newPrio)
3516                                : newPrio;
3517     tsInfo->ts_uprilim = cur_class == new_class
3518                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3519     if (ThreadPriorityVerbose) {
3520       tty->print_cr("TS: [%d...%d] %d->%d\n",
3521                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3522     }
3523   } else if (new_class == fxLimits.schedPolicy) {
3524     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3525     int maxClamped     = MIN2(fxLimits.maxPrio,
3526                               cur_class == new_class
3527                               ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3528     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3529                                                        maxClamped, newPrio)
3530                                : newPrio;
3531     fxInfo->fx_uprilim = cur_class == new_class
3532                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3533     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3534     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3535     if (ThreadPriorityVerbose) {
3536       tty->print_cr("FX: [%d...%d] %d->%d\n",
3537                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3538     }
3539   } else {
3540     if (ThreadPriorityVerbose) {
3541       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3542     }
3543     return EINVAL;    // no clue, punt
3544   }
3545 
3546   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3547   if (ThreadPriorityVerbose && rslt) {
3548     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3549   }
3550   if (rslt < 0) return errno;
3551 
3552 #ifdef ASSERT
3553   // Sanity check: read back what we just attempted to set.
3554   // In theory it could have changed in the interim ...
3555   //
3556   // The priocntl system call is tricky.
3557   // Sometimes it'll validate the priority value argument and
3558   // return EINVAL if unhappy.  At other times it fails silently.
3559   // Readbacks are prudent.
3560 
3561   if (!ReadBackValidate) return 0;
3562 
3563   memset(&ReadBack, 0, sizeof(pcparms_t));
3564   ReadBack.pc_cid = PC_CLNULL;
3565   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3566   assert(rslt >= 0, "priocntl failed");
3567   Actual = Expected = 0xBAD;
3568   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3569   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3570     Actual   = RTPRI(ReadBack)->rt_pri;
3571     Expected = RTPRI(ParmInfo)->rt_pri;
3572   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3573     Actual   = IAPRI(ReadBack)->ia_upri;
3574     Expected = IAPRI(ParmInfo)->ia_upri;
3575   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3576     Actual   = TSPRI(ReadBack)->ts_upri;
3577     Expected = TSPRI(ParmInfo)->ts_upri;
3578   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3579     Actual   = FXPRI(ReadBack)->fx_upri;
3580     Expected = FXPRI(ParmInfo)->fx_upri;
3581   } else {
3582     if (ThreadPriorityVerbose) {
3583       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3584                     ParmInfo.pc_cid);
3585     }
3586   }
3587 
3588   if (Actual != Expected) {
3589     if (ThreadPriorityVerbose) {
3590       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3591                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3592     }
3593   }
3594 #endif
3595 
3596   return 0;
3597 }
3598 
3599 // Solaris only gives access to 128 real priorities at a time,
3600 // so we expand Java's ten to fill this range.  This would be better
3601 // if we dynamically adjusted relative priorities.
3602 //
3603 // The ThreadPriorityPolicy option allows us to select 2 different
3604 // priority scales.
3605 //
3606 // ThreadPriorityPolicy=0
3607 // Since Solaris' default priority is MaximumPriority, we do not
3608 // set a priority lower than Max unless a priority lower than
3609 // NormPriority is requested.
3610 //
3611 // ThreadPriorityPolicy=1
3612 // This mode causes the priority table to get filled with
3613 // linear values.  NormPriority gets mapped to 50% of the
3614 // Maximum priority and so on.  This will cause VM threads
3615 // to get unfair treatment against other Solaris processes
3616 // which do not explicitly alter their thread priorities.
3617 
3618 int os::java_to_os_priority[CriticalPriority + 1] = {
3619   -99999,         // 0 Entry should never be used
3620 
3621   0,              // 1 MinPriority
3622   32,             // 2
3623   64,             // 3
3624 
3625   96,             // 4
3626   127,            // 5 NormPriority
3627   127,            // 6
3628 
3629   127,            // 7
3630   127,            // 8
3631   127,            // 9 NearMaxPriority
3632 
3633   127,            // 10 MaxPriority
3634 
3635   -criticalPrio   // 11 CriticalPriority
3636 };
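// Note: lwp_priocntl_init() may rewrite this table at startup: with
// ThreadPriorityPolicy == 1 it is replaced by prio_policy1[], and with
// UseCriticalJavaThreadPriority the MaxPriority entry is overwritten with
// -criticalPrio (mirroring the CriticalPriority slot above).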
3637 
3638 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3639   OSThread* osthread = thread->osthread();
3640 
3641   // Save requested priority in case the thread hasn't been started
3642   osthread->set_native_priority(newpri);
3643 
3644   // Check for critical priority request
3645   bool fxcritical = false;
3646   if (newpri == -criticalPrio) {
3647     fxcritical = true;
3648     newpri = criticalPrio;
3649   }
3650 
3651   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3652   if (!UseThreadPriorities) return OS_OK;
3653 
3654   int status = 0;
3655 
3656   if (!fxcritical) {
3657     // Use thr_setprio only if we have a priority that thr_setprio understands
3658     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3659   }
3660 
3661   int lwp_status =
3662           set_lwp_class_and_priority(osthread->thread_id(),
3663                                      osthread->lwp_id(),
3664                                      newpri,
3665                                      fxcritical ? fxLimits.schedPolicy : myClass,
3666                                      !fxcritical);
3667   if (lwp_status != 0 && fxcritical) {
3668     // Try again, this time without changing the scheduling class
3669     newpri = java_MaxPriority_to_os_priority;
3670     lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3671                                             osthread->lwp_id(),
3672                                             newpri, myClass, false);
3673   }
3674   status |= lwp_status;
3675   return (status == 0) ? OS_OK : OS_ERR;
3676 }
3677 
3678 
3679 OSReturn os::get_native_priority(const Thread* const thread,
3680                                  int *priority_ptr) {
3681   int p;
3682   if (!UseThreadPriorities) {
3683     *priority_ptr = NormalPriority;
3684     return OS_OK;
3685   }
3686   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3687   if (status != 0) {
3688     return OS_ERR;
3689   }
3690   *priority_ptr = p;
3691   return OS_OK;
3692 }
3693 
3694 
3695 // Hint to the underlying OS that a task switch would not be good.
3696 // Void return because it's a hint and can fail.
3697 void os::hint_no_preempt() {
3698   schedctl_start(schedctl_init());
3699 }
3700 
3701 static void resume_clear_context(OSThread *osthread) {
3702   osthread->set_ucontext(NULL);
3703 }
3704 
3705 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3706   osthread->set_ucontext(context);
3707 }
3708 
3709 static Semaphore sr_semaphore;
3710 
3711 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
3712   // Save and restore errno to avoid confusing native code with EINTR
3713   // after sigsuspend.
3714   int old_errno = errno;
3715 
3716   OSThread* osthread = thread->osthread();
3717   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3718 
3719   os::SuspendResume::State current = osthread->sr.state();
3720   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3721     suspend_save_context(osthread, uc);
3722 
3723     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3724     os::SuspendResume::State state = osthread->sr.suspended();
3725     if (state == os::SuspendResume::SR_SUSPENDED) {
3726       sigset_t suspend_set;  // signals for sigsuspend()
3727 
3728       // get current set of blocked signals and unblock resume signal
3729       thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
3730       sigdelset(&suspend_set, os::Solaris::SIGasync());
3731 
3732       sr_semaphore.signal();
3733       // wait here until we are resumed
3734       while (1) {
3735         sigsuspend(&suspend_set);
3736 
3737         os::SuspendResume::State result = osthread->sr.running();
3738         if (result == os::SuspendResume::SR_RUNNING) {
3739           sr_semaphore.signal();
3740           break;
3741         }
3742       }
3743 
3744     } else if (state == os::SuspendResume::SR_RUNNING) {
3745       // request was cancelled, continue
3746     } else {
3747       ShouldNotReachHere();
3748     }
3749 
3750     resume_clear_context(osthread);
3751   } else if (current == os::SuspendResume::SR_RUNNING) {
3752     // request was cancelled, continue
3753   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3754     // ignore
3755   } else {
3756     // ignore
3757   }
3758 
3759   errno = old_errno;
3760 }
3761 
3762 void os::print_statistics() {
3763 }
3764 
3765 int os::message_box(const char* title, const char* message) {
3766   int i;
3767   fdStream err(defaultStream::error_fd());
3768   for (i = 0; i < 78; i++) err.print_raw("=");
3769   err.cr();
3770   err.print_raw_cr(title);
3771   for (i = 0; i < 78; i++) err.print_raw("-");
3772   err.cr();
3773   err.print_raw_cr(message);
3774   for (i = 0; i < 78; i++) err.print_raw("=");
3775   err.cr();
3776 
3777   char buf[16];
3778   // Prevent process from exiting upon "read error" without consuming all CPU
3779   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3780 
3781   return buf[0] == 'y' || buf[0] == 'Y';
3782 }
3783 
3784 static int sr_notify(OSThread* osthread) {
3785   int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
3786   assert_status(status == 0, status, "thr_kill");
3787   return status;
3788 }
3789 
3790 // "Randomly" selected value for how long we want to spin
3791 // before bailing out on suspending a thread, also how often
3792 // we send a signal to a thread we want to resume
3793 static const int RANDOMLY_LARGE_INTEGER = 1000000;
3794 static const int RANDOMLY_LARGE_INTEGER2 = 100;
3795 
3796 static bool do_suspend(OSThread* osthread) {
3797   assert(osthread->sr.is_running(), "thread should be running");
3798   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
3799 
3800   // mark as suspended and send signal
3801   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3802     // failed to switch, state wasn't running?
3803     ShouldNotReachHere();
3804     return false;
3805   }
3806 
3807   if (sr_notify(osthread) != 0) {
3808     ShouldNotReachHere();
3809   }
3810 
3811   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3812   while (true) {
3813     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
3814       break;
3815     } else {
3816       // timeout
3817       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3818       if (cancelled == os::SuspendResume::SR_RUNNING) {
3819         return false;
3820       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3821         // make sure that we consume the signal on the semaphore as well
3822         sr_semaphore.wait();
3823         break;
3824       } else {
3825         ShouldNotReachHere();
3826         return false;
3827       }
3828     }
3829   }
3830 
3831   guarantee(osthread->sr.is_suspended(), "Must be suspended");
3832   return true;
3833 }
3834 
3835 static void do_resume(OSThread* osthread) {
3836   assert(osthread->sr.is_suspended(), "thread should be suspended");
3837   assert(!sr_semaphore.trywait(), "invalid semaphore state");
3838 
3839   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3840     // failed to switch to WAKEUP_REQUEST
3841     ShouldNotReachHere();
3842     return;
3843   }
3844 
3845   while (true) {
3846     if (sr_notify(osthread) == 0) {
3847       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
3848         if (osthread->sr.is_running()) {
3849           return;
3850         }
3851       }
3852     } else {
3853       ShouldNotReachHere();
3854     }
3855   }
3856 
3857   guarantee(osthread->sr.is_running(), "Must be running!");
3858 }
3859 
3860 void os::SuspendedThreadTask::internal_do_task() {
3861   if (do_suspend(_thread->osthread())) {
3862     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3863     do_task(context);
3864     do_resume(_thread->osthread());
3865   }
3866 }
3867 
3868 class PcFetcher : public os::SuspendedThreadTask {
3869  public:
3870   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3871   ExtendedPC result();
3872  protected:
3873   void do_task(const os::SuspendedThreadTaskContext& context);
3874  private:
3875   ExtendedPC _epc;
3876 };
3877 
3878 ExtendedPC PcFetcher::result() {
3879   guarantee(is_done(), "task is not done yet.");
3880   return _epc;
3881 }
3882 
3883 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3884   Thread* thread = context.thread();
3885   OSThread* osthread = thread->osthread();
3886   if (osthread->ucontext() != NULL) {
3887     _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
3888   } else {
3889     // NULL context is unexpected, double-check this is the VMThread
3890     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3891   }
3892 }
3893 
3894 // A lightweight implementation that does not suspend the target thread and
3895 // thus returns only a hint. Used for profiling only!
3896 ExtendedPC os::get_thread_pc(Thread* thread) {
3897   // Make sure that it is called by the watcher and the Threads lock is owned.
3898   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
3899   // For now, is only used to profile the VM Thread
3900   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3901   PcFetcher fetcher(thread);
3902   fetcher.run();
3903   return fetcher.result();
3904 }
3905 
3906 
3907 // This does not do anything on Solaris. This is basically a hook for being
3908 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
3909 void os::os_exception_wrapper(java_call_t f, JavaValue* value,
3910                               methodHandle* method, JavaCallArguments* args,
3911                               Thread* thread) {
3912   f(value, method, args, thread);
3913 }
3914 
3915 // This routine may be used by user applications as a "hook" to catch signals.
3916 // The user-defined signal handler must pass unrecognized signals to this
3917 // routine, and if it returns true (non-zero), then the signal handler must
3918 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
3919 // routine will never return false (zero), but instead will execute a VM panic
3920 // routine to kill the process.
3921 //
3922 // If this routine returns false, it is OK to call it again.  This allows
3923 // the user-defined signal handler to perform checks either before or after
3924 // the VM performs its own checks.  Naturally, the user code would be making
3925 // a serious error if it tried to handle an exception (such as a null check
3926 // or breakpoint) that the VM was generating for its own correct operation.
3927 //
3928 // This routine may recognize any of the following kinds of signals:
3929 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
3930 // os::Solaris::SIGasync
3931 // It should be consulted by handlers for any of those signals.
3932 // It explicitly does not recognize os::Solaris::SIGinterrupt
3933 //
3934 // The caller of this routine must pass in the three arguments supplied
3935 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
3936 // field of the structure passed to sigaction().  This routine assumes that
3937 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3938 //
3939 // Note that the VM will print warnings if it detects conflicting signal
3940 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3941 //
3942 extern "C" JNIEXPORT int JVM_handle_solaris_signal(int signo,
3943                                                    siginfo_t* siginfo,
3944                                                    void* ucontext,
3945                                                    int abort_if_unrecognized);
3946 
3947 
3948 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
3949   int orig_errno = errno;  // Preserve errno value over signal handler.
3950   JVM_handle_solaris_signal(sig, info, ucVoid, true);
3951   errno = orig_errno;
3952 }
3953 
3954 // Do not delete - if the guarantee is ever removed, a signal handler (even an empty one)
3955 // is needed to provoke threads blocked on IO to return with EINTR.
3956 // Note: this explicitly does NOT call JVM_handle_solaris_signal and
3957 // does NOT participate in signal chaining due to requirement for
3958 // NOT setting SA_RESTART to make EINTR work.
3959 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
3960   if (UseSignalChaining) {
3961     struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
3962     if (actp && actp->sa_handler) {
3963       vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
3964     }
3965   }
3966 }
3967 
3968 // This boolean allows users to forward their own non-matching signals
3969 // to JVM_handle_solaris_signal, harmlessly.
3970 bool os::Solaris::signal_handlers_are_installed = false;
3971 
3972 // For signal-chaining
3973 bool os::Solaris::libjsig_is_loaded = false;
3974 typedef struct sigaction *(*get_signal_t)(int);
3975 get_signal_t os::Solaris::get_signal_action = NULL;
3976 
3977 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
3978   struct sigaction *actp = NULL;
3979 
3980   if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
3981     // Retrieve the old signal handler from libjsig
3982     actp = (*get_signal_action)(sig);
3983   }
3984   if (actp == NULL) {
3985     // Retrieve the preinstalled signal handler from jvm
3986     actp = get_preinstalled_handler(sig);
3987   }
3988 
3989   return actp;
3990 }
3991 
3992 static bool call_chained_handler(struct sigaction *actp, int sig,
3993                                  siginfo_t *siginfo, void *context) {
3994   // Call the old signal handler
3995   if (actp->sa_handler == SIG_DFL) {
3996     // It's more reasonable to let jvm treat it as an unexpected exception
3997     // instead of taking the default action.
3998     return false;
3999   } else if (actp->sa_handler != SIG_IGN) {
4000     if ((actp->sa_flags & SA_NODEFER) == 0) {
4001       // automatically block the signal
4002       sigaddset(&(actp->sa_mask), sig);
4003     }
4004 
4005     sa_handler_t hand;
4006     sa_sigaction_t sa;
4007     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
4008     // retrieve the chained handler
4009     if (siginfo_flag_set) {
4010       sa = actp->sa_sigaction;
4011     } else {
4012       hand = actp->sa_handler;
4013     }
4014 
4015     if ((actp->sa_flags & SA_RESETHAND) != 0) {
4016       actp->sa_handler = SIG_DFL;
4017     }
4018 
4019     // try to honor the signal mask
4020     sigset_t oset;
4021     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
4022 
4023     // call into the chained handler
4024     if (siginfo_flag_set) {
4025       (*sa)(sig, siginfo, context);
4026     } else {
4027       (*hand)(sig);
4028     }
4029 
4030     // restore the signal mask
4031     thr_sigsetmask(SIG_SETMASK, &oset, 0);
4032   }
4033   // Tell jvm's signal handler the signal is taken care of.
4034   return true;
4035 }
4036 
4037 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4038   bool chained = false;
4039   // signal-chaining
4040   if (UseSignalChaining) {
4041     struct sigaction *actp = get_chained_signal_action(sig);
4042     if (actp != NULL) {
4043       chained = call_chained_handler(actp, sig, siginfo, context);
4044     }
4045   }
4046   return chained;
4047 }
4048 
4049 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4050   assert((chainedsigactions != (struct sigaction *)NULL) &&
4051          (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
4052   if (preinstalled_sigs[sig] != 0) {
4053     return &chainedsigactions[sig];
4054   }
4055   return NULL;
4056 }
4057 
4058 void os::Solaris::save_preinstalled_handler(int sig,
4059                                             struct sigaction& oldAct) {
4060   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4061   assert((chainedsigactions != (struct sigaction *)NULL) &&
4062          (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
4063   chainedsigactions[sig] = oldAct;
4064   preinstalled_sigs[sig] = 1;
4065 }
4066 
4067 void os::Solaris::set_signal_handler(int sig, bool set_installed,
4068                                      bool oktochain) {
4069   // Check for overwrite.
4070   struct sigaction oldAct;
4071   sigaction(sig, (struct sigaction*)NULL, &oldAct);
4072   void* oldhand =
4073       oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
4074                           : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
4075   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4076       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4077       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4078     if (AllowUserSignalHandlers || !set_installed) {
4079       // Do not overwrite; user takes responsibility to forward to us.
4080       return;
4081     } else if (UseSignalChaining) {
4082       if (oktochain) {
4083         // save the old handler in jvm
4084         save_preinstalled_handler(sig, oldAct);
4085       } else {
4086         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4087       }
4088       // libjsig also interposes the sigaction() call below and saves the
4089       // old sigaction on its own.
4090     } else {
4091       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4092                     "%#lx for signal %d.", (long)oldhand, sig));
4093     }
4094   }
4095 
4096   struct sigaction sigAct;
4097   sigfillset(&(sigAct.sa_mask));
4098   sigAct.sa_handler = SIG_DFL;
4099 
4100   sigAct.sa_sigaction = signalHandler;
4101   // Handle SIGSEGV on alternate signal stack if
4102   // not using stack banging
4103   if (!UseStackBanging && sig == SIGSEGV) {
4104     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4105   } else if (sig == os::Solaris::SIGinterrupt()) {
4106     // Interruptible i/o requires SA_RESTART cleared so EINTR
4107     // is returned instead of restarting system calls
4108     sigemptyset(&sigAct.sa_mask);
4109     sigAct.sa_handler = NULL;
4110     sigAct.sa_flags = SA_SIGINFO;
4111     sigAct.sa_sigaction = sigINTRHandler;
4112   } else {
4113     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4114   }
4115   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4116 
4117   sigaction(sig, &sigAct, &oldAct);
4118 
4119   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4120                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4121   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4122 }
4123 
4124 
4125 #define DO_SIGNAL_CHECK(sig)                      \
4126   do {                                            \
4127     if (!sigismember(&check_signal_done, sig)) {  \
4128       os::Solaris::check_signal_handler(sig);     \
4129     }                                             \
4130   } while (0)
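// check_signal_done memoizes signals whose handler or flags mismatch has already been
// reported: check_signal_handler() adds the signal to the set once a warning is
// printed, so DO_SIGNAL_CHECK skips it on subsequent periodic passes.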
4131 
4132 // This method is a periodic task to check for misbehaving JNI applications
4133 // under CheckJNI; we can add any periodic checks here.
4134 
4135 void os::run_periodic_checks() {
4136   // A big source of grief is hijacking virt. addr 0x0 on Solaris,
4137   // thereby preventing NULL checks.
4138   if (!check_addr0_done) check_addr0_done = check_addr0(tty);
4139 
4140   if (check_signals == false) return;
4141 
4142   // SEGV and BUS, if overridden, could potentially prevent
4143   // generation of hs*.log in the event of a crash; debugging
4144   // such a case can be very challenging, so for good measure
4145   // we absolutely check the following:
4146   DO_SIGNAL_CHECK(SIGSEGV);
4147   DO_SIGNAL_CHECK(SIGILL);
4148   DO_SIGNAL_CHECK(SIGFPE);
4149   DO_SIGNAL_CHECK(SIGBUS);
4150   DO_SIGNAL_CHECK(SIGPIPE);
4151   DO_SIGNAL_CHECK(SIGXFSZ);
4152 
4153   // ReduceSignalUsage allows the user to override these handlers
4154   // see comments at the very top and jvm_solaris.h
4155   if (!ReduceSignalUsage) {
4156     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4157     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4158     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4159     DO_SIGNAL_CHECK(BREAK_SIGNAL);
4160   }
4161 
4162   // See comments above for using JVM1/JVM2 and UseAltSigs
4163   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4164   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4165 
4166 }
4167 
4168 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4169 
4170 static os_sigaction_t os_sigaction = NULL;
4171 
4172 void os::Solaris::check_signal_handler(int sig) {
4173   char buf[O_BUFLEN];
4174   address jvmHandler = NULL;
4175 
4176   struct sigaction act;
4177   if (os_sigaction == NULL) {
4178     // only trust the default sigaction, in case it has been interposed
4179     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4180     if (os_sigaction == NULL) return;
4181   }
4182 
4183   os_sigaction(sig, (struct sigaction*)NULL, &act);
4184 
4185   address thisHandler = (act.sa_flags & SA_SIGINFO)
4186     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4187     : CAST_FROM_FN_PTR(address, act.sa_handler);
4188 
4189 
4190   switch (sig) {
4191   case SIGSEGV:
4192   case SIGBUS:
4193   case SIGFPE:
4194   case SIGPIPE:
4195   case SIGXFSZ:
4196   case SIGILL:
4197     jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4198     break;
4199 
4200   case SHUTDOWN1_SIGNAL:
4201   case SHUTDOWN2_SIGNAL:
4202   case SHUTDOWN3_SIGNAL:
4203   case BREAK_SIGNAL:
4204     jvmHandler = (address)user_handler();
4205     break;
4206 
4207   default:
4208     int intrsig = os::Solaris::SIGinterrupt();
4209     int asynsig = os::Solaris::SIGasync();
4210 
4211     if (sig == intrsig) {
4212       jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4213     } else if (sig == asynsig) {
4214       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4215     } else {
4216       return;
4217     }
4218     break;
4219   }
4220 
4221 
4222   if (thisHandler != jvmHandler) {
4223     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4224     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4225     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4226     // No need to check this sig any longer
4227     sigaddset(&check_signal_done, sig);
4228     // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned to SIG_IGN
4229     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
4230       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
4231                     exception_name(sig, buf, O_BUFLEN));
4232     }
4233   } else if (os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4234     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4235     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4236     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
4237     // No need to check this sig any longer
4238     sigaddset(&check_signal_done, sig);
4239   }
4240 
4241   // Print all the signal handler state
4242   if (sigismember(&check_signal_done, sig)) {
4243     print_signal_handlers(tty, buf, O_BUFLEN);
4244   }
4245 
4246 }
4247 
4248 void os::Solaris::install_signal_handlers() {
4249   bool libjsigdone = false;
4250   signal_handlers_are_installed = true;
4251 
4252   // signal-chaining
4253   typedef void (*signal_setting_t)();
4254   signal_setting_t begin_signal_setting = NULL;
4255   signal_setting_t end_signal_setting = NULL;
4256   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4257                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4258   if (begin_signal_setting != NULL) {
4259     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4260                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4261     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4262                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4263     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4264                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4265     libjsig_is_loaded = true;
4266     if (os::Solaris::get_libjsig_version != NULL) {
4267       libjsigversion =  (*os::Solaris::get_libjsig_version)();
4268     }
4269     assert(UseSignalChaining, "should enable signal-chaining");
4270   }
4271   if (libjsig_is_loaded) {
4272     // Tell libjsig jvm is setting signal handlers
4273     (*begin_signal_setting)();
4274   }
4275 
4276   set_signal_handler(SIGSEGV, true, true);
4277   set_signal_handler(SIGPIPE, true, true);
4278   set_signal_handler(SIGXFSZ, true, true);
4279   set_signal_handler(SIGBUS, true, true);
4280   set_signal_handler(SIGILL, true, true);
4281   set_signal_handler(SIGFPE, true, true);
4282 
4283 
4284   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4285 
4286     // Pre-1.4.1 libjsig is limited to chaining signals <= 32, so it
4287     // cannot register overridable signals, which might be > 32.
4288     if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
4289       // Tell libjsig jvm has finished setting signal handlers
4290       (*end_signal_setting)();
4291       libjsigdone = true;
4292     }
4293   }
4294 
4295   // Never ok to chain our SIGinterrupt
4296   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4297   set_signal_handler(os::Solaris::SIGasync(), true, true);
4298 
4299   if (libjsig_is_loaded && !libjsigdone) {
4300     // Tell libjsig the jvm has finished setting signal handlers
4301     (*end_signal_setting)();
4302   }
4303 
4304   // We don't activate the signal checker if libjsig is in place; we trust ourselves,
4305   // and if a user signal handler is installed all bets are off.
4306   // Log that signal checking is off only if -verbose:jni is specified.
4307   if (CheckJNICalls) {
4308     if (libjsig_is_loaded) {
4309       if (PrintJNIResolving) {
4310         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4311       }
4312       check_signals = false;
4313     }
4314     if (AllowUserSignalHandlers) {
4315       if (PrintJNIResolving) {
4316         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4317       }
4318       check_signals = false;
4319     }
4320   }
4321 }
4322 
4323 
4324 void report_error(const char* file_name, int line_no, const char* title,
4325                   const char* format, ...);
4326 
4327 const char * signames[] = {
4328   "SIG0",
4329   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4330   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4331   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4332   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4333   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4334   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4335   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4336   "SIGCANCEL", "SIGLOST"
4337 };
4338 
4339 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4340   if (0 < exception_code && exception_code <= SIGRTMAX) {
4341     // signal
4342     if (exception_code < sizeof(signames)/sizeof(const char*)) {
4343       jio_snprintf(buf, size, "%s", signames[exception_code]);
4344     } else {
4345       jio_snprintf(buf, size, "SIG%d", exception_code);
4346     }
4347     return buf;
4348   } else {
4349     return NULL;
4350   }
4351 }
4352 
4353 // (Static) wrapper for getisax(2) call.
4354 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4355 
4356 // (Static) wrappers for the liblgrp API
4357 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4358 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4359 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4360 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4361 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4362 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4363 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4364 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4365 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4366 
4367 // (Static) wrapper for meminfo() call.
4368 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4369 
4370 static address resolve_symbol_lazy(const char* name) {
4371   address addr = (address) dlsym(RTLD_DEFAULT, name);
4372   if (addr == NULL) {
4373     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4374     addr = (address) dlsym(RTLD_NEXT, name);
4375   }
4376   return addr;
4377 }
4378 
4379 static address resolve_symbol(const char* name) {
4380   address addr = resolve_symbol_lazy(name);
4381   if (addr == NULL) {
4382     fatal(dlerror());
4383   }
4384   return addr;
4385 }
4386 
4387 void os::Solaris::libthread_init() {
4388   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4389 
4390   lwp_priocntl_init();
4391 
4392   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4393   if (func == NULL) {
4394     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
4395     // Guarantee that this VM is running on a new enough OS (5.6 or
4396     // later) that it will have a new enough libthread.so.
4397     guarantee(func != NULL, "libthread.so is too old.");
4398   }
4399 
4400   int size;
4401   void (*handler_info_func)(address *, int *);
4402   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4403   handler_info_func(&handler_start, &size);
4404   handler_end = handler_start + size;
4405 }
4406 
4407 
4408 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4409 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4410 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4411 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4412 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4413 int os::Solaris::_mutex_scope = USYNC_THREAD;
4414 
4415 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4416 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4417 int_fnP_cond_tP os::Solaris::_cond_signal;
4418 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4419 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4420 int_fnP_cond_tP os::Solaris::_cond_destroy;
4421 int os::Solaris::_cond_scope = USYNC_THREAD;
4422 
4423 void os::Solaris::synchronization_init() {
4424   if (UseLWPSynchronization) {
4425     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4426     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4427     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4428     os::Solaris::set_mutex_init(lwp_mutex_init);
4429     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4430     os::Solaris::set_mutex_scope(USYNC_THREAD);
4431 
4432     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4433     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4434     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4435     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4436     os::Solaris::set_cond_init(lwp_cond_init);
4437     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4438     os::Solaris::set_cond_scope(USYNC_THREAD);
4439   } else {
4440     os::Solaris::set_mutex_scope(USYNC_THREAD);
4441     os::Solaris::set_cond_scope(USYNC_THREAD);
4442 
4443     if (UsePthreads) {
4444       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4445       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4446       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4447       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4448       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4449 
4450       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4451       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4452       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4453       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4454       os::Solaris::set_cond_init(pthread_cond_default_init);
4455       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4456     } else {
4457       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4458       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4459       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4460       os::Solaris::set_mutex_init(::mutex_init);
4461       os::Solaris::set_mutex_destroy(::mutex_destroy);
4462 
4463       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4464       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4465       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4466       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4467       os::Solaris::set_cond_init(::cond_init);
4468       os::Solaris::set_cond_destroy(::cond_destroy);
4469     }
4470   }
4471 }
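// Usage sketch (illustrative only): once synchronization_init() has selected a
// family, the rest of this file calls the primitives only through the
// os::Solaris wrappers and never names the underlying library directly, e.g.
// (as in os::PlatformEvent::park() below):
//
//   int status = os::Solaris::mutex_lock(_mutex);
//   assert_status(status == 0, status, "mutex_lock");
//   status = os::Solaris::cond_wait(_cond, _mutex);
//   ...
//   status = os::Solaris::mutex_unlock(_mutex);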
4472 
4473 bool os::Solaris::liblgrp_init() {
4474   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4475   if (handle != NULL) {
4476     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4477     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4478     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4479     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4480     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4481     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4482     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4483     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4484                                                       dlsym(handle, "lgrp_cookie_stale")));
4485 
4486     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4487     set_lgrp_cookie(c);
4488     return true;
4489   }
4490   return false;
4491 }
4492 
4493 void os::Solaris::misc_sym_init() {
4494   address func;
4495 
4496   // getisax
4497   func = resolve_symbol_lazy("getisax");
4498   if (func != NULL) {
4499     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4500   }
4501 
4502   // meminfo
4503   func = resolve_symbol_lazy("meminfo");
4504   if (func != NULL) {
4505     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4506   }
4507 }
4508 
4509 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4510   assert(_getisax != NULL, "_getisax not set");
4511   return _getisax(array, n);
4512 }
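// Usage sketch (illustrative only; assumes the AV_386_SSE2 bit from
// <sys/auxv_386.h> on x86 Solaris, and that misc_sym_init() resolved getisax,
// which the wrapper above asserts):
//
//   uint32_t avs[2];
//   uint_t avn = os::Solaris::getisax(avs, 2);
//   if (avn > 0 && (avs[0] & AV_386_SSE2) != 0) {
//     // SSE2 is available on this processor
//   }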
4513 
4514 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4515 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4516 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4517 
4518 void init_pset_getloadavg_ptr(void) {
4519   pset_getloadavg_ptr =
4520     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4521   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4522     warning("pset_getloadavg function not found");
4523   }
4524 }
4525 
4526 int os::Solaris::_dev_zero_fd = -1;
4527 
4528 // this is called _before_ the global arguments have been parsed
4529 void os::init(void) {
4530   _initial_pid = getpid();
4531 
4532   max_hrtime = first_hrtime = gethrtime();
4533 
4534   init_random(1234567);
4535 
4536   page_size = sysconf(_SC_PAGESIZE);
4537   if (page_size == -1) {
4538     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4539                   strerror(errno)));
4540   }
4541   init_page_sizes((size_t) page_size);
4542 
4543   Solaris::initialize_system_info();
4544 
4545   // Initialize misc. symbols as soon as possible, so we can use them
4546   // if we need them.
4547   Solaris::misc_sym_init();
4548 
4549   int fd = ::open("/dev/zero", O_RDWR);
4550   if (fd < 0) {
4551     fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4552   } else {
4553     Solaris::set_dev_zero_fd(fd);
4554 
4555     // Close on exec, child won't inherit.
4556     fcntl(fd, F_SETFD, FD_CLOEXEC);
4557   }
4558 
4559   clock_tics_per_sec = CLK_TCK;
4560 
4561   // check if dladdr1() exists; dladdr1 can provide more information than
4562   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4563   // and is available on linker patches for 5.7 and 5.8.
4564   // libdl.so must have been loaded; this call is just an entry lookup
4565   void * hdl = dlopen("libdl.so", RTLD_NOW);
4566   if (hdl) {
4567     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4568   }
4569 
4570   // (Solaris only) this switches to calls that actually do locking.
4571   ThreadCritical::initialize();
4572 
4573   main_thread = thr_self();
4574 
4575   // Constant minimum stack size allowed. It must be at least
4576   // the minimum of what the OS supports (thr_min_stack()), and
4577   // enough to allow the thread to get to user bytecode execution.
4578   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4579   // If the pagesize of the VM is greater than 8K, determine the appropriate
4580   // number of initial guard pages.  The user can change this with the
4581   // command line arguments, if needed.
4582   if (vm_page_size() > 8*K) {
4583     StackYellowPages = 1;
4584     StackRedPages = 1;
4585     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4586   }
4587 }
4588 
4589 // To install functions for atexit system call
4590 extern "C" {
4591   static void perfMemory_exit_helper() {
4592     perfMemory_exit();
4593   }
4594 }
4595 
4596 // this is called _after_ the global arguments have been parsed
4597 jint os::init_2(void) {
4598   // try to enable extended file IO ASAP, see 6431278
4599   os::Solaris::try_enable_extended_io();
4600 
4601   // Allocate a single page and mark it as readable for safepoint polling.  Also
4602   // use this first mmap call to check support for MAP_ALIGN.
4603   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4604                                                       page_size,
4605                                                       MAP_PRIVATE | MAP_ALIGN,
4606                                                       PROT_READ);
4607   if (polling_page == NULL) {
4608     has_map_align = false;
4609     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
4610                                                 PROT_READ);
4611   }
4612 
4613   os::set_polling_page(polling_page);
4614 
4615 #ifndef PRODUCT
4616   if (Verbose && PrintMiscellaneous) {
4617     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n",
4618                (intptr_t)polling_page);
4619   }
4620 #endif
4621 
4622   if (!UseMembar) {
4623     address mem_serialize_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE);
4624     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
4625     os::set_memory_serialize_page(mem_serialize_page);
4626 
4627 #ifndef PRODUCT
4628     if (Verbose && PrintMiscellaneous) {
4629       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n",
4630                  (intptr_t)mem_serialize_page);
4631     }
4632 #endif
4633   }
4634 
4635   // Check minimum allowable stack size for thread creation and to initialize
4636   // the java system classes, including StackOverflowError - depends on page
4637   // size.  Add a page for compiler2 recursion in main thread.
4638   // Add in 2*BytesPerWord times page size to account for VM stack during
4639   // class initialization depending on 32 or 64 bit VM.
4640   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
4641                                         (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
4642                                         2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
4643 
4644   size_t threadStackSizeInBytes = ThreadStackSize * K;
4645   if (threadStackSizeInBytes != 0 &&
4646       threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
4647     tty->print_cr("\nThe stack size specified is too small. Specify at least %dk",
4648                   os::Solaris::min_stack_allowed/K);
4649     return JNI_ERR;
4650   }
4651 
4652   // With a page size larger than 8KB (e.g. 64KB), the usable default
4653   // stack size is quite a bit less.  Increase the stack for 64KB
4654   // (or any larger than 8KB) pages; this increases virtual memory
4655   // fragmentation (since we're not creating the stack on a
4656   // power-of-2 boundary).  The real fix for this should be to fix
4657   // the guard page mechanism.
4658 
4659   if (vm_page_size() > 8*K) {
4660     threadStackSizeInBytes = (threadStackSizeInBytes != 0)
4661        ? threadStackSizeInBytes +
4662          ((StackYellowPages + StackRedPages) * vm_page_size())
4663        : 0;
4664     ThreadStackSize = threadStackSizeInBytes/K;
4665   }
4666 
4667   // Make the stack size a multiple of the page size so that
4668   // the yellow/red zones can be guarded.
4669   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
4670                                                 vm_page_size()));
4671 
4672   Solaris::libthread_init();
4673 
4674   if (UseNUMA) {
4675     if (!Solaris::liblgrp_init()) {
4676       UseNUMA = false;
4677     } else {
4678       size_t lgrp_limit = os::numa_get_groups_num();
4679       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
4680       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
4681       FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
4682       if (lgrp_num < 2) {
4683         // There's only one locality group, disable NUMA.
4684         UseNUMA = false;
4685       }
4686     }
4687     if (!UseNUMA && ForceNUMA) {
4688       UseNUMA = true;
4689     }
4690   }
4691 
4692   Solaris::signal_sets_init();
4693   Solaris::init_signal_mem();
4694   Solaris::install_signal_handlers();
4695 
4696   if (libjsigversion < JSIG_VERSION_1_4_1) {
4697     Maxlibjsigsigs = OLDMAXSIGNUM;
4698   }
4699 
4700   // initialize synchronization primitives to use either thread or
4701   // lwp synchronization (controlled by UseLWPSynchronization)
4702   Solaris::synchronization_init();
4703 
4704   if (MaxFDLimit) {
4705     // Set the file descriptor limit to the maximum.  Print an error
4706     // if getrlimit/setrlimit fails, but continue regardless.
4707     struct rlimit nbr_files;
4708     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4709     if (status != 0) {
4710       if (PrintMiscellaneous && (Verbose || WizardMode)) {
4711         perror("os::init_2 getrlimit failed");
4712       }
4713     } else {
4714       nbr_files.rlim_cur = nbr_files.rlim_max;
4715       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4716       if (status != 0) {
4717         if (PrintMiscellaneous && (Verbose || WizardMode)) {
4718           perror("os::init_2 setrlimit failed");
4719         }
4720       }
4721     }
4722   }
4723 
4724   // Calculate the theoretical maximum number of threads, to guard against
4725   // artificial out-of-memory situations where all available address
4726   // space has been reserved by thread stacks. Default stack size is 1MB.
4727   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
4728     JavaThread::stack_size_at_create() : (1*K*K);
4729   assert(pre_thread_stack_size != 0, "Must have a stack");
4730   // Solaris gives user programs a maximum of 4GB of address space. Calculate
4731   // the thread limit at which we should start doing virtual memory banging;
4732   // currently that is when the threads have used all but 200MB of the space.
4733   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
4734   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
4735 
4736   // at-exit methods are called in the reverse order of their registration.
4737   // In Solaris 7 and earlier, atexit functions are called on return from
4738   // main or as a result of a call to exit(3C). There can be only 32 of
4739   // these functions registered and atexit() does not set errno. In Solaris
4740   // 8 and later, there is no limit to the number of functions registered
4741   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
4742   // functions are called upon dlclose(3DL) in addition to return from main
4743   // and exit(3C).
4744 
4745   if (PerfAllowAtExitRegistration) {
4746     // only register atexit functions if PerfAllowAtExitRegistration is set.
4747     // atexit functions can be delayed until process exit time, which
4748     // can be problematic for embedded VM situations. Embedded VMs should
4749     // call DestroyJavaVM() to assure that VM resources are released.
4750 
4751     // note: perfMemory_exit_helper atexit function may be removed in
4752     // the future if the appropriate cleanup code can be added to the
4753     // VM_Exit VMOperation's doit method.
4754     if (atexit(perfMemory_exit_helper) != 0) {
4755       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4756     }
4757   }
4758 
4759   // Init pset_loadavg function pointer
4760   init_pset_getloadavg_ptr();
4761 
4762   return JNI_OK;
4763 }
4764 
4765 // Mark the polling page as unreadable
4766 void os::make_polling_page_unreadable(void) {
4767   if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0) {
4768     fatal("Could not disable polling page");
4769   }
4770 }
4771 
4772 // Mark the polling page as readable
4773 void os::make_polling_page_readable(void) {
4774   if (mprotect((char *)_polling_page, page_size, PROT_READ) != 0) {
4775     fatal("Could not enable polling page");
4776   }
4777 }
4778 
4779 // OS interface.
4780 
4781 bool os::check_heap(bool force) { return true; }
4782 
4783 // Is a (classpath) directory empty?
4784 bool os::dir_is_empty(const char* path) {
4785   DIR *dir = NULL;
4786   struct dirent *ptr;
4787 
4788   dir = opendir(path);
4789   if (dir == NULL) return true;
4790 
4791   // Scan the directory
4792   bool result = true;
4793   char buf[sizeof(struct dirent) + MAX_PATH];
4794   struct dirent *dbuf = (struct dirent *) buf;
4795   while (result && (ptr = readdir(dir, dbuf)) != NULL) {
4796     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4797       result = false;
4798     }
4799   }
4800   closedir(dir);
4801   return result;
4802 }
4803 
4804 // This code originates from JDK's sysOpen and open64_w
4805 // from src/solaris/hpi/src/system_md.c
4806 
4807 int os::open(const char *path, int oflag, int mode) {
4808   if (strlen(path) > MAX_PATH - 1) {
4809     errno = ENAMETOOLONG;
4810     return -1;
4811   }
4812   int fd;
4813 
4814   fd = ::open64(path, oflag, mode);
4815   if (fd == -1) return -1;
4816 
4817   // If the open succeeded, the file might still be a directory
4818   {
4819     struct stat64 buf64;
4820     int ret = ::fstat64(fd, &buf64);
4821     int st_mode = buf64.st_mode;
4822 
4823     if (ret != -1) {
4824       if ((st_mode & S_IFMT) == S_IFDIR) {
4825         errno = EISDIR;
4826         ::close(fd);
4827         return -1;
4828       }
4829     } else {
4830       ::close(fd);
4831       return -1;
4832     }
4833   }
4834 
4835   // 32-bit Solaris systems suffer from:
4836   //
4837   // - an historical default soft limit of 256 per-process file
4838   //   descriptors that is too low for many Java programs.
4839   //
4840   // - a design flaw where file descriptors created using stdio
4841   //   fopen must be less than 256, _even_ when the first limit above
4842   //   has been raised.  This can cause calls to fopen (but not calls to
4843   //   open, for example) to fail mysteriously, perhaps in 3rd party
4844   //   native code (although the JDK itself uses fopen).  One can hardly
4845   //   criticize them for using this most standard of all functions.
4846   //
4847   // We attempt to make everything work anyway by:
4848   //
4849   // - raising the soft limit on per-process file descriptors beyond
4850   //   256
4851   //
4852   // - As of Solaris 10u4, we can request that Solaris raise the 256
4853   //   stdio fopen limit by calling function enable_extended_FILE_stdio.
4854   //   This is done in init_2 and recorded in enabled_extended_FILE_stdio
4855   //
4856   // - If we are stuck on an old (pre 10u4) Solaris system, we can
4857   //   workaround the bug by remapping non-stdio file descriptors below
4858   //   256 to ones beyond 256, which is done below.
4859   //
4860   // See:
4861   // 1085341: 32-bit stdio routines should support file descriptors >255
4862   // 6533291: Work around 32-bit Solaris stdio limit of 256 open files
4863   // 6431278: Netbeans crash on 32 bit Solaris: need to call
4864   //          enable_extended_FILE_stdio() in VM initialisation
4865   // Giri Mandalika's blog
4866   // http://technopark02.blogspot.com/2005_05_01_archive.html
4867   //
4868 #ifndef  _LP64
4869   if ((!enabled_extended_FILE_stdio) && fd < 256) {
4870     int newfd = ::fcntl(fd, F_DUPFD, 256);
4871     if (newfd != -1) {
4872       ::close(fd);
4873       fd = newfd;
4874     }
4875   }
4876 #endif // 32-bit Solaris
4877 
4878   // All file descriptors that are opened in the JVM and not
4879   // specifically destined for a subprocess should have the
4880   // close-on-exec flag set.  If we don't set it, then careless 3rd
4881   // party native code might fork and exec without closing all
4882   // appropriate file descriptors (e.g. as we do in closeDescriptors in
4883   // UNIXProcess.c), and this in turn might:
4884   //
4885   // - cause end-of-file to fail to be detected on some file
4886   //   descriptors, resulting in mysterious hangs, or
4887   //
4888   // - might cause an fopen in the subprocess to fail on a system
4889   //   suffering from bug 1085341.
4890   //
4891   // (Yes, the default setting of the close-on-exec flag is a Unix
4892   // design flaw)
4893   //
4894   // See:
4895   // 1085341: 32-bit stdio routines should support file descriptors >255
4896   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4897   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4898   //
4899 #ifdef FD_CLOEXEC
4900   {
4901     int flags = ::fcntl(fd, F_GETFD);
4902     if (flags != -1) {
4903       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4904     }
4905   }
4906 #endif
4907 
4908   return fd;
4909 }
4910 
4911 // create binary file, rewriting existing file if required
4912 int os::create_binary_file(const char* path, bool rewrite_existing) {
4913   int oflags = O_WRONLY | O_CREAT;
4914   if (!rewrite_existing) {
4915     oflags |= O_EXCL;
4916   }
4917   return ::open64(path, oflags, S_IREAD | S_IWRITE);
4918 }
4919 
4920 // return current position of file pointer
4921 jlong os::current_file_offset(int fd) {
4922   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4923 }
4924 
4925 // move file pointer to the specified offset
4926 jlong os::seek_to_file_offset(int fd, jlong offset) {
4927   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4928 }
4929 
4930 jlong os::lseek(int fd, jlong offset, int whence) {
4931   return (jlong) ::lseek64(fd, offset, whence);
4932 }
4933 
4934 char * os::native_path(char *path) {
4935   return path;
4936 }
4937 
4938 int os::ftruncate(int fd, jlong length) {
4939   return ::ftruncate64(fd, length);
4940 }
4941 
4942 int os::fsync(int fd)  {
4943   RESTARTABLE_RETURN_INT(::fsync(fd));
4944 }
4945 
4946 int os::available(int fd, jlong *bytes) {
4947   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
4948          "Assumed _thread_in_native");
4949   jlong cur, end;
4950   int mode;
4951   struct stat64 buf64;
4952 
4953   if (::fstat64(fd, &buf64) >= 0) {
4954     mode = buf64.st_mode;
4955     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4956       int n,ioctl_return;
4957 
4958       RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
4959       if (ioctl_return >= 0) {
4960         *bytes = n;
4961         return 1;
4962       }
4963     }
4964   }
4965   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4966     return 0;
4967   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4968     return 0;
4969   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4970     return 0;
4971   }
4972   *bytes = end - cur;
4973   return 1;
4974 }
4975 
4976 // Map a block of memory.
4977 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4978                         char *addr, size_t bytes, bool read_only,
4979                         bool allow_exec) {
4980   int prot;
4981   int flags;
4982 
4983   if (read_only) {
4984     prot = PROT_READ;
4985     flags = MAP_SHARED;
4986   } else {
4987     prot = PROT_READ | PROT_WRITE;
4988     flags = MAP_PRIVATE;
4989   }
4990 
4991   if (allow_exec) {
4992     prot |= PROT_EXEC;
4993   }
4994 
4995   if (addr != NULL) {
4996     flags |= MAP_FIXED;
4997   }
4998 
4999   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5000                                      fd, file_offset);
5001   if (mapped_address == MAP_FAILED) {
5002     return NULL;
5003   }
5004   return mapped_address;
5005 }
5006 
5007 
5008 // Remap a block of memory.
5009 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5010                           char *addr, size_t bytes, bool read_only,
5011                           bool allow_exec) {
5012   // same as map_memory() on this OS
5013   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5014                         allow_exec);
5015 }
5016 
5017 
5018 // Unmap a block of memory.
5019 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5020   return munmap(addr, bytes) == 0;
5021 }
5022 
5023 void os::pause() {
5024   char filename[MAX_PATH];
5025   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5026     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);  // not a format string
5027   } else {
5028     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5029   }
5030 
5031   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5032   if (fd != -1) {
5033     struct stat buf;
5034     ::close(fd);
5035     while (::stat(filename, &buf) == 0) {
5036       (void)::poll(NULL, 0, 100);
5037     }
5038   } else {
5039     jio_fprintf(stderr,
5040                 "Could not open pause file '%s', continuing immediately.\n", filename);
5041   }
5042 }
5043 
5044 #ifndef PRODUCT
5045 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5046 // Turn this on if you need to trace synch operations.
5047 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5048 // and call record_synch_enable and record_synch_disable
5049 // around the computation of interest.
5050 
5051 void record_synch(char* name, bool returning);  // defined below
5052 
5053 class RecordSynch {
5054   char* _name;
5055  public:
5056   RecordSynch(char* name) :_name(name) { record_synch(_name, false); }
5057   ~RecordSynch()                       { record_synch(_name, true); }
5058 };
5059 
5060 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
5061 extern "C" ret name params {                                    \
5062   typedef ret name##_t params;                                  \
5063   static name##_t* implem = NULL;                               \
5064   static int callcount = 0;                                     \
5065   if (implem == NULL) {                                         \
5066     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
5067     if (implem == NULL)  fatal(dlerror());                      \
5068   }                                                             \
5069   ++callcount;                                                  \
5070   RecordSynch _rs(#name);                                       \
5071   inner;                                                        \
5072   return implem args;                                           \
5073 }
5074 // in dbx, examine callcounts this way:
5075 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
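// Expansion sketch (illustrative only): CHECK_MUTEX(mutex_lock), used below,
// produces an interposing definition roughly equivalent to:
//
//   extern "C" int mutex_lock(mutex_t *mu) {
//     typedef int mutex_lock_t(mutex_t *mu);
//     static mutex_lock_t* implem = NULL;
//     static int callcount = 0;
//     if (implem == NULL) {
//       implem = (mutex_lock_t*) dlsym(RTLD_NEXT, "mutex_lock");
//       if (implem == NULL)  fatal(dlerror());
//     }
//     ++callcount;
//     RecordSynch _rs("mutex_lock");
//     CHECK_MU;                   // verify the mutex lives in the C heap
//     return implem(mu);          // forward to the next definition in link order
//   }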
5076 
5077 #define CHECK_POINTER_OK(p) \
5078   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
5079 #define CHECK_MU \
5080   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5081 #define CHECK_CV \
5082   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5083 #define CHECK_P(p) \
5084   if (!CHECK_POINTER_OK(p))  fatal("Pointer must be in C heap only.");
5085 
5086 #define CHECK_MUTEX(mutex_op) \
5087   CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5088 
5089 CHECK_MUTEX(   mutex_lock)
5090 CHECK_MUTEX(  _mutex_lock)
5091 CHECK_MUTEX( mutex_unlock)
5092 CHECK_MUTEX(_mutex_unlock)
5093 CHECK_MUTEX( mutex_trylock)
5094 CHECK_MUTEX(_mutex_trylock)
5095 
5096 #define CHECK_COND(cond_op) \
5097   CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU; CHECK_CV);
5098 
5099 CHECK_COND( cond_wait);
5100 CHECK_COND(_cond_wait);
5101 CHECK_COND(_cond_wait_cancel);
5102 
5103 #define CHECK_COND2(cond_op) \
5104   CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU; CHECK_CV);
5105 
5106 CHECK_COND2( cond_timedwait);
5107 CHECK_COND2(_cond_timedwait);
5108 CHECK_COND2(_cond_timedwait_cancel);
5109 
5110 // do the _lwp_* versions too
5111 #define mutex_t lwp_mutex_t
5112 #define cond_t  lwp_cond_t
5113 CHECK_MUTEX(  _lwp_mutex_lock)
5114 CHECK_MUTEX(  _lwp_mutex_unlock)
5115 CHECK_MUTEX(  _lwp_mutex_trylock)
5116 CHECK_MUTEX( __lwp_mutex_lock)
5117 CHECK_MUTEX( __lwp_mutex_unlock)
5118 CHECK_MUTEX( __lwp_mutex_trylock)
5119 CHECK_MUTEX(___lwp_mutex_lock)
5120 CHECK_MUTEX(___lwp_mutex_unlock)
5121 
5122 CHECK_COND(  _lwp_cond_wait);
5123 CHECK_COND( __lwp_cond_wait);
5124 CHECK_COND(___lwp_cond_wait);
5125 
5126 CHECK_COND2(  _lwp_cond_timedwait);
5127 CHECK_COND2( __lwp_cond_timedwait);
5128 #undef mutex_t
5129 #undef cond_t
5130 
5131 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5132 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5133 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5134 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5135 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5136 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5137 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5138 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5139 
5140 
5141 // recording machinery:
5142 
5143 enum { RECORD_SYNCH_LIMIT = 200 };
5144 char* record_synch_name[RECORD_SYNCH_LIMIT];
5145 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5146 bool record_synch_returning[RECORD_SYNCH_LIMIT];
5147 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5148 int record_synch_count = 0;
5149 bool record_synch_enabled = false;
5150 
5151 // in dbx, examine recorded data this way:
5152 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5153 
5154 void record_synch(char* name, bool returning) {
5155   if (record_synch_enabled) {
5156     if (record_synch_count < RECORD_SYNCH_LIMIT) {
5157       record_synch_name[record_synch_count] = name;
5158       record_synch_returning[record_synch_count] = returning;
5159       record_synch_thread[record_synch_count] = thr_self();
5160       record_synch_arg0ptr[record_synch_count] = &name;
5161       record_synch_count++;
5162     }
5163     // put more checking code here:
5164     // ...
5165   }
5166 }
5167 
5168 void record_synch_enable() {
5169   // start collecting trace data, if not already doing so
5170   if (!record_synch_enabled)  record_synch_count = 0;
5171   record_synch_enabled = true;
5172 }
5173 
5174 void record_synch_disable() {
5175   // stop collecting trace data
5176   record_synch_enabled = false;
5177 }
5178 
5179 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5180 #endif // PRODUCT
5181 
5182 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5183 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5184                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
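// Note: the two constants above are a hand-rolled offsetof().  thr_time_off
// is the byte offset of pr_utime within prusage_t, and thr_time_size is the
// length of the pr_utime/pr_stime pair (pr_stime follows pr_utime, and
// pr_ttime follows pr_stime, in prusage_t).  os::thread_cpu_time() below uses
// them to pread() just those two timestruc_t fields out of
// /proc/<pid>/lwp/<lwpid>/lwpusage instead of reading the whole structure.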
5185 
5186 
5187 // JVMTI & JVM monitoring and management support
5188 // The thread_cpu_time() and current_thread_cpu_time() are only
5189 // supported if is_thread_cpu_time_supported() returns true.
5190 // They are not supported on Solaris T1.
5191 
5192 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5193 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5194 // of a thread.
5195 //
5196 // current_thread_cpu_time() and thread_cpu_time(Thread *)
5197 // returns the fast estimate available on the platform.
5198 
5199 // hrtime_t gethrvtime() return value includes
5200 // user time but does not include system time
5201 jlong os::current_thread_cpu_time() {
5202   return (jlong) gethrvtime();
5203 }
5204 
5205 jlong os::thread_cpu_time(Thread *thread) {
5206   // return user level CPU time only to be consistent with
5207   // what current_thread_cpu_time returns.
5208   // thread_cpu_time_info() must be changed if this changes
5209   return os::thread_cpu_time(thread, false /* user time only */);
5210 }
5211 
5212 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5213   if (user_sys_cpu_time) {
5214     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5215   } else {
5216     return os::current_thread_cpu_time();
5217   }
5218 }
5219 
5220 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5221   char proc_name[64];
5222   int count;
5223   prusage_t prusage;
5224   jlong lwp_time;
5225   int fd;
5226 
5227   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5228           getpid(),
5229           thread->osthread()->lwp_id());
5230   fd = ::open(proc_name, O_RDONLY);
5231   if (fd == -1) return -1;
5232 
5233   do {
5234     count = ::pread(fd,
5235                     (void *)&prusage.pr_utime,
5236                     thr_time_size,
5237                     thr_time_off);
5238   } while (count < 0 && errno == EINTR);
5239   ::close(fd);
5240   if (count < 0) return -1;
5241 
5242   if (user_sys_cpu_time) {
5243     // user + system CPU time
5244     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5245                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5246                  (jlong)prusage.pr_stime.tv_nsec +
5247                  (jlong)prusage.pr_utime.tv_nsec;
5248   } else {
5249     // user level CPU time only
5250     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5251                 (jlong)prusage.pr_utime.tv_nsec;
5252   }
5253 
5254   return (lwp_time);
5255 }
5256 
5257 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5258   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5259   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5260   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5261   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5262 }
5263 
5264 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5265   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5266   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5267   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5268   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5269 }
5270 
5271 bool os::is_thread_cpu_time_supported() {
5272   return true;
5273 }
5274 
5275 // System loadavg support.  Returns -1 if load average cannot be obtained.
5276 // Return the load average for our processor set if the primitive exists
5277 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
5278 int os::loadavg(double loadavg[], int nelem) {
5279   if (pset_getloadavg_ptr != NULL) {
5280     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5281   } else {
5282     return ::getloadavg(loadavg, nelem);
5283   }
5284 }
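// Usage sketch (illustrative only):
//
//   double avg[3];
//   if (os::loadavg(avg, 3) >= 1) {
//     tty->print_cr("1-minute load average: %.2f", avg[0]);
//   }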
5285 
5286 //---------------------------------------------------------------------------------
5287 
5288 bool os::find(address addr, outputStream* st) {
5289   Dl_info dlinfo;
5290   memset(&dlinfo, 0, sizeof(dlinfo));
5291   if (dladdr(addr, &dlinfo) != 0) {
5292     st->print(PTR_FORMAT ": ", addr);
5293     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5294       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5295     } else if (dlinfo.dli_fbase != NULL) {
5296       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5297     } else {
5298       st->print("<absolute address>");
5299     }
5300     if (dlinfo.dli_fname != NULL) {
5301       st->print(" in %s", dlinfo.dli_fname);
5302     }
5303     if (dlinfo.dli_fbase != NULL) {
5304       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5305     }
5306     st->cr();
5307 
5308     if (Verbose) {
5309       // decode some bytes around the PC
5310       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5311       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5312       address       lowest = (address) dlinfo.dli_sname;
5313       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
5314       if (begin < lowest)  begin = lowest;
5315       Dl_info dlinfo2;
5316       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5317           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
5318         end = (address) dlinfo2.dli_saddr;
5319       }
5320       Disassembler::decode(begin, end, st);
5321     }
5322     return true;
5323   }
5324   return false;
5325 }
5326 
5327 // The following function has been added to support HotSparc's libjvm.so running
5328 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
5329 // src/solaris/hpi/native_threads in the EVM codebase.
5330 //
5331 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5332 // libraries and should thus be removed. We will leave it behind for a while
5333 // until we no longer want to be able to run on top of 1.3.0 Solaris production
5334 // JDK. See 4341971.
5335 
5336 #define STACK_SLACK 0x800
5337 
5338 extern "C" {
5339   intptr_t sysThreadAvailableStackWithSlack() {
5340     stack_t st;
5341     intptr_t retval, stack_top;
5342     retval = thr_stksegment(&st);
5343     assert(retval == 0, "incorrect return value from thr_stksegment");
5344     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5345     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
5346     stack_top = (intptr_t)st.ss_sp - st.ss_size;
5347     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5348   }
5349 }
5350 
5351 // ObjectMonitor park-unpark infrastructure ...
5352 //
5353 // We implement Solaris and Linux PlatformEvents with the
5354 // obvious condvar-mutex-flag triple.
5355 // Another alternative that works quite well is pipes:
5356 // Each PlatformEvent consists of a pipe-pair.
5357 // The thread associated with the PlatformEvent
5358 // calls park(), which reads from the input end of the pipe.
5359 // Unpark() writes into the other end of the pipe.
5360 // The write-side of the pipe must be set NDELAY.
5361 // Unfortunately pipes consume a large # of handles.
5362 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5363 // Using pipes for the 1st few threads might be workable, however.
5364 //
5365 // park() is permitted to return spuriously.
5366 // Callers of park() should wrap the call to park() in
5367 // an appropriate loop.  A litmus test for the correct
5368 // usage of park is the following: if park() were modified
5369 // to immediately return 0 your code should still work,
5370 // albeit degenerating to a spin loop.
5371 //
5372 // In a sense, park()-unpark() just provides more polite spinning
5373 // and polling with the key difference over naive spinning being
5374 // that a parked thread needs to be explicitly unparked() in order
5375 // to wake up and to poll the underlying condition.
5376 //
5377 // Assumption:
5378 //    Only one parker can exist on an event, which is why we allocate
5379 //    them per-thread. Multiple unparkers can coexist.
5380 //
5381 // _Event transitions in park()
5382 //   -1 => -1 : illegal
5383 //    1 =>  0 : pass - return immediately
5384 //    0 => -1 : block; then set _Event to 0 before returning
5385 //
5386 // _Event transitions in unpark()
5387 //    0 => 1 : just return
5388 //    1 => 1 : just return
5389 //   -1 => either 0 or 1; must signal target thread
5390 //         That is, we can safely transition _Event from -1 to either
5391 //         0 or 1.
5392 //
5393 // _Event serves as a restricted-range semaphore.
5394 //   -1 : thread is blocked, i.e. there is a waiter
5395 //    0 : neutral: thread is running or ready,
5396 //        could have been signaled after a wait started
5397 //    1 : signaled - thread is running or ready
5398 //
5399 // Another possible encoding of _Event would be with
5400 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5401 //
5402 // TODO-FIXME: add DTRACE probes for:
5403 // 1.   Tx parks
5404 // 2.   Ty unparks Tx
5405 // 3.   Tx resumes from park
5406 
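// Caller-side sketch (illustrative only, hypothetical names) of the loop
// discipline described above: park() may return spuriously, so the waiter
// always re-tests its condition.
//
//   while (!condition_of_interest()) {   // hypothetical predicate
//     ev->park();                        // may wake early; the loop re-checks
//   }
//
// and the corresponding producer does:
//
//   publish_condition();                 // hypothetical state update
//   ev->unpark();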
5407 
5408 // value determined through experimentation
5409 #define ROUNDINGFIX 11
5410 
5411 // utility to compute the abstime argument to timedwait.
5412 // TODO-FIXME: switch from compute_abstime() to unpackTime().
5413 
5414 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5415   // millis is the relative timeout time
5416   // abstime will be the absolute timeout time
5417   if (millis < 0)  millis = 0;
5418   struct timeval now;
5419   int status = gettimeofday(&now, NULL);
5420   assert(status == 0, "gettimeofday");
5421   jlong seconds = millis / 1000;
5422   jlong max_wait_period;
5423 
5424   if (UseLWPSynchronization) {
5425     // forward port of fix for 4275818 (not sleeping long enough)
5426     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5427     // _lwp_cond_timedwait() used a round_down algorithm rather
5428     // than a round_up. For millis less than our rounding factor
5429     // (ROUNDINGFIX) it rounded down to 0, which doesn't meet the spec.
5430     // For millis > ROUNDINGFIX we may return a bit sooner, but
5431     // since we cannot accurately identify the patch level, and this
5432     // has already been fixed in Solaris 8 and 9, we will leave it
5433     // alone rather than always rounding down.
5434 
5435     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
5436     // It appears that when we go directly through Solaris _lwp_cond_timedwait()
5437     // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
5438     max_wait_period = 21000000;
5439   } else {
5440     max_wait_period = 50000000;
5441   }
5442   millis %= 1000;
5443   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5444     seconds = max_wait_period;
5445   }
5446   abstime->tv_sec = now.tv_sec  + seconds;
5447   long       usec = now.tv_usec + millis * 1000;
5448   if (usec >= 1000000) {
5449     abstime->tv_sec += 1;
5450     usec -= 1000000;
5451   }
5452   abstime->tv_nsec = usec * 1000;
5453   return abstime;
5454 }
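// Worked example (illustrative only): with millis == 1500 and
// now == { tv_sec = S, tv_usec = 400000 }, compute_abstime() yields
//   seconds = 1, millis %= 1000 -> 500
//   abstime->tv_sec  = S + 1
//   usec             = 400000 + 500 * 1000 = 900000   (< 1000000, no carry)
//   abstime->tv_nsec = 900000 * 1000 = 900000000
// i.e. an absolute deadline 1.5 seconds after "now".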
5455 
5456 void os::PlatformEvent::park() {           // AKA: down()
5457   // Transitions for _Event:
5458   //   -1 => -1 : illegal
5459   //    1 =>  0 : pass - return immediately
5460   //    0 => -1 : block; then set _Event to 0 before returning
5461 
5462   // Invariant: Only the thread associated with the Event/PlatformEvent
5463   // may call park().
5464   assert(_nParked == 0, "invariant");
5465 
5466   int v;
5467   for (;;) {
5468     v = _Event;
5469     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5470   }
5471   guarantee(v >= 0, "invariant");
5472   if (v == 0) {
5473     // Do this the hard way by blocking ...
5474     // See http://monaco.sfbay/detail.jsf?cr=5094058.
5475     // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5476     // Only for SPARC >= V8PlusA
5477 #if defined(__sparc) && defined(COMPILER2)
5478     if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5479 #endif
5480     int status = os::Solaris::mutex_lock(_mutex);
5481     assert_status(status == 0, status, "mutex_lock");
5482     guarantee(_nParked == 0, "invariant");
5483     ++_nParked;
5484     while (_Event < 0) {
5485       // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5486       // Treat this the same as if the wait was interrupted
5487       // With usr/lib/lwp going to kernel, always handle ETIME
5488       status = os::Solaris::cond_wait(_cond, _mutex);
5489       if (status == ETIME) status = EINTR;
5490       assert_status(status == 0 || status == EINTR, status, "cond_wait");
5491     }
5492     --_nParked;
5493     _Event = 0;
5494     status = os::Solaris::mutex_unlock(_mutex);
5495     assert_status(status == 0, status, "mutex_unlock");
5496     // Paranoia to ensure our locked and lock-free paths interact
5497     // correctly with each other.
5498     OrderAccess::fence();
5499   }
5500 }
5501 
5502 int os::PlatformEvent::park(jlong millis) {
5503   // Transitions for _Event:
5504   //   -1 => -1 : illegal
5505   //    1 =>  0 : pass - return immediately
5506   //    0 => -1 : block; then set _Event to 0 before returning
5507 
5508   guarantee(_nParked == 0, "invariant");
5509   int v;
5510   for (;;) {
5511     v = _Event;
5512     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5513   }
5514   guarantee(v >= 0, "invariant");
5515   if (v != 0) return OS_OK;
5516 
5517   int ret = OS_TIMEOUT;
5518   timestruc_t abst;
5519   compute_abstime(&abst, millis);
5520 
5521   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5522   // For Solaris SPARC set fprs.FEF=0 prior to parking.
5523   // Only for SPARC >= V8PlusA
5524 #if defined(__sparc) && defined(COMPILER2)
5525   if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5526 #endif
5527   int status = os::Solaris::mutex_lock(_mutex);
5528   assert_status(status == 0, status, "mutex_lock");
5529   guarantee(_nParked == 0, "invariant");
5530   ++_nParked;
5531   while (_Event < 0) {
5532     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5533     assert_status(status == 0 || status == EINTR ||
5534                   status == ETIME || status == ETIMEDOUT,
5535                   status, "cond_timedwait");
5536     if (!FilterSpuriousWakeups) break;                // previous semantics
5537     if (status == ETIME || status == ETIMEDOUT) break;
5538     // We consume and ignore EINTR and spurious wakeups.
5539   }
5540   --_nParked;
5541   if (_Event >= 0) ret = OS_OK;
5542   _Event = 0;
5543   status = os::Solaris::mutex_unlock(_mutex);
5544   assert_status(status == 0, status, "mutex_unlock");
5545   // Paranoia to ensure our locked and lock-free paths interact
5546   // correctly with each other.
5547   OrderAccess::fence();
5548   return ret;
5549 }
5550 
5551 void os::PlatformEvent::unpark() {
5552   // Transitions for _Event:
5553   //    0 => 1 : just return
5554   //    1 => 1 : just return
5555   //   -1 => either 0 or 1; must signal target thread
5556   //         That is, we can safely transition _Event from -1 to either
5557   //         0 or 1.
5558   // See also: "Semaphores in Plan 9" by Mullender & Cox
5559   //
5560   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5561   // that it will take two back-to-back park() calls for the owning
5562   // thread to block. This has the benefit of forcing a spurious return
5563   // from the first park() call after an unpark() call which will help
5564   // shake out uses of park() and unpark() without condition variables.
5565 
5566   if (Atomic::xchg(1, &_Event) >= 0) return;
5567 
5568   // If the thread associated with the event was parked, wake it.
5569   // Wait for the thread assoc with the PlatformEvent to vacate.
5570   int status = os::Solaris::mutex_lock(_mutex);
5571   assert_status(status == 0, status, "mutex_lock");
5572   int AnyWaiters = _nParked;
5573   status = os::Solaris::mutex_unlock(_mutex);
5574   assert_status(status == 0, status, "mutex_unlock");
5575   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5576   if (AnyWaiters != 0) {
5577     // Note that we signal() *after* dropping the lock for "immortal" Events.
5578     // This is safe and avoids a common class of  futile wakeups.  In rare
5579     // circumstances this can cause a thread to return prematurely from
5580     // cond_{timed}wait() but the spurious wakeup is benign and the victim
5581     // will simply re-test the condition and re-park itself.
5582     // This provides particular benefit if the underlying platform does not
5583     // provide wait morphing.
5584     status = os::Solaris::cond_signal(_cond);
5585     assert_status(status == 0, status, "cond_signal");
5586   }
5587 }
5588 
5589 // JSR166
5590 // -------------------------------------------------------
5591 
5592 // The solaris and linux implementations of park/unpark are fairly
5593 // conservative for now, but can be improved. They currently use a
5594 // mutex/condvar pair, plus _counter.
5595 // Park decrements _counter if > 0, else does a condvar wait.  Unpark
5596 // sets count to 1 and signals condvar.  Only one thread ever waits
5597 // on the condvar. Contention seen when trying to park implies that someone
5598 // is unparking you, so don't wait. And spurious returns are fine, so there
5599 // is no need to track notifications.
5600 
5601 #define MAX_SECS 100000000
5602 
5603 // This code is common to linux and solaris and will be moved to a
5604 // common place in dolphin.
5605 //
5606 // The passed in time value is either a relative time in nanoseconds
5607 // or an absolute time in milliseconds. Either way it has to be unpacked
5608 // into suitable seconds and nanoseconds components and stored in the
5609 // given timespec structure.
5610 // Given that time is a 64-bit value and the time_t used in the timespec is
5611 // only a signed 32-bit value (except on 64-bit Linux), we have to watch for
5612 // overflow if times far in the future are given. Further, on Solaris versions
5613 // prior to 10 there is a restriction (see cond_timedwait) that the specified
5614 // number of seconds, in abstime, must be less than current_time + 100,000,000.
5615 // As it will be 28 years before "now + 100000000" will overflow, we can
5616 // ignore overflow and just impose a hard limit on seconds using the value
5617 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
5618 // years from "now".
5619 //
5620 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5621   assert(time > 0, "convertTime");
5622 
5623   struct timeval now;
5624   int status = gettimeofday(&now, NULL);
5625   assert(status == 0, "gettimeofday");
5626 
5627   time_t max_secs = now.tv_sec + MAX_SECS;
5628 
5629   if (isAbsolute) {
5630     jlong secs = time / 1000;
5631     if (secs > max_secs) {
5632       absTime->tv_sec = max_secs;
5633     } else {
5634       absTime->tv_sec = secs;
5635     }
5636     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5637   } else {
5638     jlong secs = time / NANOSECS_PER_SEC;
5639     if (secs >= MAX_SECS) {
5640       absTime->tv_sec = max_secs;
5641       absTime->tv_nsec = 0;
5642     } else {
5643       absTime->tv_sec = now.tv_sec + secs;
5644       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5645       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5646         absTime->tv_nsec -= NANOSECS_PER_SEC;
5647         ++absTime->tv_sec; // note: this must be <= max_secs
5648       }
5649     }
5650   }
5651   assert(absTime->tv_sec >= 0, "tv_sec < 0");
5652   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5653   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5654   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5655 }
5656 
5657 void Parker::park(bool isAbsolute, jlong time) {
5658   // Ideally we'd do something useful while spinning, such
5659   // as calling unpackTime().
5660 
5661   // Optional fast-path check:
5662   // Return immediately if a permit is available.
5663   // We depend on Atomic::xchg() having full barrier semantics
5664   // since we are doing a lock-free update to _counter.
5665   if (Atomic::xchg(0, &_counter) > 0) return;
5666 
5667   // Optional fast-exit: Check interrupt before trying to wait
5668   Thread* thread = Thread::current();
5669   assert(thread->is_Java_thread(), "Must be JavaThread");
5670   JavaThread *jt = (JavaThread *)thread;
5671   if (Thread::is_interrupted(thread, false)) {
5672     return;
5673   }
5674 
5675   // First, demultiplex/decode time arguments
5676   timespec absTime;
5677   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
5678     return;
5679   }
5680   if (time > 0) {
5681     // Warning: this code might be exposed to the old Solaris time
5682     // round-down bugs.  Grep for ROUNDINGFIX for details.
5683     unpackTime(&absTime, isAbsolute, time);
5684   }
5685 
5686   // Enter safepoint region
5687   // Beware of deadlocks such as 6317397.
5688   // The per-thread Parker:: _mutex is a classic leaf-lock.
5689   // In particular a thread must never block on the Threads_lock while
5690   // holding the Parker:: mutex.  If safepoints are pending, both the
5691   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
5692   ThreadBlockInVM tbivm(jt);
5693 
5694   // Don't wait if we cannot get the lock, since interference arises from
5695   // unblocking.  Also, check for interrupt before trying to wait.
5696   if (Thread::is_interrupted(thread, false) ||
5697       os::Solaris::mutex_trylock(_mutex) != 0) {
5698     return;
5699   }
5700 
5701   int status;
5702 
5703   if (_counter > 0)  { // no wait needed
5704     _counter = 0;
5705     status = os::Solaris::mutex_unlock(_mutex);
5706     assert(status == 0, "invariant");
5707     // Paranoia to ensure our locked and lock-free paths interact
5708     // correctly with each other and Java-level accesses.
5709     OrderAccess::fence();
5710     return;
5711   }
5712 
5713 #ifdef ASSERT
5714   // Don't catch signals while blocked; let the running threads have the signals.
5715   // (This allows a debugger to break into the running thread.)
5716   sigset_t oldsigs;
5717   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
5718   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
5719 #endif
5720 
5721   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5722   jt->set_suspend_equivalent();
5723   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5724 
5725   // Do this the hard way by blocking ...
5726   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5727   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5728   // Only for SPARC >= V8PlusA
5729 #if defined(__sparc) && defined(COMPILER2)
5730   if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5731 #endif
5732 
5733   if (time == 0) {
5734     status = os::Solaris::cond_wait(_cond, _mutex);
5735   } else {
5736     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
5737   }
5738   // Note that an untimed cond_wait() can sometimes return ETIME on older
5739   // versions of Solaris.
5740   assert_status(status == 0 || status == EINTR ||
5741                 status == ETIME || status == ETIMEDOUT,
5742                 status, "cond_timedwait");
5743 
5744 #ifdef ASSERT
5745   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
5746 #endif
5747   _counter = 0;
5748   status = os::Solaris::mutex_unlock(_mutex);
5749   assert_status(status == 0, status, "mutex_unlock");
5750   // Paranoia to ensure our locked and lock-free paths interact
5751   // correctly with each other and Java-level accesses.
5752   OrderAccess::fence();
5753 
5754   // If externally suspended while waiting, re-suspend
5755   if (jt->handle_special_suspend_equivalent_condition()) {
5756     jt->java_suspend_self();
5757   }
5758 }
5759 
5760 void Parker::unpark() {
5761   int status = os::Solaris::mutex_lock(_mutex);
5762   assert(status == 0, "invariant");
5763   const int s = _counter;
5764   _counter = 1;
5765   status = os::Solaris::mutex_unlock(_mutex);
5766   assert(status == 0, "invariant");
5767 
5768   if (s < 1) {
5769     status = os::Solaris::cond_signal(_cond);
5770     assert(status == 0, "invariant");
5771   }
5772 }
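// Illustrative sketch (not part of the original file): _counter acts as a
// single, non-accumulating permit, which is the behaviour LockSupport-style
// callers of Parker rely on.  Assuming a Parker* p on which no other thread
// is acting concurrently:
//
//   p->unpark();          // set the permit; wake the owner if it is parked
//   p->unpark();          // redundant - repeated unparks do not accumulate
//   p->park(false, 0);    // consumes the permit and returns immediately
//   p->park(false, 0);    // no permit left, so this call blocks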
5773 
5774 extern char** environ;
5775 
5776 // Run the specified command in a separate process. Return its exit value,
5777 // or -1 on failure (e.g. if a new process cannot be forked).
5778 // Unlike system(), this function can be called from a signal handler. It
5779 // doesn't block SIGINT et al.
5780 int os::fork_and_exec(char* cmd) {
5781   char * argv[4];
5782   argv[0] = (char *)"sh";
5783   argv[1] = (char *)"-c";
5784   argv[2] = cmd;
5785   argv[3] = NULL;
5786 
5787   // fork() is async-signal-safe; fork1() is not, so it cannot be used in a signal handler
5788   pid_t pid;
5789   Thread* t = ThreadLocalStorage::get_thread_slow();
5790   if (t != NULL && t->is_inside_signal_handler()) {
5791     pid = fork();
5792   } else {
5793     pid = fork1();
5794   }
5795 
5796   if (pid < 0) {
5797     // fork failed
5798     warning("fork failed: %s", strerror(errno));
5799     return -1;
5800 
5801   } else if (pid == 0) {
5802     // child process
5803 
5804     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
5805     execve("/usr/bin/sh", argv, environ);
5806 
5807     // execve failed
5808     _exit(-1);
5809 
5810   } else  {
5811     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
5812     // care about the actual exit code, for now.
5813 
5814     int status;
5815 
5816     // Wait for the child process to exit.  This returns immediately if
5817     // the child has already exited.
5818     while (waitpid(pid, &status, 0) < 0) {
5819       switch (errno) {
5820       case ECHILD: return 0;
5821       case EINTR: break;
5822       default: return -1;
5823       }
5824     }
5825 
5826     if (WIFEXITED(status)) {
5827       // The child exited normally; get its exit code.
5828       return WEXITSTATUS(status);
5829     } else if (WIFSIGNALED(status)) {
5830       // The child exited because of a signal
5831       // The best value to return is 0x80 + signal number,
5832       // because that is what all Unix shells do, and because
5833       // it allows callers to distinguish between process exit and
5834       // process death by signal.
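      // For example, a child terminated by SIGKILL (signal 9) yields
      // 0x80 + 9 == 137, matching the exit status a shell would report.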
5835       return 0x80 + WTERMSIG(status);
5836     } else {
5837       // Unknown exit code; pass it through
5838       return status;
5839     }
5840   }
5841 }
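// Usage sketch (illustrative only; the command string and error handling are
// hypothetical, not taken from any caller in this file):
//
//   char cmd[] = "ls /tmp";
//   if (os::fork_and_exec(cmd) == -1) {
//     warning("failed to run command");
//   }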
5842 
5843 // is_headless_jre()
5844 //
5845 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
5846 // in order to report whether we are running in a headless JRE.
5847 //
5848 // Since JDK 8, xawt/libmawt.so has been moved into the same directory
5849 // as libawt.so and renamed libawt_xawt.so.
5850 //
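// For example (illustrative path only), if libjvm.so lives at
//   <jre>/lib/sparcv9/server/libjvm.so
// the two strrchr() calls below reduce buf to <jre>/lib/sparcv9, to which
// xawtstr or new_xawtstr is appended before the stat() checks.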
5851 bool os::is_headless_jre() {
5852   struct stat statbuf;
5853   char buf[MAXPATHLEN];
5854   char libmawtpath[MAXPATHLEN];
5855   const char *xawtstr  = "/xawt/libmawt.so";
5856   const char *new_xawtstr = "/libawt_xawt.so";
5857   char *p;
5858 
5859   // Get path to libjvm.so
5860   os::jvm_path(buf, sizeof(buf));
5861 
5862   // Get rid of libjvm.so
5863   p = strrchr(buf, '/');
5864   if (p == NULL) {
5865     return false;
5866   } else {
5867     *p = '\0';
5868   }
5869 
5870   // Get rid of client or server
5871   p = strrchr(buf, '/');
5872   if (p == NULL) {
5873     return false;
5874   } else {
5875     *p = '\0';
5876   }
5877 
5878   // check xawt/libmawt.so
5879   strcpy(libmawtpath, buf);
5880   strcat(libmawtpath, xawtstr);
5881   if (::stat(libmawtpath, &statbuf) == 0) return false;
5882 
5883   // check libawt_xawt.so
5884   strcpy(libmawtpath, buf);
5885   strcat(libmawtpath, new_xawtstr);
5886   if (::stat(libmawtpath, &statbuf) == 0) return false;
5887 
5888   return true;
5889 }
5890 
5891 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
5892   size_t res;
5893   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5894          "Assumed _thread_in_native");
5895   RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
5896   return res;
5897 }
5898 
5899 int os::close(int fd) {
5900   return ::close(fd);
5901 }
5902 
5903 int os::socket_close(int fd) {
5904   return ::close(fd);
5905 }
5906 
5907 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5908   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5909          "Assumed _thread_in_native");
5910   RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
5911 }
5912 
5913 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5914   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5915          "Assumed _thread_in_native");
5916   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5917 }
5918 
5919 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5920   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5921 }
5922 
5923 // As both poll() and select() can be interrupted by signals, we have to be
5924 // prepared to restart the system call after updating the timeout, unless
5925 // poll() is done with timeout == -1, in which case we simply repeat with this
5926 // "wait forever" value.
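//
// A minimal sketch of the restart-on-EINTR idiom that the RESTARTABLE macros
// used below expand to (assuming the conventional HotSpot definition; the
// fd/buf/nBytes/flags names are placeholders):
//
//   int result;
//   do {
//     result = (int)::recv(fd, buf, nBytes, flags);
//   } while (result == OS_ERR && errno == EINTR);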
5927 
5928 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
5929   int _result;
5930   _result = ::connect(fd, him, len);
5931 
5932   // On Solaris, when a connect() call is interrupted, the connection
5933   // can be established asynchronously (see 6343810). Subsequent calls
5934   // to connect() must check the errno value, which has the semantics
5935   // described below (copied from the connect() man page). Handling
5936   // of asynchronously established connections is required for both
5937   // blocking and non-blocking sockets.
5938   //     EINTR            The  connection  attempt  was   interrupted
5939   //                      before  any data arrived by the delivery of
5940   //                      a signal. The connection, however, will  be
5941   //                      established asynchronously.
5942   //
5943   //     EINPROGRESS      The socket is non-blocking, and the connec-
5944   //                      tion  cannot  be completed immediately.
5945   //
5946   //     EALREADY         The socket is non-blocking,  and a previous
5947   //                      connection  attempt  has  not yet been com-
5948   //                      pleted.
5949   //
5950   //     EISCONN          The socket is already connected.
5951   if (_result == OS_ERR && errno == EINTR) {
5952     // restarting a connect() changes its errno semantics
5953     RESTARTABLE(::connect(fd, him, len), _result);
5954     // undo these changes
5955     if (_result == OS_ERR) {
5956       if (errno == EALREADY) {
5957         errno = EINPROGRESS; // fall through
5958       } else if (errno == EISCONN) {
5959         errno = 0;
5960         return OS_OK;
5961       }
5962     }
5963   }
5964   return _result;
5965 }
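// Caller-side sketch (illustrative, not taken from this file): after the
// normalization above, a non-blocking caller can treat an interrupted and a
// plain in-progress connect uniformly by waiting for writability
// (timeout_ms is a hypothetical value):
//
//   if (os::connect(fd, him, len) == OS_ERR && errno == EINPROGRESS) {
//     struct pollfd pfd = { fd, POLLOUT, 0 };
//     ::poll(&pfd, 1, timeout_ms);
//   }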
5966 
5967 // Get the default path to the core file.
5968 // Returns the length of the string written into the buffer.
5969 int os::get_core_path(char* buffer, size_t bufferSize) {
5970   const char* p = get_current_directory(buffer, bufferSize);
5971 
5972   if (p == NULL) {
5973     assert(p != NULL, "failed to get current directory");
5974     return 0;
5975   }
5976 
5977   return strlen(buffer);
5978 }
5979 
5980 #ifndef PRODUCT
5981 void TestReserveMemorySpecial_test() {
5982   // No tests available for this platform
5983 }
5984 #endif
--- EOF ---