1 /*
   2  * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "jvm_solaris.h"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/filemap.hpp"
  37 #include "mutex_solaris.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "os_share_solaris.hpp"
  40 #include "os_solaris.inline.hpp"
  41 #include "prims/jniFastGetField.hpp"
  42 #include "prims/jvm.h"
  43 #include "prims/jvm_misc.hpp"
  44 #include "runtime/arguments.hpp"
  45 #include "runtime/atomic.inline.hpp"
  46 #include "runtime/extendedPC.hpp"
  47 #include "runtime/globals.hpp"
  48 #include "runtime/interfaceSupport.hpp"
  49 #include "runtime/java.hpp"
  50 #include "runtime/javaCalls.hpp"
  51 #include "runtime/mutexLocker.hpp"
  52 #include "runtime/objectMonitor.hpp"
  53 #include "runtime/orderAccess.inline.hpp"
  54 #include "runtime/osThread.hpp"
  55 #include "runtime/perfMemory.hpp"
  56 #include "runtime/sharedRuntime.hpp"
  57 #include "runtime/statSampler.hpp"
  58 #include "runtime/stubRoutines.hpp"
  59 #include "runtime/thread.inline.hpp"
  60 #include "runtime/threadCritical.hpp"
  61 #include "runtime/timer.hpp"
  62 #include "runtime/vm_version.hpp"
  63 #include "services/attachListener.hpp"
  64 #include "services/memTracker.hpp"
  65 #include "services/runtimeService.hpp"
  66 #include "utilities/decoder.hpp"
  67 #include "utilities/defaultStream.hpp"
  68 #include "utilities/events.hpp"
  69 #include "utilities/growableArray.hpp"
  70 #include "utilities/vmError.hpp"
  71 
  72 // put OS-includes here
  73 # include <dlfcn.h>
  74 # include <errno.h>
  75 # include <exception>
  76 # include <link.h>
  77 # include <poll.h>
  78 # include <pthread.h>
  79 # include <pwd.h>
  80 # include <schedctl.h>
  81 # include <setjmp.h>
  82 # include <signal.h>
  83 # include <stdio.h>
  84 # include <alloca.h>
  85 # include <sys/filio.h>
  86 # include <sys/ipc.h>
  87 # include <sys/lwp.h>
  88 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
  89 # include <sys/mman.h>
  90 # include <sys/processor.h>
  91 # include <sys/procset.h>
  92 # include <sys/pset.h>
  93 # include <sys/resource.h>
  94 # include <sys/shm.h>
  95 # include <sys/socket.h>
  96 # include <sys/stat.h>
  97 # include <sys/systeminfo.h>
  98 # include <sys/time.h>
  99 # include <sys/times.h>
 100 # include <sys/types.h>
 101 # include <sys/wait.h>
 102 # include <sys/utsname.h>
 103 # include <thread.h>
 104 # include <unistd.h>
 105 # include <sys/priocntl.h>
 106 # include <sys/rtpriocntl.h>
 107 # include <sys/tspriocntl.h>
 108 # include <sys/iapriocntl.h>
 109 # include <sys/fxpriocntl.h>
 110 # include <sys/loadavg.h>
 111 # include <string.h>
 112 # include <stdio.h>
 113 
 114 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
 115 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
 116 
 117 #define MAX_PATH (2 * K)
 118 
 119 // for timer info max values which include all bits
 120 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 121 
 122 
 123 // Here are some liblgrp types from sys/lgrp_user.h to be able to
 124 // compile on older systems without this header file.
 125 
 126 #ifndef MADV_ACCESS_LWP
 127   #define  MADV_ACCESS_LWP   7       /* next LWP to access heavily */
 128 #endif
 129 #ifndef MADV_ACCESS_MANY
 130   #define  MADV_ACCESS_MANY  8       /* many processes to access heavily */
 131 #endif
 132 
 133 #ifndef LGRP_RSRC_CPU
 134   #define LGRP_RSRC_CPU      0       /* CPU resources */
 135 #endif
 136 #ifndef LGRP_RSRC_MEM
 137   #define LGRP_RSRC_MEM      1       /* memory resources */
 138 #endif
 139 
 140 // see thr_setprio(3T) for the basis of these numbers
 141 #define MinimumPriority 0
 142 #define NormalPriority  64
 143 #define MaximumPriority 127
 144 
 145 // Values for ThreadPriorityPolicy == 1
 146 int prio_policy1[CriticalPriority+1] = {
 147   -99999,  0, 16,  32,  48,  64,
 148           80, 96, 112, 124, 127, 127 };
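// For example, assuming the table is indexed by Java thread priority (as it
// is by the VM's java-to-OS priority mapping), a thread at NormPriority (5)
// maps to Solaris priority 64, and one at MaxPriority (10) maps to 127.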
 149 
 150 // System parameters used internally
 151 static clock_t clock_tics_per_sec = 100;
 152 
 153 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
 154 static bool enabled_extended_FILE_stdio = false;
 155 
 156 // For diagnostics to print a message once. see run_periodic_checks
 157 static bool check_addr0_done = false;
 158 static sigset_t check_signal_done;
 159 static bool check_signals = true;
 160 
 161 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
 162 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
 163 
 164 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
 165 
 166 
 167 // "default" initializers for missing libc APIs
 168 extern "C" {
 169   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 170   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
 171 
 172   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 173   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
 174 }
 175 
 176 // "default" initializers for pthread-based synchronization
 177 extern "C" {
 178   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 179   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 180 }
 181 
 182 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 183 
 184 // Thread Local Storage
 185 // This is common to all Solaris platforms so it is defined here,
 186 // in this common file.
 187 // The declarations are in the os_cpu threadLS*.hpp files.
 188 //
 189 // Static member initialization for TLS
 190 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
 191 
 192 #ifndef PRODUCT
 193   #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
 194 
 195 int ThreadLocalStorage::_tcacheHit = 0;
 196 int ThreadLocalStorage::_tcacheMiss = 0;
 197 
 198 void ThreadLocalStorage::print_statistics() {
 199   int total = _tcacheMiss+_tcacheHit;
 200   tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
 201                 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
 202 }
 203   #undef _PCT
 204 #endif // PRODUCT
 205 
 206 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
 207                                                         int index) {
 208   Thread *thread = get_thread_slow();
 209   if (thread != NULL) {
 210     address sp = os::current_stack_pointer();
 211     guarantee(thread->_stack_base == NULL ||
 212               (sp <= thread->_stack_base &&
 213               sp >= thread->_stack_base - thread->_stack_size) ||
 214               is_error_reported(),
 215               "sp must be inside of selected thread stack");
 216 
 217     thread->set_self_raw_id(raw_id);  // mark for quick retrieval
 218     _get_thread_cache[index] = thread;
 219   }
 220   return thread;
 221 }
 222 
 223 
 224 static const double all_zero[sizeof(Thread) / sizeof(double) + 1] = {0};
 225 #define NO_CACHED_THREAD ((Thread*)all_zero)
 226 
 227 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
 228 
 229   // Store the new value before updating the cache to prevent a race
 230   // between get_thread_via_cache_slowly() and this store operation.
 231   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
 232 
 233   // Update thread cache with new thread if setting on thread create,
 234   // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
 235   uintptr_t raw = pd_raw_thread_id();
 236   int ix = pd_cache_index(raw);
 237   _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
 238 }
 239 
 240 void ThreadLocalStorage::pd_init() {
 241   for (int i = 0; i < _pd_cache_size; i++) {
 242     _get_thread_cache[i] = NO_CACHED_THREAD;
 243   }
 244 }
 245 
 246 // Invalidate all the caches (happens to be the same as pd_init).
 247 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
 248 
 249 #undef NO_CACHED_THREAD
 250 
 251 // END Thread Local Storage
 252 
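// Sanity-adjust a stack size reported by the OS: a negative size is treated
// as effectively unlimited (see 4759953 below), and the size is clamped so
// the stack cannot extend below address zero (see 4812466 below).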
 253 static inline size_t adjust_stack_size(address base, size_t size) {
 254   if ((ssize_t)size < 0) {
 255     // 4759953: Compensate for ridiculous stack size.
 256     size = max_intx;
 257   }
 258   if (size > (size_t)base) {
 259     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
 260     size = (size_t)base;
 261   }
 262   return size;
 263 }
 264 
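// Ask libthread for the current thread's stack segment via thr_stksegment(),
// sanity-adjust the reported size, and verify that this frame actually lies
// within the returned segment.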
 265 static inline stack_t get_stack_info() {
 266   stack_t st;
 267   int retval = thr_stksegment(&st);
 268   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
 269   assert(retval == 0, "incorrect return value from thr_stksegment");
 270   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
 271   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
 272   return st;
 273 }
 274 
 275 address os::current_stack_base() {
 276   int r = thr_main();
 277   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
 278   bool is_primordial_thread = r;
 279 
 280   // Workaround 4352906, avoid calls to thr_stksegment by
 281   // thr_main after the first one (it looks like we trash
 282   // some data, causing the value for ss_sp to be incorrect).
 283   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
 284     stack_t st = get_stack_info();
 285     if (is_primordial_thread) {
 286       // cache initial value of stack base
 287       os::Solaris::_main_stack_base = (address)st.ss_sp;
 288     }
 289     return (address)st.ss_sp;
 290   } else {
 291     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
 292     return os::Solaris::_main_stack_base;
 293   }
 294 }
 295 
 296 size_t os::current_stack_size() {
 297   size_t size;
 298 
 299   int r = thr_main();
 300   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
 301   if (!r) {
 302     size = get_stack_info().ss_size;
 303   } else {
 304     struct rlimit limits;
 305     getrlimit(RLIMIT_STACK, &limits);
 306     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
 307   }
 308   // base may not be page aligned
 309   address base = current_stack_base();
 310   address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
 311   return (size_t)(base - bottom);
 312 }
 313 
 314 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
 315   return localtime_r(clock, res);
 316 }
 317 
 318 void os::Solaris::try_enable_extended_io() {
 319   typedef int (*enable_extended_FILE_stdio_t)(int, int);
 320 
 321   if (!UseExtendedFileIO) {
 322     return;
 323   }
 324 
 325   enable_extended_FILE_stdio_t enabler =
 326     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
 327                                          "enable_extended_FILE_stdio");
 328   if (enabler) {
 329     enabler(-1, -1);
 330   }
 331 }
 332 
 333 static int _processors_online = 0;
 334 
 335 jint os::Solaris::_os_thread_limit = 0;
 336 volatile jint os::Solaris::_os_thread_count = 0;
 337 
 338 julong os::available_memory() {
 339   return Solaris::available_memory();
 340 }
 341 
 342 julong os::Solaris::available_memory() {
 343   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
 344 }
 345 
 346 julong os::Solaris::_physical_memory = 0;
 347 
 348 julong os::physical_memory() {
 349   return Solaris::physical_memory();
 350 }
 351 
 352 static hrtime_t first_hrtime = 0;
 353 static const hrtime_t hrtime_hz = 1000*1000*1000;
 354 static volatile hrtime_t max_hrtime = 0;
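// first_hrtime is the zero point used by elapsedTime()/elapsed_counter(),
// hrtime_hz is the fixed nanosecond resolution of gethrtime(), and
// max_hrtime is the high-water mark that keeps getTimeNanos() (below)
// from ever moving backwards.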
 355 
 356 
 357 void os::Solaris::initialize_system_info() {
 358   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
 359   _processors_online = sysconf(_SC_NPROCESSORS_ONLN);
 360   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) *
 361                                      (julong)sysconf(_SC_PAGESIZE);
 362 }
 363 
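// Number of CPUs actually available to this process: the size of the
// processor set the process is bound to, if any, otherwise the number of
// online CPUs.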
 364 int os::active_processor_count() {
 365   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 366   pid_t pid = getpid();
 367   psetid_t pset = PS_NONE;
 368   // Are we running in a processor set or is there any processor set around?
 369   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
 370     uint_t pset_cpus;
 371     // Query the number of cpus available to us.
 372     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
 373       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
 374       _processors_online = pset_cpus;
 375       return pset_cpus;
 376     }
 377   }
 378   // Otherwise return number of online cpus
 379   return online_cpus;
 380 }
 381 
 382 static bool find_processors_in_pset(psetid_t        pset,
 383                                     processorid_t** id_array,
 384                                     uint_t*         id_length) {
 385   bool result = false;
 386   // Find the number of processors in the processor set.
 387   if (pset_info(pset, NULL, id_length, NULL) == 0) {
 388     // Make up an array to hold their ids.
 389     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 390     // Fill in the array with their processor ids.
 391     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
 392       result = true;
 393     }
 394   }
 395   return result;
 396 }
 397 
 398 // Callers of find_processors_online() must tolerate imprecise results --
 399 // the system configuration can change asynchronously because of DR
 400 // or explicit psradm operations.
 401 //
 402 // We also need to take care that the loop (below) terminates as the
 403 // number of processors online can change between the _SC_NPROCESSORS_ONLN
 404 // request and the loop that builds the list of processor ids.   Unfortunately
 405 // there's no reliable way to determine the maximum valid processor id,
 406 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
 407 // man pages, which claim the processor id set is "sparse, but
 408 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
 409 // exit the loop.
 410 //
 411 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
 412 // not available on S8.0.
 413 
 414 static bool find_processors_online(processorid_t** id_array,
 415                                    uint*           id_length) {
 416   const processorid_t MAX_PROCESSOR_ID = 100000;
 417   // Find the number of processors online.
 418   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
 419   // Make up an array to hold their ids.
 420   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 421   // Processors need not be numbered consecutively.
 422   long found = 0;
 423   processorid_t next = 0;
 424   while (found < *id_length && next < MAX_PROCESSOR_ID) {
 425     processor_info_t info;
 426     if (processor_info(next, &info) == 0) {
 427       // NB, PI_NOINTR processors are effectively online ...
 428       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
 429         (*id_array)[found] = next;
 430         found += 1;
 431       }
 432     }
 433     next += 1;
 434   }
 435   if (found < *id_length) {
 436     // The loop above didn't identify the expected number of processors.
 437     // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
 438     // and re-running the loop, above, but there's no guarantee of progress
 439     // if the system configuration is in flux.  Instead, we just return what
 440     // we've got.  Note that in the worst case find_processors_online() could
 441     // return an empty set.  (As a fall-back in the case of the empty set we
 442     // could just return the ID of the current processor).
 443     *id_length = found;
 444   }
 445 
 446   return true;
 447 }
 448 
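// Choose distribution_length processor ids from id_array (of length
// id_length) and store them in distribution, stepping across "boards" of
// ProcessDistributionStride processors so the chosen ids are spread out
// rather than packed onto consecutive processors. Fails only if there are
// fewer available processors than requested.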
 449 static bool assign_distribution(processorid_t* id_array,
 450                                 uint           id_length,
 451                                 uint*          distribution,
 452                                 uint           distribution_length) {
 453   // We assume we can assign processorid_t's to uint's.
 454   assert(sizeof(processorid_t) == sizeof(uint),
 455          "can't convert processorid_t to uint");
 456   // Quick check to see if we won't succeed.
 457   if (id_length < distribution_length) {
 458     return false;
 459   }
 460   // Assign processor ids to the distribution.
 461   // Try to shuffle processors to distribute work across boards,
 462   // assuming 4 processors per board.
 463   const uint processors_per_board = ProcessDistributionStride;
 464   // Find the maximum processor id.
 465   processorid_t max_id = 0;
 466   for (uint m = 0; m < id_length; m += 1) {
 467     max_id = MAX2(max_id, id_array[m]);
 468   }
 469   // The next id, to limit loops.
 470   const processorid_t limit_id = max_id + 1;
 471   // Make up markers for available processors.
 472   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
 473   for (uint c = 0; c < limit_id; c += 1) {
 474     available_id[c] = false;
 475   }
 476   for (uint a = 0; a < id_length; a += 1) {
 477     available_id[id_array[a]] = true;
 478   }
 479   // Step by "boards", then by "slot", copying to "assigned".
 480   // NEEDS_CLEANUP: The assignment of processors should be stateful,
 481   //                remembering which processors have been assigned by
 482   //                previous calls, etc., so as to distribute several
 483   //                independent calls of this method.  It would be nice
 484   //                to have an API that let us ask how many processes
 485   //                are bound to a processor, but we don't have that,
 486   //                either.
 487   //                In the short term, "board" is static so that
 488   //                subsequent distributions don't all start at board 0.
 489   static uint board = 0;
 490   uint assigned = 0;
 491   // Until we've found enough processors ....
 492   while (assigned < distribution_length) {
 493     // ... find the next available processor in the board.
 494     for (uint slot = 0; slot < processors_per_board; slot += 1) {
 495       uint try_id = board * processors_per_board + slot;
 496       if ((try_id < limit_id) && (available_id[try_id] == true)) {
 497         distribution[assigned] = try_id;
 498         available_id[try_id] = false;
 499         assigned += 1;
 500         break;
 501       }
 502     }
 503     board += 1;
 504     if (board * processors_per_board + 0 >= limit_id) {
 505       board = 0;
 506     }
 507   }
 508   if (available_id != NULL) {
 509     FREE_C_HEAP_ARRAY(bool, available_id);
 510   }
 511   return true;
 512 }
 513 
 514 void os::set_native_thread_name(const char *name) {
 515   // Not yet implemented.
 516   return;
 517 }
 518 
 519 bool os::distribute_processes(uint length, uint* distribution) {
 520   bool result = false;
 521   // Find the processor id's of all the available CPUs.
 522   processorid_t* id_array  = NULL;
 523   uint           id_length = 0;
 524   // There are some races between querying information and using it,
 525   // since processor sets can change dynamically.
 526   psetid_t pset = PS_NONE;
 527   // Are we running in a processor set?
 528   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
 529     result = find_processors_in_pset(pset, &id_array, &id_length);
 530   } else {
 531     result = find_processors_online(&id_array, &id_length);
 532   }
 533   if (result == true) {
 534     if (id_length >= length) {
 535       result = assign_distribution(id_array, id_length, distribution, length);
 536     } else {
 537       result = false;
 538     }
 539   }
 540   if (id_array != NULL) {
 541     FREE_C_HEAP_ARRAY(processorid_t, id_array);
 542   }
 543   return result;
 544 }
 545 
 546 bool os::bind_to_processor(uint processor_id) {
 547   // We assume that a processorid_t can be stored in a uint.
 548   assert(sizeof(uint) == sizeof(processorid_t),
 549          "can't convert uint to processorid_t");
 550   int bind_result =
 551     processor_bind(P_LWPID,                       // bind LWP.
 552                    P_MYID,                        // bind current LWP.
 553                    (processorid_t) processor_id,  // id.
 554                    NULL);                         // don't return old binding.
 555   return (bind_result == 0);
 556 }
 557 
 558 // Return true if the process is running with special (set-uid/set-gid) privileges.
 559 
 560 bool os::have_special_privileges() {
 561   static bool init = false;
 562   static bool privileges = false;
 563   if (!init) {
 564     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 565     init = true;
 566   }
 567   return privileges;
 568 }
 569 
 570 
 571 void os::init_system_properties_values() {
 572   // The next steps are taken in the product version:
 573   //
 574   // Obtain the JAVA_HOME value from the location of libjvm.so.
 575   // This library should be located at:
 576   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
 577   //
 578   // If "/jre/lib/" appears at the right place in the path, then we
 579   // assume libjvm.so is installed in a JDK and we use this path.
 580   //
 581   // Otherwise exit with message: "Could not create the Java virtual machine."
 582   //
 583   // The following extra steps are taken in the debugging version:
 584   //
 585   // If "/jre/lib/" does NOT appear at the right place in the path,
 586   // then instead of exiting we check the $JAVA_HOME environment variable.
 587   //
 588   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
 589   // then we append a fake suffix "hotspot/libjvm.so" to this path so
 590   // it looks like libjvm.so is installed there
 591   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
 592   //
 593   // Otherwise exit.
 594   //
 595   // Important note: if the location of libjvm.so changes this
 596   // code needs to be changed accordingly.
 597 
 598 // Base path of extensions installed on the system.
 599 #define SYS_EXT_DIR     "/usr/jdk/packages"
 600 #define EXTENSIONS_DIR  "/lib/ext"
 601 
 602   char cpu_arch[12];
 603   // Buffer that fits several sprintfs.
 604   // Note that the space for the colon and the trailing null are provided
 605   // by the nulls included by the sizeof operator.
 606   const size_t bufsize =
 607     MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
 608          sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(cpu_arch), // invariant ld_library_path
 609          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir
 610   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 611 
 612   // sysclasspath, java_home, dll_dir
 613   {
 614     char *pslash;
 615     os::jvm_path(buf, bufsize);
 616 
 617     // Found the full path to libjvm.so.
 618     // Now cut the path to <java_home>/jre if we can.
 619     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 620     pslash = strrchr(buf, '/');
 621     if (pslash != NULL) {
 622       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 623     }
 624     Arguments::set_dll_dir(buf);
 625 
 626     if (pslash != NULL) {
 627       pslash = strrchr(buf, '/');
 628       if (pslash != NULL) {
 629         *pslash = '\0';          // Get rid of /<arch>.
 630         pslash = strrchr(buf, '/');
 631         if (pslash != NULL) {
 632           *pslash = '\0';        // Get rid of /lib.
 633         }
 634       }
 635     }
 636     Arguments::set_java_home(buf);
 637     set_boot_path('/', ':');
 638   }
 639 
 640   // Where to look for native libraries.
 641   {
 642     // Use dlinfo() to determine the correct java.library.path.
 643     //
 644     // If we're launched by the Java launcher, and the user
 645     // does not set java.library.path explicitly on the commandline,
 646     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
 647     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
 648     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
 649     // /usr/lib), which is exactly what we want.
 650     //
 651     // If the user does set java.library.path, it completely
 652     // overwrites this setting, and always has.
 653     //
 654     // If we're not launched by the Java launcher, we may
 655     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
 656     // settings.  Again, dlinfo does exactly what we want.
 657 
 658     Dl_serinfo     info_sz, *info = &info_sz;
 659     Dl_serpath     *path;
 660     char           *library_path;
 661     char           *common_path = buf;
 662 
 663     // Determine search path count and required buffer size.
 664     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
 665       FREE_C_HEAP_ARRAY(char, buf);
 666       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
 667     }
 668 
 669     // Allocate new buffer and initialize.
 670     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
 671     info->dls_size = info_sz.dls_size;
 672     info->dls_cnt = info_sz.dls_cnt;
 673 
 674     // Obtain search path information.
 675     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
 676       FREE_C_HEAP_ARRAY(char, buf);
 677       FREE_C_HEAP_ARRAY(char, info);
 678       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
 679     }
 680 
 681     path = &info->dls_serpath[0];
 682 
 683     // Note: Due to a legacy implementation, most of the library path
 684     // is set in the launcher. This was to accommodate linking restrictions
 685     // on legacy Solaris implementations (which are no longer supported).
 686     // Eventually, all the library path setting will be done here.
 687     //
 688     // However, to prevent the proliferation of improperly built native
 689     // libraries, the new path component /usr/jdk/packages is added here.
 690 
 691     // Determine the actual CPU architecture.
 692     sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
 693 #ifdef _LP64
 694     // If we are a 64-bit vm, perform the following translations:
 695     //   sparc   -> sparcv9
 696     //   i386    -> amd64
 697     if (strcmp(cpu_arch, "sparc") == 0) {
 698       strcat(cpu_arch, "v9");
 699     } else if (strcmp(cpu_arch, "i386") == 0) {
 700       strcpy(cpu_arch, "amd64");
 701     }
 702 #endif
 703 
 704     // Construct the invariant part of ld_library_path.
 705     sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
 706 
 707     // Struct size is more than sufficient for the path components obtained
 708     // through the dlinfo() call, so only add additional space for the path
 709     // components explicitly added here.
 710     size_t library_path_size = info->dls_size + strlen(common_path);
 711     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
 712     library_path[0] = '\0';
 713 
 714     // Construct the desired Java library path from the linker's library
 715     // search path.
 716     //
 717     // For compatibility, it is optimal that we insert the additional path
 718     // components specific to the Java VM after those components specified
 719     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
 720     // infrastructure.
 721     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
 722       strcpy(library_path, common_path);
 723     } else {
 724       int inserted = 0;
 725       int i;
 726       for (i = 0; i < info->dls_cnt; i++, path++) {
 727         uint_t flags = path->dls_flags & LA_SER_MASK;
 728         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
 729           strcat(library_path, common_path);
 730           strcat(library_path, os::path_separator());
 731           inserted = 1;
 732         }
 733         strcat(library_path, path->dls_name);
 734         strcat(library_path, os::path_separator());
 735       }
 736       // Eliminate trailing path separator.
 737       library_path[strlen(library_path)-1] = '\0';
 738     }
 739 
 740     // happens before argument parsing - can't use a trace flag
 741     // tty->print_raw("init_system_properties_values: native lib path: ");
 742     // tty->print_raw_cr(library_path);
 743 
 744     // Callee copies into its own buffer.
 745     Arguments::set_library_path(library_path);
 746 
 747     FREE_C_HEAP_ARRAY(char, library_path);
 748     FREE_C_HEAP_ARRAY(char, info);
 749   }
 750 
 751   // Extensions directories.
 752   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
 753   Arguments::set_ext_dirs(buf);
 754 
 755   FREE_C_HEAP_ARRAY(char, buf);
 756 
 757 #undef SYS_EXT_DIR
 758 #undef EXTENSIONS_DIR
 759 }
 760 
 761 void os::breakpoint() {
 762   BREAKPOINT;
 763 }
 764 
 765 bool os::obsolete_option(const JavaVMOption *option) {
 766   if (!strncmp(option->optionString, "-Xt", 3)) {
 767     return true;
 768   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
 769     return true;
 770   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
 771     return true;
 772   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
 773     return true;
 774   }
 775   return false;
 776 }
 777 
 778 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
 779   address  stackStart  = (address)thread->stack_base();
 780   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
 781   if (sp < stackStart && sp >= stackEnd) return true;
 782   return false;
 783 }
 784 
 785 extern "C" void breakpoint() {
 786   // use debugger to set breakpoint here
 787 }
 788 
 789 static thread_t main_thread;
 790 
 791 // Thread start routine for all new Java threads
 792 extern "C" void* java_start(void* thread_addr) {
 793   // Try to randomize the cache line index of hot stack frames.
 794   // This helps when threads of the same stack traces evict each other's
 795   // cache lines. The threads can be either from the same JVM instance, or
 796   // from different JVM instances. The benefit is especially true for
 797   // processors with hyperthreading technology.
 798   static int counter = 0;
 799   int pid = os::current_process_id();
 800   alloca(((pid ^ counter++) & 7) * 128);
 801 
 802   int prio;
 803   Thread* thread = (Thread*)thread_addr;
 804   OSThread* osthr = thread->osthread();
 805 
 806   osthr->set_lwp_id(_lwp_self());  // Store lwp in case we are bound
 807   thread->_schedctl = (void *) schedctl_init();
 808 
 809   if (UseNUMA) {
 810     int lgrp_id = os::numa_get_group_id();
 811     if (lgrp_id != -1) {
 812       thread->set_lgrp_id(lgrp_id);
 813     }
 814   }
 815 
 816   // If the creator called set priority before we started,
 817   // we need to call set_native_priority now that we have an lwp.
 818   // We used to get the priority from thr_getprio (we called
 819   // thr_setprio way back in create_thread) and pass it to
 820   // set_native_priority, but Solaris scales the priority
 821   // in java_to_os_priority, so when we read it back here,
 822   // we pass trash to set_native_priority instead of what's
 823   // in java_to_os_priority. So we save the native priority
 824   // in the osThread and recall it here.
 825 
 826   if (osthr->thread_id() != -1) {
 827     if (UseThreadPriorities) {
 828       int prio = osthr->native_priority();
 829       if (ThreadPriorityVerbose) {
 830         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
 831                       INTPTR_FORMAT ", setting priority: %d\n",
 832                       osthr->thread_id(), osthr->lwp_id(), prio);
 833       }
 834       os::set_native_priority(thread, prio);
 835     }
 836   } else if (ThreadPriorityVerbose) {
 837     warning("Can't set priority in _start routine, thread id hasn't been set\n");
 838   }
 839 
 840   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 841 
 842   // initialize signal mask for this thread
 843   os::Solaris::hotspot_sigmask(thread);
 844 
 845   thread->run();
 846 
 847   // One less thread is executing
 848   // When the VMThread gets here, the main thread may have already exited
 849   // which frees the CodeHeap containing the Atomic::dec code
 850   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 851     Atomic::dec(&os::Solaris::_os_thread_count);
 852   }
 853 
 854   if (UseDetachedThreads) {
 855     thr_exit(NULL);
 856     ShouldNotReachHere();
 857   }
 858   return NULL;
 859 }
 860 
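// Wrap an already-existing Solaris thread (identified by thread_id) in a
// freshly allocated OSThread in state INITIALIZED. Used for the main thread
// and for threads attaching to the VM, as opposed to threads the VM creates
// itself in os::create_thread().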
 861 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
 862   // Allocate the OSThread object
 863   OSThread* osthread = new OSThread(NULL, NULL);
 864   if (osthread == NULL) return NULL;
 865 
 866   // Store info on the Solaris thread into the OSThread
 867   osthread->set_thread_id(thread_id);
 868   osthread->set_lwp_id(_lwp_self());
 869   thread->_schedctl = (void *) schedctl_init();
 870 
 871   if (UseNUMA) {
 872     int lgrp_id = os::numa_get_group_id();
 873     if (lgrp_id != -1) {
 874       thread->set_lgrp_id(lgrp_id);
 875     }
 876   }
 877 
 878   if (ThreadPriorityVerbose) {
 879     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
 880                   osthread->thread_id(), osthread->lwp_id());
 881   }
 882 
 883   // Initial thread state is INITIALIZED, not SUSPENDED
 884   osthread->set_state(INITIALIZED);
 885 
 886   return osthread;
 887 }
 888 
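// Establish the HotSpot signal mask for the given thread: remember the
// caller's mask, unblock the signals the VM must always receive, and
// (unless -Xrs/ReduceSignalUsage is set) leave BREAK_SIGNAL unblocked only
// in the VM thread.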
 889 void os::Solaris::hotspot_sigmask(Thread* thread) {
 890   // Save the caller's signal mask
 891   sigset_t sigmask;
 892   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
 893   OSThread *osthread = thread->osthread();
 894   osthread->set_caller_sigmask(sigmask);
 895 
 896   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
 897   if (!ReduceSignalUsage) {
 898     if (thread->is_VM_thread()) {
 899       // Only the VM thread handles BREAK_SIGNAL ...
 900       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
 901     } else {
 902       // ... all other threads block BREAK_SIGNAL
 903       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
 904       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
 905     }
 906   }
 907 }
 908 
 909 bool os::create_attached_thread(JavaThread* thread) {
 910 #ifdef ASSERT
 911   thread->verify_not_published();
 912 #endif
 913   OSThread* osthread = create_os_thread(thread, thr_self());
 914   if (osthread == NULL) {
 915     return false;
 916   }
 917 
 918   // Initial thread state is RUNNABLE
 919   osthread->set_state(RUNNABLE);
 920   thread->set_osthread(osthread);
 921 
 922   // initialize signal mask for this thread
 923   // and save the caller's signal mask
 924   os::Solaris::hotspot_sigmask(thread);
 925 
 926   return true;
 927 }
 928 
 929 bool os::create_main_thread(JavaThread* thread) {
 930 #ifdef ASSERT
 931   thread->verify_not_published();
 932 #endif
 933   if (_starting_thread == NULL) {
 934     _starting_thread = create_os_thread(thread, main_thread);
 935     if (_starting_thread == NULL) {
 936       return false;
 937     }
 938   }
 939 
 940   // The primordial thread is runnable from the start
 941   _starting_thread->set_state(RUNNABLE);
 942 
 943   thread->set_osthread(_starting_thread);
 944 
 945   // initialize signal mask for this thread
 946   // and save the caller's signal mask
 947   os::Solaris::hotspot_sigmask(thread);
 948 
 949   return true;
 950 }
 951 
 952 
 953 bool os::create_thread(Thread* thread, ThreadType thr_type,
 954                        size_t stack_size) {
 955   // Allocate the OSThread object
 956   OSThread* osthread = new OSThread(NULL, NULL);
 957   if (osthread == NULL) {
 958     return false;
 959   }
 960 
 961   if (ThreadPriorityVerbose) {
 962     char *thrtyp;
 963     switch (thr_type) {
 964     case vm_thread:
 965       thrtyp = (char *)"vm";
 966       break;
 967     case cgc_thread:
 968       thrtyp = (char *)"cgc";
 969       break;
 970     case pgc_thread:
 971       thrtyp = (char *)"pgc";
 972       break;
 973     case java_thread:
 974       thrtyp = (char *)"java";
 975       break;
 976     case compiler_thread:
 977       thrtyp = (char *)"compiler";
 978       break;
 979     case watcher_thread:
 980       thrtyp = (char *)"watcher";
 981       break;
 982     default:
 983       thrtyp = (char *)"unknown";
 984       break;
 985     }
 986     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
 987   }
 988 
 989   // Calculate stack size if it's not specified by caller.
 990   if (stack_size == 0) {
 991     // The default stack size 1M (2M for LP64).
 992     // The default stack size is 1M (2M for LP64).
 993 
 994     switch (thr_type) {
 995     case os::java_thread:
 996       // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
 997       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
 998       break;
 999     case os::compiler_thread:
1000       if (CompilerThreadStackSize > 0) {
1001         stack_size = (size_t)(CompilerThreadStackSize * K);
1002         break;
1003       } // else fall through:
1004         // use VMThreadStackSize if CompilerThreadStackSize is not defined
1005     case os::vm_thread:
1006     case os::pgc_thread:
1007     case os::cgc_thread:
1008     case os::watcher_thread:
1009       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1010       break;
1011     }
1012   }
1013   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1014 
1015   // Initial state is ALLOCATED but not INITIALIZED
1016   osthread->set_state(ALLOCATED);
1017 
1018   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
1019     // We got lots of threads. Check if we still have some address space left.
1020     // We need at least 20 MB (VirtualMemoryBangSize) of unreserved address
1021     // space, which we check by trying to reserve some.
1022     const size_t VirtualMemoryBangSize = 20*K*K;
1023     char* mem = os::reserve_memory(VirtualMemoryBangSize);
1024     if (mem == NULL) {
1025       delete osthread;
1026       return false;
1027     } else {
1028       // Release the memory again
1029       os::release_memory(mem, VirtualMemoryBangSize);
1030     }
1031   }
1032 
1033   // Setup osthread because the child thread may need it.
1034   thread->set_osthread(osthread);
1035 
1036   // Create the Solaris thread
1037   thread_t tid = 0;
1038   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
1039   int      status;
1040 
1041   // Mark that we don't have an lwp or thread id yet.
1042   // In case we attempt to set the priority before the thread starts.
1043   osthread->set_lwp_id(-1);
1044   osthread->set_thread_id(-1);
1045 
1046   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1047   if (status != 0) {
1048     if (PrintMiscellaneous && (Verbose || WizardMode)) {
1049       perror("os::create_thread");
1050     }
1051     thread->set_osthread(NULL);
1052     // Need to clean up stuff we've allocated so far
1053     delete osthread;
1054     return false;
1055   }
1056 
1057   Atomic::inc(&os::Solaris::_os_thread_count);
1058 
1059   // Store info on the Solaris thread into the OSThread
1060   osthread->set_thread_id(tid);
1061 
1062   // Remember that we created this thread so we can set priority on it
1063   osthread->set_vm_created();
1064 
1065   // Initial thread state is INITIALIZED, not SUSPENDED
1066   osthread->set_state(INITIALIZED);
1067 
1068   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1069   return true;
1070 }
1071 
1072 // SIGJVM1 and SIGJVM2 are defined on Solaris 10 and later. Defining them
1073 // here as well lets builds on earlier versions of Solaris take advantage of
1074 // the newly reserved Solaris JVM signals. With SIGJVM1/SIGJVM2, INTERRUPT_SIGNAL
1075 // is SIGJVM1, ASYNC_SIGNAL is SIGJVM2, and -XX:+UseAltSigs does nothing since
1076 // these signals have no conflicts to avoid.
1077 #if !defined(SIGJVM1)
1078   #define SIGJVM1 39
1079   #define SIGJVM2 40
1080 #endif
1081 
1082 debug_only(static bool signal_sets_initialized = false);
1083 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1084 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1085 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1086 
1087 bool os::Solaris::is_sig_ignored(int sig) {
1088   struct sigaction oact;
1089   sigaction(sig, (struct sigaction*)NULL, &oact);
1090   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
1091                                  : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
1092   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
1093     return true;
1094   } else {
1095     return false;
1096   }
1097 }
1098 
1099 // Note: SIGRTMIN is a macro that calls sysconf() so it will
1100 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime
1101 static bool isJVM1available() {
1102   return SIGJVM1 < SIGRTMIN;
1103 }
1104 
1105 void os::Solaris::signal_sets_init() {
1106   // Should also have an assertion stating we are still single-threaded.
1107   assert(!signal_sets_initialized, "Already initialized");
1108   // Fill in signals that are necessarily unblocked for all threads in
1109   // the VM. Currently, we unblock the following signals:
1110   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1111   //                         by -Xrs (=ReduceSignalUsage));
1112   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1113   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1114   // the dispositions or masks wrt these signals.
1115   // Programs embedding the VM that want to use the above signals for their
1116   // own purposes must, at this time, use the "-Xrs" option to prevent
1117   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1118   // (See bug 4345157, and other related bugs).
1119   // In reality, though, unblocking these signals is really a nop, since
1120   // these signals are not blocked by default.
1121   sigemptyset(&unblocked_sigs);
1122   sigemptyset(&allowdebug_blocked_sigs);
1123   sigaddset(&unblocked_sigs, SIGILL);
1124   sigaddset(&unblocked_sigs, SIGSEGV);
1125   sigaddset(&unblocked_sigs, SIGBUS);
1126   sigaddset(&unblocked_sigs, SIGFPE);
1127 
1128   if (isJVM1available()) {
1129     os::Solaris::set_SIGinterrupt(SIGJVM1);
1130     os::Solaris::set_SIGasync(SIGJVM2);
1131   } else if (UseAltSigs) {
1132     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1133     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1134   } else {
1135     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1136     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1137   }
1138 
1139   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1140   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1141 
1142   if (!ReduceSignalUsage) {
1143     if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1144       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1145       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1146     }
1147     if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1148       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1149       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1150     }
1151     if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1152       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1153       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1154     }
1155   }
1156   // Fill in signals that are blocked by all but the VM thread.
1157   sigemptyset(&vm_sigs);
1158   if (!ReduceSignalUsage) {
1159     sigaddset(&vm_sigs, BREAK_SIGNAL);
1160   }
1161   debug_only(signal_sets_initialized = true);
1162 
1163   // For diagnostics only used in run_periodic_checks
1164   sigemptyset(&check_signal_done);
1165 }
1166 
1167 // These are signals that are unblocked while a thread is running Java.
1168 // (For some reason, they get blocked by default.)
1169 sigset_t* os::Solaris::unblocked_signals() {
1170   assert(signal_sets_initialized, "Not initialized");
1171   return &unblocked_sigs;
1172 }
1173 
1174 // These are the signals that are blocked while a (non-VM) thread is
1175 // running Java. Only the VM thread handles these signals.
1176 sigset_t* os::Solaris::vm_signals() {
1177   assert(signal_sets_initialized, "Not initialized");
1178   return &vm_sigs;
1179 }
1180 
1181 // These are signals that are blocked during cond_wait to allow debugger in
1182 sigset_t* os::Solaris::allowdebug_blocked_signals() {
1183   assert(signal_sets_initialized, "Not initialized");
1184   return &allowdebug_blocked_sigs;
1185 }
1186 
1187 
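// Terminate handler installed via std::set_terminate() in
// os::initialize_thread(); turns an uncaught C++ exception into a fatal
// VM error report rather than a silent abort.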
1188 void _handle_uncaught_cxx_exception() {
1189   VMError err("An uncaught C++ exception");
1190   err.report_and_die();
1191 }
1192 
1193 
1194 // First crack at OS-specific initialization, from inside the new thread.
1195 void os::initialize_thread(Thread* thr) {
1196   int r = thr_main();
1197   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
1198   if (r) {
1199     JavaThread* jt = (JavaThread *)thr;
1200     assert(jt != NULL, "Sanity check");
1201     size_t stack_size;
1202     address base = jt->stack_base();
1203     if (Arguments::created_by_java_launcher()) {
1204       // Use 2MB to allow for Solaris 7 64 bit mode.
1205       stack_size = JavaThread::stack_size_at_create() == 0
1206         ? 2048*K : JavaThread::stack_size_at_create();
1207 
1208       // There are rare cases when we may have already used more than
1209       // the basic stack size allotment before this method is invoked.
1210       // Attempt to allow for a normally sized java_stack.
1211       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1212       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1213     } else {
1214       // 6269555: If we were not created by a Java launcher, i.e. if we are
1215       // running embedded in a native application, treat the primordial thread
1216       // as much like a native attached thread as possible.  This means using
1217       // the current stack size from thr_stksegment(), unless it is too large
1218       // to reliably setup guard pages.  A reasonable max size is 8MB.
1219       size_t current_size = current_stack_size();
1220       // This should never happen, but just in case....
1221       if (current_size == 0) current_size = 2 * K * K;
1222       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1223     }
1224     address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
1225     stack_size = (size_t)(base - bottom);
1226 
1227     assert(stack_size > 0, "Stack size calculation problem");
1228 
1229     if (stack_size > jt->stack_size()) {
1230 #ifndef PRODUCT
1231       struct rlimit limits;
1232       getrlimit(RLIMIT_STACK, &limits);
1233       size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1234       assert(size >= jt->stack_size(), "Stack size problem in main thread");
1235 #endif
1236       tty->print_cr("Stack size of %d Kb exceeds current limit of %d Kb.\n"
1237                     "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1238                     "See limit(1) to increase the stack size limit.",
1239                     stack_size / K, jt->stack_size() / K);
1240       vm_exit(1);
1241     }
1242     assert(jt->stack_size() >= stack_size,
1243            "Attempt to map more stack than was allocated");
1244     jt->set_stack_size(stack_size);
1245   }
1246 
1247   // With the T2 libthread (T1 is no longer supported) threads are always bound
1248   // and we use stackbanging in all cases.
1249 
1250   os::Solaris::init_thread_fpu_state();
1251   std::set_terminate(_handle_uncaught_cxx_exception);
1252 }
1253 
1254 
1255 
1256 // Free Solaris resources related to the OSThread
1257 void os::free_thread(OSThread* osthread) {
1258   assert(osthread != NULL, "os::free_thread but osthread not set");
1259 
1260 
1261   // We are told to free resources of the argument thread,
1262   // but we can only really operate on the current thread.
1263   // The main thread must take the VMThread down synchronously
1264   // before the main thread exits and frees up CodeHeap
1265   guarantee((Thread::current()->osthread() == osthread
1266              || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1267   if (Thread::current()->osthread() == osthread) {
1268     // Restore caller's signal mask
1269     sigset_t sigmask = osthread->caller_sigmask();
1270     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1271   }
1272   delete osthread;
1273 }
1274 
1275 void os::pd_start_thread(Thread* thread) {
1276   int status = thr_continue(thread->osthread()->thread_id());
1277   assert_status(status == 0, status, "thr_continue failed");
1278 }
1279 
1280 
1281 intx os::current_thread_id() {
1282   return (intx)thr_self();
1283 }
1284 
1285 static pid_t _initial_pid = 0;
1286 
1287 int os::current_process_id() {
1288   return (int)(_initial_pid ? _initial_pid : getpid());
1289 }
1290 
1291 int os::allocate_thread_local_storage() {
1292   // %%%       in Win32 this allocates a memory segment pointed to by a
1293   //           register.  Dan Stein can implement a similar feature in
1294   //           Solaris.  Alternatively, the VM can do the same thing
1295   //           explicitly: malloc some storage and keep the pointer in a
1296   //           register (which is part of the thread's context) (or keep it
1297   //           in TLS).
1298   // %%%       In current versions of Solaris, thr_self and TSD can
1299   //           be accessed via short sequences of displaced indirections.
1300   //           The value of thr_self is available as %g7(36).
1301   //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
1302   //           assuming that the current thread already has a value bound to k.
1303   //           It may be worth experimenting with such access patterns,
1304   //           and later having the parameters formally exported from a Solaris
1305   //           interface.  I think, however, that it will be faster to
1306   //           maintain the invariant that %g2 always contains the
1307   //           JavaThread in Java code, and have stubs simply
1308   //           treat %g2 as a caller-save register, preserving it in a %lN.
1309   thread_key_t tk;
1310   if (thr_keycreate(&tk, NULL)) {
1311     fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
1312                   "(%s)", strerror(errno)));
1313   }
1314   return int(tk);
1315 }
1316 
1317 void os::free_thread_local_storage(int index) {
1318   // %%% don't think we need anything here
1319   // if (pthread_key_delete((pthread_key_t) tk)) {
1320   //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
1321   // }
1322 }
1323 
1324 // The size libthread allocates for tsd_common is a version-specific
1325 // small number - the point is that NO swap space is available.
1326 #define SMALLINT 32
1327 void os::thread_local_storage_at_put(int index, void* value) {
1328   // %%% this is used only in threadLocalStorage.cpp
1329   if (thr_setspecific((thread_key_t)index, value)) {
1330     if (errno == ENOMEM) {
1331       vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
1332                             "thr_setspecific: out of swap space");
1333     } else {
1334       fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1335                     "(%s)", strerror(errno)));
1336     }
1337   } else {
1338     ThreadLocalStorage::set_thread_in_slot((Thread *) value);
1339   }
1340 }
1341 
1342 // This function could be called before TLS is initialized, for example, when
1343 // VM receives an async signal or when VM causes a fatal error during
1344 // initialization. Return NULL if thr_getspecific() fails.
1345 void* os::thread_local_storage_at(int index) {
1346   // %%% this is used only in threadLocalStorage.cpp
1347   void* r = NULL;
1348   return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1349 }
1350 
1351 
1352 // gethrtime() should be monotonic according to the documentation,
1353 // but some virtualized platforms are known to break this guarantee.
1354 // getTimeNanos() must be guaranteed not to move backwards, so we
1355 // are forced to add a check here.
1356 inline hrtime_t getTimeNanos() {
1357   const hrtime_t now = gethrtime();
1358   const hrtime_t prev = max_hrtime;
1359   if (now <= prev) {
1360     return prev;   // same or retrograde time;
1361   }
1362   const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1363   assert(obsv >= prev, "invariant");   // Monotonicity
1364   // If the CAS succeeded then we're done and return "now".
1365   // If the CAS failed and the observed value "obsv" is >= now then
1366   // we should return "obsv".  If the CAS failed and now > obsv > prev then
1367   // some other thread raced this thread and installed a new value, in which case
1368   // we could either (a) retry the entire operation, (b) retry trying to install now
1369   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1370   // we might discard a higher "now" value in deference to a slightly lower but freshly
1371   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1372   // to (a) or (b) -- and greatly reduces coherence traffic.
1373   // We might also condition (c) on the magnitude of the delta between obsv and now.
1374   // Avoiding excessive CAS operations to hot RW locations is critical.
1375   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1376   return (prev == obsv) ? now : obsv;
1377 }
1378 
1379 // Time since start-up in seconds to a fine granularity.
1380 // Used by VMSelfDestructTimer and the MemProfiler.
1381 double os::elapsedTime() {
1382   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1383 }
1384 
1385 jlong os::elapsed_counter() {
1386   return (jlong)(getTimeNanos() - first_hrtime);
1387 }
1388 
1389 jlong os::elapsed_frequency() {
1390   return hrtime_hz;
1391 }
1392 
1393 // Return the real, user, and system times in seconds from an
1394 // arbitrary fixed point in the past.
1395 bool os::getTimesSecs(double* process_real_time,
1396                       double* process_user_time,
1397                       double* process_system_time) {
1398   struct tms ticks;
1399   clock_t real_ticks = times(&ticks);
1400 
1401   if (real_ticks == (clock_t) (-1)) {
1402     return false;
1403   } else {
1404     double ticks_per_second = (double) clock_tics_per_sec;
1405     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1406     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1407     // For consistency return the real time from getTimeNanos()
1408     // converted to seconds.
1409     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1410 
1411     return true;
1412   }
1413 }
1414 
1415 bool os::supports_vtime() { return true; }
1416 
1417 bool os::enable_vtime() {
1418   int fd = ::open("/proc/self/ctl", O_WRONLY);
1419   if (fd == -1) {
1420     return false;
1421   }
1422 
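       // PCSET + PR_MSACCT enables microstate accounting for this process.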
1423   long cmd[] = { PCSET, PR_MSACCT };
1424   int res = ::write(fd, cmd, sizeof(long) * 2);
1425   ::close(fd);
1426   if (res != sizeof(long) * 2) {
1427     return false;
1428   }
1429   return true;
1430 }
1431 
1432 bool os::vtime_enabled() {
1433   int fd = ::open("/proc/self/status", O_RDONLY);
1434   if (fd == -1) {
1435     return false;
1436   }
1437 
1438   pstatus_t status;
1439   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1440   ::close(fd);
1441   if (res != sizeof(pstatus_t)) {
1442     return false;
1443   }
1444   return status.pr_flags & PR_MSACCT;
1445 }
1446 
1447 double os::elapsedVTime() {
1448   return (double)gethrvtime() / (double)hrtime_hz;
1449 }
1450 
1451 // Used internally for comparisons only.
1452 // getTimeMillis is guaranteed not to move backwards on Solaris.
1453 jlong getTimeMillis() {
1454   jlong nanotime = getTimeNanos();
1455   return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1456 }
1457 
1458 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1459 jlong os::javaTimeMillis() {
1460   timeval t;
1461   if (gettimeofday(&t, NULL) == -1) {
1462     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1463   }
1464   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1465 }
1466 
1467 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1468   timeval t;
1469   if (gettimeofday(&t, NULL) == -1) {
1470     fatal(err_msg("os::javaTimeSystemUTC: gettimeofday (%s)", strerror(errno)));
1471   }
1472   seconds = jlong(t.tv_sec);
1473   nanos = jlong(t.tv_usec) * 1000;
1474 }
1475 
1476 
1477 jlong os::javaTimeNanos() {
1478   return (jlong)getTimeNanos();
1479 }
1480 
1481 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1482   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1483   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1484   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1485   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1486 }
1487 
1488 char * os::local_time_string(char *buf, size_t buflen) {
1489   struct tm t;
1490   time_t long_time;
1491   time(&long_time);
1492   localtime_r(&long_time, &t);
1493   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1494                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1495                t.tm_hour, t.tm_min, t.tm_sec);
1496   return buf;
1497 }
1498 
1499 // Note: os::shutdown() might be called very early during initialization, or
1500 // called from signal handler. Before adding something to os::shutdown(), make
1501 // sure it is async-safe and can handle partially initialized VM.
1502 void os::shutdown() {
1503 
1504   // allow PerfMemory to attempt cleanup of any persistent resources
1505   perfMemory_exit();
1506 
1507   // needs to remove object in file system
1508   AttachListener::abort();
1509 
1510   // flush buffered output, finish log files
1511   ostream_abort();
1512 
1513   // Check for abort hook
1514   abort_hook_t abort_hook = Arguments::abort_hook();
1515   if (abort_hook != NULL) {
1516     abort_hook();
1517   }
1518 }
1519 
1520 // Note: os::abort() might be called very early during initialization, or
1521 // called from signal handler. Before adding something to os::abort(), make
1522 // sure it is async-safe and can handle partially initialized VM.
1523 void os::abort(bool dump_core) {
1524   abort(dump_core, NULL, NULL);
1525 }
1526 
1527 void os::abort(bool dump_core, void* siginfo, void* context) {
1528   os::shutdown();
1529   if (dump_core) {
1530 #ifndef PRODUCT
1531     fdStream out(defaultStream::output_fd());
1532     out.print_raw("Current thread is ");
1533     char buf[16];
1534     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1535     out.print_raw_cr(buf);
1536     out.print_raw_cr("Dumping core ...");
1537 #endif
1538     ::abort(); // dump core (for debugging)
1539   }
1540 
1541   ::exit(1);
1542 }
1543 
1544 // Die immediately, no exit hook, no abort hook, no cleanup.
1545 void os::die() {
1546   ::abort(); // dump core (for debugging)
1547 }
1548 
1549 // DLL functions
1550 
1551 const char* os::dll_file_extension() { return ".so"; }
1552 
1553 // This must be hard coded because it's the system's temporary
1554 // directory, not the java application's temp directory, a la java.io.tmpdir.
1555 const char* os::get_temp_directory() { return "/tmp"; }
1556 
1557 static bool file_exists(const char* filename) {
1558   struct stat statbuf;
1559   if (filename == NULL || strlen(filename) == 0) {
1560     return false;
1561   }
1562   return os::stat(filename, &statbuf) == 0;
1563 }
1564 
1565 bool os::dll_build_name(char* buffer, size_t buflen,
1566                         const char* pname, const char* fname) {
1567   bool retval = false;
1568   const size_t pnamelen = pname ? strlen(pname) : 0;
1569 
1570   // Return error on buffer overflow.
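       // The extra 10 bytes cover "lib", ".so", a path separator and the terminating NUL.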
1571   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1572     return retval;
1573   }
1574 
1575   if (pnamelen == 0) {
1576     snprintf(buffer, buflen, "lib%s.so", fname);
1577     retval = true;
1578   } else if (strchr(pname, *os::path_separator()) != NULL) {
1579     int n;
1580     char** pelements = split_path(pname, &n);
1581     if (pelements == NULL) {
1582       return false;
1583     }
1584     for (int i = 0; i < n; i++) {
1585       // really shouldn't be NULL but what the heck, check can't hurt
1586       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1587         continue; // skip the empty path values
1588       }
1589       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1590       if (file_exists(buffer)) {
1591         retval = true;
1592         break;
1593       }
1594     }
1595     // release the storage
1596     for (int i = 0; i < n; i++) {
1597       if (pelements[i] != NULL) {
1598         FREE_C_HEAP_ARRAY(char, pelements[i]);
1599       }
1600     }
1601     if (pelements != NULL) {
1602       FREE_C_HEAP_ARRAY(char*, pelements);
1603     }
1604   } else {
1605     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1606     retval = true;
1607   }
1608   return retval;
1609 }
1610 
1611 // check if addr is inside libjvm.so
1612 bool os::address_is_in_vm(address addr) {
1613   static address libjvm_base_addr;
1614   Dl_info dlinfo;
1615 
1616   if (libjvm_base_addr == NULL) {
1617     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1618       libjvm_base_addr = (address)dlinfo.dli_fbase;
1619     }
1620     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1621   }
1622 
1623   if (dladdr((void *)addr, &dlinfo) != 0) {
1624     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1625   }
1626 
1627   return false;
1628 }
1629 
1630 typedef int (*dladdr1_func_type)(void *, Dl_info *, void **, int);
1631 static dladdr1_func_type dladdr1_func = NULL;
1632 
1633 bool os::dll_address_to_function_name(address addr, char *buf,
1634                                       int buflen, int * offset) {
1635   // buf is not optional, but offset is optional
1636   assert(buf != NULL, "sanity check");
1637 
1638   Dl_info dlinfo;
1639 
1640   // dladdr1_func was initialized in os::init()
1641   if (dladdr1_func != NULL) {
1642     // yes, we have dladdr1
1643 
1644     // Support for dladdr1 is checked at runtime; it may be
1645     // available even if the vm is built on a machine that does
1646     // not have dladdr1 support.  Make sure there is a value for
1647     // RTLD_DL_SYMENT.
1648 #ifndef RTLD_DL_SYMENT
1649   #define RTLD_DL_SYMENT 1
1650 #endif
1651 #ifdef _LP64
1652     Elf64_Sym * info;
1653 #else
1654     Elf32_Sym * info;
1655 #endif
1656     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1657                      RTLD_DL_SYMENT) != 0) {
1658       // see if we have a matching symbol that covers our address
1659       if (dlinfo.dli_saddr != NULL &&
1660           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1661         if (dlinfo.dli_sname != NULL) {
1662           if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1663             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1664           }
1665           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1666           return true;
1667         }
1668       }
1669       // no matching symbol so try for just file info
1670       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1671         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1672                             buf, buflen, offset, dlinfo.dli_fname)) {
1673           return true;
1674         }
1675       }
1676     }
1677     buf[0] = '\0';
1678     if (offset != NULL) *offset  = -1;
1679     return false;
1680   }
1681 
1682   // no, only dladdr is available
1683   if (dladdr((void *)addr, &dlinfo) != 0) {
1684     // see if we have a matching symbol
1685     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1686       if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1687         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1688       }
1689       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1690       return true;
1691     }
1692     // no matching symbol so try for just file info
1693     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1694       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1695                           buf, buflen, offset, dlinfo.dli_fname)) {
1696         return true;
1697       }
1698     }
1699   }
1700   buf[0] = '\0';
1701   if (offset != NULL) *offset  = -1;
1702   return false;
1703 }
1704 
1705 bool os::dll_address_to_library_name(address addr, char* buf,
1706                                      int buflen, int* offset) {
1707   // buf is not optional, but offset is optional
1708   assert(buf != NULL, "sanity check");
1709 
1710   Dl_info dlinfo;
1711 
1712   if (dladdr((void*)addr, &dlinfo) != 0) {
1713     if (dlinfo.dli_fname != NULL) {
1714       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1715     }
1716     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1717       *offset = addr - (address)dlinfo.dli_fbase;
1718     }
1719     return true;
1720   }
1721 
1722   buf[0] = '\0';
1723   if (offset) *offset = -1;
1724   return false;
1725 }
1726 
1727 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1728   Dl_info dli;
1729   // Sanity check: make sure we can resolve this module's own file name.
1730   if (dladdr(CAST_FROM_FN_PTR(void *, os::get_loaded_modules_info), &dli) == 0 ||
1731       dli.dli_fname == NULL) {
1732     return 1;
1733   }
1734 
1735   void * handle = dlopen(dli.dli_fname, RTLD_LAZY);
1736   if (handle == NULL) {
1737     return 1;
1738   }
1739 
1740   Link_map *map;
1741   dlinfo(handle, RTLD_DI_LINKMAP, &map);
1742   if (map == NULL) {
1743     dlclose(handle);
1744     return 1;
1745   }
1746 
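       // Rewind to the first entry in the link map.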
1747   while (map->l_prev != NULL) {
1748     map = map->l_prev;
1749   }
1750 
1751   while (map != NULL) {
1752     // Iterate through all map entries and call callback with fields of interest
1753     if (callback(map->l_name, (address)map->l_addr, (address)0, param)) {
1754       dlclose(handle);
1755       return 1;
1756     }
1757     map = map->l_next;
1758   }
1759 
1760   dlclose(handle);
1761   return 0;
1762 }
1763 
1764 int _print_dll_info_cb(const char * name, address base_address, address top_address, void * param) {
1765   outputStream * out = (outputStream *) param;
1766   out->print_cr(PTR_FORMAT " \t%s", base_address, name);
1767   return 0;
1768 }
1769 
1770 void os::print_dll_info(outputStream * st) {
1771   st->print_cr("Dynamic libraries:"); st->flush();
1772   if (get_loaded_modules_info(_print_dll_info_cb, (void *)st)) {
1773     st->print_cr("Error: Cannot print dynamic libraries.");
1774   }
1775 }
1776 
1777 // Loads the .dll/.so and,
1778 // in case of error, checks whether the .dll/.so was built for the
1779 // same architecture as the HotSpot VM is running on.
1780 
1781 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1782   void * result= ::dlopen(filename, RTLD_LAZY);
1783   if (result != NULL) {
1784     // Successful loading
1785     return result;
1786   }
1787 
1788   Elf32_Ehdr elf_head;
1789 
1790   // Read system error message into ebuf
1791   // It may or may not be overwritten below
1792   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1793   ebuf[ebuflen-1]='\0';
1794   int diag_msg_max_length=ebuflen-strlen(ebuf);
1795   char* diag_msg_buf=ebuf+strlen(ebuf);
1796 
1797   if (diag_msg_max_length==0) {
1798     // No more space in ebuf for additional diagnostics message
1799     return NULL;
1800   }
1801 
1802 
1803   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1804 
1805   if (file_descriptor < 0) {
1806     // Can't open library, report dlerror() message
1807     return NULL;
1808   }
1809 
1810   bool failed_to_read_elf_head=
1811     (sizeof(elf_head)!=
1812      (::read(file_descriptor, &elf_head,sizeof(elf_head))));
1813 
1814   ::close(file_descriptor);
1815   if (failed_to_read_elf_head) {
1816     // file i/o error - report dlerror() msg
1817     return NULL;
1818   }
1819 
1820   typedef struct {
1821     Elf32_Half  code;         // Actual value as defined in elf.h
1822     Elf32_Half  compat_class; // Compatibility class of the arch, as the VM sees it
1823     char        elf_class;    // 32 or 64 bit
1824     char        endianess;    // MSB or LSB
1825     char*       name;         // String representation
1826   } arch_t;
1827 
1828   static const arch_t arch_array[]={
1829     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1830     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1831     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1832     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1833     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1834     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1835     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1836     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1837     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1838     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1839   };
1840 
1841 #if  (defined IA32)
1842   static  Elf32_Half running_arch_code=EM_386;
1843 #elif   (defined AMD64)
1844   static  Elf32_Half running_arch_code=EM_X86_64;
1845 #elif  (defined IA64)
1846   static  Elf32_Half running_arch_code=EM_IA_64;
1847 #elif  (defined __sparc) && (defined _LP64)
1848   static  Elf32_Half running_arch_code=EM_SPARCV9;
1849 #elif  (defined __sparc) && (!defined _LP64)
1850   static  Elf32_Half running_arch_code=EM_SPARC;
1851 #elif  (defined __powerpc64__)
1852   static  Elf32_Half running_arch_code=EM_PPC64;
1853 #elif  (defined __powerpc__)
1854   static  Elf32_Half running_arch_code=EM_PPC;
1855 #elif (defined ARM)
1856   static  Elf32_Half running_arch_code=EM_ARM;
1857 #else
1858   #error Method os::dll_load requires that one of the following is defined:\
1859        IA32, AMD64, IA64, __sparc, __powerpc__, __powerpc64__, ARM
1860 #endif
1861 
1862   // Identify compatibility class for VM's architecture and library's architecture
1863   // Obtain string descriptions for architectures
1864 
1865   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1866   int running_arch_index=-1;
1867 
1868   for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) {
1869     if (running_arch_code == arch_array[i].code) {
1870       running_arch_index    = i;
1871     }
1872     if (lib_arch.code == arch_array[i].code) {
1873       lib_arch.compat_class = arch_array[i].compat_class;
1874       lib_arch.name         = arch_array[i].name;
1875     }
1876   }
1877 
1878   assert(running_arch_index != -1,
1879          "Didn't find running architecture code (running_arch_code) in arch_array");
1880   if (running_arch_index == -1) {
1881     // Even though running architecture detection failed,
1882     // we may still continue and report the dlerror() message
1883     return NULL;
1884   }
1885 
1886   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1887     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1888     return NULL;
1889   }
1890 
1891   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1892     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1893     return NULL;
1894   }
1895 
1896   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1897     if (lib_arch.name!=NULL) {
1898       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1899                  " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1900                  lib_arch.name, arch_array[running_arch_index].name);
1901     } else {
1902       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1903                  " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1904                  lib_arch.code,
1905                  arch_array[running_arch_index].name);
1906     }
1907   }
1908 
1909   return NULL;
1910 }
1911 
1912 void* os::dll_lookup(void* handle, const char* name) {
1913   return dlsym(handle, name);
1914 }
1915 
1916 void* os::get_default_process_handle() {
1917   return (void*)::dlopen(NULL, RTLD_LAZY);
1918 }
1919 
1920 int os::stat(const char *path, struct stat *sbuf) {
1921   char pathbuf[MAX_PATH];
1922   if (strlen(path) > MAX_PATH - 1) {
1923     errno = ENAMETOOLONG;
1924     return -1;
1925   }
1926   os::native_path(strcpy(pathbuf, path));
1927   return ::stat(pathbuf, sbuf);
1928 }
1929 
1930 static bool _print_ascii_file(const char* filename, outputStream* st) {
1931   int fd = ::open(filename, O_RDONLY);
1932   if (fd == -1) {
1933     return false;
1934   }
1935 
1936   char buf[32];
1937   int bytes;
1938   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
1939     st->print_raw(buf, bytes);
1940   }
1941 
1942   ::close(fd);
1943 
1944   return true;
1945 }
1946 
1947 void os::print_os_info_brief(outputStream* st) {
1948   os::Solaris::print_distro_info(st);
1949 
1950   os::Posix::print_uname_info(st);
1951 
1952   os::Solaris::print_libversion_info(st);
1953 }
1954 
1955 void os::print_os_info(outputStream* st) {
1956   st->print("OS:");
1957 
1958   os::Solaris::print_distro_info(st);
1959 
1960   os::Posix::print_uname_info(st);
1961 
1962   os::Solaris::print_libversion_info(st);
1963 
1964   os::Posix::print_rlimit_info(st);
1965 
1966   os::Posix::print_load_average(st);
1967 }
1968 
1969 void os::Solaris::print_distro_info(outputStream* st) {
1970   if (!_print_ascii_file("/etc/release", st)) {
1971     st->print("Solaris");
1972   }
1973   st->cr();
1974 }
1975 
1976 void os::Solaris::print_libversion_info(outputStream* st) {
1977   st->print("  (T2 libthread)");
1978   st->cr();
1979 }
1980 
1981 static bool check_addr0(outputStream* st) {
1982   bool status = false;
1983   int fd = ::open("/proc/self/map",O_RDONLY);
1984   if (fd >= 0) {
1985     prmap_t p;
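         // Scan the prmap_t entries of /proc/self/map for a mapping at virtual address 0.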
1986     while (::read(fd, &p, sizeof(p)) > 0) {
1987       if (p.pr_vaddr == 0x0) {
1988         st->print("Warning: Address: " PTR_FORMAT ", Size: " SIZE_FORMAT "K, ", p.pr_vaddr, p.pr_size/1024);
1989         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
1990         st->print("Access:");
1991         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
1992         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
1993         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
1994         st->cr();
1995         status = true;
1996       }
1997     }
1998     ::close(fd);
1999   }
2000   return status;
2001 }
2002 
2003 void os::pd_print_cpu_info(outputStream* st) {
2004   // Nothing to do for now.
2005 }
2006 
2007 void os::print_memory_info(outputStream* st) {
2008   st->print("Memory:");
2009   st->print(" %dk page", os::vm_page_size()>>10);
2010   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
2011   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
2012   st->cr();
2013   (void) check_addr0(st);
2014 }
2015 
2016 void os::print_siginfo(outputStream* st, void* siginfo) {
2017   const siginfo_t* si = (const siginfo_t*)siginfo;
2018 
2019   os::Posix::print_siginfo_brief(st, si);
2020 
2021   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2022       UseSharedSpaces) {
2023     FileMapInfo* mapinfo = FileMapInfo::current_info();
2024     if (mapinfo->is_in_shared_space(si->si_addr)) {
2025       st->print("\n\nError accessing class data sharing archive."   \
2026                 " Mapped file inaccessible during execution, "      \
2027                 " possible disk/network problem.");
2028     }
2029   }
2030   st->cr();
2031 }
2032 
2033 // Moved up from the signal-handling code below, because we need them here
2034 // for diagnostic prints.
2035 #define OLDMAXSIGNUM 32
2036 static int Maxsignum = 0;
2037 static int *ourSigFlags = NULL;
2038 
2039 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2040 
2041 int os::Solaris::get_our_sigflags(int sig) {
2042   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2043   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2044   return ourSigFlags[sig];
2045 }
2046 
2047 void os::Solaris::set_our_sigflags(int sig, int flags) {
2048   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2049   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2050   ourSigFlags[sig] = flags;
2051 }
2052 
2053 
2054 static const char* get_signal_handler_name(address handler,
2055                                            char* buf, int buflen) {
2056   int offset;
2057   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2058   if (found) {
2059     // skip directory names
2060     const char *p1, *p2;
2061     p1 = buf;
2062     size_t len = strlen(os::file_separator());
2063     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2064     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2065   } else {
2066     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2067   }
2068   return buf;
2069 }
2070 
2071 static void print_signal_handler(outputStream* st, int sig,
2072                                  char* buf, size_t buflen) {
2073   struct sigaction sa;
2074 
2075   sigaction(sig, NULL, &sa);
2076 
2077   st->print("%s: ", os::exception_name(sig, buf, buflen));
2078 
2079   address handler = (sa.sa_flags & SA_SIGINFO)
2080                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2081                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
2082 
2083   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2084     st->print("SIG_DFL");
2085   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2086     st->print("SIG_IGN");
2087   } else {
2088     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2089   }
2090 
2091   st->print(", sa_mask[0]=");
2092   os::Posix::print_signal_set_short(st, &sa.sa_mask);
2093 
2094   address rh = VMError::get_resetted_sighandler(sig);
2095   // The handler may have been reset by VMError.
2096   if (rh != NULL) {
2097     handler = rh;
2098     sa.sa_flags = VMError::get_resetted_sigflags(sig);
2099   }
2100 
2101   st->print(", sa_flags=");
2102   os::Posix::print_sa_flags(st, sa.sa_flags);
2103 
2104   // Check: is it our handler?
2105   if (handler == CAST_FROM_FN_PTR(address, signalHandler) ||
2106       handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
2107     // It is our signal handler
2108     // check for flags
2109     if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
2110       st->print(
2111                 ", flags were changed from " PTR32_FORMAT ", consider using jsig library",
2112                 os::Solaris::get_our_sigflags(sig));
2113     }
2114   }
2115   st->cr();
2116 }
2117 
2118 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2119   st->print_cr("Signal Handlers:");
2120   print_signal_handler(st, SIGSEGV, buf, buflen);
2121   print_signal_handler(st, SIGBUS , buf, buflen);
2122   print_signal_handler(st, SIGFPE , buf, buflen);
2123   print_signal_handler(st, SIGPIPE, buf, buflen);
2124   print_signal_handler(st, SIGXFSZ, buf, buflen);
2125   print_signal_handler(st, SIGILL , buf, buflen);
2126   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2127   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2128   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2129   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2130   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2131   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2132   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2133   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2134 }
2135 
2136 static char saved_jvm_path[MAXPATHLEN] = { 0 };
2137 
2138 // Find the full path to the current module, libjvm.so
2139 void os::jvm_path(char *buf, jint buflen) {
2140   // Error checking.
2141   if (buflen < MAXPATHLEN) {
2142     assert(false, "must use a large-enough buffer");
2143     buf[0] = '\0';
2144     return;
2145   }
2146   // Lazily resolve the path to the current module.
2147   if (saved_jvm_path[0] != 0) {
2148     strcpy(buf, saved_jvm_path);
2149     return;
2150   }
2151 
2152   Dl_info dlinfo;
2153   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2154   assert(ret != 0, "cannot locate libjvm");
2155   if (ret != 0 && dlinfo.dli_fname != NULL) {
2156     realpath((char *)dlinfo.dli_fname, buf);
2157   } else {
2158     buf[0] = '\0';
2159     return;
2160   }
2161 
2162   if (Arguments::sun_java_launcher_is_altjvm()) {
2163     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
2164     // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
2165     // If "/jre/lib/" appears at the right place in the string, then
2166     // assume we are installed in a JDK and we're done.  Otherwise, check
2167     // for a JAVA_HOME environment variable and fix up the path so it
2168     // looks like libjvm.so is installed there (append a fake suffix
2169     // hotspot/libjvm.so).
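         // Walk p back over the last five path components so that, when the layout
         // matches, it points at the "/jre/lib/<arch>/<vmtype>/libjvm.so" suffix.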
2170     const char *p = buf + strlen(buf) - 1;
2171     for (int count = 0; p > buf && count < 5; ++count) {
2172       for (--p; p > buf && *p != '/'; --p)
2173         /* empty */ ;
2174     }
2175 
2176     if (strncmp(p, "/jre/lib/", 9) != 0) {
2177       // Look for JAVA_HOME in the environment.
2178       char* java_home_var = ::getenv("JAVA_HOME");
2179       if (java_home_var != NULL && java_home_var[0] != 0) {
2180         char cpu_arch[12];
2181         char* jrelib_p;
2182         int   len;
2183         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2184 #ifdef _LP64
2185         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2186         if (strcmp(cpu_arch, "sparc") == 0) {
2187           strcat(cpu_arch, "v9");
2188         } else if (strcmp(cpu_arch, "i386") == 0) {
2189           strcpy(cpu_arch, "amd64");
2190         }
2191 #endif
2192         // Check the current module name "libjvm.so".
2193         p = strrchr(buf, '/');
2194         assert(strstr(p, "/libjvm") == p, "invalid library name");
2195 
2196         realpath(java_home_var, buf);
2197         // Determine whether this is a legacy image or a modules image;
2198         // a modules image doesn't have a "jre" subdirectory.
2199         len = strlen(buf);
2200         assert(len < buflen, "Ran out of buffer space");
2201         jrelib_p = buf + len;
2202         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2203         if (0 != access(buf, F_OK)) {
2204           snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2205         }
2206 
2207         if (0 == access(buf, F_OK)) {
2208           // Use current module name "libjvm.so"
2209           len = strlen(buf);
2210           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2211         } else {
2212           // Go back to path of .so
2213           realpath((char *)dlinfo.dli_fname, buf);
2214         }
2215       }
2216     }
2217   }
2218 
2219   strncpy(saved_jvm_path, buf, MAXPATHLEN);
2220   saved_jvm_path[MAXPATHLEN - 1] = '\0';
2221 }
2222 
2223 
2224 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2225   // no prefix required, not even "_"
2226 }
2227 
2228 
2229 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2230   // no suffix required
2231 }
2232 
2233 // This method is a copy of JDK's sysGetLastErrorString
2234 // from src/solaris/hpi/src/system_md.c
2235 
2236 size_t os::lasterror(char *buf, size_t len) {
2237   if (errno == 0)  return 0;
2238 
2239   const char *s = ::strerror(errno);
2240   size_t n = ::strlen(s);
2241   if (n >= len) {
2242     n = len - 1;
2243   }
2244   ::strncpy(buf, s, n);
2245   buf[n] = '\0';
2246   return n;
2247 }
2248 
2249 
2250 // sun.misc.Signal
2251 
2252 extern "C" {
2253   static void UserHandler(int sig, void *siginfo, void *context) {
2254     // Ctrl-C was pressed during error reporting, likely because the error
2255     // handler failed to abort. Let the VM die immediately.
2256     if (sig == SIGINT && is_error_reported()) {
2257       os::die();
2258     }
2259 
2260     os::signal_notify(sig);
2261     // We do not need to reinstate the signal handler each time...
2262   }
2263 }
2264 
2265 void* os::user_handler() {
2266   return CAST_FROM_FN_PTR(void*, UserHandler);
2267 }
2268 
2269 class Semaphore : public StackObj {
2270  public:
2271   Semaphore();
2272   ~Semaphore();
2273   void signal();
2274   void wait();
2275   bool trywait();
2276   bool timedwait(unsigned int sec, int nsec);
2277  private:
2278   sema_t _semaphore;
2279 };
2280 
2281 
2282 Semaphore::Semaphore() {
2283   sema_init(&_semaphore, 0, NULL, NULL);
2284 }
2285 
2286 Semaphore::~Semaphore() {
2287   sema_destroy(&_semaphore);
2288 }
2289 
2290 void Semaphore::signal() {
2291   sema_post(&_semaphore);
2292 }
2293 
2294 void Semaphore::wait() {
2295   sema_wait(&_semaphore);
2296 }
2297 
2298 bool Semaphore::trywait() {
2299   return sema_trywait(&_semaphore) == 0;
2300 }
2301 
2302 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2303   struct timespec ts;
2304   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2305 
2306   while (1) {
2307     int result = sema_timedwait(&_semaphore, &ts);
2308     if (result == 0) {
2309       return true;
2310     } else if (errno == EINTR) {
2311       continue;
2312     } else if (errno == ETIME) {
2313       return false;
2314     } else {
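           // Treat any other error the same as a timeout.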
2315       return false;
2316     }
2317   }
2318 }
2319 
2320 extern "C" {
2321   typedef void (*sa_handler_t)(int);
2322   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2323 }
2324 
2325 void* os::signal(int signal_number, void* handler) {
2326   struct sigaction sigAct, oldSigAct;
2327   sigfillset(&(sigAct.sa_mask));
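       // SA_RESTART: restart interrupted system calls; SA_RESETHAND stays clear
       // so the handler remains installed after delivery.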
2328   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2329   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2330 
2331   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
2332     // -1 means registration failed
2333     return (void *)-1;
2334   }
2335 
2336   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2337 }
2338 
2339 void os::signal_raise(int signal_number) {
2340   raise(signal_number);
2341 }
2342 
2343 // The following code was moved here from os.cpp to make it
2344 // platform specific, which it is by its very nature.
2345 
2346 // a counter for each possible signal value
2347 static int Sigexit = 0;
2348 static int Maxlibjsigsigs;
2349 static jint *pending_signals = NULL;
2350 static int *preinstalled_sigs = NULL;
2351 static struct sigaction *chainedsigactions = NULL;
2352 static sema_t sig_sem;
2353 typedef int (*version_getting_t)();
2354 version_getting_t os::Solaris::get_libjsig_version = NULL;
2355 static int libjsigversion = 0;
2356 
2357 int os::sigexitnum_pd() {
2358   assert(Sigexit > 0, "signal memory not yet initialized");
2359   return Sigexit;
2360 }
2361 
2362 void os::Solaris::init_signal_mem() {
2363   // Initialize signal structures
2364   Maxsignum = SIGRTMAX;
2365   Sigexit = Maxsignum+1;
2366   assert(Maxsignum >0, "Unable to obtain max signal number");
2367 
2368   Maxlibjsigsigs = Maxsignum;
2369 
2370   // pending_signals has one int per signal
2371   // The additional signal is for SIGEXIT - exit signal to signal_thread
2372   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2373   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2374 
2375   if (UseSignalChaining) {
2376     chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
2377                                                    * (Maxsignum + 1), mtInternal);
2378     memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2379     preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2380     memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2381   }
2382   ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2383   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2384 }
2385 
2386 void os::signal_init_pd() {
2387   int ret;
2388 
2389   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2390   assert(ret == 0, "sema_init() failed");
2391 }
2392 
2393 void os::signal_notify(int signal_number) {
2394   int ret;
2395 
2396   Atomic::inc(&pending_signals[signal_number]);
2397   ret = ::sema_post(&sig_sem);
2398   assert(ret == 0, "sema_post() failed");
2399 }
2400 
2401 static int check_pending_signals(bool wait_for_signal) {
2402   int ret;
2403   while (true) {
2404     for (int i = 0; i < Sigexit + 1; i++) {
2405       jint n = pending_signals[i];
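           // Atomically decrement the pending count for signal i; if this thread
           // wins the race, report signal i.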
2406       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2407         return i;
2408       }
2409     }
2410     if (!wait_for_signal) {
2411       return -1;
2412     }
2413     JavaThread *thread = JavaThread::current();
2414     ThreadBlockInVM tbivm(thread);
2415 
2416     bool threadIsSuspended;
2417     do {
2418       thread->set_suspend_equivalent();
2419       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2420       while ((ret = ::sema_wait(&sig_sem)) == EINTR)
2421         ;
2422       assert(ret == 0, "sema_wait() failed");
2423 
2424       // were we externally suspended while we were waiting?
2425       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2426       if (threadIsSuspended) {
2427         // The semaphore has been incremented, but while we were waiting
2428         // another thread suspended us. We don't want to continue running
2429         // while suspended because that would surprise the thread that
2430         // suspended us.
2431         ret = ::sema_post(&sig_sem);
2432         assert(ret == 0, "sema_post() failed");
2433 
2434         thread->java_suspend_self();
2435       }
2436     } while (threadIsSuspended);
2437   }
2438 }
2439 
2440 int os::signal_lookup() {
2441   return check_pending_signals(false);
2442 }
2443 
2444 int os::signal_wait() {
2445   return check_pending_signals(true);
2446 }
2447 
2448 ////////////////////////////////////////////////////////////////////////////////
2449 // Virtual Memory
2450 
2451 static int page_size = -1;
2452 
2453 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2454 // clear this var if support is not available.
2455 static bool has_map_align = true;
2456 
2457 int os::vm_page_size() {
2458   assert(page_size != -1, "must call os::init");
2459   return page_size;
2460 }
2461 
2462 // Solaris allocates memory by pages.
2463 int os::vm_allocation_granularity() {
2464   assert(page_size != -1, "must call os::init");
2465   return page_size;
2466 }
2467 
2468 static bool recoverable_mmap_error(int err) {
2469   // See if the error is one we can let the caller handle. This
2470   // list of errno values comes from the Solaris mmap(2) man page.
2471   switch (err) {
2472   case EBADF:
2473   case EINVAL:
2474   case ENOTSUP:
2475     // let the caller deal with these errors
2476     return true;
2477 
2478   default:
2479     // Any remaining errors on this OS can cause our reserved mapping
2480     // to be lost. That can cause confusion where different data
2481     // structures think they have the same memory mapped. The worst
2482     // scenario is if both the VM and a library think they have the
2483     // same memory mapped.
2484     return false;
2485   }
2486 }
2487 
2488 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2489                                     int err) {
2490   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2491           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2492           strerror(err), err);
2493 }
2494 
2495 static void warn_fail_commit_memory(char* addr, size_t bytes,
2496                                     size_t alignment_hint, bool exec,
2497                                     int err) {
2498   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2499           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2500           alignment_hint, exec, strerror(err), err);
2501 }
2502 
2503 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2504   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2505   size_t size = bytes;
2506   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2507   if (res != NULL) {
2508     if (UseNUMAInterleaving) {
2509       numa_make_global(addr, bytes);
2510     }
2511     return 0;
2512   }
2513 
2514   int err = errno;  // save errno from mmap() call in mmap_chunk()
2515 
2516   if (!recoverable_mmap_error(err)) {
2517     warn_fail_commit_memory(addr, bytes, exec, err);
2518     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2519   }
2520 
2521   return err;
2522 }
2523 
2524 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2525   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2526 }
2527 
2528 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2529                                   const char* mesg) {
2530   assert(mesg != NULL, "mesg must be specified");
2531   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2532   if (err != 0) {
2533     // the caller wants all commit errors to exit with the specified mesg:
2534     warn_fail_commit_memory(addr, bytes, exec, err);
2535     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2536   }
2537 }
2538 
2539 size_t os::Solaris::page_size_for_alignment(size_t alignment) {
2540   assert(is_size_aligned(alignment, (size_t) vm_page_size()),
2541          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
2542                  alignment, (size_t) vm_page_size()));
2543 
2544   for (int i = 0; _page_sizes[i] != 0; i++) {
2545     if (is_size_aligned(alignment, _page_sizes[i])) {
2546       return _page_sizes[i];
2547     }
2548   }
2549 
2550   return (size_t) vm_page_size();
2551 }
2552 
2553 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2554                                     size_t alignment_hint, bool exec) {
2555   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2556   if (err == 0 && UseLargePages && alignment_hint > 0) {
2557     assert(is_size_aligned(bytes, alignment_hint),
2558            err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint));
2559 
2560     // The syscall memcntl requires an exact page size (see man memcntl for details).
2561     size_t page_size = page_size_for_alignment(alignment_hint);
2562     if (page_size > (size_t) vm_page_size()) {
2563       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2564     }
2565   }
2566   return err;
2567 }
2568 
2569 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2570                           bool exec) {
2571   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2572 }
2573 
2574 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2575                                   size_t alignment_hint, bool exec,
2576                                   const char* mesg) {
2577   assert(mesg != NULL, "mesg must be specified");
2578   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2579   if (err != 0) {
2580     // the caller wants all commit errors to exit with the specified mesg:
2581     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2582     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2583   }
2584 }
2585 
2586 // Uncommit the pages in a specified region.
2587 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2588   if (madvise(addr, bytes, MADV_FREE) < 0) {
2589     debug_only(warning("MADV_FREE failed."));
2590     return;
2591   }
2592 }
2593 
2594 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2595   return os::commit_memory(addr, size, !ExecMem);
2596 }
2597 
2598 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2599   return os::uncommit_memory(addr, size);
2600 }
2601 
2602 // Change the page size in a given range.
2603 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2604   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2605   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2606   if (UseLargePages) {
2607     size_t page_size = Solaris::page_size_for_alignment(alignment_hint);
2608     if (page_size > (size_t) vm_page_size()) {
2609       Solaris::setup_large_pages(addr, bytes, page_size);
2610     }
2611   }
2612 }
2613 
2614 // Tell the OS to make the range local to the first-touching LWP
2615 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2616   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2617   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2618     debug_only(warning("MADV_ACCESS_LWP failed."));
2619   }
2620 }
2621 
2622 // Tell the OS that this range would be accessed from different LWPs.
2623 void os::numa_make_global(char *addr, size_t bytes) {
2624   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2625   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2626     debug_only(warning("MADV_ACCESS_MANY failed."));
2627   }
2628 }
2629 
2630 // Get the number of the locality groups.
2631 size_t os::numa_get_groups_num() {
2632   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2633   return n != -1 ? n : 1;
2634 }
2635 
2636 // Get a list of leaf locality groups. A leaf lgroup is a group that
2637 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
2638 // board. An LWP is assigned to one of these groups upon creation.
2639 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2640   if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2641     ids[0] = 0;
2642     return 1;
2643   }
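       // ids[] doubles as a work queue: ids[cur..top) holds lgroups still to be
       // expanded, children are appended at ids[top], and leaf lgroups that have
       // memory are compacted to the front into ids[0..bottom).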
2644   int result_size = 0, top = 1, bottom = 0, cur = 0;
2645   for (int k = 0; k < size; k++) {
2646     int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2647                                    (Solaris::lgrp_id_t*)&ids[top], size - top);
2648     if (r == -1) {
2649       ids[0] = 0;
2650       return 1;
2651     }
2652     if (!r) {
2653       // That's a leaf node.
2654       assert(bottom <= cur, "Sanity check");
2655       // Check if the node has memory
2656       if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2657                                   NULL, 0, LGRP_RSRC_MEM) > 0) {
2658         ids[bottom++] = ids[cur];
2659       }
2660     }
2661     top += r;
2662     cur++;
2663   }
2664   if (bottom == 0) {
2665     // Handle the situation when the OS reports no memory available;
2666     // assume a UMA architecture.
2667     ids[0] = 0;
2668     return 1;
2669   }
2670   return bottom;
2671 }
2672 
2673 // Detect a topology change. This typically happens when CPUs are plugged or unplugged.
2674 bool os::numa_topology_changed() {
2675   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2676   if (is_stale != -1 && is_stale) {
2677     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2678     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2679     assert(c != 0, "Failure to initialize LGRP API");
2680     Solaris::set_lgrp_cookie(c);
2681     return true;
2682   }
2683   return false;
2684 }
2685 
2686 // Get the group id of the current LWP.
2687 int os::numa_get_group_id() {
2688   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2689   if (lgrp_id == -1) {
2690     return 0;
2691   }
2692   const int size = os::numa_get_groups_num();
2693   int *ids = (int*)alloca(size * sizeof(int));
2694 
2695   // Get the ids of all lgroups with memory; r is the count.
2696   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2697                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2698   if (r <= 0) {
2699     return 0;
2700   }
2701   return ids[os::random() % r];
2702 }
2703 
2704 // Request information about the page.
2705 bool os::get_page_info(char *start, page_info* info) {
2706   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2707   uint64_t addr = (uintptr_t)start;
2708   uint64_t outdata[2];
2709   uint_t validity = 0;
2710 
2711   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2712     return false;
2713   }
2714 
2715   info->size = 0;
2716   info->lgrp_id = -1;
2717 
2718   if ((validity & 1) != 0) {
2719     if ((validity & 2) != 0) {
2720       info->lgrp_id = outdata[0];
2721     }
2722     if ((validity & 4) != 0) {
2723       info->size = outdata[1];
2724     }
2725     return true;
2726   }
2727   return false;
2728 }
2729 
2730 // Scan the pages from start to end until a page different than
2731 // the one described in the info parameter is encountered.
2732 char *os::scan_pages(char *start, char* end, page_info* page_expected,
2733                      page_info* page_found) {
2734   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2735   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2736   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2737   uint_t validity[MAX_MEMINFO_CNT];
2738 
2739   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2740   uint64_t p = (uint64_t)start;
2741   while (p < (uint64_t)end) {
2742     addrs[0] = p;
2743     size_t addrs_count = 1;
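         // Batch up to MAX_MEMINFO_CNT consecutive page addresses into one meminfo() query.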
2744     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2745       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2746       addrs_count++;
2747     }
2748 
2749     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2750       return NULL;
2751     }
2752 
2753     size_t i = 0;
2754     for (; i < addrs_count; i++) {
2755       if ((validity[i] & 1) != 0) {
2756         if ((validity[i] & 4) != 0) {
2757           if (outdata[types * i + 1] != page_expected->size) {
2758             break;
2759           }
2760         } else if (page_expected->size != 0) {
2761           break;
2762         }
2763 
2764         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2765           if (outdata[types * i] != page_expected->lgrp_id) {
2766             break;
2767           }
2768         }
2769       } else {
2770         return NULL;
2771       }
2772     }
2773 
2774     if (i < addrs_count) {
2775       if ((validity[i] & 2) != 0) {
2776         page_found->lgrp_id = outdata[types * i];
2777       } else {
2778         page_found->lgrp_id = -1;
2779       }
2780       if ((validity[i] & 4) != 0) {
2781         page_found->size = outdata[types * i + 1];
2782       } else {
2783         page_found->size = 0;
2784       }
2785       return (char*)addrs[i];
2786     }
2787 
2788     p = addrs[addrs_count - 1] + page_size;
2789   }
2790   return end;
2791 }
2792 
2793 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2794   size_t size = bytes;
2795   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2796   // uncommitted page. Otherwise, the read/write might succeed if we
2797   // have enough swap space to back the physical page.
2798   return
2799     NULL != Solaris::mmap_chunk(addr, size,
2800                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2801                                 PROT_NONE);
2802 }
2803 
2804 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2805   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2806 
2807   if (b == MAP_FAILED) {
2808     return NULL;
2809   }
2810   return b;
2811 }
2812 
2813 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes,
2814                              size_t alignment_hint, bool fixed) {
2815   char* addr = requested_addr;
2816   int flags = MAP_PRIVATE | MAP_NORESERVE;
2817 
2818   assert(!(fixed && (alignment_hint > 0)),
2819          "alignment hint meaningless with fixed mmap");
2820 
2821   if (fixed) {
2822     flags |= MAP_FIXED;
2823   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
2824     flags |= MAP_ALIGN;
2825     addr = (char*) alignment_hint;
2826   }
2827 
2828   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2829   // uncommitted page. Otherwise, the read/write might succeed if we
2830   // have enough swap space to back the physical page.
2831   return mmap_chunk(addr, bytes, flags, PROT_NONE);
2832 }
2833 
2834 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
2835                             size_t alignment_hint) {
2836   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint,
2837                                   (requested_addr != NULL));
2838 
2839   guarantee(requested_addr == NULL || requested_addr == addr,
2840             "OS failed to return requested mmap address.");
2841   return addr;
2842 }
2843 
2844 // Reserve memory at an arbitrary address, only if that area is
2845 // available (and not reserved for something else).
2846 
2847 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2848   const int max_tries = 10;
2849   char* base[max_tries];
2850   size_t size[max_tries];
2851 
2852   // Solaris adds a gap between mmap'ed regions.  The size of the gap
2853   // is dependent on the requested size and the MMU.  Our initial gap
2854   // value here is just a guess and will be corrected later.
2855   bool had_top_overlap = false;
2856   bool have_adjusted_gap = false;
2857   size_t gap = 0x400000;
2858 
2859   // Assert only that the size is a multiple of the page size, since
2860   // that's all that mmap requires, and since that's all we really know
2861   // about at this low abstraction level.  If we need higher alignment,
2862   // we can either pass an alignment to this method or verify alignment
2863   // in one of the methods further up the call chain.  See bug 5044738.
2864   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2865 
2866   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2867   // Give it a try, if the kernel honors the hint we can return immediately.
2868   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2869 
2870   volatile int err = errno;
2871   if (addr == requested_addr) {
2872     return addr;
2873   } else if (addr != NULL) {
2874     pd_unmap_memory(addr, bytes);
2875   }
2876 
2877   if (PrintMiscellaneous && Verbose) {
2878     char buf[256];
2879     buf[0] = '\0';
2880     if (addr == NULL) {
2881       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
2882     }
2883     warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2884             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2885             "%s", bytes, requested_addr, addr, buf);
2886   }
2887 
2888   // Address hint method didn't work.  Fall back to the old method.
2889   // In theory, once SNV becomes our oldest supported platform, this
2890   // code will no longer be needed.
2891   //
2892   // Repeatedly allocate blocks until the block is allocated at the
2893   // right spot. Give up after max_tries.
2894   int i;
2895   for (i = 0; i < max_tries; ++i) {
2896     base[i] = reserve_memory(bytes);
2897 
2898     if (base[i] != NULL) {
2899       // Is this the block we wanted?
2900       if (base[i] == requested_addr) {
2901         size[i] = bytes;
2902         break;
2903       }
2904 
2905       // check that the gap value is right
2906       if (had_top_overlap && !have_adjusted_gap) {
2907         size_t actual_gap = base[i-1] - base[i] - bytes;
2908         if (gap != actual_gap) {
2909           // adjust the gap value and retry the last 2 allocations
2910           assert(i > 0, "gap adjustment code problem");
2911           have_adjusted_gap = true;  // adjust the gap only once, just in case
2912           gap = actual_gap;
2913           if (PrintMiscellaneous && Verbose) {
2914             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2915           }
2916           unmap_memory(base[i], bytes);
2917           unmap_memory(base[i-1], size[i-1]);
2918           i-=2;
2919           continue;
2920         }
2921       }
2922 
2923       // Does this overlap the block we wanted? Give back the overlapped
2924       // parts and try again.
2925       //
2926       // There is still a bug in this code: if top_overlap == bytes,
2927       // the overlap is offset from requested region by the value of gap.
2928       // In this case giving back the overlapped part will not work,
2929       // because we'll give back the entire block at base[i] and
2930       // therefore the subsequent allocation will not generate a new gap.
2931       // This could be fixed with a new algorithm that used larger
2932       // or variable size chunks to find the requested region -
2933       // but such a change would introduce additional complications.
2934       // It's rare enough that the planets align for this bug,
2935       // so we'll just wait for a fix for 6204603/5003415 which
2936       // will provide a mmap flag to allow us to avoid this business.
2937 
2938       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2939       if (top_overlap >= 0 && top_overlap < bytes) {
2940         had_top_overlap = true;
2941         unmap_memory(base[i], top_overlap);
2942         base[i] += top_overlap;
2943         size[i] = bytes - top_overlap;
2944       } else {
2945         size_t bottom_overlap = base[i] + bytes - requested_addr;
2946         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2947           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
2948             warning("attempt_reserve_memory_at: possible alignment bug");
2949           }
2950           unmap_memory(requested_addr, bottom_overlap);
2951           size[i] = bytes - bottom_overlap;
2952         } else {
2953           size[i] = bytes;
2954         }
2955       }
2956     }
2957   }
2958 
2959   // Give back the unused reserved pieces.
2960 
2961   for (int j = 0; j < i; ++j) {
2962     if (base[j] != NULL) {
2963       unmap_memory(base[j], size[j]);
2964     }
2965   }
2966 
2967   return (i < max_tries) ? requested_addr : NULL;
2968 }
2969 
2970 bool os::pd_release_memory(char* addr, size_t bytes) {
2971   size_t size = bytes;
2972   return munmap(addr, size) == 0;
2973 }
2974 
2975 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
2976   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
2977          "addr must be page aligned");
2978   int retVal = mprotect(addr, bytes, prot);
2979   return retVal == 0;
2980 }
2981 
2982 // Protect memory (Used to pass readonly pages through
2983 // JNI GetArray<type>Elements with empty arrays.)
2984 // Also used for the serialization page and for compressed oops null pointer
2985 // checking.
2986 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2987                         bool is_committed) {
2988   unsigned int p = 0;
2989   switch (prot) {
2990   case MEM_PROT_NONE: p = PROT_NONE; break;
2991   case MEM_PROT_READ: p = PROT_READ; break;
2992   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2993   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2994   default:
2995     ShouldNotReachHere();
2996   }
2997   // is_committed is unused.
2998   return solaris_mprotect(addr, bytes, p);
2999 }
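// Illustrative usage sketch (hypothetical addresses, not part of this file):
// flip a committed, page-aligned region to read-only and back again, as is
// done for the serialization page and compressed-oops null checking:
//
//   char* page = ...;  // some committed, page-aligned address
//   os::protect_memory(page, os::vm_page_size(), os::MEM_PROT_READ, true);
//   ...
//   os::protect_memory(page, os::vm_page_size(), os::MEM_PROT_RW, true);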
3000 
3001 // guard_memory and unguard_memory only happen within stack guard pages.
3002 // Since ISM pertains only to the heap, guard and unguard memory should not
3003 // happen with an ISM region.
3004 bool os::guard_memory(char* addr, size_t bytes) {
3005   return solaris_mprotect(addr, bytes, PROT_NONE);
3006 }
3007 
3008 bool os::unguard_memory(char* addr, size_t bytes) {
3009   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
3010 }
3011 
3012 // Large page support
3013 static size_t _large_page_size = 0;
3014 
3015 // Insertion sort for small arrays (descending order).
3016 static void insertion_sort_descending(size_t* array, int len) {
3017   for (int i = 0; i < len; i++) {
3018     size_t val = array[i];
3019     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
3020       size_t tmp = array[key];
3021       array[key] = array[key - 1];
3022       array[key - 1] = tmp;
3023     }
3024   }
3025 }
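// For example (illustrative values only), sorting {8K, 4M, 64K, 256M} with
// len == 4 leaves the array as {256M, 4M, 64K, 8K}.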
3026 
3027 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
3028   const unsigned int usable_count = VM_Version::page_size_count();
3029   if (usable_count == 1) {
3030     return false;
3031   }
3032 
3033   // Find the right getpagesizes interface.  When Solaris 11 is the minimum
3034   // build platform, getpagesizes() (without the '2') can be called directly.
3035   typedef int (*gps_t)(size_t[], int);
3036   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3037   if (gps_func == NULL) {
3038     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3039     if (gps_func == NULL) {
3040       if (warn) {
3041         warning("MPSS is not supported by the operating system.");
3042       }
3043       return false;
3044     }
3045   }
3046 
3047   // Fill the array of page sizes.
3048   int n = (*gps_func)(_page_sizes, page_sizes_max);
3049   assert(n > 0, "Solaris bug?");
3050 
3051   if (n == page_sizes_max) {
3052     // Add a sentinel value (necessary only if the array was completely filled
3053     // since it is static (zeroed at initialization)).
3054     _page_sizes[--n] = 0;
3055     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3056   }
3057   assert(_page_sizes[n] == 0, "missing sentinel");
3058   trace_page_sizes("available page sizes", _page_sizes, n);
3059 
3060   if (n == 1) return false;     // Only one page size available.
3061 
3062   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3063   // select up to usable_count elements.  First sort the array, find the first
3064   // acceptable value, then copy the usable sizes to the top of the array and
3065   // trim the rest.  Make sure to include the default page size :-).
3066   //
3067   // A better policy could get rid of the 4M limit by taking the sizes of the
3068   // important VM memory regions (java heap and possibly the code cache) into
3069   // account.
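  //
  // Illustrative walk-through (hypothetical sizes): if getpagesizes() reported
  // {8K, 64K, 4M, 256M} and usable_count were 4, the descending sort puts 256M
  // first, the default 4M size_limit skips it, the usable list becomes
  // {4M, 64K, 8K, vm_page_size()} followed by the 0 sentinel, and *page_size
  // is set to 4M.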
3070   insertion_sort_descending(_page_sizes, n);
3071   const size_t size_limit =
3072     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3073   int beg;
3074   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */;
3075   const int end = MIN2((int)usable_count, n) - 1;
3076   for (int cur = 0; cur < end; ++cur, ++beg) {
3077     _page_sizes[cur] = _page_sizes[beg];
3078   }
3079   _page_sizes[end] = vm_page_size();
3080   _page_sizes[end + 1] = 0;
3081 
3082   if (_page_sizes[end] > _page_sizes[end - 1]) {
3083     // Default page size is not the smallest; sort again.
3084     insertion_sort_descending(_page_sizes, end + 1);
3085   }
3086   *page_size = _page_sizes[0];
3087 
3088   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3089   return true;
3090 }
3091 
3092 void os::large_page_init() {
3093   if (UseLargePages) {
3094     // print a warning if any large page related flag is specified on command line
3095     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
3096                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3097 
3098     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3099   }
3100 }
3101 
3102 bool os::Solaris::is_valid_page_size(size_t bytes) {
3103   for (int i = 0; _page_sizes[i] != 0; i++) {
3104     if (_page_sizes[i] == bytes) {
3105       return true;
3106     }
3107   }
3108   return false;
3109 }
3110 
3111 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3112   assert(is_valid_page_size(align), err_msg(SIZE_FORMAT " is not a valid page size", align));
3113   assert(is_ptr_aligned((void*) start, align),
3114          err_msg(PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align));
3115   assert(is_size_aligned(bytes, align),
3116          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align));
3117 
3118   // Signal to OS that we want large pages for addresses
3119   // from addr, addr + bytes
3120   struct memcntl_mha mpss_struct;
3121   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3122   mpss_struct.mha_pagesize = align;
3123   mpss_struct.mha_flags = 0;
3124   // Upon successful completion, memcntl() returns 0
3125   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3126     debug_only(warning("Attempt to use MPSS failed."));
3127     return false;
3128   }
3129   return true;
3130 }
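// Illustrative call sketch (hypothetical arguments): ask the kernel to prefer
// 4M pages for a 64M, 4M-aligned region, assuming 4M was reported by
// getpagesizes() as a valid page size:
//   os::Solaris::setup_large_pages(base_addr, 64 * M, 4 * M);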
3131 
3132 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3133   fatal("os::reserve_memory_special should not be called on Solaris.");
3134   return NULL;
3135 }
3136 
3137 bool os::release_memory_special(char* base, size_t bytes) {
3138   fatal("os::release_memory_special should not be called on Solaris.");
3139   return false;
3140 }
3141 
3142 size_t os::large_page_size() {
3143   return _large_page_size;
3144 }
3145 
3146 // MPSS allows the application to commit large page memory on demand; with ISM
3147 // the entire memory region must be allocated as shared memory.
3148 bool os::can_commit_large_page_memory() {
3149   return true;
3150 }
3151 
3152 bool os::can_execute_large_page_memory() {
3153   return true;
3154 }
3155 
3156 // Read calls from inside the vm need to perform state transitions
3157 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3158   size_t res;
3159   JavaThread* thread = (JavaThread*)Thread::current();
3160   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
3161   ThreadBlockInVM tbiv(thread);
3162   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3163   return res;
3164 }
3165 
3166 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
3167   size_t res;
3168   JavaThread* thread = (JavaThread*)Thread::current();
3169   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
3170   ThreadBlockInVM tbiv(thread);
3171   RESTARTABLE(::pread(fd, buf, (size_t) nBytes, offset), res);
3172   return res;
3173 }
3174 
3175 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3176   size_t res;
3177   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
3178          "Assumed _thread_in_native");
3179   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3180   return res;
3181 }
3182 
3183 void os::naked_short_sleep(jlong ms) {
3184   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3185 
3186   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3187   // Solaris requires -lrt for this.
3188   usleep((ms * 1000));
3189 
3190   return;
3191 }
3192 
3193 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3194 void os::infinite_sleep() {
3195   while (true) {    // sleep forever ...
3196     ::sleep(100);   // ... 100 seconds at a time
3197   }
3198 }
3199 
3200 // Used to convert frequent JVM_Yield() to nops
3201 bool os::dont_yield() {
3202   if (DontYieldALot) {
3203     static hrtime_t last_time = 0;
3204     hrtime_t diff = getTimeNanos() - last_time;
3205 
3206     if (diff < DontYieldALotInterval * 1000000) {
3207       return true;
3208     }
3209 
3210     last_time += diff;
3211 
3212     return false;
3213   } else {
3214     return false;
3215   }
3216 }
3217 
3218 // Note that yield semantics are defined by the scheduling class to which
3219 // the thread currently belongs.  Typically, yield will not yield to
3220 // other equal or higher priority threads that reside on the dispatch queues
3221 // of other CPUs.
3222 
3223 void os::naked_yield() {
3224   thr_yield();
3225 }
3226 
3227 // Interface for setting lwp priorities.  If we are using T2 libthread,
3228 // which forces the use of BoundThreads or we manually set UseBoundThreads,
3229 // all of our threads will be assigned to real lwp's.  Using the thr_setprio
3230 // function is meaningless in this mode, so we must adjust the real lwp's priority.
3231 // The routines below implement the getting and setting of lwp priorities.
3232 //
3233 // Note: T2 is now the only supported libthread. UseBoundThreads flag is
3234 //       being deprecated and all threads are now BoundThreads
3235 //
3236 // Note: There are three priority scales used on Solaris.  Java priorities,
3237 //       which range from 1 to 10, the libthread "thr_setprio" scale, which ranges
3238 //       from 0 to 127, and the current scheduling class of the process we
3239 //       are running in.  This is typically from -60 to +60.
3240 //       The setting of the lwp priorities is done after a call to thr_setprio
3241 //       so Java priorities are mapped to libthread priorities and we map from
3242 //       the latter to lwp priorities.  We don't keep priorities stored in
3243 //       Java priorities since some of our worker threads want to set priorities
3244 //       higher than all Java threads.
3245 //
3246 // For related information:
3247 // (1)  man -s 2 priocntl
3248 // (2)  man -s 4 priocntl
3249 // (3)  man dispadmin
3250 // =    librt.so
3251 // =    libthread/common/rtsched.c - thrp_setlwpprio().
3252 // =    ps -cL <pid> ... to validate priority.
3253 // =    sched_get_priority_min and _max
3254 //              pthread_create
3255 //              sched_setparam
3256 //              pthread_setschedparam
3257 //
3258 // Assumptions:
3259 // +    We assume that all threads in the process belong to the same
3260 //              scheduling class, i.e., a homogeneous process.
3261 // +    Must be root or in the IA group to change the "interactive" attribute.
3262 //              Priocntl() will fail silently.  The only indication of failure is when
3263 //              we read back the value and notice that it hasn't changed.
3264 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3265 // +    For RT, change timeslice as well.  Invariant:
3266 //              constant "priority integral"
3267 //              Konst == TimeSlice * (60-Priority)
3268 //              Given a priority, compute appropriate timeslice.
3269 // +    Higher numerical values have higher priority.
3270 
3271 // sched class attributes
3272 typedef struct {
3273   int   schedPolicy;              // classID
3274   int   maxPrio;
3275   int   minPrio;
3276 } SchedInfo;
3277 
3278 
3279 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3280 
3281 #ifdef ASSERT
3282 static int  ReadBackValidate = 1;
3283 #endif
3284 static int  myClass     = 0;
3285 static int  myMin       = 0;
3286 static int  myMax       = 0;
3287 static int  myCur       = 0;
3288 static bool priocntl_enable = false;
3289 
3290 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
3291 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3292 
3293 
3294 // lwp_priocntl_init
3295 //
3296 // Try to determine the priority scale for our process.
3297 //
3298 // Return errno or 0 if OK.
3299 //
3300 static int lwp_priocntl_init() {
3301   int rslt;
3302   pcinfo_t ClassInfo;
3303   pcparms_t ParmInfo;
3304   int i;
3305 
3306   if (!UseThreadPriorities) return 0;
3307 
3308   // If ThreadPriorityPolicy is 1, switch tables
3309   if (ThreadPriorityPolicy == 1) {
3310     for (i = 0; i < CriticalPriority+1; i++)
3311       os::java_to_os_priority[i] = prio_policy1[i];
3312   }
3313   if (UseCriticalJavaThreadPriority) {
3314     // MaxPriority always maps to the FX scheduling class and criticalPrio.
3315     // See set_native_priority() and set_lwp_class_and_priority().
3316     // Save original MaxPriority mapping in case attempt to
3317     // use critical priority fails.
3318     java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3319     // Set negative to distinguish from other priorities
3320     os::java_to_os_priority[MaxPriority] = -criticalPrio;
3321   }
3322 
3323   // Get IDs for a set of well-known scheduling classes.
3324   // TODO-FIXME: GETCLINFO returns the current # of classes in
3325   // the system.  We should have a loop that iterates over the
3326   // classID values, which are known to be "small" integers.
3327 
3328   strcpy(ClassInfo.pc_clname, "TS");
3329   ClassInfo.pc_cid = -1;
3330   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3331   if (rslt < 0) return errno;
3332   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3333   tsLimits.schedPolicy = ClassInfo.pc_cid;
3334   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3335   tsLimits.minPrio = -tsLimits.maxPrio;
3336 
3337   strcpy(ClassInfo.pc_clname, "IA");
3338   ClassInfo.pc_cid = -1;
3339   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3340   if (rslt < 0) return errno;
3341   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3342   iaLimits.schedPolicy = ClassInfo.pc_cid;
3343   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3344   iaLimits.minPrio = -iaLimits.maxPrio;
3345 
3346   strcpy(ClassInfo.pc_clname, "RT");
3347   ClassInfo.pc_cid = -1;
3348   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3349   if (rslt < 0) return errno;
3350   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3351   rtLimits.schedPolicy = ClassInfo.pc_cid;
3352   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3353   rtLimits.minPrio = 0;
3354 
3355   strcpy(ClassInfo.pc_clname, "FX");
3356   ClassInfo.pc_cid = -1;
3357   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3358   if (rslt < 0) return errno;
3359   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3360   fxLimits.schedPolicy = ClassInfo.pc_cid;
3361   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3362   fxLimits.minPrio = 0;
3363 
3364   // Query our "current" scheduling class.
3365   // This will normally be IA, TS or, rarely, FX or RT.
3366   memset(&ParmInfo, 0, sizeof(ParmInfo));
3367   ParmInfo.pc_cid = PC_CLNULL;
3368   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3369   if (rslt < 0) return errno;
3370   myClass = ParmInfo.pc_cid;
3371 
3372   // We now know our scheduling classId, get specific information
3373   // about the class.
3374   ClassInfo.pc_cid = myClass;
3375   ClassInfo.pc_clname[0] = 0;
3376   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3377   if (rslt < 0) return errno;
3378 
3379   if (ThreadPriorityVerbose) {
3380     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3381   }
3382 
3383   memset(&ParmInfo, 0, sizeof(pcparms_t));
3384   ParmInfo.pc_cid = PC_CLNULL;
3385   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3386   if (rslt < 0) return errno;
3387 
3388   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3389     myMin = rtLimits.minPrio;
3390     myMax = rtLimits.maxPrio;
3391   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3392     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3393     myMin = iaLimits.minPrio;
3394     myMax = iaLimits.maxPrio;
3395     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3396   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3397     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3398     myMin = tsLimits.minPrio;
3399     myMax = tsLimits.maxPrio;
3400     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3401   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3402     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3403     myMin = fxLimits.minPrio;
3404     myMax = fxLimits.maxPrio;
3405     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3406   } else {
3407     // No clue - punt
3408     if (ThreadPriorityVerbose) {
3409       tty->print_cr("Unknown scheduling class: %s ... \n",
3410                     ClassInfo.pc_clname);
3411     }
3412     return EINVAL;      // no clue, punt
3413   }
3414 
3415   if (ThreadPriorityVerbose) {
3416     tty->print_cr("Thread priority Range: [%d..%d]\n", myMin, myMax);
3417   }
3418 
3419   priocntl_enable = true;  // Enable changing priorities
3420   return 0;
3421 }
3422 
3423 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3424 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3425 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3426 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3427 
3428 
3429 // scale_to_lwp_priority
3430 //
3431 // Convert from the libthread "thr_setprio" scale to our current
3432 // lwp scheduling class scale.
3433 //
3434 static int scale_to_lwp_priority(int rMin, int rMax, int x) {
3435   int v;
3436 
3437   if (x == 127) return rMax;            // avoid round-down
3438   v = (((x*(rMax-rMin)))/128)+rMin;
3439   return v;
3440 }
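// Worked example (assuming the common TS user priority range of -60..60):
//   scale_to_lwp_priority(-60, 60,   0) == (0   * 120) / 128 - 60 == -60
//   scale_to_lwp_priority(-60, 60,  64) == (64  * 120) / 128 - 60 ==   0
//   scale_to_lwp_priority(-60, 60, 127) == 60  (returned directly to avoid round-down)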
3441 
3442 
3443 // set_lwp_class_and_priority
3444 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3445                                int newPrio, int new_class, bool scale) {
3446   int rslt;
3447   int Actual, Expected, prv;
3448   pcparms_t ParmInfo;                   // for GET-SET
3449 #ifdef ASSERT
3450   pcparms_t ReadBack;                   // for readback
3451 #endif
3452 
3453   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3454   // Query current values.
3455   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3456   // Cache "pcparms_t" in global ParmCache.
3457   // TODO: elide set-to-same-value
3458 
3459   // If something went wrong on init, don't change priorities.
3460   if (!priocntl_enable) {
3461     if (ThreadPriorityVerbose) {
3462       tty->print_cr("Trying to set priority but init failed, ignoring");
3463     }
3464     return EINVAL;
3465   }
3466 
3467   // If the lwp hasn't started yet, just return;
3468   // the _start routine will call us again.
3469   if (lwpid <= 0) {
3470     if (ThreadPriorityVerbose) {
3471       tty->print_cr("deferring the set_lwp_class_and_priority of thread "
3472                     INTPTR_FORMAT " to %d, lwpid not set",
3473                     ThreadID, newPrio);
3474     }
3475     return 0;
3476   }
3477 
3478   if (ThreadPriorityVerbose) {
3479     tty->print_cr ("set_lwp_class_and_priority("
3480                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3481                    ThreadID, lwpid, newPrio);
3482   }
3483 
3484   memset(&ParmInfo, 0, sizeof(pcparms_t));
3485   ParmInfo.pc_cid = PC_CLNULL;
3486   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3487   if (rslt < 0) return errno;
3488 
3489   int cur_class = ParmInfo.pc_cid;
3490   ParmInfo.pc_cid = (id_t)new_class;
3491 
3492   if (new_class == rtLimits.schedPolicy) {
3493     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3494     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3495                                                        rtLimits.maxPrio, newPrio)
3496                                : newPrio;
3497     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3498     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3499     if (ThreadPriorityVerbose) {
3500       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3501     }
3502   } else if (new_class == iaLimits.schedPolicy) {
3503     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3504     int maxClamped     = MIN2(iaLimits.maxPrio,
3505                               cur_class == new_class
3506                               ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3507     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3508                                                        maxClamped, newPrio)
3509                                : newPrio;
3510     iaInfo->ia_uprilim = cur_class == new_class
3511                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3512     iaInfo->ia_mode    = IA_NOCHANGE;
3513     if (ThreadPriorityVerbose) {
3514       tty->print_cr("IA: [%d...%d] %d->%d\n",
3515                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3516     }
3517   } else if (new_class == tsLimits.schedPolicy) {
3518     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3519     int maxClamped     = MIN2(tsLimits.maxPrio,
3520                               cur_class == new_class
3521                               ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3522     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3523                                                        maxClamped, newPrio)
3524                                : newPrio;
3525     tsInfo->ts_uprilim = cur_class == new_class
3526                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3527     if (ThreadPriorityVerbose) {
3528       tty->print_cr("TS: [%d...%d] %d->%d\n",
3529                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3530     }
3531   } else if (new_class == fxLimits.schedPolicy) {
3532     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3533     int maxClamped     = MIN2(fxLimits.maxPrio,
3534                               cur_class == new_class
3535                               ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3536     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3537                                                        maxClamped, newPrio)
3538                                : newPrio;
3539     fxInfo->fx_uprilim = cur_class == new_class
3540                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3541     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3542     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3543     if (ThreadPriorityVerbose) {
3544       tty->print_cr("FX: [%d...%d] %d->%d\n",
3545                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3546     }
3547   } else {
3548     if (ThreadPriorityVerbose) {
3549       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3550     }
3551     return EINVAL;    // no clue, punt
3552   }
3553 
3554   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3555   if (ThreadPriorityVerbose && rslt) {
3556     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3557   }
3558   if (rslt < 0) return errno;
3559 
3560 #ifdef ASSERT
3561   // Sanity check: read back what we just attempted to set.
3562   // In theory it could have changed in the interim ...
3563   //
3564   // The priocntl system call is tricky.
3565   // Sometimes it'll validate the priority value argument and
3566   // return EINVAL if unhappy.  At other times it fails silently.
3567   // Readbacks are prudent.
3568 
3569   if (!ReadBackValidate) return 0;
3570 
3571   memset(&ReadBack, 0, sizeof(pcparms_t));
3572   ReadBack.pc_cid = PC_CLNULL;
3573   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3574   assert(rslt >= 0, "priocntl failed");
3575   Actual = Expected = 0xBAD;
3576   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3577   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3578     Actual   = RTPRI(ReadBack)->rt_pri;
3579     Expected = RTPRI(ParmInfo)->rt_pri;
3580   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3581     Actual   = IAPRI(ReadBack)->ia_upri;
3582     Expected = IAPRI(ParmInfo)->ia_upri;
3583   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3584     Actual   = TSPRI(ReadBack)->ts_upri;
3585     Expected = TSPRI(ParmInfo)->ts_upri;
3586   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3587     Actual   = FXPRI(ReadBack)->fx_upri;
3588     Expected = FXPRI(ParmInfo)->fx_upri;
3589   } else {
3590     if (ThreadPriorityVerbose) {
3591       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3592                     ParmInfo.pc_cid);
3593     }
3594   }
3595 
3596   if (Actual != Expected) {
3597     if (ThreadPriorityVerbose) {
3598       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3599                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3600     }
3601   }
3602 #endif
3603 
3604   return 0;
3605 }
3606 
3607 // Solaris only gives access to 128 real priorities at a time,
3608 // so we expand Java's ten to fill this range.  This would be better
3609 // if we dynamically adjusted relative priorities.
3610 //
3611 // The ThreadPriorityPolicy option allows us to select 2 different
3612 // priority scales.
3613 //
3614 // ThreadPriorityPolicy=0
3615 // Since Solaris' default priority is MaximumPriority, we do not
3616 // set a priority lower than Max unless a priority lower than
3617 // NormPriority is requested.
3618 //
3619 // ThreadPriorityPolicy=1
3620 // This mode causes the priority table to get filled with
3621 // linear values.  NormPriority gets mapped to 50% of the
3622 // maximum priority and so on.  This will cause VM threads
3623 // to get unfair treatment against other Solaris processes
3624 // which do not explicitly alter their thread priorities.
3625 
3626 int os::java_to_os_priority[CriticalPriority + 1] = {
3627   -99999,         // 0 Entry should never be used
3628 
3629   0,              // 1 MinPriority
3630   32,             // 2
3631   64,             // 3
3632 
3633   96,             // 4
3634   127,            // 5 NormPriority
3635   127,            // 6
3636 
3637   127,            // 7
3638   127,            // 8
3639   127,            // 9 NearMaxPriority
3640 
3641   127,            // 10 MaxPriority
3642 
3643   -criticalPrio   // 11 CriticalPriority
3644 };
3645 
3646 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3647   OSThread* osthread = thread->osthread();
3648 
3649   // Save requested priority in case the thread hasn't been started
3650   osthread->set_native_priority(newpri);
3651 
3652   // Check for critical priority request
3653   bool fxcritical = false;
3654   if (newpri == -criticalPrio) {
3655     fxcritical = true;
3656     newpri = criticalPrio;
3657   }
3658 
3659   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3660   if (!UseThreadPriorities) return OS_OK;
3661 
3662   int status = 0;
3663 
3664   if (!fxcritical) {
3665     // Use thr_setprio only if we have a priority that thr_setprio understands
3666     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3667   }
3668 
3669   int lwp_status =
3670           set_lwp_class_and_priority(osthread->thread_id(),
3671                                      osthread->lwp_id(),
3672                                      newpri,
3673                                      fxcritical ? fxLimits.schedPolicy : myClass,
3674                                      !fxcritical);
3675   if (lwp_status != 0 && fxcritical) {
3676     // Try again, this time without changing the scheduling class
3677     newpri = java_MaxPriority_to_os_priority;
3678     lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3679                                             osthread->lwp_id(),
3680                                             newpri, myClass, false);
3681   }
3682   status |= lwp_status;
3683   return (status == 0) ? OS_OK : OS_ERR;
3684 }
3685 
3686 
3687 OSReturn os::get_native_priority(const Thread* const thread,
3688                                  int *priority_ptr) {
3689   int p;
3690   if (!UseThreadPriorities) {
3691     *priority_ptr = NormalPriority;
3692     return OS_OK;
3693   }
3694   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3695   if (status != 0) {
3696     return OS_ERR;
3697   }
3698   *priority_ptr = p;
3699   return OS_OK;
3700 }
3701 
3702 
3703 // Hint to the underlying OS that a task switch would not be good.
3704 // Void return because it's a hint and can fail.
3705 void os::hint_no_preempt() {
3706   schedctl_start(schedctl_init());
3707 }
3708 
3709 static void resume_clear_context(OSThread *osthread) {
3710   osthread->set_ucontext(NULL);
3711 }
3712 
3713 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3714   osthread->set_ucontext(context);
3715 }
3716 
3717 static Semaphore sr_semaphore;
3718 
3719 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
3720   // Save and restore errno to avoid confusing native code with EINTR
3721   // after sigsuspend.
3722   int old_errno = errno;
3723 
3724   OSThread* osthread = thread->osthread();
3725   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3726 
3727   os::SuspendResume::State current = osthread->sr.state();
3728   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3729     suspend_save_context(osthread, uc);
3730 
3731     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3732     os::SuspendResume::State state = osthread->sr.suspended();
3733     if (state == os::SuspendResume::SR_SUSPENDED) {
3734       sigset_t suspend_set;  // signals for sigsuspend()
3735 
3736       // get current set of blocked signals and unblock resume signal
3737       thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
3738       sigdelset(&suspend_set, os::Solaris::SIGasync());
3739 
3740       sr_semaphore.signal();
3741       // wait here until we are resumed
3742       while (1) {
3743         sigsuspend(&suspend_set);
3744 
3745         os::SuspendResume::State result = osthread->sr.running();
3746         if (result == os::SuspendResume::SR_RUNNING) {
3747           sr_semaphore.signal();
3748           break;
3749         }
3750       }
3751 
3752     } else if (state == os::SuspendResume::SR_RUNNING) {
3753       // request was cancelled, continue
3754     } else {
3755       ShouldNotReachHere();
3756     }
3757 
3758     resume_clear_context(osthread);
3759   } else if (current == os::SuspendResume::SR_RUNNING) {
3760     // request was cancelled, continue
3761   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3762     // ignore
3763   } else {
3764     // ignore
3765   }
3766 
3767   errno = old_errno;
3768 }
3769 
3770 void os::print_statistics() {
3771 }
3772 
3773 int os::message_box(const char* title, const char* message) {
3774   int i;
3775   fdStream err(defaultStream::error_fd());
3776   for (i = 0; i < 78; i++) err.print_raw("=");
3777   err.cr();
3778   err.print_raw_cr(title);
3779   for (i = 0; i < 78; i++) err.print_raw("-");
3780   err.cr();
3781   err.print_raw_cr(message);
3782   for (i = 0; i < 78; i++) err.print_raw("=");
3783   err.cr();
3784 
3785   char buf[16];
3786   // Prevent process from exiting upon "read error" without consuming all CPU
3787   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3788 
3789   return buf[0] == 'y' || buf[0] == 'Y';
3790 }
3791 
3792 static int sr_notify(OSThread* osthread) {
3793   int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
3794   assert_status(status == 0, status, "thr_kill");
3795   return status;
3796 }
3797 
3798 // "Randomly" selected value for how long we want to spin
3799 // before bailing out on suspending a thread, and how often
3800 // we send a signal to a thread we want to resume
3801 static const int RANDOMLY_LARGE_INTEGER = 1000000;
3802 static const int RANDOMLY_LARGE_INTEGER2 = 100;
3803 
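// Protocol sketch (descriptive only): do_suspend() moves the target thread to
// SR_SUSPEND_REQUEST, pokes it with SIGasync via sr_notify(), and waits on
// sr_semaphore for SR_handler() to acknowledge the suspension; the handler
// then parks in sigsuspend() until do_resume() requests a wakeup and signals
// the thread again.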
3804 static bool do_suspend(OSThread* osthread) {
3805   assert(osthread->sr.is_running(), "thread should be running");
3806   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
3807 
3808   // mark as suspended and send signal
3809   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3810     // failed to switch, state wasn't running?
3811     ShouldNotReachHere();
3812     return false;
3813   }
3814 
3815   if (sr_notify(osthread) != 0) {
3816     ShouldNotReachHere();
3817   }
3818 
3819   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3820   while (true) {
3821     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
3822       break;
3823     } else {
3824       // timeout
3825       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3826       if (cancelled == os::SuspendResume::SR_RUNNING) {
3827         return false;
3828       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3829         // make sure that we consume the signal on the semaphore as well
3830         sr_semaphore.wait();
3831         break;
3832       } else {
3833         ShouldNotReachHere();
3834         return false;
3835       }
3836     }
3837   }
3838 
3839   guarantee(osthread->sr.is_suspended(), "Must be suspended");
3840   return true;
3841 }
3842 
3843 static void do_resume(OSThread* osthread) {
3844   assert(osthread->sr.is_suspended(), "thread should be suspended");
3845   assert(!sr_semaphore.trywait(), "invalid semaphore state");
3846 
3847   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3848     // failed to switch to WAKEUP_REQUEST
3849     ShouldNotReachHere();
3850     return;
3851   }
3852 
3853   while (true) {
3854     if (sr_notify(osthread) == 0) {
3855       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
3856         if (osthread->sr.is_running()) {
3857           return;
3858         }
3859       }
3860     } else {
3861       ShouldNotReachHere();
3862     }
3863   }
3864 
3865   guarantee(osthread->sr.is_running(), "Must be running!");
3866 }
3867 
3868 void os::SuspendedThreadTask::internal_do_task() {
3869   if (do_suspend(_thread->osthread())) {
3870     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3871     do_task(context);
3872     do_resume(_thread->osthread());
3873   }
3874 }
3875 
3876 class PcFetcher : public os::SuspendedThreadTask {
3877  public:
3878   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3879   ExtendedPC result();
3880  protected:
3881   void do_task(const os::SuspendedThreadTaskContext& context);
3882  private:
3883   ExtendedPC _epc;
3884 };
3885 
3886 ExtendedPC PcFetcher::result() {
3887   guarantee(is_done(), "task is not done yet.");
3888   return _epc;
3889 }
3890 
3891 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3892   Thread* thread = context.thread();
3893   OSThread* osthread = thread->osthread();
3894   if (osthread->ucontext() != NULL) {
3895     _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
3896   } else {
3897     // NULL context is unexpected, double-check this is the VMThread
3898     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3899   }
3900 }
3901 
3902 // A lightweight implementation that does not suspend the target thread and
3903 // thus returns only a hint. Used for profiling only!
3904 ExtendedPC os::get_thread_pc(Thread* thread) {
3905   // Make sure that it is called by the watcher and the Threads lock is owned.
3906   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
3907   // For now, is only used to profile the VM Thread
3908   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3909   PcFetcher fetcher(thread);
3910   fetcher.run();
3911   return fetcher.result();
3912 }
3913 
3914 
3915 // This does not do anything on Solaris. This is basically a hook for being
3916 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
3917 void os::os_exception_wrapper(java_call_t f, JavaValue* value,
3918                               methodHandle* method, JavaCallArguments* args,
3919                               Thread* thread) {
3920   f(value, method, args, thread);
3921 }
3922 
3923 // This routine may be used by user applications as a "hook" to catch signals.
3924 // The user-defined signal handler must pass unrecognized signals to this
3925 // routine, and if it returns true (non-zero), then the signal handler must
3926 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
3927 // routine will never return false (zero), but instead will execute a VM panic
3928 // routine to kill the process.
3929 //
3930 // If this routine returns false, it is OK to call it again.  This allows
3931 // the user-defined signal handler to perform checks either before or after
3932 // the VM performs its own checks.  Naturally, the user code would be making
3933 // a serious error if it tried to handle an exception (such as a null check
3934 // or breakpoint) that the VM was generating for its own correct operation.
3935 //
3936 // This routine may recognize any of the following kinds of signals:
3937 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
3938 // os::Solaris::SIGasync
3939 // It should be consulted by handlers for any of those signals.
3940 // It explicitly does not recognize os::Solaris::SIGinterrupt
3941 //
3942 // The caller of this routine must pass in the three arguments supplied
3943 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
3944 // field of the structure passed to sigaction().  This routine assumes that
3945 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3946 //
3947 // Note that the VM will print warnings if it detects conflicting signal
3948 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3949 //
3950 extern "C" JNIEXPORT int JVM_handle_solaris_signal(int signo,
3951                                                    siginfo_t* siginfo,
3952                                                    void* ucontext,
3953                                                    int abort_if_unrecognized);
3954 
3955 
3956 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
3957   int orig_errno = errno;  // Preserve errno value over signal handler.
3958   JVM_handle_solaris_signal(sig, info, ucVoid, true);
3959   errno = orig_errno;
3960 }
3961 
3962 // Do not delete - if guarantee is ever removed, a signal handler (even empty)
3963 // is needed to provoke threads blocked on IO to return with EINTR
3964 // Note: this explicitly does NOT call JVM_handle_solaris_signal and
3965 // does NOT participate in signal chaining due to requirement for
3966 // NOT setting SA_RESTART to make EINTR work.
3967 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
3968   if (UseSignalChaining) {
3969     struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
3970     if (actp && actp->sa_handler) {
3971       vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
3972     }
3973   }
3974 }
3975 
3976 // This boolean allows users to forward their own non-matching signals
3977 // to JVM_handle_solaris_signal, harmlessly.
3978 bool os::Solaris::signal_handlers_are_installed = false;
3979 
3980 // For signal-chaining
3981 bool os::Solaris::libjsig_is_loaded = false;
3982 typedef struct sigaction *(*get_signal_t)(int);
3983 get_signal_t os::Solaris::get_signal_action = NULL;
3984 
3985 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
3986   struct sigaction *actp = NULL;
3987 
3988   if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
3989     // Retrieve the old signal handler from libjsig
3990     actp = (*get_signal_action)(sig);
3991   }
3992   if (actp == NULL) {
3993     // Retrieve the preinstalled signal handler from jvm
3994     actp = get_preinstalled_handler(sig);
3995   }
3996 
3997   return actp;
3998 }
3999 
4000 static bool call_chained_handler(struct sigaction *actp, int sig,
4001                                  siginfo_t *siginfo, void *context) {
4002   // Call the old signal handler
4003   if (actp->sa_handler == SIG_DFL) {
4004     // It's more reasonable to let jvm treat it as an unexpected exception
4005     // instead of taking the default action.
4006     return false;
4007   } else if (actp->sa_handler != SIG_IGN) {
4008     if ((actp->sa_flags & SA_NODEFER) == 0) {
4009       // automatically block the signal
4010       sigaddset(&(actp->sa_mask), sig);
4011     }
4012 
4013     sa_handler_t hand;
4014     sa_sigaction_t sa;
4015     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
4016     // retrieve the chained handler
4017     if (siginfo_flag_set) {
4018       sa = actp->sa_sigaction;
4019     } else {
4020       hand = actp->sa_handler;
4021     }
4022 
4023     if ((actp->sa_flags & SA_RESETHAND) != 0) {
4024       actp->sa_handler = SIG_DFL;
4025     }
4026 
4027     // try to honor the signal mask
4028     sigset_t oset;
4029     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
4030 
4031     // call into the chained handler
4032     if (siginfo_flag_set) {
4033       (*sa)(sig, siginfo, context);
4034     } else {
4035       (*hand)(sig);
4036     }
4037 
4038     // restore the signal mask
4039     thr_sigsetmask(SIG_SETMASK, &oset, 0);
4040   }
4041   // Tell jvm's signal handler the signal is taken care of.
4042   return true;
4043 }
4044 
4045 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4046   bool chained = false;
4047   // signal-chaining
4048   if (UseSignalChaining) {
4049     struct sigaction *actp = get_chained_signal_action(sig);
4050     if (actp != NULL) {
4051       chained = call_chained_handler(actp, sig, siginfo, context);
4052     }
4053   }
4054   return chained;
4055 }
4056 
4057 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4058   assert((chainedsigactions != (struct sigaction *)NULL) &&
4059          (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
4060   if (preinstalled_sigs[sig] != 0) {
4061     return &chainedsigactions[sig];
4062   }
4063   return NULL;
4064 }
4065 
4066 void os::Solaris::save_preinstalled_handler(int sig,
4067                                             struct sigaction& oldAct) {
4068   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4069   assert((chainedsigactions != (struct sigaction *)NULL) &&
4070          (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
4071   chainedsigactions[sig] = oldAct;
4072   preinstalled_sigs[sig] = 1;
4073 }
4074 
4075 void os::Solaris::set_signal_handler(int sig, bool set_installed,
4076                                      bool oktochain) {
4077   // Check for overwrite.
4078   struct sigaction oldAct;
4079   sigaction(sig, (struct sigaction*)NULL, &oldAct);
4080   void* oldhand =
4081       oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
4082                           : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
4083   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4084       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4085       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4086     if (AllowUserSignalHandlers || !set_installed) {
4087       // Do not overwrite; user takes responsibility to forward to us.
4088       return;
4089     } else if (UseSignalChaining) {
4090       if (oktochain) {
4091         // save the old handler in jvm
4092         save_preinstalled_handler(sig, oldAct);
4093       } else {
4094         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4095       }
4096       // libjsig also interposes the sigaction() call below and saves the
4097       // old sigaction on its own.
4098     } else {
4099       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4100                     "%#lx for signal %d.", (long)oldhand, sig));
4101     }
4102   }
4103 
4104   struct sigaction sigAct;
4105   sigfillset(&(sigAct.sa_mask));
4106   sigAct.sa_handler = SIG_DFL;
4107 
4108   sigAct.sa_sigaction = signalHandler;
4109   // Handle SIGSEGV on alternate signal stack if
4110   // not using stack banging
4111   if (!UseStackBanging && sig == SIGSEGV) {
4112     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4113   } else if (sig == os::Solaris::SIGinterrupt()) {
4114     // Interruptible i/o requires SA_RESTART cleared so EINTR
4115     // is returned instead of restarting system calls
4116     sigemptyset(&sigAct.sa_mask);
4117     sigAct.sa_handler = NULL;
4118     sigAct.sa_flags = SA_SIGINFO;
4119     sigAct.sa_sigaction = sigINTRHandler;
4120   } else {
4121     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4122   }
4123   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4124 
4125   sigaction(sig, &sigAct, &oldAct);
4126 
4127   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4128                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4129   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4130 }
4131 
4132 
4133 #define DO_SIGNAL_CHECK(sig)                      \
4134   do {                                            \
4135     if (!sigismember(&check_signal_done, sig)) {  \
4136       os::Solaris::check_signal_handler(sig);     \
4137     }                                             \
4138   } while (0)
4139 
4140 // This method is a periodic task to check for misbehaving JNI applications
4141 // under CheckJNI; we can add any other periodic checks here.
4142 
4143 void os::run_periodic_checks() {
4144   // A big source of grief is hijacking virt. addr 0x0 on Solaris,
4145   // thereby preventing NULL checks.
4146   if (!check_addr0_done) check_addr0_done = check_addr0(tty);
4147 
4148   if (check_signals == false) return;
4149 
4150   // SEGV and BUS, if overridden, could potentially prevent
4151   // generation of hs*.log in the event of a crash; debugging
4152   // such a case can be very challenging, so we absolutely
4153   // check the following for good measure:
4154   DO_SIGNAL_CHECK(SIGSEGV);
4155   DO_SIGNAL_CHECK(SIGILL);
4156   DO_SIGNAL_CHECK(SIGFPE);
4157   DO_SIGNAL_CHECK(SIGBUS);
4158   DO_SIGNAL_CHECK(SIGPIPE);
4159   DO_SIGNAL_CHECK(SIGXFSZ);
4160 
4161   // ReduceSignalUsage allows the user to override these handlers
4162   // see comments at the very top and jvm_solaris.h
4163   if (!ReduceSignalUsage) {
4164     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4165     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4166     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4167     DO_SIGNAL_CHECK(BREAK_SIGNAL);
4168   }
4169 
4170   // See comments above for using JVM1/JVM2 and UseAltSigs
4171   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4172   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4173 
4174 }
4175 
4176 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4177 
4178 static os_sigaction_t os_sigaction = NULL;
4179 
4180 void os::Solaris::check_signal_handler(int sig) {
4181   char buf[O_BUFLEN];
4182   address jvmHandler = NULL;
4183 
4184   struct sigaction act;
4185   if (os_sigaction == NULL) {
4186     // only trust the default sigaction, in case it has been interposed
4187     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4188     if (os_sigaction == NULL) return;
4189   }
4190 
4191   os_sigaction(sig, (struct sigaction*)NULL, &act);
4192 
4193   address thisHandler = (act.sa_flags & SA_SIGINFO)
4194     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4195     : CAST_FROM_FN_PTR(address, act.sa_handler);
4196 
4197 
4198   switch (sig) {
4199   case SIGSEGV:
4200   case SIGBUS:
4201   case SIGFPE:
4202   case SIGPIPE:
4203   case SIGXFSZ:
4204   case SIGILL:
4205     jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4206     break;
4207 
4208   case SHUTDOWN1_SIGNAL:
4209   case SHUTDOWN2_SIGNAL:
4210   case SHUTDOWN3_SIGNAL:
4211   case BREAK_SIGNAL:
4212     jvmHandler = (address)user_handler();
4213     break;
4214 
4215   default:
4216     int intrsig = os::Solaris::SIGinterrupt();
4217     int asynsig = os::Solaris::SIGasync();
4218 
4219     if (sig == intrsig) {
4220       jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4221     } else if (sig == asynsig) {
4222       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4223     } else {
4224       return;
4225     }
4226     break;
4227   }
4228 
4229 
4230   if (thisHandler != jvmHandler) {
4231     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4232     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4233     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4234     // No need to check this sig any longer
4235     sigaddset(&check_signal_done, sig);
4236     // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
4237     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
4238       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
4239                     exception_name(sig, buf, O_BUFLEN));
4240     }
4241   } else if (os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4242     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4243     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4244     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
4245     // No need to check this sig any longer
4246     sigaddset(&check_signal_done, sig);
4247   }
4248 
4249   // Print all the signal handler state
4250   if (sigismember(&check_signal_done, sig)) {
4251     print_signal_handlers(tty, buf, O_BUFLEN);
4252   }
4253 
4254 }
4255 
4256 void os::Solaris::install_signal_handlers() {
4257   bool libjsigdone = false;
4258   signal_handlers_are_installed = true;
4259 
4260   // signal-chaining
4261   typedef void (*signal_setting_t)();
4262   signal_setting_t begin_signal_setting = NULL;
4263   signal_setting_t end_signal_setting = NULL;
4264   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4265                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4266   if (begin_signal_setting != NULL) {
4267     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4268                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4269     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4270                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4271     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4272                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4273     libjsig_is_loaded = true;
4274     if (os::Solaris::get_libjsig_version != NULL) {
4275       libjsigversion =  (*os::Solaris::get_libjsig_version)();
4276     }
4277     assert(UseSignalChaining, "should enable signal-chaining");
4278   }
4279   if (libjsig_is_loaded) {
4280     // Tell libjsig jvm is setting signal handlers
4281     (*begin_signal_setting)();
4282   }
4283 
4284   set_signal_handler(SIGSEGV, true, true);
4285   set_signal_handler(SIGPIPE, true, true);
4286   set_signal_handler(SIGXFSZ, true, true);
4287   set_signal_handler(SIGBUS, true, true);
4288   set_signal_handler(SIGILL, true, true);
4289   set_signal_handler(SIGFPE, true, true);
4290 
4291 
4292   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4293 
4294     // Pre-1.4.1 libjsig is limited to chaining signals <= 32, so it
4295     // cannot register overridable signals, which might be > 32.
4296     if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
4297       // Tell libjsig jvm has finished setting signal handlers
4298       (*end_signal_setting)();
4299       libjsigdone = true;
4300     }
4301   }
4302 
4303   // Never ok to chain our SIGinterrupt
4304   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4305   set_signal_handler(os::Solaris::SIGasync(), true, true);
4306 
4307   if (libjsig_is_loaded && !libjsigdone) {
4308     // Tell libjsig jvm has finished setting signal handlers
4309     (*end_signal_setting)();
4310   }
4311 
4312   // We don't activate the signal checker if libjsig is in place; we trust ourselves,
4313   // and if a user signal handler is installed all bets are off.
4314   // Log that signal checking is off only if -verbose:jni is specified.
4315   if (CheckJNICalls) {
4316     if (libjsig_is_loaded) {
4317       if (PrintJNIResolving) {
4318         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4319       }
4320       check_signals = false;
4321     }
4322     if (AllowUserSignalHandlers) {
4323       if (PrintJNIResolving) {
4324         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4325       }
4326       check_signals = false;
4327     }
4328   }
4329 }
4330 
4331 
4332 void report_error(const char* file_name, int line_no, const char* title,
4333                   const char* format, ...);
4334 
4335 const char * signames[] = {
4336   "SIG0",
4337   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4338   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4339   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4340   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4341   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4342   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4343   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4344   "SIGCANCEL", "SIGLOST"
4345 };
4346 
4347 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4348   if (0 < exception_code && exception_code <= SIGRTMAX) {
4349     // signal
4350     if (exception_code < sizeof(signames)/sizeof(const char*)) {
4351       jio_snprintf(buf, size, "%s", signames[exception_code]);
4352     } else {
4353       jio_snprintf(buf, size, "SIG%d", exception_code);
4354     }
4355     return buf;
4356   } else {
4357     return NULL;
4358   }
4359 }
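// For example, exception_name(SIGSEGV, buf, sizeof(buf)) yields "SIGSEGV"
// (SIGSEGV is 11 on Solaris, well within the signames table), while a signal
// above the table but no greater than SIGRTMAX is formatted as "SIG<n>".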
4360 
4361 // (Static) wrapper for getisax(2) call.
4362 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4363 
4364 // (Static) wrappers for the liblgrp API
4365 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4366 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4367 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4368 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4369 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4370 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4371 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4372 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4373 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4374 
4375 // (Static) wrapper for meminfo() call.
4376 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4377 
4378 static address resolve_symbol_lazy(const char* name) {
4379   address addr = (address) dlsym(RTLD_DEFAULT, name);
4380   if (addr == NULL) {
4381     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4382     addr = (address) dlsym(RTLD_NEXT, name);
4383   }
4384   return addr;
4385 }
4386 
4387 static address resolve_symbol(const char* name) {
4388   address addr = resolve_symbol_lazy(name);
4389   if (addr == NULL) {
4390     fatal(dlerror());
4391   }
4392   return addr;
4393 }
4394 
4395 void os::Solaris::libthread_init() {
4396   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4397 
4398   lwp_priocntl_init();
4399 
4400   // RTLD_DEFAULT was not defined on some early versions of 2.5.1 (SunOS 5.5.1)
4401   if (func == NULL) {
4402     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
4403     // Guarantee that this VM is running on a new enough OS (5.6 or
4404     // later) that it will have a new enough libthread.so.
4405     guarantee(func != NULL, "libthread.so is too old.");
4406   }
4407 
4408   int size;
4409   void (*handler_info_func)(address *, int *);
4410   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4411   handler_info_func(&handler_start, &size);
4412   handler_end = handler_start + size;
4413 }
4414 
4415 
4416 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4417 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4418 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4419 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4420 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4421 int os::Solaris::_mutex_scope = USYNC_THREAD;
4422 
4423 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4424 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4425 int_fnP_cond_tP os::Solaris::_cond_signal;
4426 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4427 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4428 int_fnP_cond_tP os::Solaris::_cond_destroy;
4429 int os::Solaris::_cond_scope = USYNC_THREAD;
4430 
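     // Bind the VM-internal mutex/condvar entry points declared above to one of
     // three backends: _lwp_* (UseLWPSynchronization), pthread_* (UsePthreads), or
     // the default libthread mutex_*/cond_* calls.  Illustrative use of the
     // selected primitives (a sketch only, not VM code; 'm', 'cv' and 'ready'
     // are hypothetical):
     //
     //   os::Solaris::mutex_lock(m);
     //   while (!ready) os::Solaris::cond_wait(cv, m);
     //   os::Solaris::mutex_unlock(m);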
4431 void os::Solaris::synchronization_init() {
4432   if (UseLWPSynchronization) {
4433     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4434     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4435     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4436     os::Solaris::set_mutex_init(lwp_mutex_init);
4437     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4438     os::Solaris::set_mutex_scope(USYNC_THREAD);
4439 
4440     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4441     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4442     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4443     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4444     os::Solaris::set_cond_init(lwp_cond_init);
4445     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4446     os::Solaris::set_cond_scope(USYNC_THREAD);
4447   } else {
4448     os::Solaris::set_mutex_scope(USYNC_THREAD);
4449     os::Solaris::set_cond_scope(USYNC_THREAD);
4450 
4451     if (UsePthreads) {
4452       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4453       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4454       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4455       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4456       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4457 
4458       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4459       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4460       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4461       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4462       os::Solaris::set_cond_init(pthread_cond_default_init);
4463       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4464     } else {
4465       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4466       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4467       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4468       os::Solaris::set_mutex_init(::mutex_init);
4469       os::Solaris::set_mutex_destroy(::mutex_destroy);
4470 
4471       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4472       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4473       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4474       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4475       os::Solaris::set_cond_init(::cond_init);
4476       os::Solaris::set_cond_destroy(::cond_destroy);
4477     }
4478   }
4479 }
4480 
4481 bool os::Solaris::liblgrp_init() {
4482   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4483   if (handle != NULL) {
4484     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4485     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4486     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4487     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4488     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4489     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4490     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4491     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4492                                                       dlsym(handle, "lgrp_cookie_stale")));
4493 
4494     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4495     set_lgrp_cookie(c);
4496     return true;
4497   }
4498   return false;
4499 }
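     // Note: the lgrp_* wrappers resolved above back the os::numa_*() queries
     // (e.g. os::numa_get_groups_num()) that os::init_2() below relies on when
     // UseNUMA is enabled.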
4500 
4501 void os::Solaris::misc_sym_init() {
4502   address func;
4503 
4504   // getisax
4505   func = resolve_symbol_lazy("getisax");
4506   if (func != NULL) {
4507     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4508   }
4509 
4510   // meminfo
4511   func = resolve_symbol_lazy("meminfo");
4512   if (func != NULL) {
4513     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4514   }
4515 }
4516 
4517 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4518   assert(_getisax != NULL, "_getisax not set");
4519   return _getisax(array, n);
4520 }
4521 
4522 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4523 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4524 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4525 
4526 void init_pset_getloadavg_ptr(void) {
4527   pset_getloadavg_ptr =
4528     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4529   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4530     warning("pset_getloadavg function not found");
4531   }
4532 }
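     // If pset_getloadavg() is unavailable, os::loadavg() below falls back to the
     // system-wide getloadavg().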
4533 
4534 int os::Solaris::_dev_zero_fd = -1;
4535 
4536 // this is called _before_ the global arguments have been parsed
4537 void os::init(void) {
4538   _initial_pid = getpid();
4539 
4540   max_hrtime = first_hrtime = gethrtime();
4541 
4542   init_random(1234567);
4543 
4544   page_size = sysconf(_SC_PAGESIZE);
4545   if (page_size == -1) {
4546     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4547                   strerror(errno)));
4548   }
4549   init_page_sizes((size_t) page_size);
4550 
4551   Solaris::initialize_system_info();
4552 
4553   // Initialize misc. symbols as soon as possible, so we can use them
4554   // if we need them.
4555   Solaris::misc_sym_init();
4556 
4557   int fd = ::open("/dev/zero", O_RDWR);
4558   if (fd < 0) {
4559     fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4560   } else {
4561     Solaris::set_dev_zero_fd(fd);
4562 
4563     // Close on exec, child won't inherit.
4564     fcntl(fd, F_SETFD, FD_CLOEXEC);
4565   }
4566 
4567   clock_tics_per_sec = CLK_TCK;
4568 
4569   // check if dladdr1() exists; dladdr1 can provide more information than
4570   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4571   // and is available on linker patches for 5.7 and 5.8.
4572   // libdl.so must have been loaded, this call is just an entry lookup
4573   void * hdl = dlopen("libdl.so", RTLD_NOW);
4574   if (hdl) {
4575     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4576   }
4577 
4578   // (Solaris only) this switches to calls that actually do locking.
4579   ThreadCritical::initialize();
4580 
4581   main_thread = thr_self();
4582 
4583   // Constant minimum stack size allowed. It must be at least
4584   // the minimum of what the OS supports (thr_min_stack()), and
4585   // enough to allow the thread to get to user bytecode execution.
4586   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4587   // If the pagesize of the VM is greater than 8K, determine the appropriate
4588   // number of initial guard pages.  The user can change this with the
4589   // command line arguments, if needed.
4590   if (vm_page_size() > 8*K) {
4591     StackYellowPages = 1;
4592     StackRedPages = 1;
4593     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4594   }
4595 }
4596 
4597 // To install functions for atexit system call
4598 extern "C" {
4599   static void perfMemory_exit_helper() {
4600     perfMemory_exit();
4601   }
4602 }
4603 
4604 // this is called _after_ the global arguments have been parsed
4605 jint os::init_2(void) {
4606   // try to enable extended file IO ASAP, see 6431278
4607   os::Solaris::try_enable_extended_io();
4608 
4609   // Allocate a single page and mark it as readable for safepoint polling.  Also
4610   // use this first mmap call to check support for MAP_ALIGN.
4611   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4612                                                       page_size,
4613                                                       MAP_PRIVATE | MAP_ALIGN,
4614                                                       PROT_READ);
4615   if (polling_page == NULL) {
4616     has_map_align = false;
4617     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
4618                                                 PROT_READ);
4619   }
4620 
4621   os::set_polling_page(polling_page);
4622 
4623 #ifndef PRODUCT
4624   if (Verbose && PrintMiscellaneous) {
4625     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n",
4626                (intptr_t)polling_page);
4627   }
4628 #endif
4629 
4630   if (!UseMembar) {
4631     address mem_serialize_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE);
4632     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
4633     os::set_memory_serialize_page(mem_serialize_page);
4634 
4635 #ifndef PRODUCT
4636     if (Verbose && PrintMiscellaneous) {
4637       tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n",
4638                  (intptr_t)mem_serialize_page);
4639     }
4640 #endif
4641   }
4642 
4643   // Check the minimum allowable stack size for thread creation and for
4644   // initializing the java system classes, including StackOverflowError -
4645   // this depends on page size.  Add a page for compiler2 recursion in the main thread.
4646   // Add in 2*BytesPerWord times page size to account for VM stack during
4647   // class initialization depending on 32 or 64 bit VM.
4648   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
4649                                         (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
4650                                         2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
4651 
4652   size_t threadStackSizeInBytes = ThreadStackSize * K;
4653   if (threadStackSizeInBytes != 0 &&
4654       threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
4655     tty->print_cr("\nThe stack size specified is too small. Specify at least %dk",
4656                   os::Solaris::min_stack_allowed/K);
4657     return JNI_ERR;
4658   }
4659 
4660   // On systems with a 64kb page size the usable default stack size is
4661   // quite a bit less.  Increase the stack for 64kb (or any larger
4662   // than 8kb) pages; this increases virtual memory fragmentation
4663   // (since we're not creating the stack on a power of 2 boundary).
4664   // The real fix for this should be to fix the guard page
4665   // mechanism.
4666 
4667   if (vm_page_size() > 8*K) {
4668     threadStackSizeInBytes = (threadStackSizeInBytes != 0)
4669        ? threadStackSizeInBytes +
4670          ((StackYellowPages + StackRedPages) * vm_page_size())
4671        : 0;
4672     ThreadStackSize = threadStackSizeInBytes/K;
4673   }
4674 
4675   // Make the stack size a multiple of the page size so that
4676   // the yellow/red zones can be guarded.
4677   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
4678                                                 vm_page_size()));
4679 
4680   Solaris::libthread_init();
4681 
4682   if (UseNUMA) {
4683     if (!Solaris::liblgrp_init()) {
4684       UseNUMA = false;
4685     } else {
4686       size_t lgrp_limit = os::numa_get_groups_num();
4687       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
4688       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
4689       FREE_C_HEAP_ARRAY(int, lgrp_ids);
4690       if (lgrp_num < 2) {
4691         // There's only one locality group, disable NUMA.
4692         UseNUMA = false;
4693       }
4694     }
4695     if (!UseNUMA && ForceNUMA) {
4696       UseNUMA = true;
4697     }
4698   }
4699 
4700   Solaris::signal_sets_init();
4701   Solaris::init_signal_mem();
4702   Solaris::install_signal_handlers();
4703 
4704   if (libjsigversion < JSIG_VERSION_1_4_1) {
4705     Maxlibjsigsigs = OLDMAXSIGNUM;
4706   }
4707 
4708   // initialize synchronization primitives to use either thread or
4709   // lwp synchronization (controlled by UseLWPSynchronization)
4710   Solaris::synchronization_init();
4711 
4712   if (MaxFDLimit) {
4713     // Set the number of file descriptors to the maximum. Print an error
4714     // if getrlimit/setrlimit fails, but continue regardless.
4715     struct rlimit nbr_files;
4716     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4717     if (status != 0) {
4718       if (PrintMiscellaneous && (Verbose || WizardMode)) {
4719         perror("os::init_2 getrlimit failed");
4720       }
4721     } else {
4722       nbr_files.rlim_cur = nbr_files.rlim_max;
4723       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4724       if (status != 0) {
4725         if (PrintMiscellaneous && (Verbose || WizardMode)) {
4726           perror("os::init_2 setrlimit failed");
4727         }
4728       }
4729     }
4730   }
4731 
4732   // Calculate the theoretical max. number of Threads to guard against
4733   // artificial out-of-memory situations, where all available address
4734   // space has been reserved by thread stacks. Default stack size is 1Mb.
4735   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
4736     JavaThread::stack_size_at_create() : (1*K*K);
4737   assert(pre_thread_stack_size != 0, "Must have a stack");
4738   // Solaris has a maximum of 4Gb of address space for user programs. Calculate
4739   // the thread limit at which we should start doing Virtual Memory banging --
4740   // currently when the threads will have used all but 200Mb of that space.
4741   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
4742   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
4743 
4744   // at-exit methods are called in the reverse order of their registration.
4745   // In Solaris 7 and earlier, atexit functions are called on return from
4746   // main or as a result of a call to exit(3C). There can be only 32 of
4747   // these functions registered and atexit() does not set errno. In Solaris
4748   // 8 and later, there is no limit to the number of functions registered
4749   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
4750   // functions are called upon dlclose(3DL) in addition to return from main
4751   // and exit(3C).
4752 
4753   if (PerfAllowAtExitRegistration) {
4754     // only register atexit functions if PerfAllowAtExitRegistration is set.
4755     // atexit functions can be delayed until process exit time, which
4756     // can be problematic for embedded VM situations. Embedded VMs should
4757     // call DestroyJavaVM() to assure that VM resources are released.
4758 
4759     // note: perfMemory_exit_helper atexit function may be removed in
4760     // the future if the appropriate cleanup code can be added to the
4761     // VM_Exit VMOperation's doit method.
4762     if (atexit(perfMemory_exit_helper) != 0) {
4763       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4764     }
4765   }
4766 
4767   // Init pset_getloadavg function pointer
4768   init_pset_getloadavg_ptr();
4769 
4770   return JNI_OK;
4771 }
4772 
4773 // Mark the polling page as unreadable
4774 void os::make_polling_page_unreadable(void) {
4775   if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0) {
4776     fatal("Could not disable polling page");
4777   }
4778 }
4779 
4780 // Mark the polling page as readable
4781 void os::make_polling_page_readable(void) {
4782   if (mprotect((char *)_polling_page, page_size, PROT_READ) != 0) {
4783     fatal("Could not enable polling page");
4784   }
4785 }
4786 
4787 // OS interface.
4788 
4789 bool os::check_heap(bool force) { return true; }
4790 
4791 // Is a (classpath) directory empty?
4792 bool os::dir_is_empty(const char* path) {
4793   DIR *dir = NULL;
4794   struct dirent *ptr;
4795 
4796   dir = opendir(path);
4797   if (dir == NULL) return true;
4798 
4799   // Scan the directory
4800   bool result = true;
4801   char buf[sizeof(struct dirent) + MAX_PATH];
4802   struct dirent *dbuf = (struct dirent *) buf;
4803   while (result && (ptr = readdir(dir, dbuf)) != NULL) {
4804     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4805       result = false;
4806     }
4807   }
4808   closedir(dir);
4809   return result;
4810 }
4811 
4812 // This code originates from JDK's sysOpen and open64_w
4813 // from src/solaris/hpi/src/system_md.c
4814 
4815 int os::open(const char *path, int oflag, int mode) {
4816   if (strlen(path) > MAX_PATH - 1) {
4817     errno = ENAMETOOLONG;
4818     return -1;
4819   }
4820   int fd;
4821 
4822   fd = ::open64(path, oflag, mode);
4823   if (fd == -1) return -1;
4824 
4825   // If the open succeeded, the file might still be a directory
4826   {
4827     struct stat64 buf64;
4828     int ret = ::fstat64(fd, &buf64);
4829     int st_mode = buf64.st_mode;
4830 
4831     if (ret != -1) {
4832       if ((st_mode & S_IFMT) == S_IFDIR) {
4833         errno = EISDIR;
4834         ::close(fd);
4835         return -1;
4836       }
4837     } else {
4838       ::close(fd);
4839       return -1;
4840     }
4841   }
4842 
4843   // 32-bit Solaris systems suffer from:
4844   //
4845   // - an historical default soft limit of 256 per-process file
4846   //   descriptors that is too low for many Java programs.
4847   //
4848   // - a design flaw where file descriptors created using stdio
4849   //   fopen must be less than 256, _even_ when the first limit above
4850   //   has been raised.  This can cause calls to fopen (but not calls to
4851   //   open, for example) to fail mysteriously, perhaps in 3rd party
4852   //   native code (although the JDK itself uses fopen).  One can hardly
4853   //   criticize them for using this most standard of all functions.
4854   //
4855   // We attempt to make everything work anyway by:
4856   //
4857   // - raising the soft limit on per-process file descriptors beyond
4858   //   256
4859   //
4860   // - As of Solaris 10u4, we can request that Solaris raise the 256
4861   //   stdio fopen limit by calling function enable_extended_FILE_stdio.
4862   //   This is done in init_2 and recorded in enabled_extended_FILE_stdio
4863   //
4864   // - If we are stuck on an old (pre 10u4) Solaris system, we can
4865   //   workaround the bug by remapping non-stdio file descriptors below
4866   //   256 to ones beyond 256, which is done below.
4867   //
4868   // See:
4869   // 1085341: 32-bit stdio routines should support file descriptors >255
4870   // 6533291: Work around 32-bit Solaris stdio limit of 256 open files
4871   // 6431278: Netbeans crash on 32 bit Solaris: need to call
4872   //          enable_extended_FILE_stdio() in VM initialisation
4873   // Giri Mandalika's blog
4874   // http://technopark02.blogspot.com/2005_05_01_archive.html
4875   //
4876 #ifndef  _LP64
4877   if ((!enabled_extended_FILE_stdio) && fd < 256) {
4878     int newfd = ::fcntl(fd, F_DUPFD, 256);
4879     if (newfd != -1) {
4880       ::close(fd);
4881       fd = newfd;
4882     }
4883   }
4884 #endif // 32-bit Solaris
4885 
4886   // All file descriptors that are opened in the JVM and not
4887   // specifically destined for a subprocess should have the
4888   // close-on-exec flag set.  If we don't set it, then careless 3rd
4889   // party native code might fork and exec without closing all
4890   // appropriate file descriptors (e.g. as we do in closeDescriptors in
4891   // UNIXProcess.c), and this in turn might:
4892   //
4893   // - cause end-of-file to fail to be detected on some file
4894   //   descriptors, resulting in mysterious hangs, or
4895   //
4896   // - might cause an fopen in the subprocess to fail on a system
4897   //   suffering from bug 1085341.
4898   //
4899   // (Yes, the default setting of the close-on-exec flag is a Unix
4900   // design flaw)
4901   //
4902   // See:
4903   // 1085341: 32-bit stdio routines should support file descriptors >255
4904   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4905   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4906   //
4907 #ifdef FD_CLOEXEC
4908   {
4909     int flags = ::fcntl(fd, F_GETFD);
4910     if (flags != -1) {
4911       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4912     }
4913   }
4914 #endif
4915 
4916   return fd;
4917 }
4918 
4919 // create binary file, rewriting existing file if required
4920 int os::create_binary_file(const char* path, bool rewrite_existing) {
4921   int oflags = O_WRONLY | O_CREAT;
4922   if (!rewrite_existing) {
4923     oflags |= O_EXCL;
4924   }
4925   return ::open64(path, oflags, S_IREAD | S_IWRITE);
4926 }
4927 
4928 // return current position of file pointer
4929 jlong os::current_file_offset(int fd) {
4930   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4931 }
4932 
4933 // move file pointer to the specified offset
4934 jlong os::seek_to_file_offset(int fd, jlong offset) {
4935   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4936 }
4937 
4938 jlong os::lseek(int fd, jlong offset, int whence) {
4939   return (jlong) ::lseek64(fd, offset, whence);
4940 }
4941 
4942 char * os::native_path(char *path) {
4943   return path;
4944 }
4945 
4946 int os::ftruncate(int fd, jlong length) {
4947   return ::ftruncate64(fd, length);
4948 }
4949 
4950 int os::fsync(int fd)  {
4951   RESTARTABLE_RETURN_INT(::fsync(fd));
4952 }
4953 
4954 int os::available(int fd, jlong *bytes) {
4955   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
4956          "Assumed _thread_in_native");
4957   jlong cur, end;
4958   int mode;
4959   struct stat64 buf64;
4960 
4961   if (::fstat64(fd, &buf64) >= 0) {
4962     mode = buf64.st_mode;
4963     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4964       int n, ioctl_return;
4965 
4966       RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
4967       if (ioctl_return >= 0) {
4968         *bytes = n;
4969         return 1;
4970       }
4971     }
4972   }
4973   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4974     return 0;
4975   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4976     return 0;
4977   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4978     return 0;
4979   }
4980   *bytes = end - cur;
4981   return 1;
4982 }
4983 
4984 // Map a block of memory.
4985 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4986                         char *addr, size_t bytes, bool read_only,
4987                         bool allow_exec) {
4988   int prot;
4989   int flags;
4990 
4991   if (read_only) {
4992     prot = PROT_READ;
4993     flags = MAP_SHARED;
4994   } else {
4995     prot = PROT_READ | PROT_WRITE;
4996     flags = MAP_PRIVATE;
4997   }
4998 
4999   if (allow_exec) {
5000     prot |= PROT_EXEC;
5001   }
5002 
5003   if (addr != NULL) {
5004     flags |= MAP_FIXED;
5005   }
5006 
5007   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5008                                      fd, file_offset);
5009   if (mapped_address == MAP_FAILED) {
5010     return NULL;
5011   }
5012   return mapped_address;
5013 }
5014 
5015 
5016 // Remap a block of memory.
5017 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5018                           char *addr, size_t bytes, bool read_only,
5019                           bool allow_exec) {
5020   // same as map_memory() on this OS
5021   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5022                         allow_exec);
5023 }
5024 
5025 
5026 // Unmap a block of memory.
5027 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5028   return munmap(addr, bytes) == 0;
5029 }
5030 
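     // Pause support: create (or truncate) a marker file and spin until it is
     // removed; deleting the file (e.g. "rm vm.paused.<pid>") lets the VM continue.
     // If the file cannot be created we continue immediately.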
5031 void os::pause() {
5032   char filename[MAX_PATH];
5033   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5034     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
5035   } else {
5036     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5037   }
5038 
5039   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5040   if (fd != -1) {
5041     struct stat buf;
5042     ::close(fd);
5043     while (::stat(filename, &buf) == 0) {
5044       (void)::poll(NULL, 0, 100);
5045     }
5046   } else {
5047     jio_fprintf(stderr,
5048                 "Could not open pause file '%s', continuing immediately.\n", filename);
5049   }
5050 }
5051 
5052 #ifndef PRODUCT
5053 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5054 // Turn this on if you need to trace synch operations.
5055 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5056 // and call record_synch_enable and record_synch_disable
5057 // around the computation of interest.
5058 
5059 void record_synch(char* name, bool returning);  // defined below
5060 
5061 class RecordSynch {
5062   char* _name;
5063  public:
5064   RecordSynch(char* name) :_name(name) { record_synch(_name, false); }
5065   ~RecordSynch()                       { record_synch(_name, true); }
5066 };
5067 
5068 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
5069 extern "C" ret name params {                                    \
5070   typedef ret name##_t params;                                  \
5071   static name##_t* implem = NULL;                               \
5072   static int callcount = 0;                                     \
5073   if (implem == NULL) {                                         \
5074     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
5075     if (implem == NULL)  fatal(dlerror());                      \
5076   }                                                             \
5077   ++callcount;                                                  \
5078   RecordSynch _rs(#name);                                       \
5079   inner;                                                        \
5080   return implem args;                                           \
5081 }
5082 // in dbx, examine callcounts this way:
5083 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
5084 
5085 #define CHECK_POINTER_OK(p) \
5086   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
5087 #define CHECK_MU \
5088   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5089 #define CHECK_CV \
5090   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5091 #define CHECK_P(p) \
5092   if (!CHECK_POINTER_OK(p))  fatal("Pointer must be in C heap only.");
5093 
5094 #define CHECK_MUTEX(mutex_op) \
5095   CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5096 
5097 CHECK_MUTEX(   mutex_lock)
5098 CHECK_MUTEX(  _mutex_lock)
5099 CHECK_MUTEX( mutex_unlock)
5100 CHECK_MUTEX(_mutex_unlock)
5101 CHECK_MUTEX( mutex_trylock)
5102 CHECK_MUTEX(_mutex_trylock)
5103 
5104 #define CHECK_COND(cond_op) \
5105   CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU; CHECK_CV);
5106 
5107 CHECK_COND( cond_wait);
5108 CHECK_COND(_cond_wait);
5109 CHECK_COND(_cond_wait_cancel);
5110 
5111 #define CHECK_COND2(cond_op) \
5112   CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU; CHECK_CV);
5113 
5114 CHECK_COND2( cond_timedwait);
5115 CHECK_COND2(_cond_timedwait);
5116 CHECK_COND2(_cond_timedwait_cancel);
5117 
5118 // do the _lwp_* versions too
5119 #define mutex_t lwp_mutex_t
5120 #define cond_t  lwp_cond_t
5121 CHECK_MUTEX(  _lwp_mutex_lock)
5122 CHECK_MUTEX(  _lwp_mutex_unlock)
5123 CHECK_MUTEX(  _lwp_mutex_trylock)
5124 CHECK_MUTEX( __lwp_mutex_lock)
5125 CHECK_MUTEX( __lwp_mutex_unlock)
5126 CHECK_MUTEX( __lwp_mutex_trylock)
5127 CHECK_MUTEX(___lwp_mutex_lock)
5128 CHECK_MUTEX(___lwp_mutex_unlock)
5129 
5130 CHECK_COND(  _lwp_cond_wait);
5131 CHECK_COND( __lwp_cond_wait);
5132 CHECK_COND(___lwp_cond_wait);
5133 
5134 CHECK_COND2(  _lwp_cond_timedwait);
5135 CHECK_COND2( __lwp_cond_timedwait);
5136 #undef mutex_t
5137 #undef cond_t
5138 
5139 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5140 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5141 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5142 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5143 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5144 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5145 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5146 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5147 
5148 
5149 // recording machinery:
5150 
5151 enum { RECORD_SYNCH_LIMIT = 200 };
5152 char* record_synch_name[RECORD_SYNCH_LIMIT];
5153 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5154 bool record_synch_returning[RECORD_SYNCH_LIMIT];
5155 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5156 int record_synch_count = 0;
5157 bool record_synch_enabled = false;
5158 
5159 // in dbx, examine recorded data this way:
5160 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5161 
5162 void record_synch(char* name, bool returning) {
5163   if (record_synch_enabled) {
5164     if (record_synch_count < RECORD_SYNCH_LIMIT) {
5165       record_synch_name[record_synch_count] = name;
5166       record_synch_returning[record_synch_count] = returning;
5167       record_synch_thread[record_synch_count] = thr_self();
5168       record_synch_arg0ptr[record_synch_count] = &name;
5169       record_synch_count++;
5170     }
5171     // put more checking code here:
5172     // ...
5173   }
5174 }
5175 
5176 void record_synch_enable() {
5177   // start collecting trace data, if not already doing so
5178   if (!record_synch_enabled)  record_synch_count = 0;
5179   record_synch_enabled = true;
5180 }
5181 
5182 void record_synch_disable() {
5183   // stop collecting trace data
5184   record_synch_enabled = false;
5185 }
5186 
5187 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5188 #endif // PRODUCT
5189 
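     // Byte offset of pr_utime within prusage_t and the size of the
     // [pr_utime, pr_ttime) window (i.e. pr_utime and pr_stime); thread_cpu_time()
     // below pread()s just this window from /proc/<pid>/lwp/<lwpid>/lwpusage.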
5190 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5191 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5192                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5193 
5194 
5195 // JVMTI & JVM monitoring and management support
5196 // The thread_cpu_time() and current_thread_cpu_time() are only
5197 // supported if is_thread_cpu_time_supported() returns true.
5198 // They are not supported on Solaris T1.
5199 
5200 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5201 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5202 // of a thread.
5203 //
5204 // current_thread_cpu_time() and thread_cpu_time(Thread *)
5205 // return the fast estimate available on the platform.
5206 
5207 // hrtime_t gethrvtime() return value includes
5208 // user time but does not include system time
5209 jlong os::current_thread_cpu_time() {
5210   return (jlong) gethrvtime();
5211 }
5212 
5213 jlong os::thread_cpu_time(Thread *thread) {
5214   // return user level CPU time only to be consistent with
5215   // what current_thread_cpu_time returns.
5216   // thread_cpu_time_info() must be changed if this changes
5217   return os::thread_cpu_time(thread, false /* user time only */);
5218 }
5219 
5220 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5221   if (user_sys_cpu_time) {
5222     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5223   } else {
5224     return os::current_thread_cpu_time();
5225   }
5226 }
5227 
5228 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5229   char proc_name[64];
5230   int count;
5231   prusage_t prusage;
5232   jlong lwp_time;
5233   int fd;
5234 
5235   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5236           getpid(),
5237           thread->osthread()->lwp_id());
5238   fd = ::open(proc_name, O_RDONLY);
5239   if (fd == -1) return -1;
5240 
5241   do {
5242     count = ::pread(fd,
5243                     (void *)&prusage.pr_utime,
5244                     thr_time_size,
5245                     thr_time_off);
5246   } while (count < 0 && errno == EINTR);
5247   ::close(fd);
5248   if (count < 0) return -1;
5249 
5250   if (user_sys_cpu_time) {
5251     // user + system CPU time
5252     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5253                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5254                  (jlong)prusage.pr_stime.tv_nsec +
5255                  (jlong)prusage.pr_utime.tv_nsec;
5256   } else {
5257     // user level CPU time only
5258     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5259                 (jlong)prusage.pr_utime.tv_nsec;
5260   }
5261 
5262   return (lwp_time);
5263 }
5264 
5265 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5266   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5267   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5268   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5269   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5270 }
5271 
5272 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5273   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5274   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5275   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5276   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5277 }
5278 
5279 bool os::is_thread_cpu_time_supported() {
5280   return true;
5281 }
5282 
5283 // System loadavg support.  Returns -1 if load average cannot be obtained.
5284 // Return the load average for our processor set if the primitive exists
5285 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
5286 int os::loadavg(double loadavg[], int nelem) {
5287   if (pset_getloadavg_ptr != NULL) {
5288     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5289   } else {
5290     return ::getloadavg(loadavg, nelem);
5291   }
5292 }
5293 
5294 //---------------------------------------------------------------------------------
5295 
5296 bool os::find(address addr, outputStream* st) {
5297   Dl_info dlinfo;
5298   memset(&dlinfo, 0, sizeof(dlinfo));
5299   if (dladdr(addr, &dlinfo) != 0) {
5300     st->print(PTR_FORMAT ": ", addr);
5301     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5302       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5303     } else if (dlinfo.dli_fbase != NULL) {
5304       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5305     } else {
5306       st->print("<absolute address>");
5307     }
5308     if (dlinfo.dli_fname != NULL) {
5309       st->print(" in %s", dlinfo.dli_fname);
5310     }
5311     if (dlinfo.dli_fbase != NULL) {
5312       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5313     }
5314     st->cr();
5315 
5316     if (Verbose) {
5317       // decode some bytes around the PC
5318       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5319       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5320       address       lowest = (address) dlinfo.dli_sname;
5321       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
5322       if (begin < lowest)  begin = lowest;
5323       Dl_info dlinfo2;
5324       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5325           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
5326         end = (address) dlinfo2.dli_saddr;
5327       }
5328       Disassembler::decode(begin, end, st);
5329     }
5330     return true;
5331   }
5332   return false;
5333 }
5334 
5335 // The following function has been added to support HotSparc's libjvm.so running
5336 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
5337 // src/solaris/hpi/native_threads in the EVM codebase.
5338 //
5339 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5340 // libraries and should thus be removed. We will leave it behind for a while
5341 // until we no longer want to be able to run on top of 1.3.0 Solaris production
5342 // JDK. See 4341971.
5343 
5344 #define STACK_SLACK 0x800
5345 
5346 extern "C" {
5347   intptr_t sysThreadAvailableStackWithSlack() {
5348     stack_t st;
5349     intptr_t retval, stack_top;
5350     retval = thr_stksegment(&st);
5351     assert(retval == 0, "incorrect return value from thr_stksegment");
5352     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5353     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
5354     stack_top=(intptr_t)st.ss_sp-st.ss_size;
5355     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5356   }
5357 }
5358 
5359 // ObjectMonitor park-unpark infrastructure ...
5360 //
5361 // We implement Solaris and Linux PlatformEvents with the
5362 // obvious condvar-mutex-flag triple.
5363 // Another alternative that works quite well is pipes:
5364 // Each PlatformEvent consists of a pipe-pair.
5365 // The thread associated with the PlatformEvent
5366 // calls park(), which reads from the input end of the pipe.
5367 // Unpark() writes into the other end of the pipe.
5368 // The write-side of the pipe must be set NDELAY.
5369 // Unfortunately pipes consume a large # of handles.
5370 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5371 // Using pipes for the 1st few threads might be workable, however.
5372 //
5373 // park() is permitted to return spuriously.
5374 // Callers of park() should wrap the call to park() in
5375 // an appropriate loop.  A litmus test for the correct
5376 // usage of park is the following: if park() were modified
5377 // to immediately return 0 your code should still work,
5378 // albeit degenerating to a spin loop.
5379 //
5380 // In a sense, park()-unpark() just provides more polite spinning
5381 // and polling with the key difference over naive spinning being
5382 // that a parked thread needs to be explicitly unparked() in order
5383 // to wake up and to poll the underlying condition.
5384 //
5385 // Assumption:
5386 //    Only one parker can exist on an event, which is why we allocate
5387 //    them per-thread. Multiple unparkers can coexist.
5388 //
5389 // _Event transitions in park()
5390 //   -1 => -1 : illegal
5391 //    1 =>  0 : pass - return immediately
5392 //    0 => -1 : block; then set _Event to 0 before returning
5393 //
5394 // _Event transitions in unpark()
5395 //    0 => 1 : just return
5396 //    1 => 1 : just return
5397 //   -1 => either 0 or 1; must signal target thread
5398 //         That is, we can safely transition _Event from -1 to either
5399 //         0 or 1.
5400 //
5401 // _Event serves as a restricted-range semaphore.
5402 //   -1 : thread is blocked, i.e. there is a waiter
5403 //    0 : neutral: thread is running or ready,
5404 //        could have been signaled after a wait started
5405 //    1 : signaled - thread is running or ready
5406 //
5407 // Another possible encoding of _Event would be with
5408 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5409 //
5410 // TODO-FIXME: add DTRACE probes for:
5411 // 1.   Tx parks
5412 // 2.   Ty unparks Tx
5413 // 3.   Tx resumes from park
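     // Illustrative caller pattern (a sketch only; 'ev' and 'condition_of_interest'
     // are hypothetical -- real callers live in the ObjectMonitor/Thread code):
     //
     //   while (!condition_of_interest()) {
     //     ev->park();      // may return spuriously; re-check and re-park
     //   }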
5414 
5415 
5416 // value determined through experimentation
5417 #define ROUNDINGFIX 11
5418 
5419 // utility to compute the abstime argument to timedwait.
5420 // TODO-FIXME: switch from compute_abstime() to unpackTime().
5421 
5422 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5423   // millis is the relative timeout time
5424   // abstime will be the absolute timeout time
5425   if (millis < 0)  millis = 0;
5426   struct timeval now;
5427   int status = gettimeofday(&now, NULL);
5428   assert(status == 0, "gettimeofday");
5429   jlong seconds = millis / 1000;
5430   jlong max_wait_period;
5431 
5432   if (UseLWPSynchronization) {
5433     // forward port of fix for 4275818 (not sleeping long enough)
5434     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5435     // _lwp_cond_timedwait() used a round_down algorithm rather
5436     // than a round_up. For millis less than our roundfactor
5437     // it rounded down to 0 which doesn't meet the spec.
5438     // For millis > roundfactor we may return a bit sooner, but
5439     // since we cannot accurately identify the patch level and
5440     // this has already been fixed in Solaris 9 and 8 we will
5441     // leave it alone rather than always rounding down.
5442 
5443     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
5444     // It appears that when we go directly through Solaris _lwp_cond_timedwait()
5445     // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
5446     max_wait_period = 21000000;
5447   } else {
5448     max_wait_period = 50000000;
5449   }
5450   millis %= 1000;
5451   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5452     seconds = max_wait_period;
5453   }
5454   abstime->tv_sec = now.tv_sec  + seconds;
5455   long       usec = now.tv_usec + millis * 1000;
5456   if (usec >= 1000000) {
5457     abstime->tv_sec += 1;
5458     usec -= 1000000;
5459   }
5460   abstime->tv_nsec = usec * 1000;
5461   return abstime;
5462 }
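     // Worked example (non-LWP path): millis == 2500 with now == {tv_sec = 1000,
     // tv_usec = 0} yields abstime == {tv_sec = 1002, tv_nsec = 500000000}.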
5463 
5464 void os::PlatformEvent::park() {           // AKA: down()
5465   // Transitions for _Event:
5466   //   -1 => -1 : illegal
5467   //    1 =>  0 : pass - return immediately
5468   //    0 => -1 : block; then set _Event to 0 before returning
5469 
5470   // Invariant: Only the thread associated with the Event/PlatformEvent
5471   // may call park().
5472   assert(_nParked == 0, "invariant");
5473 
5474   int v;
5475   for (;;) {
5476     v = _Event;
5477     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5478   }
5479   guarantee(v >= 0, "invariant");
5480   if (v == 0) {
5481     // Do this the hard way by blocking ...
5482     // See http://monaco.sfbay/detail.jsf?cr=5094058.
5483     // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5484     // Only for SPARC >= V8PlusA
5485 #if defined(__sparc) && defined(COMPILER2)
5486     if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5487 #endif
5488     int status = os::Solaris::mutex_lock(_mutex);
5489     assert_status(status == 0, status, "mutex_lock");
5490     guarantee(_nParked == 0, "invariant");
5491     ++_nParked;
5492     while (_Event < 0) {
5493       // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5494       // Treat this the same as if the wait was interrupted
5495       // With usr/lib/lwp going to kernel, always handle ETIME
5496       status = os::Solaris::cond_wait(_cond, _mutex);
5497       if (status == ETIME) status = EINTR;
5498       assert_status(status == 0 || status == EINTR, status, "cond_wait");
5499     }
5500     --_nParked;
5501     _Event = 0;
5502     status = os::Solaris::mutex_unlock(_mutex);
5503     assert_status(status == 0, status, "mutex_unlock");
5504     // Paranoia to ensure our locked and lock-free paths interact
5505     // correctly with each other.
5506     OrderAccess::fence();
5507   }
5508 }
5509 
5510 int os::PlatformEvent::park(jlong millis) {
5511   // Transitions for _Event:
5512   //   -1 => -1 : illegal
5513   //    1 =>  0 : pass - return immediately
5514   //    0 => -1 : block; then set _Event to 0 before returning
5515 
5516   guarantee(_nParked == 0, "invariant");
5517   int v;
5518   for (;;) {
5519     v = _Event;
5520     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5521   }
5522   guarantee(v >= 0, "invariant");
5523   if (v != 0) return OS_OK;
5524 
5525   int ret = OS_TIMEOUT;
5526   timestruc_t abst;
5527   compute_abstime(&abst, millis);
5528 
5529   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5530   // For Solaris SPARC set fprs.FEF=0 prior to parking.
5531   // Only for SPARC >= V8PlusA
5532 #if defined(__sparc) && defined(COMPILER2)
5533   if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5534 #endif
5535   int status = os::Solaris::mutex_lock(_mutex);
5536   assert_status(status == 0, status, "mutex_lock");
5537   guarantee(_nParked == 0, "invariant");
5538   ++_nParked;
5539   while (_Event < 0) {
5540     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5541     assert_status(status == 0 || status == EINTR ||
5542                   status == ETIME || status == ETIMEDOUT,
5543                   status, "cond_timedwait");
5544     if (!FilterSpuriousWakeups) break;                // previous semantics
5545     if (status == ETIME || status == ETIMEDOUT) break;
5546     // We consume and ignore EINTR and spurious wakeups.
5547   }
5548   --_nParked;
5549   if (_Event >= 0) ret = OS_OK;
5550   _Event = 0;
5551   status = os::Solaris::mutex_unlock(_mutex);
5552   assert_status(status == 0, status, "mutex_unlock");
5553   // Paranoia to ensure our locked and lock-free paths interact
5554   // correctly with each other.
5555   OrderAccess::fence();
5556   return ret;
5557 }
5558 
5559 void os::PlatformEvent::unpark() {
5560   // Transitions for _Event:
5561   //    0 => 1 : just return
5562   //    1 => 1 : just return
5563   //   -1 => either 0 or 1; must signal target thread
5564   //         That is, we can safely transition _Event from -1 to either
5565   //         0 or 1.
5566   // See also: "Semaphores in Plan 9" by Mullender & Cox
5567   //
5568   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5569   // that it will take two back-to-back park() calls for the owning
5570   // thread to block. This has the benefit of forcing a spurious return
5571   // from the first park() call after an unpark() call which will help
5572   // shake out uses of park() and unpark() without condition variables.
5573 
5574   if (Atomic::xchg(1, &_Event) >= 0) return;
5575 
5576   // If the thread associated with the event was parked, wake it.
5577   // Wait for the thread assoc with the PlatformEvent to vacate.
5578   int status = os::Solaris::mutex_lock(_mutex);
5579   assert_status(status == 0, status, "mutex_lock");
5580   int AnyWaiters = _nParked;
5581   status = os::Solaris::mutex_unlock(_mutex);
5582   assert_status(status == 0, status, "mutex_unlock");
5583   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5584   if (AnyWaiters != 0) {
5585     // Note that we signal() *after* dropping the lock for "immortal" Events.
5586     // This is safe and avoids a common class of  futile wakeups.  In rare
5587     // circumstances this can cause a thread to return prematurely from
5588     // cond_{timed}wait() but the spurious wakeup is benign and the victim
5589     // will simply re-test the condition and re-park itself.
5590     // This provides particular benefit if the underlying platform does not
5591     // provide wait morphing.
5592     status = os::Solaris::cond_signal(_cond);
5593     assert_status(status == 0, status, "cond_signal");
5594   }
5595 }
5596 
5597 // JSR166
5598 // -------------------------------------------------------
5599 
5600 // The solaris and linux implementations of park/unpark are fairly
5601 // conservative for now, but can be improved. They currently use a
5602 // mutex/condvar pair, plus _counter.
5603 // Park decrements _counter if > 0, else does a condvar wait.  Unpark
5604 // sets count to 1 and signals condvar.  Only one thread ever waits
5605 // on the condvar. Contention seen when trying to park implies that someone
5606 // is unparking you, so don't wait. And spurious returns are fine, so there
5607 // is no need to track notifications.
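     // Roughly, these primitives back java.util.concurrent.locks.LockSupport
     // (an approximate mapping; the exact plumbing goes through Unsafe.park/unpark):
     //
     //   LockSupport.park()         -> Parker::park(false, 0)
     //   LockSupport.parkNanos(n)   -> Parker::park(false, n)
     //   LockSupport.parkUntil(ms)  -> Parker::park(true, ms)
     //   LockSupport.unpark(t)      -> Parker::unpark()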
5608 
5609 #define MAX_SECS 100000000
5610 
5611 // This code is common to linux and solaris and will be moved to a
5612 // common place in dolphin.
5613 //
5614 // The passed in time value is either a relative time in nanoseconds
5615 // or an absolute time in milliseconds. Either way it has to be unpacked
5616 // into suitable seconds and nanoseconds components and stored in the
5617 // given timespec structure.
5618 // Given that time is a 64-bit value and the time_t used in the timespec is
5619 // only a signed 32-bit value (except on 64-bit Linux), we have to watch for
5620 // overflow if times way in the future are given. Further, on Solaris versions
5621 // prior to 10 there is a restriction (see cond_timedwait) that the specified
5622 // number of seconds, in abstime, is less than current_time  + 100,000,000.
5623 // As it will be 28 years before "now + 100000000" will overflow we can
5624 // ignore overflow and just impose a hard-limit on seconds using the value
5625 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
5626 // years from "now".
5627 //
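     // Example (relative time): time == 1500000 ns with now.tv_usec == 999999
     // gives a raw tv_nsec of 1500000 + 999999000 == 1001499000, which is
     // normalized to {tv_sec = now.tv_sec + 1, tv_nsec = 1499000}.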
5628 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5629   assert(time > 0, "convertTime");
5630 
5631   struct timeval now;
5632   int status = gettimeofday(&now, NULL);
5633   assert(status == 0, "gettimeofday");
5634 
5635   time_t max_secs = now.tv_sec + MAX_SECS;
5636 
5637   if (isAbsolute) {
5638     jlong secs = time / 1000;
5639     if (secs > max_secs) {
5640       absTime->tv_sec = max_secs;
5641     } else {
5642       absTime->tv_sec = secs;
5643     }
5644     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5645   } else {
5646     jlong secs = time / NANOSECS_PER_SEC;
5647     if (secs >= MAX_SECS) {
5648       absTime->tv_sec = max_secs;
5649       absTime->tv_nsec = 0;
5650     } else {
5651       absTime->tv_sec = now.tv_sec + secs;
5652       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5653       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5654         absTime->tv_nsec -= NANOSECS_PER_SEC;
5655         ++absTime->tv_sec; // note: this must be <= max_secs
5656       }
5657     }
5658   }
5659   assert(absTime->tv_sec >= 0, "tv_sec < 0");
5660   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5661   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5662   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5663 }
5664 
5665 void Parker::park(bool isAbsolute, jlong time) {
5666   // Ideally we'd do something useful while spinning, such
5667   // as calling unpackTime().
5668 
5669   // Optional fast-path check:
5670   // Return immediately if a permit is available.
5671   // We depend on Atomic::xchg() having full barrier semantics
5672   // since we are doing a lock-free update to _counter.
5673   if (Atomic::xchg(0, &_counter) > 0) return;
5674 
5675   // Optional fast-exit: Check interrupt before trying to wait
5676   Thread* thread = Thread::current();
5677   assert(thread->is_Java_thread(), "Must be JavaThread");
5678   JavaThread *jt = (JavaThread *)thread;
5679   if (Thread::is_interrupted(thread, false)) {
5680     return;
5681   }
5682 
5683   // First, demultiplex/decode time arguments
5684   timespec absTime;
5685   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
5686     return;
5687   }
5688   if (time > 0) {
5689     // Warning: this code might be exposed to the old Solaris time
5690     // round-down bugs.  Grep "ROUNDINGFIX" for details.
5691     unpackTime(&absTime, isAbsolute, time);
5692   }
5693 
5694   // Enter safepoint region
5695   // Beware of deadlocks such as 6317397.
5696   // The per-thread Parker:: _mutex is a classic leaf-lock.
5697   // In particular a thread must never block on the Threads_lock while
5698   // holding the Parker:: mutex.  If safepoints are pending, both the
5699   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
5700   ThreadBlockInVM tbivm(jt);
5701 
5702   // Don't wait if we cannot get the lock, since interference arises from
5703   // unblocking.  Also, check for interrupt before trying to wait.
5704   if (Thread::is_interrupted(thread, false) ||
5705       os::Solaris::mutex_trylock(_mutex) != 0) {
5706     return;
5707   }
5708 
5709   int status;
5710 
5711   if (_counter > 0)  { // no wait needed
5712     _counter = 0;
5713     status = os::Solaris::mutex_unlock(_mutex);
5714     assert(status == 0, "invariant");
5715     // Paranoia to ensure our locked and lock-free paths interact
5716     // correctly with each other and Java-level accesses.
5717     OrderAccess::fence();
5718     return;
5719   }
5720 
5721 #ifdef ASSERT
5722   // Don't catch signals while blocked; let the running threads have the signals.
5723   // (This allows a debugger to break into the running thread.)
5724   sigset_t oldsigs;
5725   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
5726   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
5727 #endif
5728 
5729   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5730   jt->set_suspend_equivalent();
5731   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5732 
5733   // Do this the hard way by blocking ...
5734   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5735   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5736   // Only for SPARC >= V8PlusA
5737 #if defined(__sparc) && defined(COMPILER2)
5738   if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5739 #endif
5740 
5741   if (time == 0) {
5742     status = os::Solaris::cond_wait(_cond, _mutex);
5743   } else {
5744     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
5745   }
5746   // Note that an untimed cond_wait() can sometimes return ETIME on older
5747   // versions of Solaris.
5748   assert_status(status == 0 || status == EINTR ||
5749                 status == ETIME || status == ETIMEDOUT,
5750                 status, "cond_timedwait");
5751 
5752 #ifdef ASSERT
5753   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
5754 #endif
5755   _counter = 0;
5756   status = os::Solaris::mutex_unlock(_mutex);
5757   assert_status(status == 0, status, "mutex_unlock");
5758   // Paranoia to ensure our locked and lock-free paths interact
5759   // correctly with each other and Java-level accesses.
5760   OrderAccess::fence();
5761 
5762   // If externally suspended while waiting, re-suspend
5763   if (jt->handle_special_suspend_equivalent_condition()) {
5764     jt->java_suspend_self();
5765   }
5766 }
5767 
5768 void Parker::unpark() {
5769   int status = os::Solaris::mutex_lock(_mutex);
5770   assert(status == 0, "invariant");
5771   const int s = _counter;
5772   _counter = 1;
5773   status = os::Solaris::mutex_unlock(_mutex);
5774   assert(status == 0, "invariant");
5775 
5776   if (s < 1) {
5777     status = os::Solaris::cond_signal(_cond);
5778     assert(status == 0, "invariant");
5779   }
5780 }
5781 
5782 extern char** environ;
5783 
5784 // Run the specified command in a separate process. Return its exit value,
5785 // or -1 on failure (e.g. can't fork a new process).
5786 // Unlike system(), this function can be called from a signal handler. It
5787 // doesn't block SIGINT et al.
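     // For example (illustrative only): fork_and_exec((char*)"ls /tmp") runs
     // the command via "/usr/bin/sh -c" and returns the shell's exit status;
     // if the child is killed by a signal, the return value is 0x80 + the
     // signal number (see below).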
5788 int os::fork_and_exec(char* cmd) {
5789   char * argv[4];
5790   argv[0] = (char *)"sh";
5791   argv[1] = (char *)"-c";
5792   argv[2] = cmd;
5793   argv[3] = NULL;
5794 
5795   // fork() is async-signal-safe; fork1() is not, so it can't be used in a signal handler
5796   pid_t pid;
5797   Thread* t = ThreadLocalStorage::get_thread_slow();
5798   if (t != NULL && t->is_inside_signal_handler()) {
5799     pid = fork();
5800   } else {
5801     pid = fork1();
5802   }
5803 
5804   if (pid < 0) {
5805     // fork failed
5806     warning("fork failed: %s", strerror(errno));
5807     return -1;
5808 
5809   } else if (pid == 0) {
5810     // child process
5811 
5812     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
5813     execve("/usr/bin/sh", argv, environ);
5814 
5815     // execve failed
5816     _exit(-1);
5817 
5818   } else  {
5819     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
5820     // care about the actual exit code, for now.
5821 
5822     int status;
5823 
5824     // Wait for the child process to exit.  This returns immediately if
5825     // the child has already exited.
5826     while (waitpid(pid, &status, 0) < 0) {
5827       switch (errno) {
5828       case ECHILD: return 0;
5829       case EINTR: break;
5830       default: return -1;
5831       }
5832     }
5833 
5834     if (WIFEXITED(status)) {
5835       // The child exited normally; get its exit code.
5836       return WEXITSTATUS(status);
5837     } else if (WIFSIGNALED(status)) {
5838       // The child was terminated by a signal.
5839       // The best value to return is 0x80 + signal number,
5840       // because that is what all Unix shells do, and because
5841       // it allows callers to distinguish between process exit and
5842       // process death by signal.
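           // For example, a child killed by SIGKILL (signal 9) yields
           // 0x80 + 9 = 137, the same value a POSIX shell reports in $?.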
5843       return 0x80 + WTERMSIG(status);
5844     } else {
5845       // Unknown exit code; pass it through
5846       return status;
5847     }
5848   }
5849 }
5850 
5851 // is_headless_jre()
5852 //
5853 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
5854 // in order to report whether we are running in a headless JRE.
5855 //
5856 // Since JDK 8, xawt/libmawt.so has been moved into the same directory
5857 // as libawt.so and renamed libawt_xawt.so.
5858 //
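     // After stripping the "libjvm.so" component and the "client"/"server"
     // directory from the JVM path below, the probed locations look roughly
     // like <jre>/lib/<arch>/xawt/libmawt.so and <jre>/lib/<arch>/libawt_xawt.so
     // (illustrative layout only; the exact directories depend on the JDK
     // version and platform).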
5859 bool os::is_headless_jre() {
5860   struct stat statbuf;
5861   char buf[MAXPATHLEN];
5862   char libmawtpath[MAXPATHLEN];
5863   const char *xawtstr  = "/xawt/libmawt.so";
5864   const char *new_xawtstr = "/libawt_xawt.so";
5865   char *p;
5866 
5867   // Get path to libjvm.so
5868   os::jvm_path(buf, sizeof(buf));
5869 
5870   // Get rid of libjvm.so
5871   p = strrchr(buf, '/');
5872   if (p == NULL) {
5873     return false;
5874   } else {
5875     *p = '\0';
5876   }
5877 
5878   // Get rid of client or server
5879   p = strrchr(buf, '/');
5880   if (p == NULL) {
5881     return false;
5882   } else {
5883     *p = '\0';
5884   }
5885 
5886   // check xawt/libmawt.so
5887   strcpy(libmawtpath, buf);
5888   strcat(libmawtpath, xawtstr);
5889   if (::stat(libmawtpath, &statbuf) == 0) return false;
5890 
5891   // check libawt_xawt.so
5892   strcpy(libmawtpath, buf);
5893   strcat(libmawtpath, new_xawtstr);
5894   if (::stat(libmawtpath, &statbuf) == 0) return false;
5895 
5896   return true;
5897 }
5898 
5899 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
5900   size_t res;
5901   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5902          "Assumed _thread_in_native");
5903   RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
5904   return res;
5905 }
5906 
5907 int os::close(int fd) {
5908   return ::close(fd);
5909 }
5910 
5911 int os::socket_close(int fd) {
5912   return ::close(fd);
5913 }
5914 
5915 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5916   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5917          "Assumed _thread_in_native");
5918   RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
5919 }
5920 
5921 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5922   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5923          "Assumed _thread_in_native");
5924   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5925 }
5926 
5927 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5928   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5929 }
5930 
5931 // As both poll and select can be interrupted by signals, we have to be
5932 // prepared to restart the system call after updating the timeout, unless
5933 // a poll() is done with timeout == -1, in which case we repeat with this
5934 // "wait forever" value.
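     //
     // The EINTR-restart idiom used by the RESTARTABLE/RESTARTABLE_RETURN_INT
     // wrappers above is essentially the following (sketch only; the real
     // macros are defined elsewhere in this port):
     //
     //   do {
     //     _result = (int) ::recv(fd, buf, nBytes, flags);
     //   } while (_result == OS_ERR && errno == EINTR);
     //
     // os::connect() below handles EINTR specially instead, because
     // restarting connect() changes its errno semantics (see its comment).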
5935 
5936 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
5937   int _result = ::connect(fd, him, len);
5939 
5940   // On Solaris, when a connect() call is interrupted, the connection
5941   // can be established asynchronously (see 6343810). Subsequent calls
5942   // to connect() must check the errno value which has the semantic
5943   // described below (copied from the connect() man page). Handling
5944   // of asynchronously established connections is required for both
5945   // blocking and non-blocking sockets.
5946   //     EINTR            The  connection  attempt  was   interrupted
5947   //                      before  any data arrived by the delivery of
5948   //                      a signal. The connection, however, will  be
5949   //                      established asynchronously.
5950   //
5951   //     EINPROGRESS      The socket is non-blocking, and the connec-
5952   //                      tion  cannot  be completed immediately.
5953   //
5954   //     EALREADY         The socket is non-blocking,  and a previous
5955   //                      connection  attempt  has  not yet been com-
5956   //                      pleted.
5957   //
5958   //     EISCONN          The socket is already connected.
5959   if (_result == OS_ERR && errno == EINTR) {
5960     // restarting a connect() changes its errno semantics
5961     RESTARTABLE(::connect(fd, him, len), _result);
5962     // undo these changes
5963     if (_result == OS_ERR) {
5964       if (errno == EALREADY) {
5965         errno = EINPROGRESS; // fall through
5966       } else if (errno == EISCONN) {
5967         errno = 0;
5968         return OS_OK;
5969       }
5970     }
5971   }
5972   return _result;
5973 }
5974 
5975 // Get the default path to the core file
5976 // Returns the length of the string written to the buffer
5977 int os::get_core_path(char* buffer, size_t bufferSize) {
5978   const char* p = get_current_directory(buffer, bufferSize);
5979 
5980   if (p == NULL) {
5981     assert(p != NULL, "failed to get current directory");
5982     return 0;
5983   }
5984 
5985   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
5986                                               p, current_process_id());
5987 
5988   return strlen(buffer);
5989 }
5990 
5991 #ifndef PRODUCT
5992 void TestReserveMemorySpecial_test() {
5993   // No tests available for this platform
5994 }
5995 #endif