1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "jvm_solaris.h"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/filemap.hpp"
  37 #include "mutex_solaris.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "os_share_solaris.hpp"
  40 #include "os_solaris.inline.hpp"
  41 #include "prims/jniFastGetField.hpp"
  42 #include "prims/jvm.h"
  43 #include "prims/jvm_misc.hpp"
  44 #include "runtime/arguments.hpp"
  45 #include "runtime/atomic.inline.hpp"
  46 #include "runtime/extendedPC.hpp"
  47 #include "runtime/globals.hpp"
  48 #include "runtime/interfaceSupport.hpp"
  49 #include "runtime/java.hpp"
  50 #include "runtime/javaCalls.hpp"
  51 #include "runtime/mutexLocker.hpp"
  52 #include "runtime/objectMonitor.hpp"
  53 #include "runtime/orderAccess.inline.hpp"
  54 #include "runtime/osThread.hpp"
  55 #include "runtime/perfMemory.hpp"
  56 #include "runtime/sharedRuntime.hpp"
  57 #include "runtime/statSampler.hpp"
  58 #include "runtime/stubRoutines.hpp"
  59 #include "runtime/thread.inline.hpp"
  60 #include "runtime/threadCritical.hpp"
  61 #include "runtime/timer.hpp"
  62 #include "runtime/vm_version.hpp"
  63 #include "services/attachListener.hpp"
  64 #include "services/memTracker.hpp"
  65 #include "services/runtimeService.hpp"
  66 #include "utilities/decoder.hpp"
  67 #include "utilities/defaultStream.hpp"
  68 #include "utilities/events.hpp"
  69 #include "utilities/growableArray.hpp"
  70 #include "utilities/vmError.hpp"
  71 
  72 // put OS-includes here
  73 # include <dlfcn.h>
  74 # include <errno.h>
  75 # include <exception>
  76 # include <link.h>
  77 # include <poll.h>
  78 # include <pthread.h>
  79 # include <pwd.h>
  80 # include <schedctl.h>
  81 # include <setjmp.h>
  82 # include <signal.h>
  83 # include <stdio.h>
  84 # include <alloca.h>
  85 # include <sys/filio.h>
  86 # include <sys/ipc.h>
  87 # include <sys/lwp.h>
  88 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
  89 # include <sys/mman.h>
  90 # include <sys/processor.h>
  91 # include <sys/procset.h>
  92 # include <sys/pset.h>
  93 # include <sys/resource.h>
  94 # include <sys/shm.h>
  95 # include <sys/socket.h>
  96 # include <sys/stat.h>
  97 # include <sys/systeminfo.h>
  98 # include <sys/time.h>
  99 # include <sys/times.h>
 100 # include <sys/types.h>
 101 # include <sys/wait.h>
 102 # include <sys/utsname.h>
 103 # include <thread.h>
 104 # include <unistd.h>
 105 # include <sys/priocntl.h>
 106 # include <sys/rtpriocntl.h>
 107 # include <sys/tspriocntl.h>
 108 # include <sys/iapriocntl.h>
 109 # include <sys/fxpriocntl.h>
 110 # include <sys/loadavg.h>
 111 # include <string.h>
 112 # include <stdio.h>
 113 
 114 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
 115 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
 116 
 117 #define MAX_PATH (2 * K)
 118 
 119 // for timer info max values which include all bits
 120 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 121 
 122 
 123 // Here are some liblgrp types from sys/lgrp_user.h to be able to
 124 // compile on older systems without this header file.
 125 
 126 #ifndef MADV_ACCESS_LWP
 127 # define  MADV_ACCESS_LWP         7       /* next LWP to access heavily */
 128 #endif
 129 #ifndef MADV_ACCESS_MANY
 130 # define  MADV_ACCESS_MANY        8       /* many processes to access heavily */
 131 #endif
 132 
 133 #ifndef LGRP_RSRC_CPU
 134 # define LGRP_RSRC_CPU           0       /* CPU resources */
 135 #endif
 136 #ifndef LGRP_RSRC_MEM
 137 # define LGRP_RSRC_MEM           1       /* memory resources */
 138 #endif
 139 
 140 // see thr_setprio(3T) for the basis of these numbers
 141 #define MinimumPriority 0
 142 #define NormalPriority  64
 143 #define MaximumPriority 127
 144 
 145 // Values for ThreadPriorityPolicy == 1
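// Index 0 is unused (Java thread priorities start at 1); entries 1..10 map the
// Java priorities onto the 0..127 range accepted by thr_setprio(), and the
// final entry is the value used for CriticalPriority.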
 146 int prio_policy1[CriticalPriority+1] = {
 147   -99999,  0, 16,  32,  48,  64,
 148           80, 96, 112, 124, 127, 127 };
 149 
 150 // System parameters used internally
 151 static clock_t clock_tics_per_sec = 100;
 152 
 153 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
 154 static bool enabled_extended_FILE_stdio = false;
 155 
 156 // For diagnostics to print a message once. see run_periodic_checks
 157 static bool check_addr0_done = false;
 158 static sigset_t check_signal_done;
 159 static bool check_signals = true;
 160 
 161 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
 162 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
 163 
 164 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
 165 
 166 
 167 // "default" initializers for missing libc APIs
 168 extern "C" {
 169   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 170   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
 171 
 172   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 173   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
 174 }
 175 
 176 // "default" initializers for pthread-based synchronization
 177 extern "C" {
 178   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 179   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 180 }
 181 
 182 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 183 
 184 // Thread Local Storage
 185 // This is common to all Solaris platforms so it is defined here,
 186 // in this common file.
 187 // The declarations are in the os_cpu threadLS*.hpp files.
 188 //
 189 // Static member initialization for TLS
 190 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
 191 
 192 #ifndef PRODUCT
 193 #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
 194 
 195 int ThreadLocalStorage::_tcacheHit = 0;
 196 int ThreadLocalStorage::_tcacheMiss = 0;
 197 
 198 void ThreadLocalStorage::print_statistics() {
 199   int total = _tcacheMiss+_tcacheHit;
 200   tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
 201                 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
 202 }
 203 #undef _PCT
 204 #endif // PRODUCT
 205 
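// Slow path for the thread cache: look the thread up via the real TLS slot,
// sanity-check that the current stack pointer lies inside its recorded stack,
// and install it in the cache slot so later lookups can take the fast path.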
 206 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
 207                                                         int index) {
 208   Thread *thread = get_thread_slow();
 209   if (thread != NULL) {
 210     address sp = os::current_stack_pointer();
 211     guarantee(thread->_stack_base == NULL ||
 212               (sp <= thread->_stack_base &&
 213                  sp >= thread->_stack_base - thread->_stack_size) ||
 214                is_error_reported(),
 215               "sp must be inside of selected thread stack");
 216 
 217     thread->set_self_raw_id(raw_id);  // mark for quick retrieval
 218     _get_thread_cache[index] = thread;
 219   }
 220   return thread;
 221 }
 222 
 223 
 224 static const double all_zero[sizeof(Thread) / sizeof(double) + 1] = {0};
 225 #define NO_CACHED_THREAD ((Thread*)all_zero)
 226 
 227 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
 228 
 229   // Store the new value before updating the cache to prevent a race
 230   // between get_thread_via_cache_slowly() and this store operation.
 231   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
 232 
 233   // Update thread cache with new thread if setting on thread create,
 234   // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
 235   uintptr_t raw = pd_raw_thread_id();
 236   int ix = pd_cache_index(raw);
 237   _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
 238 }
 239 
 240 void ThreadLocalStorage::pd_init() {
 241   for (int i = 0; i < _pd_cache_size; i++) {
 242     _get_thread_cache[i] = NO_CACHED_THREAD;
 243   }
 244 }
 245 
 246 // Invalidate all the caches (happens to be the same as pd_init).
 247 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
 248 
 249 #undef NO_CACHED_THREAD
 250 
 251 // END Thread Local Storage
 252 
 253 static inline size_t adjust_stack_size(address base, size_t size) {
 254   if ((ssize_t)size < 0) {
 255     // 4759953: Compensate for ridiculous stack size.
 256     size = max_intx;
 257   }
 258   if (size > (size_t)base) {
 259     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
 260     size = (size_t)base;
 261   }
 262   return size;
 263 }
 264 
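// Query the current thread's stack segment.  thr_stksegment() returns the
// stack base in ss_sp (the highest address of the stack) and the stack size
// in ss_size; the size is clamped by adjust_stack_size() to guard against
// bogus values (see 4759953 and 4812466 above).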
 265 static inline stack_t get_stack_info() {
 266   stack_t st;
 267   int retval = thr_stksegment(&st);
 268   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
 269   assert(retval == 0, "incorrect return value from thr_stksegment");
 270   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
 271   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
 272   return st;
 273 }
 274 
 275 address os::current_stack_base() {
 276   int r = thr_main();
 277   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
 278   bool is_primordial_thread = r;
 279 
 280   // Workaround 4352906, avoid calls to thr_stksegment by
 281   // thr_main after the first one (it looks like we trash
 282   // some data, causing the value for ss_sp to be incorrect).
 283   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
 284     stack_t st = get_stack_info();
 285     if (is_primordial_thread) {
 286       // cache initial value of stack base
 287       os::Solaris::_main_stack_base = (address)st.ss_sp;
 288     }
 289     return (address)st.ss_sp;
 290   } else {
 291     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
 292     return os::Solaris::_main_stack_base;
 293   }
 294 }
 295 
 296 size_t os::current_stack_size() {
 297   size_t size;
 298 
 299   int r = thr_main();
 300   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
 301   if (!r) {
 302     size = get_stack_info().ss_size;
 303   } else {
 304     struct rlimit limits;
 305     getrlimit(RLIMIT_STACK, &limits);
 306     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
 307   }
 308   // base may not be page aligned
 309   address base = current_stack_base();
  address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
 311   return (size_t)(base - bottom);
 312 }
 313 
 314 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
 315   return localtime_r(clock, res);
 316 }
 317 
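// If the libc entry point is present, enable the Solaris extended FILE
// facility (available since Solaris 10u4) so that 32-bit stdio is not limited
// to file descriptors below 256.  Skipped when -XX:-UseExtendedFileIO is set.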
 318 void os::Solaris::try_enable_extended_io() {
 319   typedef int (*enable_extended_FILE_stdio_t)(int, int);
 320 
 321   if (!UseExtendedFileIO) {
 322     return;
 323   }
 324 
 325   enable_extended_FILE_stdio_t enabler =
 326     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
 327                                          "enable_extended_FILE_stdio");
 328   if (enabler) {
 329     enabler(-1, -1);
 330   }
 331 }
 332 
 333 static int _processors_online = 0;
 334 
 335          jint os::Solaris::_os_thread_limit = 0;
 336 volatile jint os::Solaris::_os_thread_count = 0;
 337 
 338 julong os::available_memory() {
 339   return Solaris::available_memory();
 340 }
 341 
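// _SC_AVPHYS_PAGES reports the number of physical pages not currently in use;
// multiplying by the page size gives an estimate of available memory in bytes.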
 342 julong os::Solaris::available_memory() {
 343   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
 344 }
 345 
 346 julong os::Solaris::_physical_memory = 0;
 347 
 348 julong os::physical_memory() {
 349    return Solaris::physical_memory();
 350 }
 351 
 352 static hrtime_t first_hrtime = 0;
 353 static const hrtime_t hrtime_hz = 1000*1000*1000;
 354 static volatile hrtime_t max_hrtime = 0;
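// gethrtime() reports nanoseconds from an arbitrary origin, so hrtime_hz is
// fixed at 1 GHz; max_hrtime tracks the largest value handed out so far so
// that getTimeNanos() below never appears to move backwards.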
 355 
 356 
 357 void os::Solaris::initialize_system_info() {
 358   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
 359   _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
 360   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
 361 }
 362 
 363 int os::active_processor_count() {
 364   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 365   pid_t pid = getpid();
 366   psetid_t pset = PS_NONE;
 367   // Are we running in a processor set or is there any processor set around?
 368   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
 369     uint_t pset_cpus;
 370     // Query the number of cpus available to us.
 371     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
 372       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
 373       _processors_online = pset_cpus;
 374       return pset_cpus;
 375     }
 376   }
 377   // Otherwise return number of online cpus
 378   return online_cpus;
 379 }
 380 
 381 static bool find_processors_in_pset(psetid_t        pset,
 382                                     processorid_t** id_array,
 383                                     uint_t*         id_length) {
 384   bool result = false;
 385   // Find the number of processors in the processor set.
 386   if (pset_info(pset, NULL, id_length, NULL) == 0) {
 387     // Make up an array to hold their ids.
 388     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 389     // Fill in the array with their processor ids.
 390     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
 391       result = true;
 392     }
 393   }
 394   return result;
 395 }
 396 
 397 // Callers of find_processors_online() must tolerate imprecise results --
// the system configuration can change asynchronously because of dynamic
// reconfiguration (DR) or explicit psradm operations.
 400 //
 401 // We also need to take care that the loop (below) terminates as the
 402 // number of processors online can change between the _SC_NPROCESSORS_ONLN
 403 // request and the loop that builds the list of processor ids.   Unfortunately
 404 // there's no reliable way to determine the maximum valid processor id,
 405 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
 406 // man pages, which claim the processor id set is "sparse, but
 407 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
 408 // exit the loop.
 409 //
 410 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
 411 // not available on S8.0.
 412 
 413 static bool find_processors_online(processorid_t** id_array,
 414                                    uint*           id_length) {
 415   const processorid_t MAX_PROCESSOR_ID = 100000;
 416   // Find the number of processors online.
 417   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
 418   // Make up an array to hold their ids.
 419   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 420   // Processors need not be numbered consecutively.
 421   long found = 0;
 422   processorid_t next = 0;
 423   while (found < *id_length && next < MAX_PROCESSOR_ID) {
 424     processor_info_t info;
 425     if (processor_info(next, &info) == 0) {
 426       // NB, PI_NOINTR processors are effectively online ...
 427       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
 428         (*id_array)[found] = next;
 429         found += 1;
 430       }
 431     }
 432     next += 1;
 433   }
 434   if (found < *id_length) {
 435       // The loop above didn't identify the expected number of processors.
 436       // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
 437       // and re-running the loop, above, but there's no guarantee of progress
 438       // if the system configuration is in flux.  Instead, we just return what
 439       // we've got.  Note that in the worst case find_processors_online() could
 440       // return an empty set.  (As a fall-back in the case of the empty set we
 441       // could just return the ID of the current processor).
 442       *id_length = found;
 443   }
 444 
 445   return true;
 446 }
 447 
 448 static bool assign_distribution(processorid_t* id_array,
 449                                 uint           id_length,
 450                                 uint*          distribution,
 451                                 uint           distribution_length) {
 452   // We assume we can assign processorid_t's to uint's.
 453   assert(sizeof(processorid_t) == sizeof(uint),
 454          "can't convert processorid_t to uint");
 455   // Quick check to see if we won't succeed.
 456   if (id_length < distribution_length) {
 457     return false;
 458   }
 459   // Assign processor ids to the distribution.
 460   // Try to shuffle processors to distribute work across boards,
 461   // assuming 4 processors per board.
 462   const uint processors_per_board = ProcessDistributionStride;
 463   // Find the maximum processor id.
 464   processorid_t max_id = 0;
 465   for (uint m = 0; m < id_length; m += 1) {
 466     max_id = MAX2(max_id, id_array[m]);
 467   }
 468   // The next id, to limit loops.
 469   const processorid_t limit_id = max_id + 1;
 470   // Make up markers for available processors.
 471   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
 472   for (uint c = 0; c < limit_id; c += 1) {
 473     available_id[c] = false;
 474   }
 475   for (uint a = 0; a < id_length; a += 1) {
 476     available_id[id_array[a]] = true;
 477   }
 478   // Step by "boards", then by "slot", copying to "assigned".
 479   // NEEDS_CLEANUP: The assignment of processors should be stateful,
 480   //                remembering which processors have been assigned by
 481   //                previous calls, etc., so as to distribute several
  //                independent calls of this method.  What we'd like is
  //                an API that lets us ask how many processes are bound
  //                to a processor, but we don't have that, either.
 486   //                In the short term, "board" is static so that
 487   //                subsequent distributions don't all start at board 0.
 488   static uint board = 0;
 489   uint assigned = 0;
 490   // Until we've found enough processors ....
 491   while (assigned < distribution_length) {
 492     // ... find the next available processor in the board.
 493     for (uint slot = 0; slot < processors_per_board; slot += 1) {
 494       uint try_id = board * processors_per_board + slot;
 495       if ((try_id < limit_id) && (available_id[try_id] == true)) {
 496         distribution[assigned] = try_id;
 497         available_id[try_id] = false;
 498         assigned += 1;
 499         break;
 500       }
 501     }
 502     board += 1;
 503     if (board * processors_per_board + 0 >= limit_id) {
 504       board = 0;
 505     }
 506   }
 507   if (available_id != NULL) {
 508     FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
 509   }
 510   return true;
 511 }
 512 
 513 void os::set_native_thread_name(const char *name) {
 514   // Not yet implemented.
 515   return;
 516 }
 517 
 518 bool os::distribute_processes(uint length, uint* distribution) {
 519   bool result = false;
 520   // Find the processor id's of all the available CPUs.
 521   processorid_t* id_array  = NULL;
 522   uint           id_length = 0;
 523   // There are some races between querying information and using it,
 524   // since processor sets can change dynamically.
 525   psetid_t pset = PS_NONE;
 526   // Are we running in a processor set?
 527   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
 528     result = find_processors_in_pset(pset, &id_array, &id_length);
 529   } else {
 530     result = find_processors_online(&id_array, &id_length);
 531   }
 532   if (result == true) {
 533     if (id_length >= length) {
 534       result = assign_distribution(id_array, id_length, distribution, length);
 535     } else {
 536       result = false;
 537     }
 538   }
 539   if (id_array != NULL) {
 540     FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
 541   }
 542   return result;
 543 }
 544 
 545 bool os::bind_to_processor(uint processor_id) {
 546   // We assume that a processorid_t can be stored in a uint.
 547   assert(sizeof(uint) == sizeof(processorid_t),
 548          "can't convert uint to processorid_t");
 549   int bind_result =
 550     processor_bind(P_LWPID,                       // bind LWP.
 551                    P_MYID,                        // bind current LWP.
 552                    (processorid_t) processor_id,  // id.
 553                    NULL);                         // don't return old binding.
 554   return (bind_result == 0);
 555 }
 556 
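// Copy the value of the named environment variable into the supplied buffer.
// Returns false (and stores an empty string) if the variable is unset or the
// value does not fit in len bytes.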
 557 bool os::getenv(const char* name, char* buffer, int len) {
 558   char* val = ::getenv(name);
  if (val == NULL || strlen(val) + 1 > len) {
 561     if (len > 0)  buffer[0] = 0; // return a null string
 562     return false;
 563   }
 564   strcpy(buffer, val);
 565   return true;
 566 }
 567 
 568 
// Return true if the effective uid/gid differ from the real uid/gid, i.e.
// the VM is running with special (setuid/setgid) privileges.
 570 
 571 bool os::have_special_privileges() {
 572   static bool init = false;
 573   static bool privileges = false;
 574   if (!init) {
 575     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 576     init = true;
 577   }
 578   return privileges;
 579 }
 580 
 581 
 582 void os::init_system_properties_values() {
 583   // The next steps are taken in the product version:
 584   //
 585   // Obtain the JAVA_HOME value from the location of libjvm.so.
 586   // This library should be located at:
 587   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
 588   //
 589   // If "/jre/lib/" appears at the right place in the path, then we
 590   // assume libjvm.so is installed in a JDK and we use this path.
 591   //
 592   // Otherwise exit with message: "Could not create the Java virtual machine."
 593   //
 594   // The following extra steps are taken in the debugging version:
 595   //
  // If "/jre/lib/" does NOT appear at the right place in the path, then
  // instead of exiting we check for the $JAVA_HOME environment variable.
 598   //
 599   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
 600   // then we append a fake suffix "hotspot/libjvm.so" to this path so
 601   // it looks like libjvm.so is installed there
 602   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
 603   //
 604   // Otherwise exit.
 605   //
 606   // Important note: if the location of libjvm.so changes this
 607   // code needs to be changed accordingly.
 608 
 609 // Base path of extensions installed on the system.
 610 #define SYS_EXT_DIR     "/usr/jdk/packages"
 611 #define EXTENSIONS_DIR  "/lib/ext"
 612 #define ENDORSED_DIR    "/lib/endorsed"
 613 
 614   char cpu_arch[12];
 615   // Buffer that fits several sprintfs.
 616   // Note that the space for the colon and the trailing null are provided
 617   // by the nulls included by the sizeof operator.
 618   const size_t bufsize =
 619     MAX4((size_t)MAXPATHLEN,  // For dll_dir & friends.
         sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(cpu_arch), // invariant ld_library_path (cpu_arch not yet filled in)
 621          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
 622          (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
 623   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 624 
 625   // sysclasspath, java_home, dll_dir
 626   {
 627     char *pslash;
 628     os::jvm_path(buf, bufsize);
 629 
 630     // Found the full path to libjvm.so.
 631     // Now cut the path to <java_home>/jre if we can.
 632     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 633     pslash = strrchr(buf, '/');
 634     if (pslash != NULL) {
 635       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 636     }
 637     Arguments::set_dll_dir(buf);
 638 
 639     if (pslash != NULL) {
 640       pslash = strrchr(buf, '/');
 641       if (pslash != NULL) {
 642         *pslash = '\0';          // Get rid of /<arch>.
 643         pslash = strrchr(buf, '/');
 644         if (pslash != NULL) {
 645           *pslash = '\0';        // Get rid of /lib.
 646         }
 647       }
 648     }
 649     Arguments::set_java_home(buf);
 650     set_boot_path('/', ':');
 651   }
 652 
 653   // Where to look for native libraries.
 654   {
 655     // Use dlinfo() to determine the correct java.library.path.
 656     //
 657     // If we're launched by the Java launcher, and the user
 658     // does not set java.library.path explicitly on the commandline,
 659     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
 660     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
 661     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
 662     // /usr/lib), which is exactly what we want.
 663     //
 664     // If the user does set java.library.path, it completely
 665     // overwrites this setting, and always has.
 666     //
 667     // If we're not launched by the Java launcher, we may
 668     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
 669     // settings.  Again, dlinfo does exactly what we want.
 670 
 671     Dl_serinfo     info_sz, *info = &info_sz;
 672     Dl_serpath     *path;
 673     char           *library_path;
 674     char           *common_path = buf;
 675 
 676     // Determine search path count and required buffer size.
 677     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
 678       FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 679       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
 680     }
 681 
 682     // Allocate new buffer and initialize.
 683     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
 684     info->dls_size = info_sz.dls_size;
 685     info->dls_cnt = info_sz.dls_cnt;
 686 
 687     // Obtain search path information.
 688     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
 689       FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 690       FREE_C_HEAP_ARRAY(char, info, mtInternal);
 691       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
 692     }
 693 
 694     path = &info->dls_serpath[0];
 695 
 696     // Note: Due to a legacy implementation, most of the library path
    // is set in the launcher. This was to accommodate linking restrictions
 698     // on legacy Solaris implementations (which are no longer supported).
 699     // Eventually, all the library path setting will be done here.
 700     //
 701     // However, to prevent the proliferation of improperly built native
 702     // libraries, the new path component /usr/jdk/packages is added here.
 703 
 704     // Determine the actual CPU architecture.
 705     sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
 706 #ifdef _LP64
 707     // If we are a 64-bit vm, perform the following translations:
 708     //   sparc   -> sparcv9
 709     //   i386    -> amd64
 710     if (strcmp(cpu_arch, "sparc") == 0) {
 711       strcat(cpu_arch, "v9");
 712     } else if (strcmp(cpu_arch, "i386") == 0) {
 713       strcpy(cpu_arch, "amd64");
 714     }
 715 #endif
 716 
 717     // Construct the invariant part of ld_library_path.
 718     sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
 719 
 720     // Struct size is more than sufficient for the path components obtained
 721     // through the dlinfo() call, so only add additional space for the path
 722     // components explicitly added here.
 723     size_t library_path_size = info->dls_size + strlen(common_path);
 724     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
 725     library_path[0] = '\0';
 726 
 727     // Construct the desired Java library path from the linker's library
 728     // search path.
 729     //
 730     // For compatibility, it is optimal that we insert the additional path
 731     // components specific to the Java VM after those components specified
 732     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
 733     // infrastructure.
 734     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
 735       strcpy(library_path, common_path);
 736     } else {
 737       int inserted = 0;
 738       int i;
 739       for (i = 0; i < info->dls_cnt; i++, path++) {
 740         uint_t flags = path->dls_flags & LA_SER_MASK;
 741         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
 742           strcat(library_path, common_path);
 743           strcat(library_path, os::path_separator());
 744           inserted = 1;
 745         }
 746         strcat(library_path, path->dls_name);
 747         strcat(library_path, os::path_separator());
 748       }
 749       // Eliminate trailing path separator.
 750       library_path[strlen(library_path)-1] = '\0';
 751     }
 752 
 753     // happens before argument parsing - can't use a trace flag
 754     // tty->print_raw("init_system_properties_values: native lib path: ");
 755     // tty->print_raw_cr(library_path);
 756 
 757     // Callee copies into its own buffer.
 758     Arguments::set_library_path(library_path);
 759 
 760     FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
 761     FREE_C_HEAP_ARRAY(char, info, mtInternal);
 762   }
 763 
 764   // Extensions directories.
 765   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
 766   Arguments::set_ext_dirs(buf);
 767 
 768   // Endorsed standards default directory.
 769   sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
 770   Arguments::set_endorsed_dirs(buf);
 771 
 772   FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 773 
 774 #undef SYS_EXT_DIR
 775 #undef EXTENSIONS_DIR
 776 #undef ENDORSED_DIR
 777 }
 778 
 779 void os::breakpoint() {
 780   BREAKPOINT;
 781 }
 782 
 783 bool os::obsolete_option(const JavaVMOption *option)
 784 {
 785   if (!strncmp(option->optionString, "-Xt", 3)) {
 786     return true;
 787   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
 788     return true;
 789   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
 790     return true;
 791   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
 792     return true;
 793   }
 794   return false;
 795 }
 796 
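// Return true if sp lies within the given thread's stack; Solaris stacks grow
// downward from stack_base() toward stack_base() - stack_size().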
 797 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
 798   address  stackStart  = (address)thread->stack_base();
 799   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
 800   if (sp < stackStart && sp >= stackEnd) return true;
 801   return false;
 802 }
 803 
 804 extern "C" void breakpoint() {
 805   // use debugger to set breakpoint here
 806 }
 807 
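// Thread id of the primordial thread, used by create_main_thread() below.
// It is set elsewhere during VM initialization.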
 808 static thread_t main_thread;
 809 
 810 // Thread start routine for all new Java threads
 811 extern "C" void* java_start(void* thread_addr) {
 812   // Try to randomize the cache line index of hot stack frames.
 813   // This helps when threads of the same stack traces evict each other's
 814   // cache lines. The threads can be either from the same JVM instance, or
 815   // from different JVM instances. The benefit is especially true for
 816   // processors with hyperthreading technology.
 817   static int counter = 0;
 818   int pid = os::current_process_id();
 819   alloca(((pid ^ counter++) & 7) * 128);
 820 
 821   int prio;
 822   Thread* thread = (Thread*)thread_addr;
 823   OSThread* osthr = thread->osthread();
 824 
 825   osthr->set_lwp_id(_lwp_self());  // Store lwp in case we are bound
 826   thread->_schedctl = (void *) schedctl_init();
 827 
 828   if (UseNUMA) {
 829     int lgrp_id = os::numa_get_group_id();
 830     if (lgrp_id != -1) {
 831       thread->set_lgrp_id(lgrp_id);
 832     }
 833   }
 834 
 835   // If the creator called set priority before we started,
 836   // we need to call set_native_priority now that we have an lwp.
 837   // We used to get the priority from thr_getprio (we called
 838   // thr_setprio way back in create_thread) and pass it to
 839   // set_native_priority, but Solaris scales the priority
 840   // in java_to_os_priority, so when we read it back here,
 841   // we pass trash to set_native_priority instead of what's
 842   // in java_to_os_priority. So we save the native priority
 843   // in the osThread and recall it here.
 844 
 845   if (osthr->thread_id() != -1) {
 846     if (UseThreadPriorities) {
 847       int prio = osthr->native_priority();
 848       if (ThreadPriorityVerbose) {
 849         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
 850                       INTPTR_FORMAT ", setting priority: %d\n",
 851                       osthr->thread_id(), osthr->lwp_id(), prio);
 852       }
 853       os::set_native_priority(thread, prio);
 854     }
 855   } else if (ThreadPriorityVerbose) {
 856     warning("Can't set priority in _start routine, thread id hasn't been set\n");
 857   }
 858 
 859   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 860 
 861   // initialize signal mask for this thread
 862   os::Solaris::hotspot_sigmask(thread);
 863 
 864   thread->run();
 865 
 866   // One less thread is executing
 867   // When the VMThread gets here, the main thread may have already exited
 868   // which frees the CodeHeap containing the Atomic::dec code
 869   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 870     Atomic::dec(&os::Solaris::_os_thread_count);
 871   }
 872 
 873   if (UseDetachedThreads) {
 874     thr_exit(NULL);
 875     ShouldNotReachHere();
 876   }
 877   return NULL;
 878 }
 879 
 880 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
 881   // Allocate the OSThread object
 882   OSThread* osthread = new OSThread(NULL, NULL);
 883   if (osthread == NULL) return NULL;
 884 
 885   // Store info on the Solaris thread into the OSThread
 886   osthread->set_thread_id(thread_id);
 887   osthread->set_lwp_id(_lwp_self());
 888   thread->_schedctl = (void *) schedctl_init();
 889 
 890   if (UseNUMA) {
 891     int lgrp_id = os::numa_get_group_id();
 892     if (lgrp_id != -1) {
 893       thread->set_lgrp_id(lgrp_id);
 894     }
 895   }
 896 
 897   if (ThreadPriorityVerbose) {
 898     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
 899                   osthread->thread_id(), osthread->lwp_id());
 900   }
 901 
 902   // Initial thread state is INITIALIZED, not SUSPENDED
 903   osthread->set_state(INITIALIZED);
 904 
 905   return osthread;
 906 }
 907 
 908 void os::Solaris::hotspot_sigmask(Thread* thread) {
 909 
  // Save caller's signal mask
 911   sigset_t sigmask;
 912   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
 913   OSThread *osthread = thread->osthread();
 914   osthread->set_caller_sigmask(sigmask);
 915 
 916   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
 917   if (!ReduceSignalUsage) {
 918     if (thread->is_VM_thread()) {
 919       // Only the VM thread handles BREAK_SIGNAL ...
 920       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
 921     } else {
 922       // ... all other threads block BREAK_SIGNAL
 923       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
 924       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
 925     }
 926   }
 927 }
 928 
 929 bool os::create_attached_thread(JavaThread* thread) {
 930 #ifdef ASSERT
 931   thread->verify_not_published();
 932 #endif
 933   OSThread* osthread = create_os_thread(thread, thr_self());
 934   if (osthread == NULL) {
 935      return false;
 936   }
 937 
 938   // Initial thread state is RUNNABLE
 939   osthread->set_state(RUNNABLE);
 940   thread->set_osthread(osthread);
 941 
 942   // initialize signal mask for this thread
 943   // and save the caller's signal mask
 944   os::Solaris::hotspot_sigmask(thread);
 945 
 946   return true;
 947 }
 948 
 949 bool os::create_main_thread(JavaThread* thread) {
 950 #ifdef ASSERT
 951   thread->verify_not_published();
 952 #endif
 953   if (_starting_thread == NULL) {
 954     _starting_thread = create_os_thread(thread, main_thread);
 955      if (_starting_thread == NULL) {
 956         return false;
 957      }
 958   }
 959 
  // The primordial thread is runnable from the start
 961   _starting_thread->set_state(RUNNABLE);
 962 
 963   thread->set_osthread(_starting_thread);
 964 
 965   // initialize signal mask for this thread
 966   // and save the caller's signal mask
 967   os::Solaris::hotspot_sigmask(thread);
 968 
 969   return true;
 970 }
 971 
 972 
 973 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 974   // Allocate the OSThread object
 975   OSThread* osthread = new OSThread(NULL, NULL);
 976   if (osthread == NULL) {
 977     return false;
 978   }
 979 
 980   if (ThreadPriorityVerbose) {
 981     char *thrtyp;
 982     switch (thr_type) {
 983       case vm_thread:
 984         thrtyp = (char *)"vm";
 985         break;
 986       case cgc_thread:
 987         thrtyp = (char *)"cgc";
 988         break;
 989       case pgc_thread:
 990         thrtyp = (char *)"pgc";
 991         break;
 992       case java_thread:
 993         thrtyp = (char *)"java";
 994         break;
 995       case compiler_thread:
 996         thrtyp = (char *)"compiler";
 997         break;
 998       case watcher_thread:
 999         thrtyp = (char *)"watcher";
1000         break;
1001       default:
1002         thrtyp = (char *)"unknown";
1003         break;
1004     }
1005     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
1006   }
1007 
1008   // Calculate stack size if it's not specified by caller.
1009   if (stack_size == 0) {
    // The default stack size is 1M (2M for LP64).
1011     stack_size = (BytesPerWord >> 2) * K * K;
1012 
1013     switch (thr_type) {
1014     case os::java_thread:
1015       // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
1016       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
1017       break;
1018     case os::compiler_thread:
1019       if (CompilerThreadStackSize > 0) {
1020         stack_size = (size_t)(CompilerThreadStackSize * K);
1021         break;
1022       } // else fall through:
1023         // use VMThreadStackSize if CompilerThreadStackSize is not defined
1024     case os::vm_thread:
1025     case os::pgc_thread:
1026     case os::cgc_thread:
1027     case os::watcher_thread:
1028       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1029       break;
1030     }
1031   }
1032   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1033 
1034   // Initial state is ALLOCATED but not INITIALIZED
1035   osthread->set_state(ALLOCATED);
1036 
1037   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
1038     // We got lots of threads. Check if we still have some address space left.
    // We need at least 20 Mb of unreserved address space, which we check
    // by trying to reserve some.
1041     const size_t VirtualMemoryBangSize = 20*K*K;
1042     char* mem = os::reserve_memory(VirtualMemoryBangSize);
1043     if (mem == NULL) {
1044       delete osthread;
1045       return false;
1046     } else {
1047       // Release the memory again
1048       os::release_memory(mem, VirtualMemoryBangSize);
1049     }
1050   }
1051 
1052   // Setup osthread because the child thread may need it.
1053   thread->set_osthread(osthread);
1054 
1055   // Create the Solaris thread
1056   thread_t tid = 0;
1057   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
1058   int      status;
1059 
1060   // Mark that we don't have an lwp or thread id yet.
1061   // In case we attempt to set the priority before the thread starts.
1062   osthread->set_lwp_id(-1);
1063   osthread->set_thread_id(-1);
1064 
1065   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1066   if (status != 0) {
1067     if (PrintMiscellaneous && (Verbose || WizardMode)) {
1068       perror("os::create_thread");
1069     }
1070     thread->set_osthread(NULL);
1071     // Need to clean up stuff we've allocated so far
1072     delete osthread;
1073     return false;
1074   }
1075 
1076   Atomic::inc(&os::Solaris::_os_thread_count);
1077 
1078   // Store info on the Solaris thread into the OSThread
1079   osthread->set_thread_id(tid);
1080 
1081   // Remember that we created this thread so we can set priority on it
1082   osthread->set_vm_created();
1083 
1084   // Initial thread state is INITIALIZED, not SUSPENDED
1085   osthread->set_state(INITIALIZED);
1086 
1087   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1088   return true;
1089 }
1090 
/* SIGJVM1 and SIGJVM2 are defined for Solaris 10 and later.  Defining them
 * here allows builds on earlier versions of Solaris to take advantage of the
 * newly reserved Solaris JVM signals.  When they are available,
 * INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2, and -XX:+UseAltSigs
 * does nothing since these signals do not conflict with anything else.
 */
1096 #if !defined(SIGJVM1)
1097 #define SIGJVM1 39
1098 #define SIGJVM2 40
1099 #endif
1100 
1101 debug_only(static bool signal_sets_initialized = false);
1102 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1103 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1104 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1105 
bool os::Solaris::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
                                 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  return ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN);
}
1116 
1117 // Note: SIGRTMIN is a macro that calls sysconf() so it will
1118 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime
1119 static bool isJVM1available() {
1120   return SIGJVM1 < SIGRTMIN;
1121 }
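// If SIGJVM1/SIGJVM2 fall at or above SIGRTMIN, the hard-coded values above
// would collide with the real-time signal range, so the reserved JVM signals
// cannot be used and signal_sets_init() falls back to the traditional
// INTERRUPT_SIGNAL/ASYNC_SIGNAL (or the -XX:+UseAltSigs alternatives).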
1122 
1123 void os::Solaris::signal_sets_init() {
1124   // Should also have an assertion stating we are still single-threaded.
1125   assert(!signal_sets_initialized, "Already initialized");
1126   // Fill in signals that are necessarily unblocked for all threads in
1127   // the VM. Currently, we unblock the following signals:
1128   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1129   //                         by -Xrs (=ReduceSignalUsage));
1130   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1131   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1132   // the dispositions or masks wrt these signals.
1133   // Programs embedding the VM that want to use the above signals for their
1134   // own purposes must, at this time, use the "-Xrs" option to prevent
1135   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1136   // (See bug 4345157, and other related bugs).
1137   // In reality, though, unblocking these signals is really a nop, since
1138   // these signals are not blocked by default.
1139   sigemptyset(&unblocked_sigs);
1140   sigemptyset(&allowdebug_blocked_sigs);
1141   sigaddset(&unblocked_sigs, SIGILL);
1142   sigaddset(&unblocked_sigs, SIGSEGV);
1143   sigaddset(&unblocked_sigs, SIGBUS);
1144   sigaddset(&unblocked_sigs, SIGFPE);
1145 
  if (isJVM1available()) {
1147     os::Solaris::set_SIGinterrupt(SIGJVM1);
1148     os::Solaris::set_SIGasync(SIGJVM2);
1149   } else if (UseAltSigs) {
1150     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1151     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1152   } else {
1153     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1154     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1155   }
1156 
1157   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1158   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1159 
1160   if (!ReduceSignalUsage) {
1161    if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1162       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1163       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1164    }
1165    if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1166       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1167       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1168    }
1169    if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1170       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1171       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1172    }
1173   }
1174   // Fill in signals that are blocked by all but the VM thread.
1175   sigemptyset(&vm_sigs);
1176   if (!ReduceSignalUsage)
1177     sigaddset(&vm_sigs, BREAK_SIGNAL);
1178   debug_only(signal_sets_initialized = true);
1179 
1180   // For diagnostics only used in run_periodic_checks
1181   sigemptyset(&check_signal_done);
1182 }
1183 
1184 // These are signals that are unblocked while a thread is running Java.
1185 // (For some reason, they get blocked by default.)
1186 sigset_t* os::Solaris::unblocked_signals() {
1187   assert(signal_sets_initialized, "Not initialized");
1188   return &unblocked_sigs;
1189 }
1190 
1191 // These are the signals that are blocked while a (non-VM) thread is
1192 // running Java. Only the VM thread handles these signals.
1193 sigset_t* os::Solaris::vm_signals() {
1194   assert(signal_sets_initialized, "Not initialized");
1195   return &vm_sigs;
1196 }
1197 
1198 // These are signals that are blocked during cond_wait to allow debugger in
1199 sigset_t* os::Solaris::allowdebug_blocked_signals() {
1200   assert(signal_sets_initialized, "Not initialized");
1201   return &allowdebug_blocked_sigs;
1202 }
1203 
1204 
1205 void _handle_uncaught_cxx_exception() {
1206   VMError err("An uncaught C++ exception");
1207   err.report_and_die();
1208 }
1209 
1210 
1211 // First crack at OS-specific initialization, from inside the new thread.
1212 void os::initialize_thread(Thread* thr) {
1213   int r = thr_main();
1214   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
1215   if (r) {
1216     JavaThread* jt = (JavaThread *)thr;
1217     assert(jt != NULL, "Sanity check");
1218     size_t stack_size;
1219     address base = jt->stack_base();
1220     if (Arguments::created_by_java_launcher()) {
1221       // Use 2MB to allow for Solaris 7 64 bit mode.
1222       stack_size = JavaThread::stack_size_at_create() == 0
1223         ? 2048*K : JavaThread::stack_size_at_create();
1224 
1225       // There are rare cases when we may have already used more than
1226       // the basic stack size allotment before this method is invoked.
1227       // Attempt to allow for a normally sized java_stack.
1228       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1229       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1230     } else {
1231       // 6269555: If we were not created by a Java launcher, i.e. if we are
1232       // running embedded in a native application, treat the primordial thread
1233       // as much like a native attached thread as possible.  This means using
1234       // the current stack size from thr_stksegment(), unless it is too large
1235       // to reliably setup guard pages.  A reasonable max size is 8MB.
1236       size_t current_size = current_stack_size();
1237       // This should never happen, but just in case....
1238       if (current_size == 0) current_size = 2 * K * K;
1239       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1240     }
    address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
1242     stack_size = (size_t)(base - bottom);
1243 
1244     assert(stack_size > 0, "Stack size calculation problem");
1245 
1246     if (stack_size > jt->stack_size()) {
1247       NOT_PRODUCT(
1248         struct rlimit limits;
1249         getrlimit(RLIMIT_STACK, &limits);
1250         size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1251         assert(size >= jt->stack_size(), "Stack size problem in main thread");
1252       )
1253       tty->print_cr(
1254         "Stack size of %d Kb exceeds current limit of %d Kb.\n"
1255         "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1256         "See limit(1) to increase the stack size limit.",
1257         stack_size / K, jt->stack_size() / K);
1258       vm_exit(1);
1259     }
1260     assert(jt->stack_size() >= stack_size,
1261           "Attempt to map more stack than was allocated");
1262     jt->set_stack_size(stack_size);
1263   }
1264 
1265   // With the T2 libthread (T1 is no longer supported) threads are always bound
1266   // and we use stackbanging in all cases.
1267 
1268   os::Solaris::init_thread_fpu_state();
1269   std::set_terminate(_handle_uncaught_cxx_exception);
1270 }
1271 
1272 
1273 
1274 // Free Solaris resources related to the OSThread
1275 void os::free_thread(OSThread* osthread) {
1276   assert(osthread != NULL, "os::free_thread but osthread not set");
1277 
1278 
1279   // We are told to free resources of the argument thread,
1280   // but we can only really operate on the current thread.
1281   // The main thread must take the VMThread down synchronously
1282   // before the main thread exits and frees up CodeHeap
1283   guarantee((Thread::current()->osthread() == osthread
1284      || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1285   if (Thread::current()->osthread() == osthread) {
1286     // Restore caller's signal mask
1287     sigset_t sigmask = osthread->caller_sigmask();
1288     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1289   }
1290   delete osthread;
1291 }
1292 
1293 void os::pd_start_thread(Thread* thread) {
1294   int status = thr_continue(thread->osthread()->thread_id());
1295   assert_status(status == 0, status, "thr_continue failed");
1296 }
1297 
1298 
1299 intx os::current_thread_id() {
1300   return (intx)thr_self();
1301 }
1302 
1303 static pid_t _initial_pid = 0;
1304 
1305 int os::current_process_id() {
1306   return (int)(_initial_pid ? _initial_pid : getpid());
1307 }
1308 
1309 int os::allocate_thread_local_storage() {
1310   // %%%       in Win32 this allocates a memory segment pointed to by a
1311   //           register.  Dan Stein can implement a similar feature in
1312   //           Solaris.  Alternatively, the VM can do the same thing
1313   //           explicitly: malloc some storage and keep the pointer in a
1314   //           register (which is part of the thread's context) (or keep it
1315   //           in TLS).
1316   // %%%       In current versions of Solaris, thr_self and TSD can
1317   //           be accessed via short sequences of displaced indirections.
1318   //           The value of thr_self is available as %g7(36).
1319   //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
1320   //           assuming that the current thread already has a value bound to k.
1321   //           It may be worth experimenting with such access patterns,
1322   //           and later having the parameters formally exported from a Solaris
1323   //           interface.  I think, however, that it will be faster to
1324   //           maintain the invariant that %g2 always contains the
1325   //           JavaThread in Java code, and have stubs simply
1326   //           treat %g2 as a caller-save register, preserving it in a %lN.
1327   thread_key_t tk;
1328   if (thr_keycreate( &tk, NULL))
1329     fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
1330                   "(%s)", strerror(errno)));
1331   return int(tk);
1332 }
1333 
1334 void os::free_thread_local_storage(int index) {
1335   // %%% don't think we need anything here
1336   // if ( pthread_key_delete((pthread_key_t) tk) )
1337   //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
1338 }
1339 
#define SMALLINT 32   // libthread's allocation for tsd_common is a version-specific
                      // small number - the point is that NO swap space is available
1342 void os::thread_local_storage_at_put(int index, void* value) {
1343   // %%% this is used only in threadLocalStorage.cpp
1344   if (thr_setspecific((thread_key_t)index, value)) {
1345     if (errno == ENOMEM) {
1346        vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
1347                              "thr_setspecific: out of swap space");
1348     } else {
1349       fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1350                     "(%s)", strerror(errno)));
1351     }
1352   } else {
1353       ThreadLocalStorage::set_thread_in_slot((Thread *) value);
1354   }
1355 }
1356 
1357 // This function could be called before TLS is initialized, for example, when
1358 // VM receives an async signal or when VM causes a fatal error during
1359 // initialization. Return NULL if thr_getspecific() fails.
1360 void* os::thread_local_storage_at(int index) {
1361   // %%% this is used only in threadLocalStorage.cpp
1362   void* r = NULL;
1363   return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1364 }
1365 
1366 
1367 // gethrtime() should be monotonic according to the documentation,
1368 // but some virtualized platforms are known to break this guarantee.
1369 // getTimeNanos() must be guaranteed not to move backwards, so we
1370 // are forced to add a check here.
1371 inline hrtime_t getTimeNanos() {
1372   const hrtime_t now = gethrtime();
1373   const hrtime_t prev = max_hrtime;
1374   if (now <= prev) {
1375     return prev;   // same or retrograde time;
1376   }
1377   const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1378   assert(obsv >= prev, "invariant");   // Monotonicity
1379   // If the CAS succeeded then we're done and return "now".
1380   // If the CAS failed and the observed value "obsv" is >= now then
1381   // we should return "obsv".  If the CAS failed and now > obsv > prv then
1382   // some other thread raced this thread and installed a new value, in which case
1383   // we could either (a) retry the entire operation, (b) retry trying to install now
1384   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1385   // we might discard a higher "now" value in deference to a slightly lower but freshly
1386   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1387   // to (a) or (b) -- and greatly reduces coherence traffic.
1388   // We might also condition (c) on the magnitude of the delta between obsv and now.
1389   // Avoiding excessive CAS operations to hot RW locations is critical.
1390   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1391   return (prev == obsv) ? now : obsv;
1392 }
1393 
1394 // Time since start-up in seconds to a fine granularity.
1395 // Used by VMSelfDestructTimer and the MemProfiler.
1396 double os::elapsedTime() {
1397   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1398 }
1399 
1400 jlong os::elapsed_counter() {
1401   return (jlong)(getTimeNanos() - first_hrtime);
1402 }
1403 
1404 jlong os::elapsed_frequency() {
1405    return hrtime_hz;
1406 }
1407 
1408 // Return the real, user, and system times in seconds from an
1409 // arbitrary fixed point in the past.
1410 bool os::getTimesSecs(double* process_real_time,
1411                   double* process_user_time,
1412                   double* process_system_time) {
1413   struct tms ticks;
1414   clock_t real_ticks = times(&ticks);
1415 
1416   if (real_ticks == (clock_t) (-1)) {
1417     return false;
1418   } else {
1419     double ticks_per_second = (double) clock_tics_per_sec;
1420     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1421     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1422     // For consistency return the real time from getTimeNanos()
1423     // converted to seconds.
1424     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1425 
1426     return true;
1427   }
1428 }
1429 
1430 bool os::supports_vtime() { return true; }
1431 
1432 bool os::enable_vtime() {
1433   int fd = ::open("/proc/self/ctl", O_WRONLY);
1434   if (fd == -1)
1435     return false;
1436 
1437   long cmd[] = { PCSET, PR_MSACCT };
1438   int res = ::write(fd, cmd, sizeof(long) * 2);
1439   ::close(fd);
1440   if (res != sizeof(long) * 2)
1441     return false;
1442 
1443   return true;
1444 }
1445 
1446 bool os::vtime_enabled() {
1447   int fd = ::open("/proc/self/status", O_RDONLY);
1448   if (fd == -1)
1449     return false;
1450 
1451   pstatus_t status;
1452   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1453   ::close(fd);
1454   if (res != sizeof(pstatus_t))
1455     return false;
1456 
1457   return status.pr_flags & PR_MSACCT;
1458 }
1459 
1460 double os::elapsedVTime() {
1461   return (double)gethrvtime() / (double)hrtime_hz;
1462 }
1463 
1464 // Used internally for comparisons only.
1465 // getTimeMillis is guaranteed not to move backwards on Solaris.
1466 jlong getTimeMillis() {
1467   jlong nanotime = getTimeNanos();
1468   return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1469 }
1470 
1471 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1472 jlong os::javaTimeMillis() {
1473   timeval t;
1474   if (gettimeofday( &t, NULL) == -1)
1475     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1476   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1477 }
1478 
1479 jlong os::javaTimeNanos() {
1480   return (jlong)getTimeNanos();
1481 }
1482 
1483 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1484   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1485   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1486   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1487   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1488 }
1489 
1490 char * os::local_time_string(char *buf, size_t buflen) {
1491   struct tm t;
1492   time_t long_time;
1493   time(&long_time);
1494   localtime_r(&long_time, &t);
1495   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1496                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1497                t.tm_hour, t.tm_min, t.tm_sec);
1498   return buf;
1499 }
1500 
1501 // Note: os::shutdown() might be called very early during initialization, or
1502 // called from signal handler. Before adding something to os::shutdown(), make
1503 // sure it is async-safe and can handle partially initialized VM.
1504 void os::shutdown() {
1505 
1506   // allow PerfMemory to attempt cleanup of any persistent resources
1507   perfMemory_exit();
1508 
1509   // needs to remove object in file system
1510   AttachListener::abort();
1511 
1512   // flush buffered output, finish log files
1513   ostream_abort();
1514 
1515   // Check for abort hook
1516   abort_hook_t abort_hook = Arguments::abort_hook();
1517   if (abort_hook != NULL) {
1518     abort_hook();
1519   }
1520 }
1521 
1522 // Note: os::abort() might be called very early during initialization, or
1523 // called from signal handler. Before adding something to os::abort(), make
1524 // sure it is async-safe and can handle partially initialized VM.
1525 void os::abort(bool dump_core) {
1526   os::shutdown();
1527   if (dump_core) {
1528 #ifndef PRODUCT
1529     fdStream out(defaultStream::output_fd());
1530     out.print_raw("Current thread is ");
1531     char buf[16];
1532     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1533     out.print_raw_cr(buf);
1534     out.print_raw_cr("Dumping core ...");
1535 #endif
1536     ::abort(); // dump core (for debugging)
1537   }
1538 
1539   ::exit(1);
1540 }
1541 
1542 // Die immediately, no exit hook, no abort hook, no cleanup.
1543 void os::die() {
1544   ::abort(); // dump core (for debugging)
1545 }
1546 
1547 // unused
1548 void os::set_error_file(const char *logfile) {}
1549 
1550 // DLL functions
1551 
1552 const char* os::dll_file_extension() { return ".so"; }
1553 
1554 // This must be hard coded because it's the system's temporary
1555 // directory, not the java application's temp directory (a la java.io.tmpdir).
1556 const char* os::get_temp_directory() { return "/tmp"; }
1557 
1558 static bool file_exists(const char* filename) {
1559   struct stat statbuf;
1560   if (filename == NULL || strlen(filename) == 0) {
1561     return false;
1562   }
1563   return os::stat(filename, &statbuf) == 0;
1564 }
1565 
1566 bool os::dll_build_name(char* buffer, size_t buflen,
1567                         const char* pname, const char* fname) {
1568   bool retval = false;
1569   const size_t pnamelen = pname ? strlen(pname) : 0;
1570 
1571   // Return error on buffer overflow.
1572   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1573     return retval;
1574   }
1575 
1576   if (pnamelen == 0) {
1577     snprintf(buffer, buflen, "lib%s.so", fname);
1578     retval = true;
1579   } else if (strchr(pname, *os::path_separator()) != NULL) {
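         // pname is a search path; probe each element and accept the first one
         // in which the library file actually exists.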
1580     int n;
1581     char** pelements = split_path(pname, &n);
1582     if (pelements == NULL) {
1583       return false;
1584     }
1585     for (int i = 0; i < n; i++) {
1586       // really shouldn't be NULL but what the heck, check can't hurt
1587       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1588         continue; // skip the empty path values
1589       }
1590       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1591       if (file_exists(buffer)) {
1592         retval = true;
1593         break;
1594       }
1595     }
1596     // release the storage
1597     for (int i = 0; i < n; i++) {
1598       if (pelements[i] != NULL) {
1599         FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1600       }
1601     }
1602     if (pelements != NULL) {
1603       FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1604     }
1605   } else {
1606     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1607     retval = true;
1608   }
1609   return retval;
1610 }
1611 
1612 // check if addr is inside libjvm.so
1613 bool os::address_is_in_vm(address addr) {
1614   static address libjvm_base_addr;
1615   Dl_info dlinfo;
1616 
1617   if (libjvm_base_addr == NULL) {
1618     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1619       libjvm_base_addr = (address)dlinfo.dli_fbase;
1620     }
1621     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1622   }
1623 
1624   if (dladdr((void *)addr, &dlinfo) != 0) {
1625     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1626   }
1627 
1628   return false;
1629 }
1630 
1631 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
1632 static dladdr1_func_type dladdr1_func = NULL;
1633 
1634 bool os::dll_address_to_function_name(address addr, char *buf,
1635                                       int buflen, int * offset) {
1636   // buf is not optional, but offset is optional
1637   assert(buf != NULL, "sanity check");
1638 
1639   Dl_info dlinfo;
1640 
1641   // dladdr1_func was initialized in os::init()
1642   if (dladdr1_func != NULL) {
1643     // yes, we have dladdr1
1644 
1645     // Support for dladdr1 is checked at runtime; it may be
1646     // available even if the vm is built on a machine that does
1647     // not have dladdr1 support.  Make sure there is a value for
1648     // RTLD_DL_SYMENT.
1649     #ifndef RTLD_DL_SYMENT
1650     #define RTLD_DL_SYMENT 1
1651     #endif
1652 #ifdef _LP64
1653     Elf64_Sym * info;
1654 #else
1655     Elf32_Sym * info;
1656 #endif
1657     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1658                      RTLD_DL_SYMENT) != 0) {
1659       // see if we have a matching symbol that covers our address
1660       if (dlinfo.dli_saddr != NULL &&
1661           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1662         if (dlinfo.dli_sname != NULL) {
1663           if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1664             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1665           }
1666           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1667           return true;
1668         }
1669       }
1670       // no matching symbol so try for just file info
1671       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1672         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1673                             buf, buflen, offset, dlinfo.dli_fname)) {
1674           return true;
1675         }
1676       }
1677     }
1678     buf[0] = '\0';
1679     if (offset != NULL) *offset  = -1;
1680     return false;
1681   }
1682 
1683   // no, only dladdr is available
1684   if (dladdr((void *)addr, &dlinfo) != 0) {
1685     // see if we have a matching symbol
1686     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1687       if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1688         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1689       }
1690       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1691       return true;
1692     }
1693     // no matching symbol so try for just file info
1694     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1695       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1696                           buf, buflen, offset, dlinfo.dli_fname)) {
1697         return true;
1698       }
1699     }
1700   }
1701   buf[0] = '\0';
1702   if (offset != NULL) *offset  = -1;
1703   return false;
1704 }
1705 
1706 bool os::dll_address_to_library_name(address addr, char* buf,
1707                                      int buflen, int* offset) {
1708   // buf is not optional, but offset is optional
1709   assert(buf != NULL, "sanity check");
1710 
1711   Dl_info dlinfo;
1712 
1713   if (dladdr((void*)addr, &dlinfo) != 0) {
1714     if (dlinfo.dli_fname != NULL) {
1715       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1716     }
1717     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1718       *offset = addr - (address)dlinfo.dli_fbase;
1719     }
1720     return true;
1721   }
1722 
1723   buf[0] = '\0';
1724   if (offset) *offset = -1;
1725   return false;
1726 }
1727 
1728 // Prints the names and full paths of all opened dynamic libraries
1729 // for current process
1730 void os::print_dll_info(outputStream * st) {
1731   Dl_info dli;
1732   void *handle;
1733   Link_map *map;
1734   Link_map *p;
1735 
1736   st->print_cr("Dynamic libraries:"); st->flush();
1737 
1738   if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
1739       dli.dli_fname == NULL) {
1740     st->print_cr("Error: Cannot print dynamic libraries.");
1741     return;
1742   }
1743   handle = dlopen(dli.dli_fname, RTLD_LAZY);
1744   if (handle == NULL) {
1745     st->print_cr("Error: Cannot print dynamic libraries.");
1746     return;
1747   }
1748   dlinfo(handle, RTLD_DI_LINKMAP, &map);
1749   if (map == NULL) {
1750     st->print_cr("Error: Cannot print dynamic libraries.");
1751     return;
1752   }
1753 
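       // Walk back to the head of the link map, then print every loaded object.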
1754   while (map->l_prev != NULL)
1755     map = map->l_prev;
1756 
1757   while (map != NULL) {
1758     st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
1759     map = map->l_next;
1760   }
1761 
1762   dlclose(handle);
1763 }
1764 
1765 // Loads a .dll/.so and, in case of error, checks whether
1766 // the .dll/.so was built for the same architecture as
1767 // HotSpot is running on.
1768 
1769 void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
1770 {
1771   void * result= ::dlopen(filename, RTLD_LAZY);
1772   if (result != NULL) {
1773     // Successful loading
1774     return result;
1775   }
1776 
1777   Elf32_Ehdr elf_head;
1778 
1779   // Read system error message into ebuf
1780   // It may or may not be overwritten below
1781   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1782   ebuf[ebuflen-1]='\0';
1783   int diag_msg_max_length=ebuflen-strlen(ebuf);
1784   char* diag_msg_buf=ebuf+strlen(ebuf);
1785 
1786   if (diag_msg_max_length==0) {
1787     // No more space in ebuf for additional diagnostics message
1788     return NULL;
1789   }
1790 
1791 
1792   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1793 
1794   if (file_descriptor < 0) {
1795     // Can't open library, report dlerror() message
1796     return NULL;
1797   }
1798 
1799   bool failed_to_read_elf_head=
1800     (sizeof(elf_head)!=
1801         (::read(file_descriptor, &elf_head,sizeof(elf_head))));
1802 
1803   ::close(file_descriptor);
1804   if (failed_to_read_elf_head) {
1805     // file i/o error - report dlerror() msg
1806     return NULL;
1807   }
1808 
1809   typedef struct {
1810     Elf32_Half  code;         // Actual value as defined in elf.h
1811     Elf32_Half  compat_class; // Compatibility of archs at VM's sense
1812     char        elf_class;    // 32 or 64 bit
1813     char        endianess;    // MSB or LSB
1814     char*       name;         // String representation
1815   } arch_t;
1816 
1817   static const arch_t arch_array[]={
1818     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1819     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1820     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1821     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1822     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1823     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1824     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1825     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1826     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1827     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1828   };
1829 
1830   #if  (defined IA32)
1831     static  Elf32_Half running_arch_code=EM_386;
1832   #elif   (defined AMD64)
1833     static  Elf32_Half running_arch_code=EM_X86_64;
1834   #elif  (defined IA64)
1835     static  Elf32_Half running_arch_code=EM_IA_64;
1836   #elif  (defined __sparc) && (defined _LP64)
1837     static  Elf32_Half running_arch_code=EM_SPARCV9;
1838   #elif  (defined __sparc) && (!defined _LP64)
1839     static  Elf32_Half running_arch_code=EM_SPARC;
1840   #elif  (defined __powerpc64__)
1841     static  Elf32_Half running_arch_code=EM_PPC64;
1842   #elif  (defined __powerpc__)
1843     static  Elf32_Half running_arch_code=EM_PPC;
1844   #elif (defined ARM)
1845     static  Elf32_Half running_arch_code=EM_ARM;
1846   #else
1847     #error Method os::dll_load requires that one of the following is defined:\
1848          IA32, AMD64, IA64, __sparc, __powerpc__, __powerpc64__, ARM
1849   #endif
1850 
1851   // Identify compatibility class for VM's architecture and library's architecture
1852   // Obtain string descriptions for architectures
1853 
1854   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1855   int running_arch_index=-1;
1856 
1857   for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) {
1858     if (running_arch_code == arch_array[i].code) {
1859       running_arch_index    = i;
1860     }
1861     if (lib_arch.code == arch_array[i].code) {
1862       lib_arch.compat_class = arch_array[i].compat_class;
1863       lib_arch.name         = arch_array[i].name;
1864     }
1865   }
1866 
1867   assert(running_arch_index != -1,
1868     "Didn't find running architecture code (running_arch_code) in arch_array");
1869   if (running_arch_index == -1) {
1870     // Even though running architecture detection failed
1871     // we may still continue with reporting dlerror() message
1872     return NULL;
1873   }
1874 
1875   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1876     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1877     return NULL;
1878   }
1879 
1880   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1881     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1882     return NULL;
1883   }
1884 
1885   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1886     if (lib_arch.name!=NULL) {
1887       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1888         " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1889         lib_arch.name, arch_array[running_arch_index].name);
1890     } else {
1891       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1892       " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1893         lib_arch.code,
1894         arch_array[running_arch_index].name);
1895     }
1896   }
1897 
1898   return NULL;
1899 }
1900 
1901 void* os::dll_lookup(void* handle, const char* name) {
1902   return dlsym(handle, name);
1903 }
1904 
1905 void* os::get_default_process_handle() {
1906   return (void*)::dlopen(NULL, RTLD_LAZY);
1907 }
1908 
1909 int os::stat(const char *path, struct stat *sbuf) {
1910   char pathbuf[MAX_PATH];
1911   if (strlen(path) > MAX_PATH - 1) {
1912     errno = ENAMETOOLONG;
1913     return -1;
1914   }
1915   os::native_path(strcpy(pathbuf, path));
1916   return ::stat(pathbuf, sbuf);
1917 }
1918 
1919 static bool _print_ascii_file(const char* filename, outputStream* st) {
1920   int fd = ::open(filename, O_RDONLY);
1921   if (fd == -1) {
1922      return false;
1923   }
1924 
1925   char buf[32];
1926   int bytes;
1927   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
1928     st->print_raw(buf, bytes);
1929   }
1930 
1931   ::close(fd);
1932 
1933   return true;
1934 }
1935 
1936 void os::print_os_info_brief(outputStream* st) {
1937   os::Solaris::print_distro_info(st);
1938 
1939   os::Posix::print_uname_info(st);
1940 
1941   os::Solaris::print_libversion_info(st);
1942 }
1943 
1944 void os::print_os_info(outputStream* st) {
1945   st->print("OS:");
1946 
1947   os::Solaris::print_distro_info(st);
1948 
1949   os::Posix::print_uname_info(st);
1950 
1951   os::Solaris::print_libversion_info(st);
1952 
1953   os::Posix::print_rlimit_info(st);
1954 
1955   os::Posix::print_load_average(st);
1956 }
1957 
1958 void os::Solaris::print_distro_info(outputStream* st) {
1959   if (!_print_ascii_file("/etc/release", st)) {
1960     st->print("Solaris");
1961   }
1962   st->cr();
1963 }
1964 
1965 void os::Solaris::print_libversion_info(outputStream* st) {
1966   st->print("  (T2 libthread)");
1967   st->cr();
1968 }
1969 
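     // Scan /proc/self/map and warn if anything is mapped at virtual address 0;
     // such a mapping can hide null pointer dereferences from the usual
     // SIGSEGV-based detection. Returns true if a zero-address mapping was found.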
1970 static bool check_addr0(outputStream* st) {
1971   jboolean status = false;
1972   int fd = ::open("/proc/self/map",O_RDONLY);
1973   if (fd >= 0) {
1974     prmap_t p;
1975     while (::read(fd, &p, sizeof(p)) > 0) {
1976       if (p.pr_vaddr == 0x0) {
1977         st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname);
1978         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
1979         st->print("Access:");
1980         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
1981         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
1982         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
1983         st->cr();
1984         status = true;
1985       }
1986     }
1987     ::close(fd);
1988   }
1989   return status;
1990 }
1991 
1992 void os::pd_print_cpu_info(outputStream* st) {
1993   // Nothing to do for now.
1994 }
1995 
1996 void os::print_memory_info(outputStream* st) {
1997   st->print("Memory:");
1998   st->print(" %dk page", os::vm_page_size()>>10);
1999   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
2000   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
2001   st->cr();
2002   (void) check_addr0(st);
2003 }
2004 
2005 void os::print_siginfo(outputStream* st, void* siginfo) {
2006   const siginfo_t* si = (const siginfo_t*)siginfo;
2007 
2008   os::Posix::print_siginfo_brief(st, si);
2009 
2010   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2011       UseSharedSpaces) {
2012     FileMapInfo* mapinfo = FileMapInfo::current_info();
2013     if (mapinfo->is_in_shared_space(si->si_addr)) {
2014       st->print("\n\nError accessing class data sharing archive."   \
2015                 " Mapped file inaccessible during execution, "      \
2016                 " possible disk/network problem.");
2017     }
2018   }
2019   st->cr();
2020 }
2021 
2022 // Moved here from the signal handling code because we need them for
2023 // diagnostic prints.
2024 #define OLDMAXSIGNUM 32
2025 static int Maxsignum = 0;
2026 static int *ourSigFlags = NULL;
2027 
2028 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2029 
2030 int os::Solaris::get_our_sigflags(int sig) {
2031   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2032   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2033   return ourSigFlags[sig];
2034 }
2035 
2036 void os::Solaris::set_our_sigflags(int sig, int flags) {
2037   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2038   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2039   ourSigFlags[sig] = flags;
2040 }
2041 
2042 
2043 static const char* get_signal_handler_name(address handler,
2044                                            char* buf, int buflen) {
2045   int offset;
2046   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2047   if (found) {
2048     // skip directory names
2049     const char *p1, *p2;
2050     p1 = buf;
2051     size_t len = strlen(os::file_separator());
2052     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2053     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2054   } else {
2055     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2056   }
2057   return buf;
2058 }
2059 
2060 static void print_signal_handler(outputStream* st, int sig,
2061                                   char* buf, size_t buflen) {
2062   struct sigaction sa;
2063 
2064   sigaction(sig, NULL, &sa);
2065 
2066   st->print("%s: ", os::exception_name(sig, buf, buflen));
2067 
2068   address handler = (sa.sa_flags & SA_SIGINFO)
2069                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2070                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
2071 
2072   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2073     st->print("SIG_DFL");
2074   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2075     st->print("SIG_IGN");
2076   } else {
2077     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2078   }
2079 
2080   st->print(", sa_mask[0]=");
2081   os::Posix::print_signal_set_short(st, &sa.sa_mask);
2082 
2083   address rh = VMError::get_resetted_sighandler(sig);
2084   // Maybe the handler was reset by VMError?
2085   if (rh != NULL) {
2086     handler = rh;
2087     sa.sa_flags = VMError::get_resetted_sigflags(sig);
2088   }
2089 
2090   st->print(", sa_flags=");
2091   os::Posix::print_sa_flags(st, sa.sa_flags);
2092 
2093   // Check: is it our handler?
2094   if (handler == CAST_FROM_FN_PTR(address, signalHandler) ||
2095      handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
2096     // It is our signal handler
2097     // check for flags
2098     if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
2099       st->print(
2100         ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
2101         os::Solaris::get_our_sigflags(sig));
2102     }
2103   }
2104   st->cr();
2105 }
2106 
2107 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2108   st->print_cr("Signal Handlers:");
2109   print_signal_handler(st, SIGSEGV, buf, buflen);
2110   print_signal_handler(st, SIGBUS , buf, buflen);
2111   print_signal_handler(st, SIGFPE , buf, buflen);
2112   print_signal_handler(st, SIGPIPE, buf, buflen);
2113   print_signal_handler(st, SIGXFSZ, buf, buflen);
2114   print_signal_handler(st, SIGILL , buf, buflen);
2115   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2116   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2117   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2118   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2119   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2120   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2121   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2122   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2123 }
2124 
2125 static char saved_jvm_path[MAXPATHLEN] = { 0 };
2126 
2127 // Find the full path to the current module, libjvm.so
2128 void os::jvm_path(char *buf, jint buflen) {
2129   // Error checking.
2130   if (buflen < MAXPATHLEN) {
2131     assert(false, "must use a large-enough buffer");
2132     buf[0] = '\0';
2133     return;
2134   }
2135   // Lazy resolve the path to current module.
2136   if (saved_jvm_path[0] != 0) {
2137     strcpy(buf, saved_jvm_path);
2138     return;
2139   }
2140 
2141   Dl_info dlinfo;
2142   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2143   assert(ret != 0, "cannot locate libjvm");
2144   if (ret != 0 && dlinfo.dli_fname != NULL) {
2145     realpath((char *)dlinfo.dli_fname, buf);
2146   } else {
2147     buf[0] = '\0';
2148     return;
2149   }
2150 
2151   if (Arguments::sun_java_launcher_is_altjvm()) {
2152     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
2153     // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
2154     // If "/jre/lib/" appears at the right place in the string, then
2155     // assume we are installed in a JDK and we're done.  Otherwise, check
2156     // for a JAVA_HOME environment variable and fix up the path so it
2157     // looks like libjvm.so is installed there (append a fake suffix
2158     // hotspot/libjvm.so).
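         // Walk back over the last five path components
         // (".../jre/lib/<arch>/<vmtype>/libjvm.so") so that p should point at
         // "/jre/lib/" if this is a JDK layout.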
2159     const char *p = buf + strlen(buf) - 1;
2160     for (int count = 0; p > buf && count < 5; ++count) {
2161       for (--p; p > buf && *p != '/'; --p)
2162         /* empty */ ;
2163     }
2164 
2165     if (strncmp(p, "/jre/lib/", 9) != 0) {
2166       // Look for JAVA_HOME in the environment.
2167       char* java_home_var = ::getenv("JAVA_HOME");
2168       if (java_home_var != NULL && java_home_var[0] != 0) {
2169         char cpu_arch[12];
2170         char* jrelib_p;
2171         int   len;
2172         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2173 #ifdef _LP64
2174         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2175         if (strcmp(cpu_arch, "sparc") == 0) {
2176           strcat(cpu_arch, "v9");
2177         } else if (strcmp(cpu_arch, "i386") == 0) {
2178           strcpy(cpu_arch, "amd64");
2179         }
2180 #endif
2181         // Check the current module name "libjvm.so".
2182         p = strrchr(buf, '/');
2183         assert(strstr(p, "/libjvm") == p, "invalid library name");
2184 
2185         realpath(java_home_var, buf);
2186         // determine if this is a legacy image or modules image
2187         // modules image doesn't have "jre" subdirectory
2188         len = strlen(buf);
2189         jrelib_p = buf + len;
2190         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2191         if (0 != access(buf, F_OK)) {
2192           snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2193         }
2194 
2195         if (0 == access(buf, F_OK)) {
2196           // Use current module name "libjvm.so"
2197           len = strlen(buf);
2198           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2199         } else {
2200           // Go back to path of .so
2201           realpath((char *)dlinfo.dli_fname, buf);
2202         }
2203       }
2204     }
2205   }
2206 
2207   strcpy(saved_jvm_path, buf);
2208 }
2209 
2210 
2211 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2212   // no prefix required, not even "_"
2213 }
2214 
2215 
2216 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2217   // no suffix required
2218 }
2219 
2220 // This method is a copy of JDK's sysGetLastErrorString
2221 // from src/solaris/hpi/src/system_md.c
2222 
2223 size_t os::lasterror(char *buf, size_t len) {
2224 
2225   if (errno == 0)  return 0;
2226 
2227   const char *s = ::strerror(errno);
2228   size_t n = ::strlen(s);
2229   if (n >= len) {
2230     n = len - 1;
2231   }
2232   ::strncpy(buf, s, n);
2233   buf[n] = '\0';
2234   return n;
2235 }
2236 
2237 
2238 // sun.misc.Signal
2239 
2240 extern "C" {
2241   static void UserHandler(int sig, void *siginfo, void *context) {
2242     // Ctrl-C is pressed during error reporting, likely because the error
2243     // handler fails to abort. Let VM die immediately.
2244     if (sig == SIGINT && is_error_reported()) {
2245        os::die();
2246     }
2247 
2248     os::signal_notify(sig);
2249     // We do not need to reinstate the signal handler each time...
2250   }
2251 }
2252 
2253 void* os::user_handler() {
2254   return CAST_FROM_FN_PTR(void*, UserHandler);
2255 }
2256 
2257 class Semaphore : public StackObj {
2258   public:
2259     Semaphore();
2260     ~Semaphore();
2261     void signal();
2262     void wait();
2263     bool trywait();
2264     bool timedwait(unsigned int sec, int nsec);
2265   private:
2266     sema_t _semaphore;
2267 };
2268 
2269 
2270 Semaphore::Semaphore() {
2271   sema_init(&_semaphore, 0, NULL, NULL);
2272 }
2273 
2274 Semaphore::~Semaphore() {
2275   sema_destroy(&_semaphore);
2276 }
2277 
2278 void Semaphore::signal() {
2279   sema_post(&_semaphore);
2280 }
2281 
2282 void Semaphore::wait() {
2283   sema_wait(&_semaphore);
2284 }
2285 
2286 bool Semaphore::trywait() {
2287   return sema_trywait(&_semaphore) == 0;
2288 }
2289 
2290 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2291   struct timespec ts;
2292   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2293 
2294   while (1) {
2295     int result = sema_timedwait(&_semaphore, &ts);
2296     if (result == 0) {
2297       return true;
2298     } else if (errno == EINTR) {
2299       continue;
2300     } else if (errno == ETIME) {
2301       return false;
2302     } else {
2303       return false;
2304     }
2305   }
2306 }
2307 
2308 extern "C" {
2309   typedef void (*sa_handler_t)(int);
2310   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2311 }
2312 
2313 void* os::signal(int signal_number, void* handler) {
2314   struct sigaction sigAct, oldSigAct;
2315   sigfillset(&(sigAct.sa_mask));
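       // Restart interrupted system calls; SA_RESETHAND is masked out so the
       // handler stays installed after delivery.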
2316   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2317   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2318 
2319   if (sigaction(signal_number, &sigAct, &oldSigAct))
2320     // -1 means registration failed
2321     return (void *)-1;
2322 
2323   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2324 }
2325 
2326 void os::signal_raise(int signal_number) {
2327   raise(signal_number);
2328 }
2329 
2330 /*
2331  * The following code is moved from os.cpp for making this
2332  * code platform specific, which it is by its very nature.
2333  */
2334 
2335 // a counter for each possible signal value
2336 static int Sigexit = 0;
2337 static int Maxlibjsigsigs;
2338 static jint *pending_signals = NULL;
2339 static int *preinstalled_sigs = NULL;
2340 static struct sigaction *chainedsigactions = NULL;
2341 static sema_t sig_sem;
2342 typedef int (*version_getting_t)();
2343 version_getting_t os::Solaris::get_libjsig_version = NULL;
2344 static int libjsigversion = 0;
2345 
2346 int os::sigexitnum_pd() {
2347   assert(Sigexit > 0, "signal memory not yet initialized");
2348   return Sigexit;
2349 }
2350 
2351 void os::Solaris::init_signal_mem() {
2352   // Initialize signal structures
2353   Maxsignum = SIGRTMAX;
2354   Sigexit = Maxsignum+1;
2355   assert(Maxsignum >0, "Unable to obtain max signal number");
2356 
2357   Maxlibjsigsigs = Maxsignum;
2358 
2359   // pending_signals has one int per signal
2360   // The additional signal is for SIGEXIT - exit signal to signal_thread
2361   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2362   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2363 
2364   if (UseSignalChaining) {
2365      chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
2366        * (Maxsignum + 1), mtInternal);
2367      memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2368      preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2369      memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2370   }
2371   ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ), mtInternal);
2372   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2373 }
2374 
2375 void os::signal_init_pd() {
2376   int ret;
2377 
2378   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2379   assert(ret == 0, "sema_init() failed");
2380 }
2381 
2382 void os::signal_notify(int signal_number) {
2383   int ret;
2384 
2385   Atomic::inc(&pending_signals[signal_number]);
2386   ret = ::sema_post(&sig_sem);
2387   assert(ret == 0, "sema_post() failed");
2388 }
2389 
2390 static int check_pending_signals(bool wait_for_signal) {
2391   int ret;
2392   while (true) {
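         // Atomically claim one pending occurrence of signal i, if any:
         // decrement pending_signals[i] from n to n-1 only if it still equals n.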
2393     for (int i = 0; i < Sigexit + 1; i++) {
2394       jint n = pending_signals[i];
2395       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2396         return i;
2397       }
2398     }
2399     if (!wait_for_signal) {
2400       return -1;
2401     }
2402     JavaThread *thread = JavaThread::current();
2403     ThreadBlockInVM tbivm(thread);
2404 
2405     bool threadIsSuspended;
2406     do {
2407       thread->set_suspend_equivalent();
2408       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2409       while ((ret = ::sema_wait(&sig_sem)) == EINTR)
2410           ;
2411       assert(ret == 0, "sema_wait() failed");
2412 
2413       // were we externally suspended while we were waiting?
2414       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2415       if (threadIsSuspended) {
2416         //
2417         // The semaphore has been incremented, but while we were waiting
2418         // another thread suspended us. We don't want to continue running
2419         // while suspended because that would surprise the thread that
2420         // suspended us.
2421         //
2422         ret = ::sema_post(&sig_sem);
2423         assert(ret == 0, "sema_post() failed");
2424 
2425         thread->java_suspend_self();
2426       }
2427     } while (threadIsSuspended);
2428   }
2429 }
2430 
2431 int os::signal_lookup() {
2432   return check_pending_signals(false);
2433 }
2434 
2435 int os::signal_wait() {
2436   return check_pending_signals(true);
2437 }
2438 
2439 ////////////////////////////////////////////////////////////////////////////////
2440 // Virtual Memory
2441 
2442 static int page_size = -1;
2443 
2444 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2445 // clear this var if support is not available.
2446 static bool has_map_align = true;
2447 
2448 int os::vm_page_size() {
2449   assert(page_size != -1, "must call os::init");
2450   return page_size;
2451 }
2452 
2453 // Solaris allocates memory by pages.
2454 int os::vm_allocation_granularity() {
2455   assert(page_size != -1, "must call os::init");
2456   return page_size;
2457 }
2458 
2459 static bool recoverable_mmap_error(int err) {
2460   // See if the error is one we can let the caller handle. This
2461   // list of errno values comes from the Solaris mmap(2) man page.
2462   switch (err) {
2463   case EBADF:
2464   case EINVAL:
2465   case ENOTSUP:
2466     // let the caller deal with these errors
2467     return true;
2468 
2469   default:
2470     // Any remaining errors on this OS can cause our reserved mapping
2471     // to be lost. That can cause confusion where different data
2472     // structures think they have the same memory mapped. The worst
2473     // scenario is if both the VM and a library think they have the
2474     // same memory mapped.
2475     return false;
2476   }
2477 }
2478 
2479 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2480                                     int err) {
2481   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2482           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2483           strerror(err), err);
2484 }
2485 
2486 static void warn_fail_commit_memory(char* addr, size_t bytes,
2487                                     size_t alignment_hint, bool exec,
2488                                     int err) {
2489   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2490           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2491           alignment_hint, exec, strerror(err), err);
2492 }
2493 
2494 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2495   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2496   size_t size = bytes;
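       // Commit by re-mapping the range MAP_FIXED over /dev/zero with the
       // requested protection; reserved pages were mapped PROT_NONE/MAP_NORESERVE.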
2497   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2498   if (res != NULL) {
2499     if (UseNUMAInterleaving) {
2500       numa_make_global(addr, bytes);
2501     }
2502     return 0;
2503   }
2504 
2505   int err = errno;  // save errno from mmap() call in mmap_chunk()
2506 
2507   if (!recoverable_mmap_error(err)) {
2508     warn_fail_commit_memory(addr, bytes, exec, err);
2509     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2510   }
2511 
2512   return err;
2513 }
2514 
2515 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2516   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2517 }
2518 
2519 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2520                                   const char* mesg) {
2521   assert(mesg != NULL, "mesg must be specified");
2522   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2523   if (err != 0) {
2524     // the caller wants all commit errors to exit with the specified mesg:
2525     warn_fail_commit_memory(addr, bytes, exec, err);
2526     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2527   }
2528 }
2529 
2530 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2531                                     size_t alignment_hint, bool exec) {
2532   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2533   if (err == 0) {
2534     if (UseLargePages && (alignment_hint > (size_t)vm_page_size())) {
2535       // If the large page size has been set and the VM
2536       // is using large pages, use the large page size
2537       // if it is smaller than the alignment hint. This is
2538       // a case where the VM wants to use a larger alignment size
2539       // for its own reasons but still wants to use large pages
2540       // (which is what matters for setting the MPSS range).
2541       size_t page_size = 0;
2542       if (large_page_size() < alignment_hint) {
2543         assert(UseLargePages, "Expected to be here for large page use only");
2544         page_size = large_page_size();
2545       } else {
2546         // If the alignment hint is less than the large page
2547         // size, the VM wants a particular alignment (thus the hint)
2548         // for internal reasons.  Try to set the mpss range using
2549         // the alignment_hint.
2550         page_size = alignment_hint;
2551       }
2552       // Since this is a hint, ignore any failures.
2553       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2554     }
2555   }
2556   return err;
2557 }
2558 
2559 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2560                           bool exec) {
2561   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2562 }
2563 
2564 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2565                                   size_t alignment_hint, bool exec,
2566                                   const char* mesg) {
2567   assert(mesg != NULL, "mesg must be specified");
2568   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2569   if (err != 0) {
2570     // the caller wants all commit errors to exit with the specified mesg:
2571     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2572     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2573   }
2574 }
2575 
2576 // Uncommit the pages in a specified region.
2577 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2578   if (madvise(addr, bytes, MADV_FREE) < 0) {
2579     debug_only(warning("MADV_FREE failed."));
2580     return;
2581   }
2582 }
2583 
2584 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2585   return os::commit_memory(addr, size, !ExecMem);
2586 }
2587 
2588 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2589   return os::uncommit_memory(addr, size);
2590 }
2591 
2592 // Change the page size in a given range.
2593 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2594   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2595   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2596   if (UseLargePages) {
2597     Solaris::setup_large_pages(addr, bytes, alignment_hint);
2598   }
2599 }
2600 
2601 // Tell the OS to make the range local to the first-touching LWP
2602 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2603   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2604   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2605     debug_only(warning("MADV_ACCESS_LWP failed."));
2606   }
2607 }
2608 
2609 // Tell the OS that this range would be accessed from different LWPs.
2610 void os::numa_make_global(char *addr, size_t bytes) {
2611   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2612   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2613     debug_only(warning("MADV_ACCESS_MANY failed."));
2614   }
2615 }
2616 
2617 // Get the number of the locality groups.
2618 size_t os::numa_get_groups_num() {
2619   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2620   return n != -1 ? n : 1;
2621 }
2622 
2623 // Get a list of leaf locality groups. A leaf lgroup is a group that
2624 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
2625 // board. An LWP is assigned to one of these groups upon creation.
2626 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2627    if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2628      ids[0] = 0;
2629      return 1;
2630    }
2631    int result_size = 0, top = 1, bottom = 0, cur = 0;
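        // Breadth-first walk of the lgroup hierarchy, using ids[] as the work
        // queue: children of ids[cur] are appended at ids[top], and leaf groups
        // that have memory are compacted to the front of the array at ids[bottom].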
2632    for (int k = 0; k < size; k++) {
2633      int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2634                                     (Solaris::lgrp_id_t*)&ids[top], size - top);
2635      if (r == -1) {
2636        ids[0] = 0;
2637        return 1;
2638      }
2639      if (!r) {
2640        // That's a leaf node.
2641        assert(bottom <= cur, "Sanity check");
2642        // Check if the node has memory
2643        if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2644                                    NULL, 0, LGRP_RSRC_MEM) > 0) {
2645          ids[bottom++] = ids[cur];
2646        }
2647      }
2648      top += r;
2649      cur++;
2650    }
2651    if (bottom == 0) {
2652      // Handle the situation when the OS reports no memory available.
2653      // Assume a UMA architecture.
2654      ids[0] = 0;
2655      return 1;
2656    }
2657    return bottom;
2658 }
2659 
2660 // Detect the topology change. Typically happens during CPU plugging-unplugging.
2661 bool os::numa_topology_changed() {
2662   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2663   if (is_stale != -1 && is_stale) {
2664     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2665     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2666     assert(c != 0, "Failure to initialize LGRP API");
2667     Solaris::set_lgrp_cookie(c);
2668     return true;
2669   }
2670   return false;
2671 }
2672 
2673 // Get the group id of the current LWP.
2674 int os::numa_get_group_id() {
2675   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2676   if (lgrp_id == -1) {
2677     return 0;
2678   }
2679   const int size = os::numa_get_groups_num();
2680   int *ids = (int*)alloca(size * sizeof(int));
2681 
2682   // Get the ids of all lgroups with memory; r is the count.
2683   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2684                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2685   if (r <= 0) {
2686     return 0;
2687   }
2688   return ids[os::random() % r];
2689 }
2690 
2691 // Request information about the page.
2692 bool os::get_page_info(char *start, page_info* info) {
2693   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2694   uint64_t addr = (uintptr_t)start;
2695   uint64_t outdata[2];
2696   uint_t validity = 0;
2697 
2698   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2699     return false;
2700   }
2701 
2702   info->size = 0;
2703   info->lgrp_id = -1;
2704 
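       // Bit 0 of validity covers the address itself; bits 1 and 2 correspond to
       // the two requested info_types (lgroup id and page size), in that order.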
2705   if ((validity & 1) != 0) {
2706     if ((validity & 2) != 0) {
2707       info->lgrp_id = outdata[0];
2708     }
2709     if ((validity & 4) != 0) {
2710       info->size = outdata[1];
2711     }
2712     return true;
2713   }
2714   return false;
2715 }
2716 
2717 // Scan the pages from start to end until a page different than
2718 // the one described in the info parameter is encountered.
2719 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2720   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2721   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2722   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2723   uint_t validity[MAX_MEMINFO_CNT];
2724 
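       // Query the kernel in batches of up to MAX_MEMINFO_CNT addresses per
       // meminfo() call, stepping through [start, end) by the expected page size.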
2725   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2726   uint64_t p = (uint64_t)start;
2727   while (p < (uint64_t)end) {
2728     addrs[0] = p;
2729     size_t addrs_count = 1;
2730     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2731       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2732       addrs_count++;
2733     }
2734 
2735     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2736       return NULL;
2737     }
2738 
2739     size_t i = 0;
2740     for (; i < addrs_count; i++) {
2741       if ((validity[i] & 1) != 0) {
2742         if ((validity[i] & 4) != 0) {
2743           if (outdata[types * i + 1] != page_expected->size) {
2744             break;
2745           }
2746         } else if (page_expected->size != 0) {
2747           break;
2748         }
2750 
2751         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2752           if (outdata[types * i] != page_expected->lgrp_id) {
2753             break;
2754           }
2755         }
2756       } else {
2757         return NULL;
2758       }
2759     }
2760 
2761     if (i < addrs_count) {
2762       if ((validity[i] & 2) != 0) {
2763         page_found->lgrp_id = outdata[types * i];
2764       } else {
2765         page_found->lgrp_id = -1;
2766       }
2767       if ((validity[i] & 4) != 0) {
2768         page_found->size = outdata[types * i + 1];
2769       } else {
2770         page_found->size = 0;
2771       }
2772       return (char*)addrs[i];
2773     }
2774 
2775     p = addrs[addrs_count - 1] + page_size;
2776   }
2777   return end;
2778 }
2779 
2780 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2781   size_t size = bytes;
2782   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2783   // uncommitted page. Otherwise, the read/write might succeed if we
2784   // have enough swap space to back the physical page.
2785   return
2786     NULL != Solaris::mmap_chunk(addr, size,
2787                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2788                                 PROT_NONE);
2789 }
2790 
2791 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2792   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2793 
2794   if (b == MAP_FAILED) {
2795     return NULL;
2796   }
2797   return b;
2798 }
2799 
2800 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
2801   char* addr = requested_addr;
2802   int flags = MAP_PRIVATE | MAP_NORESERVE;
2803 
2804   assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
2805 
2806   if (fixed) {
2807     flags |= MAP_FIXED;
2808   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
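         // With MAP_ALIGN, the address argument to mmap() carries the requested
         // alignment instead of a fixed address.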
2809     flags |= MAP_ALIGN;
2810     addr = (char*) alignment_hint;
2811   }
2812 
2813   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2814   // uncommitted page. Otherwise, the read/write might succeed if we
2815   // have enough swap space to back the physical page.
2816   return mmap_chunk(addr, bytes, flags, PROT_NONE);
2817 }
2818 
2819 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2820   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
2821 
2822   guarantee(requested_addr == NULL || requested_addr == addr,
2823             "OS failed to return requested mmap address.");
2824   return addr;
2825 }
2826 
2827 // Reserve memory at an arbitrary address, only if that area is
2828 // available (and not reserved for something else).
2829 
2830 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2831   const int max_tries = 10;
2832   char* base[max_tries];
2833   size_t size[max_tries];
2834 
2835   // Solaris adds a gap between mmap'ed regions.  The size of the gap
2836   // is dependent on the requested size and the MMU.  Our initial gap
2837   // value here is just a guess and will be corrected later.
2838   bool had_top_overlap = false;
2839   bool have_adjusted_gap = false;
2840   size_t gap = 0x400000;
2841 
2842   // Assert only that the size is a multiple of the page size, since
2843   // that's all that mmap requires, and since that's all we really know
2844   // about at this low abstraction level.  If we need higher alignment,
2845   // we can either pass an alignment to this method or verify alignment
2846   // in one of the methods further up the call chain.  See bug 5044738.
2847   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2848 
2849   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2850   // Give it a try, if the kernel honors the hint we can return immediately.
2851   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2852 
2853   volatile int err = errno;
2854   if (addr == requested_addr) {
2855     return addr;
2856   } else if (addr != NULL) {
2857     pd_unmap_memory(addr, bytes);
2858   }
2859 
2860   if (PrintMiscellaneous && Verbose) {
2861     char buf[256];
2862     buf[0] = '\0';
2863     if (addr == NULL) {
2864       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
2865     }
2866     warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2867             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2868             "%s", bytes, requested_addr, addr, buf);
2869   }
2870 
2871   // Address hint method didn't work.  Fall back to the old method.
2872   // In theory, once SNV becomes our oldest supported platform, this
2873   // code will no longer be needed.
2874   //
2875   // Repeatedly allocate blocks until the block is allocated at the
2876   // right spot. Give up after max_tries.
2877   int i;
2878   for (i = 0; i < max_tries; ++i) {
2879     base[i] = reserve_memory(bytes);
2880 
2881     if (base[i] != NULL) {
2882       // Is this the block we wanted?
2883       if (base[i] == requested_addr) {
2884         size[i] = bytes;
2885         break;
2886       }
2887 
2888       // check that the gap value is right
2889       if (had_top_overlap && !have_adjusted_gap) {
2890         size_t actual_gap = base[i-1] - base[i] - bytes;
2891         if (gap != actual_gap) {
2892           // adjust the gap value and retry the last 2 allocations
2893           assert(i > 0, "gap adjustment code problem");
2894           have_adjusted_gap = true;  // adjust the gap only once, just in case
2895           gap = actual_gap;
2896           if (PrintMiscellaneous && Verbose) {
2897             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2898           }
2899           unmap_memory(base[i], bytes);
2900           unmap_memory(base[i-1], size[i-1]);
2901           i-=2;
2902           continue;
2903         }
2904       }
2905 
2906       // Does this overlap the block we wanted? Give back the overlapped
2907       // parts and try again.
2908       //
2909       // There is still a bug in this code: if top_overlap == bytes,
2910       // the overlap is offset from requested region by the value of gap.
2911       // In this case giving back the overlapped part will not work,
2912       // because we'll give back the entire block at base[i] and
2913       // therefore the subsequent allocation will not generate a new gap.
2914       // This could be fixed with a new algorithm that used larger
2915       // or variable size chunks to find the requested region -
2916       // but such a change would introduce additional complications.
2917       // It's rare enough that the planets align for this bug,
2918       // so we'll just wait for a fix for 6204603/5003415 which
2919       // will provide a mmap flag to allow us to avoid this business.
2920 
2921       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2922       if (top_overlap >= 0 && top_overlap < bytes) {
2923         had_top_overlap = true;
2924         unmap_memory(base[i], top_overlap);
2925         base[i] += top_overlap;
2926         size[i] = bytes - top_overlap;
2927       } else {
2928         size_t bottom_overlap = base[i] + bytes - requested_addr;
2929         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2930           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
2931             warning("attempt_reserve_memory_at: possible alignment bug");
2932           }
2933           unmap_memory(requested_addr, bottom_overlap);
2934           size[i] = bytes - bottom_overlap;
2935         } else {
2936           size[i] = bytes;
2937         }
2938       }
2939     }
2940   }
2941 
2942   // Give back the unused reserved pieces.
2943 
2944   for (int j = 0; j < i; ++j) {
2945     if (base[j] != NULL) {
2946       unmap_memory(base[j], size[j]);
2947     }
2948   }
2949 
2950   return (i < max_tries) ? requested_addr : NULL;
2951 }
2952 
2953 bool os::pd_release_memory(char* addr, size_t bytes) {
2954   size_t size = bytes;
2955   return munmap(addr, size) == 0;
2956 }
2957 
2958 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
2959   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
2960          "addr must be page aligned");
2961   int retVal = mprotect(addr, bytes, prot);
2962   return retVal == 0;
2963 }
2964 
2965 // Protect memory (Used to pass readonly pages through
2966 // JNI GetArray<type>Elements with empty arrays.)
2967 // Also, used for serialization page and for compressed oops null pointer
2968 // checking.
2969 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2970                         bool is_committed) {
2971   unsigned int p = 0;
2972   switch (prot) {
2973   case MEM_PROT_NONE: p = PROT_NONE; break;
2974   case MEM_PROT_READ: p = PROT_READ; break;
2975   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2976   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2977   default:
2978     ShouldNotReachHere();
2979   }
2980   // is_committed is unused.
2981   return solaris_mprotect(addr, bytes, p);
2982 }
2983 
// guard_memory and unguard_memory only happen within stack guard pages.
// Since ISM pertains only to the heap, guard and unguard memory should not
// happen within an ISM region.
2987 bool os::guard_memory(char* addr, size_t bytes) {
2988   return solaris_mprotect(addr, bytes, PROT_NONE);
2989 }
2990 
2991 bool os::unguard_memory(char* addr, size_t bytes) {
2992   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
2993 }
2994 
2995 // Large page support
2996 static size_t _large_page_size = 0;
2997 
2998 // Insertion sort for small arrays (descending order).
2999 static void insertion_sort_descending(size_t* array, int len) {
3000   for (int i = 0; i < len; i++) {
3001     size_t val = array[i];
3002     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
3003       size_t tmp = array[key];
3004       array[key] = array[key - 1];
3005       array[key - 1] = tmp;
3006     }
3007   }
3008 }
3009 
3010 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
3011   const unsigned int usable_count = VM_Version::page_size_count();
3012   if (usable_count == 1) {
3013     return false;
3014   }
3015 
  // Find the right getpagesizes interface.  When Solaris 11 is the minimum
3017   // build platform, getpagesizes() (without the '2') can be called directly.
3018   typedef int (*gps_t)(size_t[], int);
3019   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3020   if (gps_func == NULL) {
3021     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3022     if (gps_func == NULL) {
3023       if (warn) {
3024         warning("MPSS is not supported by the operating system.");
3025       }
3026       return false;
3027     }
3028   }
3029 
3030   // Fill the array of page sizes.
3031   int n = (*gps_func)(_page_sizes, page_sizes_max);
3032   assert(n > 0, "Solaris bug?");
3033 
3034   if (n == page_sizes_max) {
3035     // Add a sentinel value (necessary only if the array was completely filled
3036     // since it is static (zeroed at initialization)).
3037     _page_sizes[--n] = 0;
3038     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3039   }
3040   assert(_page_sizes[n] == 0, "missing sentinel");
3041   trace_page_sizes("available page sizes", _page_sizes, n);
3042 
3043   if (n == 1) return false;     // Only one page size available.
3044 
3045   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3046   // select up to usable_count elements.  First sort the array, find the first
3047   // acceptable value, then copy the usable sizes to the top of the array and
3048   // trim the rest.  Make sure to include the default page size :-).
3049   //
3050   // A better policy could get rid of the 4M limit by taking the sizes of the
3051   // important VM memory regions (java heap and possibly the code cache) into
3052   // account.
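  //
  // Illustrative walk-through (hypothetical values): with usable_count == 3,
  // LargePageSizeInBytes left at its default and getpagesizes() returning
  // { 8K, 64K, 4M, 256M }, the sizes are sorted to { 256M, 4M, 64K, 8K },
  // 256M is skipped (it exceeds the 4M limit), the usable sizes { 4M, 64K }
  // are copied to the front, the default page size (8K) is appended, and a
  // 0 sentinel terminates the array: { 4M, 64K, 8K, 0 }.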
3053   insertion_sort_descending(_page_sizes, n);
3054   const size_t size_limit =
3055     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3056   int beg;
3057   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */;
3058   const int end = MIN2((int)usable_count, n) - 1;
3059   for (int cur = 0; cur < end; ++cur, ++beg) {
3060     _page_sizes[cur] = _page_sizes[beg];
3061   }
3062   _page_sizes[end] = vm_page_size();
3063   _page_sizes[end + 1] = 0;
3064 
3065   if (_page_sizes[end] > _page_sizes[end - 1]) {
3066     // Default page size is not the smallest; sort again.
3067     insertion_sort_descending(_page_sizes, end + 1);
3068   }
3069   *page_size = _page_sizes[0];
3070 
3071   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3072   return true;
3073 }
3074 
3075 void os::large_page_init() {
3076   if (UseLargePages) {
3077     // print a warning if any large page related flag is specified on command line
3078     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
3079                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3080 
3081     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3082   }
3083 }
3084 
3085 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
  // Signal to the OS that we want large pages for addresses
  // in the range [start, start + bytes)
3088   struct memcntl_mha mpss_struct;
3089   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3090   mpss_struct.mha_pagesize = align;
3091   mpss_struct.mha_flags = 0;
3092   // Upon successful completion, memcntl() returns 0
3093   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3094     debug_only(warning("Attempt to use MPSS failed."));
3095     return false;
3096   }
3097   return true;
3098 }
3099 
3100 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3101   fatal("os::reserve_memory_special should not be called on Solaris.");
3102   return NULL;
3103 }
3104 
3105 bool os::release_memory_special(char* base, size_t bytes) {
3106   fatal("os::release_memory_special should not be called on Solaris.");
3107   return false;
3108 }
3109 
3110 size_t os::large_page_size() {
3111   return _large_page_size;
3112 }
3113 
// MPSS allows the application to commit large page memory on demand; with ISM
3115 // the entire memory region must be allocated as shared memory.
3116 bool os::can_commit_large_page_memory() {
3117   return true;
3118 }
3119 
3120 bool os::can_execute_large_page_memory() {
3121   return true;
3122 }
3123 
3124 // Read calls from inside the vm need to perform state transitions
3125 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3126   size_t res;
3127   JavaThread* thread = (JavaThread*)Thread::current();
3128   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
3129   ThreadBlockInVM tbiv(thread);
3130   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3131   return res;
3132 }
3133 
3134 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3135   size_t res;
3136   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
3137           "Assumed _thread_in_native");
3138   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3139   return res;
3140 }
3141 
3142 void os::naked_short_sleep(jlong ms) {
3143   assert(ms < 1000, "Un-interruptable sleep, short time use only");
3144 
3145   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3146   // Solaris requires -lrt for this.
3147   usleep((ms * 1000));
3148 
3149   return;
3150 }
3151 
3152 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3153 void os::infinite_sleep() {
3154   while (true) {    // sleep forever ...
3155     ::sleep(100);   // ... 100 seconds at a time
3156   }
3157 }
3158 
3159 // Used to convert frequent JVM_Yield() to nops
3160 bool os::dont_yield() {
3161   if (DontYieldALot) {
3162     static hrtime_t last_time = 0;
3163     hrtime_t diff = getTimeNanos() - last_time;
3164 
3165     if (diff < DontYieldALotInterval * 1000000)
3166       return true;
3167 
3168     last_time += diff;
3169 
3170     return false;
3171   }
3172   else {
3173     return false;
3174   }
3175 }
3176 
3177 // Caveat: Solaris os::yield() causes a thread-state transition whereas
3178 // the linux and win32 implementations do not.  This should be checked.
3179 
3180 void os::yield() {
3181   // Yields to all threads with same or greater priority
3182   os::sleep(Thread::current(), 0, false);
3183 }
3184 
3185 // Note that yield semantics are defined by the scheduling class to which
3186 // the thread currently belongs.  Typically, yield will _not yield to
3187 // other equal or higher priority threads that reside on the dispatch queues
3188 // of other CPUs.
3189 
3190 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
3191 
// Interface for setting lwp priorities.  If we are using T2 libthread,
// which forces the use of BoundThreads, or we manually set UseBoundThreads,
// all of our threads will be assigned to real lwp's.  Using the thr_setprio
// function is meaningless in this mode, so we must adjust the real lwp's priority.
3196 // The routines below implement the getting and setting of lwp priorities.
3197 //
// Note: T2 is now the only supported libthread.  The UseBoundThreads flag is
//       being deprecated and all threads are now BoundThreads.
3200 //
// Note: There are three priority scales used on Solaris.  Java priorities
//       which range from 1 to 10, the libthread "thr_setprio" scale which ranges
//       from 0 to 127, and the current scheduling class of the process we
//       are running in.  This is typically from -60 to +60.
//       The setting of the lwp priorities is done after a call to thr_setprio
3206 //       so Java priorities are mapped to libthread priorities and we map from
3207 //       the latter to lwp priorities.  We don't keep priorities stored in
3208 //       Java priorities since some of our worker threads want to set priorities
3209 //       higher than all Java threads.
3210 //
3211 // For related information:
3212 // (1)  man -s 2 priocntl
3213 // (2)  man -s 4 priocntl
3214 // (3)  man dispadmin
3215 // =    librt.so
3216 // =    libthread/common/rtsched.c - thrp_setlwpprio().
3217 // =    ps -cL <pid> ... to validate priority.
3218 // =    sched_get_priority_min and _max
3219 //              pthread_create
3220 //              sched_setparam
3221 //              pthread_setschedparam
3222 //
3223 // Assumptions:
3224 // +    We assume that all threads in the process belong to the same
3225 //              scheduling class.   IE. an homogenous process.
3226 // +    Must be root or in IA group to change change "interactive" attribute.
3227 //              Priocntl() will fail silently.  The only indication of failure is when
3228 //              we read-back the value and notice that it hasn't changed.
3229 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3230 // +    For RT, change timeslice as well.  Invariant:
3231 //              constant "priority integral"
3232 //              Konst == TimeSlice * (60-Priority)
3233 //              Given a priority, compute appropriate timeslice.
3234 // +    Higher numerical values have higher priority.
3235 
3236 // sched class attributes
3237 typedef struct {
3238         int   schedPolicy;              // classID
3239         int   maxPrio;
3240         int   minPrio;
3241 } SchedInfo;
3242 
3243 
3244 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3245 
3246 #ifdef ASSERT
3247 static int  ReadBackValidate = 1;
3248 #endif
3249 static int  myClass     = 0;
3250 static int  myMin       = 0;
3251 static int  myMax       = 0;
3252 static int  myCur       = 0;
3253 static bool priocntl_enable = false;
3254 
3255 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
3256 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3257 
3258 
3259 // lwp_priocntl_init
3260 //
3261 // Try to determine the priority scale for our process.
3262 //
3263 // Return errno or 0 if OK.
3264 //
3265 static int lwp_priocntl_init() {
3266   int rslt;
3267   pcinfo_t ClassInfo;
3268   pcparms_t ParmInfo;
3269   int i;
3270 
3271   if (!UseThreadPriorities) return 0;
3272 
3273   // If ThreadPriorityPolicy is 1, switch tables
3274   if (ThreadPriorityPolicy == 1) {
3275     for (i = 0; i < CriticalPriority+1; i++)
3276       os::java_to_os_priority[i] = prio_policy1[i];
3277   }
3278   if (UseCriticalJavaThreadPriority) {
3279     // MaxPriority always maps to the FX scheduling class and criticalPrio.
3280     // See set_native_priority() and set_lwp_class_and_priority().
3281     // Save original MaxPriority mapping in case attempt to
3282     // use critical priority fails.
3283     java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3284     // Set negative to distinguish from other priorities
3285     os::java_to_os_priority[MaxPriority] = -criticalPrio;
3286   }
3287 
3288   // Get IDs for a set of well-known scheduling classes.
  // TODO-FIXME: GETCLINFO returns the current # of classes in
  // the system.  We should have a loop that iterates over the
3291   // classID values, which are known to be "small" integers.
3292 
3293   strcpy(ClassInfo.pc_clname, "TS");
3294   ClassInfo.pc_cid = -1;
3295   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3296   if (rslt < 0) return errno;
3297   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3298   tsLimits.schedPolicy = ClassInfo.pc_cid;
3299   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3300   tsLimits.minPrio = -tsLimits.maxPrio;
3301 
3302   strcpy(ClassInfo.pc_clname, "IA");
3303   ClassInfo.pc_cid = -1;
3304   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3305   if (rslt < 0) return errno;
3306   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3307   iaLimits.schedPolicy = ClassInfo.pc_cid;
3308   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3309   iaLimits.minPrio = -iaLimits.maxPrio;
3310 
3311   strcpy(ClassInfo.pc_clname, "RT");
3312   ClassInfo.pc_cid = -1;
3313   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3314   if (rslt < 0) return errno;
3315   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3316   rtLimits.schedPolicy = ClassInfo.pc_cid;
3317   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3318   rtLimits.minPrio = 0;
3319 
3320   strcpy(ClassInfo.pc_clname, "FX");
3321   ClassInfo.pc_cid = -1;
3322   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3323   if (rslt < 0) return errno;
3324   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3325   fxLimits.schedPolicy = ClassInfo.pc_cid;
3326   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3327   fxLimits.minPrio = 0;
3328 
3329   // Query our "current" scheduling class.
3330   // This will normally be IA, TS or, rarely, FX or RT.
3331   memset(&ParmInfo, 0, sizeof(ParmInfo));
3332   ParmInfo.pc_cid = PC_CLNULL;
3333   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3334   if (rslt < 0) return errno;
3335   myClass = ParmInfo.pc_cid;
3336 
  // We now know our scheduling classId; get specific information
3338   // about the class.
3339   ClassInfo.pc_cid = myClass;
3340   ClassInfo.pc_clname[0] = 0;
3341   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3342   if (rslt < 0) return errno;
3343 
3344   if (ThreadPriorityVerbose) {
3345     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3346   }
3347 
3348   memset(&ParmInfo, 0, sizeof(pcparms_t));
3349   ParmInfo.pc_cid = PC_CLNULL;
3350   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3351   if (rslt < 0) return errno;
3352 
3353   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3354     myMin = rtLimits.minPrio;
3355     myMax = rtLimits.maxPrio;
3356   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3357     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3358     myMin = iaLimits.minPrio;
3359     myMax = iaLimits.maxPrio;
3360     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3361   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3362     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3363     myMin = tsLimits.minPrio;
3364     myMax = tsLimits.maxPrio;
3365     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3366   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3367     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3368     myMin = fxLimits.minPrio;
3369     myMax = fxLimits.maxPrio;
3370     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3371   } else {
3372     // No clue - punt
3373     if (ThreadPriorityVerbose)
3374       tty->print_cr("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
3375     return EINVAL;      // no clue, punt
3376   }
3377 
3378   if (ThreadPriorityVerbose) {
3379     tty->print_cr("Thread priority Range: [%d..%d]\n", myMin, myMax);
3380   }
3381 
3382   priocntl_enable = true;  // Enable changing priorities
3383   return 0;
3384 }
3385 
3386 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3387 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3388 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3389 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3390 
3391 
3392 // scale_to_lwp_priority
3393 //
3394 // Convert from the libthread "thr_setprio" scale to our current
3395 // lwp scheduling class scale.
3396 //
3397 static
3398 int     scale_to_lwp_priority (int rMin, int rMax, int x)
3399 {
3400   int v;
3401 
  if (x == 127) return rMax;            // avoid round-down
  v = (((x*(rMax-rMin)))/128)+rMin;
3404   return v;
3405 }
3406 
3407 
3408 // set_lwp_class_and_priority
3409 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3410                                int newPrio, int new_class, bool scale) {
3411   int rslt;
3412   int Actual, Expected, prv;
3413   pcparms_t ParmInfo;                   // for GET-SET
3414 #ifdef ASSERT
3415   pcparms_t ReadBack;                   // for readback
3416 #endif
3417 
3418   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3419   // Query current values.
3420   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3421   // Cache "pcparms_t" in global ParmCache.
3422   // TODO: elide set-to-same-value
3423 
3424   // If something went wrong on init, don't change priorities.
3425   if (!priocntl_enable) {
3426     if (ThreadPriorityVerbose)
3427       tty->print_cr("Trying to set priority but init failed, ignoring");
3428     return EINVAL;
3429   }
3430 
  // If the lwp hasn't started yet, just return;
  // the _start routine will call us again.
3433   if (lwpid <= 0) {
3434     if (ThreadPriorityVerbose) {
3435       tty->print_cr("deferring the set_lwp_class_and_priority of thread "
3436                      INTPTR_FORMAT " to %d, lwpid not set",
3437                      ThreadID, newPrio);
3438     }
3439     return 0;
3440   }
3441 
3442   if (ThreadPriorityVerbose) {
3443     tty->print_cr ("set_lwp_class_and_priority("
3444                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3445                    ThreadID, lwpid, newPrio);
3446   }
3447 
3448   memset(&ParmInfo, 0, sizeof(pcparms_t));
3449   ParmInfo.pc_cid = PC_CLNULL;
3450   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3451   if (rslt < 0) return errno;
3452 
3453   int cur_class = ParmInfo.pc_cid;
3454   ParmInfo.pc_cid = (id_t)new_class;
3455 
3456   if (new_class == rtLimits.schedPolicy) {
3457     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3458     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3459                                                        rtLimits.maxPrio, newPrio)
3460                                : newPrio;
3461     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3462     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3463     if (ThreadPriorityVerbose) {
3464       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3465     }
3466   } else if (new_class == iaLimits.schedPolicy) {
3467     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3468     int maxClamped     = MIN2(iaLimits.maxPrio,
3469                               cur_class == new_class
3470                                 ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3471     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3472                                                        maxClamped, newPrio)
3473                                : newPrio;
3474     iaInfo->ia_uprilim = cur_class == new_class
3475                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3476     iaInfo->ia_mode    = IA_NOCHANGE;
3477     if (ThreadPriorityVerbose) {
3478       tty->print_cr("IA: [%d...%d] %d->%d\n",
3479                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3480     }
3481   } else if (new_class == tsLimits.schedPolicy) {
3482     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3483     int maxClamped     = MIN2(tsLimits.maxPrio,
3484                               cur_class == new_class
3485                                 ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3486     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3487                                                        maxClamped, newPrio)
3488                                : newPrio;
3489     tsInfo->ts_uprilim = cur_class == new_class
3490                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3491     if (ThreadPriorityVerbose) {
3492       tty->print_cr("TS: [%d...%d] %d->%d\n",
3493                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3494     }
3495   } else if (new_class == fxLimits.schedPolicy) {
3496     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3497     int maxClamped     = MIN2(fxLimits.maxPrio,
3498                               cur_class == new_class
3499                                 ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3500     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3501                                                        maxClamped, newPrio)
3502                                : newPrio;
3503     fxInfo->fx_uprilim = cur_class == new_class
3504                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3505     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3506     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3507     if (ThreadPriorityVerbose) {
3508       tty->print_cr("FX: [%d...%d] %d->%d\n",
3509                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3510     }
3511   } else {
3512     if (ThreadPriorityVerbose) {
3513       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3514     }
3515     return EINVAL;    // no clue, punt
3516   }
3517 
3518   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3519   if (ThreadPriorityVerbose && rslt) {
3520     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3521   }
3522   if (rslt < 0) return errno;
3523 
3524 #ifdef ASSERT
3525   // Sanity check: read back what we just attempted to set.
3526   // In theory it could have changed in the interim ...
3527   //
3528   // The priocntl system call is tricky.
3529   // Sometimes it'll validate the priority value argument and
3530   // return EINVAL if unhappy.  At other times it fails silently.
3531   // Readbacks are prudent.
3532 
3533   if (!ReadBackValidate) return 0;
3534 
3535   memset(&ReadBack, 0, sizeof(pcparms_t));
3536   ReadBack.pc_cid = PC_CLNULL;
3537   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3538   assert(rslt >= 0, "priocntl failed");
3539   Actual = Expected = 0xBAD;
3540   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3541   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3542     Actual   = RTPRI(ReadBack)->rt_pri;
3543     Expected = RTPRI(ParmInfo)->rt_pri;
3544   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3545     Actual   = IAPRI(ReadBack)->ia_upri;
3546     Expected = IAPRI(ParmInfo)->ia_upri;
3547   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3548     Actual   = TSPRI(ReadBack)->ts_upri;
3549     Expected = TSPRI(ParmInfo)->ts_upri;
3550   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3551     Actual   = FXPRI(ReadBack)->fx_upri;
3552     Expected = FXPRI(ParmInfo)->fx_upri;
3553   } else {
3554     if (ThreadPriorityVerbose) {
3555       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3556                     ParmInfo.pc_cid);
3557     }
3558   }
3559 
3560   if (Actual != Expected) {
3561     if (ThreadPriorityVerbose) {
3562       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3563                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3564     }
3565   }
3566 #endif
3567 
3568   return 0;
3569 }
3570 
3571 // Solaris only gives access to 128 real priorities at a time,
3572 // so we expand Java's ten to fill this range.  This would be better
3573 // if we dynamically adjusted relative priorities.
3574 //
3575 // The ThreadPriorityPolicy option allows us to select 2 different
3576 // priority scales.
3577 //
3578 // ThreadPriorityPolicy=0
// Since the Solaris default priority is MaximumPriority, we do not
3580 // set a priority lower than Max unless a priority lower than
3581 // NormPriority is requested.
3582 //
3583 // ThreadPriorityPolicy=1
3584 // This mode causes the priority table to get filled with
// linear values.  NormPriority gets mapped to 50% of the
// maximum priority and so on.  This will cause VM threads
3587 // to get unfair treatment against other Solaris processes
3588 // which do not explicitly alter their thread priorities.
3589 //
3590 
3591 int os::java_to_os_priority[CriticalPriority + 1] = {
3592   -99999,         // 0 Entry should never be used
3593 
3594   0,              // 1 MinPriority
3595   32,             // 2
3596   64,             // 3
3597 
3598   96,             // 4
3599   127,            // 5 NormPriority
3600   127,            // 6
3601 
3602   127,            // 7
3603   127,            // 8
3604   127,            // 9 NearMaxPriority
3605 
3606   127,            // 10 MaxPriority
3607 
3608   -criticalPrio   // 11 CriticalPriority
3609 };
3610 
3611 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3612   OSThread* osthread = thread->osthread();
3613 
3614   // Save requested priority in case the thread hasn't been started
3615   osthread->set_native_priority(newpri);
3616 
3617   // Check for critical priority request
3618   bool fxcritical = false;
3619   if (newpri == -criticalPrio) {
3620     fxcritical = true;
3621     newpri = criticalPrio;
3622   }
3623 
3624   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3625   if (!UseThreadPriorities) return OS_OK;
3626 
3627   int status = 0;
3628 
3629   if (!fxcritical) {
3630     // Use thr_setprio only if we have a priority that thr_setprio understands
3631     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3632   }
3633 
3634   int lwp_status =
3635           set_lwp_class_and_priority(osthread->thread_id(),
3636           osthread->lwp_id(),
3637           newpri,
3638           fxcritical ? fxLimits.schedPolicy : myClass,
3639           !fxcritical);
3640   if (lwp_status != 0 && fxcritical) {
3641     // Try again, this time without changing the scheduling class
3642     newpri = java_MaxPriority_to_os_priority;
3643     lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3644             osthread->lwp_id(),
3645             newpri, myClass, false);
3646   }
3647   status |= lwp_status;
3648   return (status == 0) ? OS_OK : OS_ERR;
3649 }
3650 
3651 
3652 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
3653   int p;
3654   if (!UseThreadPriorities) {
3655     *priority_ptr = NormalPriority;
3656     return OS_OK;
3657   }
3658   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3659   if (status != 0) {
3660     return OS_ERR;
3661   }
3662   *priority_ptr = p;
3663   return OS_OK;
3664 }
3665 
3666 
3667 // Hint to the underlying OS that a task switch would not be good.
3668 // Void return because it's a hint and can fail.
3669 void os::hint_no_preempt() {
3670   schedctl_start(schedctl_init());
3671 }
3672 
3673 static void resume_clear_context(OSThread *osthread) {
3674   osthread->set_ucontext(NULL);
3675 }
3676 
3677 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3678   osthread->set_ucontext(context);
3679 }
3680 
3681 static Semaphore sr_semaphore;
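
// Suspend/resume handshake implemented by SR_handler(), do_suspend() and
// do_resume() below (summary):
//  - to suspend, the requester moves the target's SuspendResume state to
//    SR_SUSPEND_REQUEST and sends SIGasync; the handler switches the state
//    to SR_SUSPENDED, posts sr_semaphore and parks in sigsuspend().
//  - to resume, the requester moves the state to SR_WAKEUP_REQUEST and sends
//    SIGasync again; the handler switches the state back to SR_RUNNING and
//    posts sr_semaphore so the requester knows the target is running again.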
3682 
3683 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
3684   // Save and restore errno to avoid confusing native code with EINTR
3685   // after sigsuspend.
3686   int old_errno = errno;
3687 
3688   OSThread* osthread = thread->osthread();
3689   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3690 
3691   os::SuspendResume::State current = osthread->sr.state();
3692   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3693     suspend_save_context(osthread, uc);
3694 
3695     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3696     os::SuspendResume::State state = osthread->sr.suspended();
3697     if (state == os::SuspendResume::SR_SUSPENDED) {
3698       sigset_t suspend_set;  // signals for sigsuspend()
3699 
3700       // get current set of blocked signals and unblock resume signal
3701       thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
3702       sigdelset(&suspend_set, os::Solaris::SIGasync());
3703 
3704       sr_semaphore.signal();
3705       // wait here until we are resumed
3706       while (1) {
3707         sigsuspend(&suspend_set);
3708 
3709         os::SuspendResume::State result = osthread->sr.running();
3710         if (result == os::SuspendResume::SR_RUNNING) {
3711           sr_semaphore.signal();
3712           break;
3713         }
3714       }
3715 
3716     } else if (state == os::SuspendResume::SR_RUNNING) {
3717       // request was cancelled, continue
3718     } else {
3719       ShouldNotReachHere();
3720     }
3721 
3722     resume_clear_context(osthread);
3723   } else if (current == os::SuspendResume::SR_RUNNING) {
3724     // request was cancelled, continue
3725   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3726     // ignore
3727   } else {
3728     // ignore
3729   }
3730 
3731   errno = old_errno;
3732 }
3733 
3734 void os::print_statistics() {
3735 }
3736 
3737 int os::message_box(const char* title, const char* message) {
3738   int i;
3739   fdStream err(defaultStream::error_fd());
3740   for (i = 0; i < 78; i++) err.print_raw("=");
3741   err.cr();
3742   err.print_raw_cr(title);
3743   for (i = 0; i < 78; i++) err.print_raw("-");
3744   err.cr();
3745   err.print_raw_cr(message);
3746   for (i = 0; i < 78; i++) err.print_raw("=");
3747   err.cr();
3748 
3749   char buf[16];
3750   // Prevent process from exiting upon "read error" without consuming all CPU
3751   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3752 
3753   return buf[0] == 'y' || buf[0] == 'Y';
3754 }
3755 
3756 static int sr_notify(OSThread* osthread) {
3757   int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
3758   assert_status(status == 0, status, "thr_kill");
3759   return status;
3760 }
3761 
3762 // "Randomly" selected value for how long we want to spin
3763 // before bailing out on suspending a thread, also how often
3764 // we send a signal to a thread we want to resume
3765 static const int RANDOMLY_LARGE_INTEGER = 1000000;
3766 static const int RANDOMLY_LARGE_INTEGER2 = 100;
3767 
3768 static bool do_suspend(OSThread* osthread) {
3769   assert(osthread->sr.is_running(), "thread should be running");
3770   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
3771 
3772   // mark as suspended and send signal
3773   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3774     // failed to switch, state wasn't running?
3775     ShouldNotReachHere();
3776     return false;
3777   }
3778 
3779   if (sr_notify(osthread) != 0) {
3780     ShouldNotReachHere();
3781   }
3782 
3783   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3784   while (true) {
3785     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
3786       break;
3787     } else {
3788       // timeout
3789       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3790       if (cancelled == os::SuspendResume::SR_RUNNING) {
3791         return false;
3792       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3793         // make sure that we consume the signal on the semaphore as well
3794         sr_semaphore.wait();
3795         break;
3796       } else {
3797         ShouldNotReachHere();
3798         return false;
3799       }
3800     }
3801   }
3802 
3803   guarantee(osthread->sr.is_suspended(), "Must be suspended");
3804   return true;
3805 }
3806 
3807 static void do_resume(OSThread* osthread) {
3808   assert(osthread->sr.is_suspended(), "thread should be suspended");
3809   assert(!sr_semaphore.trywait(), "invalid semaphore state");
3810 
3811   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3812     // failed to switch to WAKEUP_REQUEST
3813     ShouldNotReachHere();
3814     return;
3815   }
3816 
3817   while (true) {
3818     if (sr_notify(osthread) == 0) {
3819       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
3820         if (osthread->sr.is_running()) {
3821           return;
3822         }
3823       }
3824     } else {
3825       ShouldNotReachHere();
3826     }
3827   }
3828 
3829   guarantee(osthread->sr.is_running(), "Must be running!");
3830 }
3831 
3832 void os::SuspendedThreadTask::internal_do_task() {
3833   if (do_suspend(_thread->osthread())) {
3834     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3835     do_task(context);
3836     do_resume(_thread->osthread());
3837   }
3838 }
3839 
3840 class PcFetcher : public os::SuspendedThreadTask {
3841 public:
3842   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3843   ExtendedPC result();
3844 protected:
3845   void do_task(const os::SuspendedThreadTaskContext& context);
3846 private:
3847   ExtendedPC _epc;
3848 };
3849 
3850 ExtendedPC PcFetcher::result() {
3851   guarantee(is_done(), "task is not done yet.");
3852   return _epc;
3853 }
3854 
3855 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3856   Thread* thread = context.thread();
3857   OSThread* osthread = thread->osthread();
3858   if (osthread->ucontext() != NULL) {
3859     _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
3860   } else {
3861     // NULL context is unexpected, double-check this is the VMThread
3862     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3863   }
3864 }
3865 
// Fetches the pc of the target thread by briefly suspending it via the
// SuspendedThreadTask/PcFetcher mechanism above. Used for profiling only!
3868 ExtendedPC os::get_thread_pc(Thread* thread) {
3869   // Make sure that it is called by the watcher and the Threads lock is owned.
3870   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  // For now, it is only used to profile the VM Thread
3872   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3873   PcFetcher fetcher(thread);
3874   fetcher.run();
3875   return fetcher.result();
3876 }
3877 
3878 
3879 // This does not do anything on Solaris. This is basically a hook for being
3880 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
3881 void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
3882   f(value, method, args, thread);
3883 }
3884 
3885 // This routine may be used by user applications as a "hook" to catch signals.
3886 // The user-defined signal handler must pass unrecognized signals to this
3887 // routine, and if it returns true (non-zero), then the signal handler must
3888 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
3891 //
3892 // If this routine returns false, it is OK to call it again.  This allows
3893 // the user-defined signal handler to perform checks either before or after
3894 // the VM performs its own checks.  Naturally, the user code would be making
3895 // a serious error if it tried to handle an exception (such as a null check
3896 // or breakpoint) that the VM was generating for its own correct operation.
3897 //
3898 // This routine may recognize any of the following kinds of signals:
3899 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
3900 // os::Solaris::SIGasync
3901 // It should be consulted by handlers for any of those signals.
3902 // It explicitly does not recognize os::Solaris::SIGinterrupt
3903 //
3904 // The caller of this routine must pass in the three arguments supplied
3905 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
3906 // field of the structure passed to sigaction().  This routine assumes that
3907 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3908 //
3909 // Note that the VM will print warnings if it detects conflicting signal
3910 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3911 //
3912 extern "C" JNIEXPORT int
3913 JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
3914                           int abort_if_unrecognized);
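
// Illustrative sketch of such a user-defined handler (not part of the VM):
// it forwards unrecognized signals to the VM as described above and only
// handles the signal itself if the VM declines it.
//
//   extern "C" void my_sigaction(int sig, siginfo_t* info, void* uc) {
//     if (JVM_handle_solaris_signal(sig, info, uc, 0 /* abort_if_unrecognized */)) {
//       return;  // the VM recognized and handled the signal
//     }
//     // ... application-specific handling ...
//   }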
3915 
3916 
3917 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
3918   int orig_errno = errno;  // Preserve errno value over signal handler.
3919   JVM_handle_solaris_signal(sig, info, ucVoid, true);
3920   errno = orig_errno;
3921 }
3922 
3923 /* Do not delete - if guarantee is ever removed,  a signal handler (even empty)
3924    is needed to provoke threads blocked on IO to return an EINTR
3925    Note: this explicitly does NOT call JVM_handle_solaris_signal and
3926    does NOT participate in signal chaining due to requirement for
3927    NOT setting SA_RESTART to make EINTR work. */
3928 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
3929    if (UseSignalChaining) {
3930       struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
3931       if (actp && actp->sa_handler) {
3932         vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
3933       }
3934    }
3935 }
3936 
3937 // This boolean allows users to forward their own non-matching signals
3938 // to JVM_handle_solaris_signal, harmlessly.
3939 bool os::Solaris::signal_handlers_are_installed = false;
3940 
3941 // For signal-chaining
3942 bool os::Solaris::libjsig_is_loaded = false;
3943 typedef struct sigaction *(*get_signal_t)(int);
3944 get_signal_t os::Solaris::get_signal_action = NULL;
3945 
3946 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
3947   struct sigaction *actp = NULL;
3948 
3949   if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
3950     // Retrieve the old signal handler from libjsig
3951     actp = (*get_signal_action)(sig);
3952   }
3953   if (actp == NULL) {
3954     // Retrieve the preinstalled signal handler from jvm
3955     actp = get_preinstalled_handler(sig);
3956   }
3957 
3958   return actp;
3959 }
3960 
3961 static bool call_chained_handler(struct sigaction *actp, int sig,
3962                                  siginfo_t *siginfo, void *context) {
3963   // Call the old signal handler
3964   if (actp->sa_handler == SIG_DFL) {
3965     // It's more reasonable to let jvm treat it as an unexpected exception
3966     // instead of taking the default action.
3967     return false;
3968   } else if (actp->sa_handler != SIG_IGN) {
3969     if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
3971       sigaddset(&(actp->sa_mask), sig);
3972     }
3973 
3974     sa_handler_t hand;
3975     sa_sigaction_t sa;
3976     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3977     // retrieve the chained handler
3978     if (siginfo_flag_set) {
3979       sa = actp->sa_sigaction;
3980     } else {
3981       hand = actp->sa_handler;
3982     }
3983 
3984     if ((actp->sa_flags & SA_RESETHAND) != 0) {
3985       actp->sa_handler = SIG_DFL;
3986     }
3987 
3988     // try to honor the signal mask
3989     sigset_t oset;
3990     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3991 
3992     // call into the chained handler
3993     if (siginfo_flag_set) {
3994       (*sa)(sig, siginfo, context);
3995     } else {
3996       (*hand)(sig);
3997     }
3998 
3999     // restore the signal mask
4000     thr_sigsetmask(SIG_SETMASK, &oset, 0);
4001   }
4002   // Tell jvm's signal handler the signal is taken care of.
4003   return true;
4004 }
4005 
4006 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4007   bool chained = false;
4008   // signal-chaining
4009   if (UseSignalChaining) {
4010     struct sigaction *actp = get_chained_signal_action(sig);
4011     if (actp != NULL) {
4012       chained = call_chained_handler(actp, sig, siginfo, context);
4013     }
4014   }
4015   return chained;
4016 }
4017 
4018 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4019   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4020   if (preinstalled_sigs[sig] != 0) {
4021     return &chainedsigactions[sig];
4022   }
4023   return NULL;
4024 }
4025 
4026 void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
4027 
4028   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4029   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4030   chainedsigactions[sig] = oldAct;
4031   preinstalled_sigs[sig] = 1;
4032 }
4033 
4034 void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
4035   // Check for overwrite.
4036   struct sigaction oldAct;
4037   sigaction(sig, (struct sigaction*)NULL, &oldAct);
4038   void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
4039                                       : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
4040   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4041       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4042       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4043     if (AllowUserSignalHandlers || !set_installed) {
4044       // Do not overwrite; user takes responsibility to forward to us.
4045       return;
4046     } else if (UseSignalChaining) {
4047       if (oktochain) {
4048         // save the old handler in jvm
4049         save_preinstalled_handler(sig, oldAct);
4050       } else {
4051         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4052       }
4053       // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
4055     } else {
4056       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4057                     "%#lx for signal %d.", (long)oldhand, sig));
4058     }
4059   }
4060 
4061   struct sigaction sigAct;
4062   sigfillset(&(sigAct.sa_mask));
4063   sigAct.sa_handler = SIG_DFL;
4064 
4065   sigAct.sa_sigaction = signalHandler;
4066   // Handle SIGSEGV on alternate signal stack if
4067   // not using stack banging
4068   if (!UseStackBanging && sig == SIGSEGV) {
4069     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4070   // Interruptible i/o requires SA_RESTART cleared so EINTR
4071   // is returned instead of restarting system calls
4072   } else if (sig == os::Solaris::SIGinterrupt()) {
4073     sigemptyset(&sigAct.sa_mask);
4074     sigAct.sa_handler = NULL;
4075     sigAct.sa_flags = SA_SIGINFO;
4076     sigAct.sa_sigaction = sigINTRHandler;
4077   } else {
4078     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4079   }
4080   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4081 
4082   sigaction(sig, &sigAct, &oldAct);
4083 
4084   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4085                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4086   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4087 }
4088 
4089 
4090 #define DO_SIGNAL_CHECK(sig) \
4091   if (!sigismember(&check_signal_done, sig)) \
4092     os::Solaris::check_signal_handler(sig)
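
// For example, DO_SIGNAL_CHECK(SIGSEGV) expands to:
//   if (!sigismember(&check_signal_done, SIGSEGV))
//     os::Solaris::check_signal_handler(SIGSEGV)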
4093 
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI; we can add any other periodic checks here.
4096 
4097 void os::run_periodic_checks() {
4098   // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  // thereby preventing NULL checks.
4100   if (!check_addr0_done) check_addr0_done = check_addr0(tty);
4101 
4102   if (check_signals == false) return;
4103 
  // SEGV and BUS, if overridden, could potentially prevent
  // generation of hs*.log in the event of a crash.  Debugging
  // such a case can be very challenging, so we absolutely
  // check for the following for good measure:
4108   DO_SIGNAL_CHECK(SIGSEGV);
4109   DO_SIGNAL_CHECK(SIGILL);
4110   DO_SIGNAL_CHECK(SIGFPE);
4111   DO_SIGNAL_CHECK(SIGBUS);
4112   DO_SIGNAL_CHECK(SIGPIPE);
4113   DO_SIGNAL_CHECK(SIGXFSZ);
4114 
4115   // ReduceSignalUsage allows the user to override these handlers
4116   // see comments at the very top and jvm_solaris.h
4117   if (!ReduceSignalUsage) {
4118     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4119     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4120     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4121     DO_SIGNAL_CHECK(BREAK_SIGNAL);
4122   }
4123 
4124   // See comments above for using JVM1/JVM2 and UseAltSigs
4125   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4126   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4127 
4128 }
4129 
4130 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4131 
4132 static os_sigaction_t os_sigaction = NULL;
4133 
4134 void os::Solaris::check_signal_handler(int sig) {
4135   char buf[O_BUFLEN];
4136   address jvmHandler = NULL;
4137 
4138   struct sigaction act;
4139   if (os_sigaction == NULL) {
4140     // only trust the default sigaction, in case it has been interposed
4141     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4142     if (os_sigaction == NULL) return;
4143   }
4144 
4145   os_sigaction(sig, (struct sigaction*)NULL, &act);
4146 
4147   address thisHandler = (act.sa_flags & SA_SIGINFO)
4148     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4149     : CAST_FROM_FN_PTR(address, act.sa_handler);
4150 
4151 
4152   switch (sig) {
4153     case SIGSEGV:
4154     case SIGBUS:
4155     case SIGFPE:
4156     case SIGPIPE:
4157     case SIGXFSZ:
4158     case SIGILL:
4159       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4160       break;
4161 
4162     case SHUTDOWN1_SIGNAL:
4163     case SHUTDOWN2_SIGNAL:
4164     case SHUTDOWN3_SIGNAL:
4165     case BREAK_SIGNAL:
4166       jvmHandler = (address)user_handler();
4167       break;
4168 
4169     default:
4170       int intrsig = os::Solaris::SIGinterrupt();
4171       int asynsig = os::Solaris::SIGasync();
4172 
4173       if (sig == intrsig) {
4174         jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4175       } else if (sig == asynsig) {
4176         jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4177       } else {
4178         return;
4179       }
4180       break;
4181   }
4182 
4183 
4184   if (thisHandler != jvmHandler) {
4185     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4186     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4187     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4188     // No need to check this sig any longer
4189     sigaddset(&check_signal_done, sig);
    // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
4191     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
4192       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
4193                     exception_name(sig, buf, O_BUFLEN));
4194     }
4195   } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4196     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4197     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4198     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
4199     // No need to check this sig any longer
4200     sigaddset(&check_signal_done, sig);
4201   }
4202 
4203   // Print all the signal handler state
4204   if (sigismember(&check_signal_done, sig)) {
4205     print_signal_handlers(tty, buf, O_BUFLEN);
4206   }
4207 
4208 }
4209 
4210 void os::Solaris::install_signal_handlers() {
4211   bool libjsigdone = false;
4212   signal_handlers_are_installed = true;
4213 
4214   // signal-chaining
4215   typedef void (*signal_setting_t)();
4216   signal_setting_t begin_signal_setting = NULL;
4217   signal_setting_t end_signal_setting = NULL;
4218   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4219                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4220   if (begin_signal_setting != NULL) {
4221     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4222                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4223     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4224                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4225     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4226                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4227     libjsig_is_loaded = true;
4228     if (os::Solaris::get_libjsig_version != NULL) {
4229       libjsigversion =  (*os::Solaris::get_libjsig_version)();
4230     }
4231     assert(UseSignalChaining, "should enable signal-chaining");
4232   }
4233   if (libjsig_is_loaded) {
4234     // Tell libjsig jvm is setting signal handlers
4235     (*begin_signal_setting)();
4236   }
4237 
4238   set_signal_handler(SIGSEGV, true, true);
4239   set_signal_handler(SIGPIPE, true, true);
4240   set_signal_handler(SIGXFSZ, true, true);
4241   set_signal_handler(SIGBUS, true, true);
4242   set_signal_handler(SIGILL, true, true);
4243   set_signal_handler(SIGFPE, true, true);
4244 
4245 
4246   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4247 
    // Pre-1.4.1 libjsig is limited to chaining signals <= 32, so it
    // cannot register overridable signals which might be > 32.
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
      // Tell libjsig jvm has finished setting signal handlers
4252       (*end_signal_setting)();
4253       libjsigdone = true;
4254     }
4255   }
4256 
4257   // Never ok to chain our SIGinterrupt
4258   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4259   set_signal_handler(os::Solaris::SIGasync(), true, true);
4260 
4261   if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig jvm has finished setting signal handlers
4263     (*end_signal_setting)();
4264   }
4265 
  // We don't activate the signal checker if libjsig is in place; we trust ourselves.
  // And if a user signal handler is installed, all bets are off.
4268   // Log that signal checking is off only if -verbose:jni is specified.
4269   if (CheckJNICalls) {
4270     if (libjsig_is_loaded) {
4271       if (PrintJNIResolving) {
4272         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4273       }
4274       check_signals = false;
4275     }
4276     if (AllowUserSignalHandlers) {
4277       if (PrintJNIResolving) {
4278         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4279       }
4280       check_signals = false;
4281     }
4282   }
4283 }
4284 
4285 
4286 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
4287 
4288 const char * signames[] = {
4289   "SIG0",
4290   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4291   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4292   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4293   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4294   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4295   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4296   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4297   "SIGCANCEL", "SIGLOST"
4298 };
4299 
4300 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4301   if (0 < exception_code && exception_code <= SIGRTMAX) {
4302     // signal
4303     if (exception_code < sizeof(signames)/sizeof(const char*)) {
4304        jio_snprintf(buf, size, "%s", signames[exception_code]);
4305     } else {
4306        jio_snprintf(buf, size, "SIG%d", exception_code);
4307     }
4308     return buf;
4309   } else {
4310     return NULL;
4311   }
4312 }
4313 
4314 // (Static) wrapper for getisax(2) call.
4315 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4316 
4317 // (Static) wrappers for the liblgrp API
4318 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4319 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4320 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4321 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4322 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4323 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4324 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4325 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4326 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4327 
4328 // (Static) wrapper for meminfo() call.
4329 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4330 
4331 static address resolve_symbol_lazy(const char* name) {
4332   address addr = (address) dlsym(RTLD_DEFAULT, name);
4333   if (addr == NULL) {
4334     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4335     addr = (address) dlsym(RTLD_NEXT, name);
4336   }
4337   return addr;
4338 }
4339 
4340 static address resolve_symbol(const char* name) {
4341   address addr = resolve_symbol_lazy(name);
4342   if (addr == NULL) {
4343     fatal(dlerror());
4344   }
4345   return addr;
4346 }
4347 
4348 void os::Solaris::libthread_init() {
4349   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4350 
4351   lwp_priocntl_init();
4352 
4353   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4354   if (func == NULL) {
4355     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on a new enough OS (5.6 or
4357     // later) that it will have a new enough libthread.so.
4358     guarantee(func != NULL, "libthread.so is too old.");
4359   }
4360 
4361   int size;
4362   void (*handler_info_func)(address *, int *);
4363   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4364   handler_info_func(&handler_start, &size);
4365   handler_end = handler_start + size;
4366 }
4367 
4368 
4369 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4370 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4371 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4372 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4373 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4374 int os::Solaris::_mutex_scope = USYNC_THREAD;
4375 
4376 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4377 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4378 int_fnP_cond_tP os::Solaris::_cond_signal;
4379 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4380 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4381 int_fnP_cond_tP os::Solaris::_cond_destroy;
4382 int os::Solaris::_cond_scope = USYNC_THREAD;
4383 
4384 void os::Solaris::synchronization_init() {
4385   if (UseLWPSynchronization) {
4386     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4387     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4388     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4389     os::Solaris::set_mutex_init(lwp_mutex_init);
4390     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4391     os::Solaris::set_mutex_scope(USYNC_THREAD);
4392 
4393     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4394     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4395     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4396     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4397     os::Solaris::set_cond_init(lwp_cond_init);
4398     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4399     os::Solaris::set_cond_scope(USYNC_THREAD);
4400   }
4401   else {
4402     os::Solaris::set_mutex_scope(USYNC_THREAD);
4403     os::Solaris::set_cond_scope(USYNC_THREAD);
4404 
4405     if (UsePthreads) {
4406       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4407       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4408       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4409       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4410       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4411 
4412       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4413       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4414       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4415       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4416       os::Solaris::set_cond_init(pthread_cond_default_init);
4417       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4418     }
4419     else {
4420       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4421       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4422       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4423       os::Solaris::set_mutex_init(::mutex_init);
4424       os::Solaris::set_mutex_destroy(::mutex_destroy);
4425 
4426       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4427       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4428       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4429       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4430       os::Solaris::set_cond_init(::cond_init);
4431       os::Solaris::set_cond_destroy(::cond_destroy);
4432     }
4433   }
4434 }
4435 
4436 bool os::Solaris::liblgrp_init() {
4437   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4438   if (handle != NULL) {
4439     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4440     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4441     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4442     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4443     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4444     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4445     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4446     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4447                                        dlsym(handle, "lgrp_cookie_stale")));
4448 
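         // Obtain a cookie representing a snapshot of the locality group
         // hierarchy as seen by this caller; lgrp_cookie_stale() can later be
         // used to detect when that snapshot has gone stale.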
4449     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4450     set_lgrp_cookie(c);
4451     return true;
4452   }
4453   return false;
4454 }
4455 
4456 void os::Solaris::misc_sym_init() {
4457   address func;
4458 
4459   // getisax
4460   func = resolve_symbol_lazy("getisax");
4461   if (func != NULL) {
4462     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4463   }
4464 
4465   // meminfo
4466   func = resolve_symbol_lazy("meminfo");
4467   if (func != NULL) {
4468     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4469   }
4470 }
4471 
4472 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4473   assert(_getisax != NULL, "_getisax not set");
4474   return _getisax(array, n);
4475 }
4476 
4477 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4478 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4479 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4480 
4481 void init_pset_getloadavg_ptr(void) {
4482   pset_getloadavg_ptr =
4483     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4484   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4485     warning("pset_getloadavg function not found");
4486   }
4487 }
4488 
4489 int os::Solaris::_dev_zero_fd = -1;
4490 
4491 // this is called _before_ the global arguments have been parsed
4492 void os::init(void) {
4493   _initial_pid = getpid();
4494 
4495   max_hrtime = first_hrtime = gethrtime();
4496 
4497   init_random(1234567);
4498 
4499   page_size = sysconf(_SC_PAGESIZE);
4500   if (page_size == -1)
4501     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4502                   strerror(errno)));
4503   init_page_sizes((size_t) page_size);
4504 
4505   Solaris::initialize_system_info();
4506 
4507   // Initialize misc. symbols as soon as possible, so we can use them
4508   // if we need them.
4509   Solaris::misc_sym_init();
4510 
4511   int fd = ::open("/dev/zero", O_RDWR);
4512   if (fd < 0) {
4513     fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4514   } else {
4515     Solaris::set_dev_zero_fd(fd);
4516 
4517     // Close on exec, child won't inherit.
4518     fcntl(fd, F_SETFD, FD_CLOEXEC);
4519   }
4520 
4521   clock_tics_per_sec = CLK_TCK;
4522 
4523   // check if dladdr1() exists; dladdr1 can provide more information than
4524   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4525   // and is available on linker patches for 5.7 and 5.8.
4526   // libdl.so must have been loaded, this call is just an entry lookup
4527   void * hdl = dlopen("libdl.so", RTLD_NOW);
4528   if (hdl)
4529     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4530 
4531   // (Solaris only) this switches to calls that actually do locking.
4532   ThreadCritical::initialize();
4533 
4534   main_thread = thr_self();
4535 
4536   // Constant minimum stack size allowed. It must be at least
4537   // the minimum of what the OS supports (thr_min_stack()), and
4538   // enough to allow the thread to get to user bytecode execution.
4539   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4540   // If the pagesize of the VM is greater than 8K, determine the appropriate
4541   // number of initial guard pages.  The user can change this with the
4542   // command line arguments, if needed.
4543   if (vm_page_size() > 8*K) {
4544     StackYellowPages = 1;
4545     StackRedPages = 1;
4546     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4547   }
4548 }
4549 
4550 // To install functions for atexit system call
4551 extern "C" {
4552   static void perfMemory_exit_helper() {
4553     perfMemory_exit();
4554   }
4555 }
4556 
4557 // this is called _after_ the global arguments have been parsed
4558 jint os::init_2(void) {
4559   // try to enable extended file IO ASAP, see 6431278
4560   os::Solaris::try_enable_extended_io();
4561 
4562   // Allocate a single page and mark it as readable for safepoint polling.  Also
4563   // use this first mmap call to check support for MAP_ALIGN.
4564   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4565                                                       page_size,
4566                                                       MAP_PRIVATE | MAP_ALIGN,
4567                                                       PROT_READ);
4568   if (polling_page == NULL) {
4569     has_map_align = false;
4570     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
4571                                                 PROT_READ);
4572   }
4573 
4574   os::set_polling_page(polling_page);
4575 
4576 #ifndef PRODUCT
4577   if (Verbose && PrintMiscellaneous)
4578     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
4579 #endif
4580 
4581   if (!UseMembar) {
4582     address mem_serialize_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE);
4583     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
4584     os::set_memory_serialize_page(mem_serialize_page);
4585 
4586 #ifndef PRODUCT
4587     if (Verbose && PrintMiscellaneous)
4588       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
4589 #endif
4590   }
4591 
4592   // Check minimum allowable stack size for thread creation and to initialize
4593   // the java system classes, including StackOverflowError - depends on page
4594   // size.  Add a page for compiler2 recursion in main thread.
4595   // Add in 2*BytesPerWord times page size to account for VM stack during
4596   // class initialization depending on 32 or 64 bit VM.
4597   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
4598             (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
4599                     2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
4600 
4601   size_t threadStackSizeInBytes = ThreadStackSize * K;
4602   if (threadStackSizeInBytes != 0 &&
4603     threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
4604     tty->print_cr("\nThe stack size specified is too small. Specify at least %dk",
4605                   os::Solaris::min_stack_allowed/K);
4606     return JNI_ERR;
4607   }
4608 
4609   // For 64kb pages the usable default stack size is quite a bit less,
4610   // because more of it is consumed by guard pages.  Increase the
4611   // stack for 64kb (or any larger than 8kb) pages; this increases
4612   // virtual memory fragmentation (since we're not creating the
4613   // stack on a power of 2 boundary).  The real fix for this
4614   // should be to fix the guard page mechanism.
4615 
4616   if (vm_page_size() > 8*K) {
4617       threadStackSizeInBytes = (threadStackSizeInBytes != 0)
4618          ? threadStackSizeInBytes +
4619            ((StackYellowPages + StackRedPages) * vm_page_size())
4620          : 0;
4621       ThreadStackSize = threadStackSizeInBytes/K;
4622   }
4623 
4624   // Make the stack size a multiple of the page size so that
4625   // the yellow/red zones can be guarded.
4626   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
4627         vm_page_size()));
4628 
4629   Solaris::libthread_init();
4630 
4631   if (UseNUMA) {
4632     if (!Solaris::liblgrp_init()) {
4633       UseNUMA = false;
4634     } else {
4635       size_t lgrp_limit = os::numa_get_groups_num();
4636       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
4637       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
4638       FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
4639       if (lgrp_num < 2) {
4640         // There's only one locality group, disable NUMA.
4641         UseNUMA = false;
4642       }
4643     }
4644     if (!UseNUMA && ForceNUMA) {
4645       UseNUMA = true;
4646     }
4647   }
4648 
4649   Solaris::signal_sets_init();
4650   Solaris::init_signal_mem();
4651   Solaris::install_signal_handlers();
4652 
4653   if (libjsigversion < JSIG_VERSION_1_4_1) {
4654     Maxlibjsigsigs = OLDMAXSIGNUM;
4655   }
4656 
4657   // initialize synchronization primitives to use either thread or
4658   // lwp synchronization (controlled by UseLWPSynchronization)
4659   Solaris::synchronization_init();
4660 
4661   if (MaxFDLimit) {
4662     // Set the number of file descriptors to the maximum.  Print an error
4663     // if getrlimit/setrlimit fails, but continue regardless.
4664     struct rlimit nbr_files;
4665     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4666     if (status != 0) {
4667       if (PrintMiscellaneous && (Verbose || WizardMode))
4668         perror("os::init_2 getrlimit failed");
4669     } else {
4670       nbr_files.rlim_cur = nbr_files.rlim_max;
4671       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4672       if (status != 0) {
4673         if (PrintMiscellaneous && (Verbose || WizardMode))
4674           perror("os::init_2 setrlimit failed");
4675       }
4676     }
4677   }
4678 
4679   // Calculate the theoretical maximum number of threads to guard against
4680   // artificial out-of-memory situations, where all available address
4681   // space has been reserved by thread stacks. Default stack size is 1Mb.
4682   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
4683     JavaThread::stack_size_at_create() : (1*K*K);
4684   assert(pre_thread_stack_size != 0, "Must have a stack");
4685   // Solaris allows at most 4Gb of address space for user programs. Calculate the
4686   // thread limit at which we should start doing Virtual Memory banging: currently
4687   // when the thread stacks will have used all but 200Mb of that space.
4688   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
4689   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
4690 
4691   // at-exit methods are called in the reverse order of their registration.
4692   // In Solaris 7 and earlier, atexit functions are called on return from
4693   // main or as a result of a call to exit(3C). There can be only 32 of
4694   // these functions registered and atexit() does not set errno. In Solaris
4695   // 8 and later, there is no limit to the number of functions registered
4696   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
4697   // functions are called upon dlclose(3DL) in addition to return from main
4698   // and exit(3C).
4699 
4700   if (PerfAllowAtExitRegistration) {
4701     // only register atexit functions if PerfAllowAtExitRegistration is set.
4702     // atexit functions can be delayed until process exit time, which
4703     // can be problematic for embedded VM situations. Embedded VMs should
4704     // call DestroyJavaVM() to assure that VM resources are released.
4705 
4706     // note: perfMemory_exit_helper atexit function may be removed in
4707     // the future if the appropriate cleanup code can be added to the
4708     // VM_Exit VMOperation's doit method.
4709     if (atexit(perfMemory_exit_helper) != 0) {
4710       warning("os::init2 atexit(perfMemory_exit_helper) failed");
4711     }
4712   }
4713 
4714   // Init pset_loadavg function pointer
4715   init_pset_getloadavg_ptr();
4716 
4717   return JNI_OK;
4718 }
4719 
4720 void os::init_3(void) {
4721   return;
4722 }
4723 
4724 // Mark the polling page as unreadable
4725 void os::make_polling_page_unreadable(void) {
4726   if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0)
4727     fatal("Could not disable polling page");
4728 };
4729 
4730 // Mark the polling page as readable
4731 void os::make_polling_page_readable(void) {
4732   if (mprotect((char *)_polling_page, page_size, PROT_READ) != 0)
4733     fatal("Could not enable polling page");
4734 };
4735 
4736 // OS interface.
4737 
4738 bool os::check_heap(bool force) { return true; }
4739 
4740 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
4741 static vsnprintf_t sol_vsnprintf = NULL;
4742 
4743 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
4744   if (!sol_vsnprintf) {
4745     // search for the named symbol in the objects that were loaded after libjvm
4746     void* where = RTLD_NEXT;
4747     if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
4748         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
4749     if (!sol_vsnprintf) {
4750       // search for the named symbol in the objects that were loaded before libjvm
4751       where = RTLD_DEFAULT;
4752       if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
4753         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
4754       assert(sol_vsnprintf != NULL, "vsnprintf not found");
4755     }
4756   }
4757   return (*sol_vsnprintf)(buf, count, fmt, argptr);
4758 }
4759 
4760 
4761 // Is a (classpath) directory empty?
4762 bool os::dir_is_empty(const char* path) {
4763   DIR *dir = NULL;
4764   struct dirent *ptr;
4765 
4766   dir = opendir(path);
4767   if (dir == NULL) return true;
4768 
4769   /* Scan the directory */
4770   bool result = true;
4771   char buf[sizeof(struct dirent) + MAX_PATH];
4772   struct dirent *dbuf = (struct dirent *) buf;
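       // Note: inside os::, readdir resolves to the os::readdir wrapper (see
       // os_solaris.inline.hpp), which takes a caller-supplied dirent buffer.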
4773   while (result && (ptr = readdir(dir, dbuf)) != NULL) {
4774     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4775       result = false;
4776     }
4777   }
4778   closedir(dir);
4779   return result;
4780 }
4781 
4782 // This code originates from JDK's sysOpen and open64_w
4783 // from src/solaris/hpi/src/system_md.c
4784 
4785 #ifndef O_DELETE
4786 #define O_DELETE 0x10000
4787 #endif
4788 
4789 // Open a file. Unlink the file immediately after open returns
4790 // if the specified oflag has the O_DELETE flag set.
4791 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
4792 
4793 int os::open(const char *path, int oflag, int mode) {
4794   if (strlen(path) > MAX_PATH - 1) {
4795     errno = ENAMETOOLONG;
4796     return -1;
4797   }
4798   int fd;
4799   int o_delete = (oflag & O_DELETE);
4800   oflag = oflag & ~O_DELETE;
4801 
4802   fd = ::open64(path, oflag, mode);
4803   if (fd == -1) return -1;
4804 
4805   // If the open succeeded, the file might still be a directory
4806   {
4807     struct stat64 buf64;
4808     int ret = ::fstat64(fd, &buf64);
4809     int st_mode = buf64.st_mode;
4810 
4811     if (ret != -1) {
4812       if ((st_mode & S_IFMT) == S_IFDIR) {
4813         errno = EISDIR;
4814         ::close(fd);
4815         return -1;
4816       }
4817     } else {
4818       ::close(fd);
4819       return -1;
4820     }
4821   }
4822     /*
4823      * 32-bit Solaris systems suffer from:
4824      *
4825      * - an historical default soft limit of 256 per-process file
4826      *   descriptors that is too low for many Java programs.
4827      *
4828      * - a design flaw where file descriptors created using stdio
4829      *   fopen must be less than 256, _even_ when the first limit above
4830      *   has been raised.  This can cause calls to fopen (but not calls to
4831      *   open, for example) to fail mysteriously, perhaps in 3rd party
4832      *   native code (although the JDK itself uses fopen).  One can hardly
4833      *   criticize them for using this most standard of all functions.
4834      *
4835      * We attempt to make everything work anyways by:
4836      *
4837      * - raising the soft limit on per-process file descriptors beyond
4838      *   256
4839      *
4840      * - As of Solaris 10u4, we can request that Solaris raise the 256
4841      *   stdio fopen limit by calling function enable_extended_FILE_stdio.
4842      *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
4843      *
4844      * - If we are stuck on an old (pre 10u4) Solaris system, we can
4845      *   workaround the bug by remapping non-stdio file descriptors below
4846      *   256 to ones beyond 256, which is done below.
4847      *
4848      * See:
4849      * 1085341: 32-bit stdio routines should support file descriptors >255
4850      * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
4851      * 6431278: Netbeans crash on 32 bit Solaris: need to call
4852      *          enable_extended_FILE_stdio() in VM initialisation
4853      * Giri Mandalika's blog
4854      * http://technopark02.blogspot.com/2005_05_01_archive.html
4855      */
4856 #ifndef  _LP64
4857      if ((!enabled_extended_FILE_stdio) && fd < 256) {
4858          int newfd = ::fcntl(fd, F_DUPFD, 256);
4859          if (newfd != -1) {
4860              ::close(fd);
4861              fd = newfd;
4862          }
4863      }
4864 #endif // 32-bit Solaris
4865     /*
4866      * All file descriptors that are opened in the JVM and not
4867      * specifically destined for a subprocess should have the
4868      * close-on-exec flag set.  If we don't set it, then careless 3rd
4869      * party native code might fork and exec without closing all
4870      * appropriate file descriptors (e.g. as we do in closeDescriptors in
4871      * UNIXProcess.c), and this in turn might:
4872      *
4873      * - cause end-of-file to fail to be detected on some file
4874      *   descriptors, resulting in mysterious hangs, or
4875      *
4876      * - might cause an fopen in the subprocess to fail on a system
4877      *   suffering from bug 1085341.
4878      *
4879      * (Yes, the default setting of the close-on-exec flag is a Unix
4880      * design flaw)
4881      *
4882      * See:
4883      * 1085341: 32-bit stdio routines should support file descriptors >255
4884      * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4885      * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4886      */
4887 #ifdef FD_CLOEXEC
4888     {
4889         int flags = ::fcntl(fd, F_GETFD);
4890         if (flags != -1)
4891             ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4892     }
4893 #endif
4894 
4895   if (o_delete != 0) {
4896     ::unlink(path);
4897   }
4898   return fd;
4899 }
4900 
4901 // create binary file, rewriting existing file if required
4902 int os::create_binary_file(const char* path, bool rewrite_existing) {
4903   int oflags = O_WRONLY | O_CREAT;
4904   if (!rewrite_existing) {
4905     oflags |= O_EXCL;
4906   }
4907   return ::open64(path, oflags, S_IREAD | S_IWRITE);
4908 }
4909 
4910 // return current position of file pointer
4911 jlong os::current_file_offset(int fd) {
4912   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4913 }
4914 
4915 // move file pointer to the specified offset
4916 jlong os::seek_to_file_offset(int fd, jlong offset) {
4917   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4918 }
4919 
4920 jlong os::lseek(int fd, jlong offset, int whence) {
4921   return (jlong) ::lseek64(fd, offset, whence);
4922 }
4923 
4924 char * os::native_path(char *path) {
4925   return path;
4926 }
4927 
4928 int os::ftruncate(int fd, jlong length) {
4929   return ::ftruncate64(fd, length);
4930 }
4931 
4932 int os::fsync(int fd)  {
4933   RESTARTABLE_RETURN_INT(::fsync(fd));
4934 }
4935 
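     // Report the number of bytes that can be read from fd without blocking:
     // use FIONREAD for character devices, FIFOs and sockets, otherwise derive
     // the count from the current and end-of-file offsets.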
4936 int os::available(int fd, jlong *bytes) {
4937   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
4938           "Assumed _thread_in_native");
4939   jlong cur, end;
4940   int mode;
4941   struct stat64 buf64;
4942 
4943   if (::fstat64(fd, &buf64) >= 0) {
4944     mode = buf64.st_mode;
4945     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4946       int n, ioctl_return;
4947 
4948       RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
4949       if (ioctl_return >= 0) {
4950         *bytes = n;
4951         return 1;
4952       }
4953     }
4954   }
4955   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4956     return 0;
4957   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4958     return 0;
4959   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4960     return 0;
4961   }
4962   *bytes = end - cur;
4963   return 1;
4964 }
4965 
4966 // Map a block of memory.
4967 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4968                      char *addr, size_t bytes, bool read_only,
4969                      bool allow_exec) {
4970   int prot;
4971   int flags;
4972 
4973   if (read_only) {
4974     prot = PROT_READ;
4975     flags = MAP_SHARED;
4976   } else {
4977     prot = PROT_READ | PROT_WRITE;
4978     flags = MAP_PRIVATE;
4979   }
4980 
4981   if (allow_exec) {
4982     prot |= PROT_EXEC;
4983   }
4984 
4985   if (addr != NULL) {
4986     flags |= MAP_FIXED;
4987   }
4988 
4989   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
4990                                      fd, file_offset);
4991   if (mapped_address == MAP_FAILED) {
4992     return NULL;
4993   }
4994   return mapped_address;
4995 }
4996 
4997 
4998 // Remap a block of memory.
4999 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5000                        char *addr, size_t bytes, bool read_only,
5001                        bool allow_exec) {
5002   // same as map_memory() on this OS
5003   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5004                         allow_exec);
5005 }
5006 
5007 
5008 // Unmap a block of memory.
5009 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5010   return munmap(addr, bytes) == 0;
5011 }
5012 
5013 void os::pause() {
5014   char filename[MAX_PATH];
5015   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5016     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5017   } else {
5018     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5019   }
5020 
5021   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5022   if (fd != -1) {
5023     struct stat buf;
5024     ::close(fd);
5025     while (::stat(filename, &buf) == 0) {
5026       (void)::poll(NULL, 0, 100);
5027     }
5028   } else {
5029     jio_fprintf(stderr,
5030       "Could not open pause file '%s', continuing immediately.\n", filename);
5031   }
5032 }
5033 
5034 #ifndef PRODUCT
5035 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5036 // Turn this on if you need to trace synch operations.
5037 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5038 // and call record_synch_enable and record_synch_disable
5039 // around the computation of interest.
5040 
5041 void record_synch(char* name, bool returning);  // defined below
5042 
5043 class RecordSynch {
5044   char* _name;
5045  public:
5046   RecordSynch(char* name) :_name(name)
5047                  { record_synch(_name, false); }
5048   ~RecordSynch() { record_synch(_name,   true);  }
5049 };
5050 
5051 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
5052 extern "C" ret name params {                                    \
5053   typedef ret name##_t params;                                  \
5054   static name##_t* implem = NULL;                               \
5055   static int callcount = 0;                                     \
5056   if (implem == NULL) {                                         \
5057     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
5058     if (implem == NULL)  fatal(dlerror());                      \
5059   }                                                             \
5060   ++callcount;                                                  \
5061   RecordSynch _rs(#name);                                       \
5062   inner;                                                        \
5063   return implem args;                                           \
5064 }
5065 // in dbx, examine callcounts this way:
5066 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
5067 
5068 #define CHECK_POINTER_OK(p) \
5069   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
5070 #define CHECK_MU \
5071   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5072 #define CHECK_CV \
5073   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5074 #define CHECK_P(p) \
5075   if (!CHECK_POINTER_OK(p))  fatal("Pointer must be in C heap only.");
5076 
5077 #define CHECK_MUTEX(mutex_op) \
5078 CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5079 
5080 CHECK_MUTEX(   mutex_lock)
5081 CHECK_MUTEX(  _mutex_lock)
5082 CHECK_MUTEX( mutex_unlock)
5083 CHECK_MUTEX(_mutex_unlock)
5084 CHECK_MUTEX( mutex_trylock)
5085 CHECK_MUTEX(_mutex_trylock)
5086 
5087 #define CHECK_COND(cond_op) \
5088 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);
5089 
5090 CHECK_COND( cond_wait);
5091 CHECK_COND(_cond_wait);
5092 CHECK_COND(_cond_wait_cancel);
5093 
5094 #define CHECK_COND2(cond_op) \
5095 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);
5096 
5097 CHECK_COND2( cond_timedwait);
5098 CHECK_COND2(_cond_timedwait);
5099 CHECK_COND2(_cond_timedwait_cancel);
5100 
5101 // do the _lwp_* versions too
5102 #define mutex_t lwp_mutex_t
5103 #define cond_t  lwp_cond_t
5104 CHECK_MUTEX(  _lwp_mutex_lock)
5105 CHECK_MUTEX(  _lwp_mutex_unlock)
5106 CHECK_MUTEX(  _lwp_mutex_trylock)
5107 CHECK_MUTEX( __lwp_mutex_lock)
5108 CHECK_MUTEX( __lwp_mutex_unlock)
5109 CHECK_MUTEX( __lwp_mutex_trylock)
5110 CHECK_MUTEX(___lwp_mutex_lock)
5111 CHECK_MUTEX(___lwp_mutex_unlock)
5112 
5113 CHECK_COND(  _lwp_cond_wait);
5114 CHECK_COND( __lwp_cond_wait);
5115 CHECK_COND(___lwp_cond_wait);
5116 
5117 CHECK_COND2(  _lwp_cond_timedwait);
5118 CHECK_COND2( __lwp_cond_timedwait);
5119 #undef mutex_t
5120 #undef cond_t
5121 
5122 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5123 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5124 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5125 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5126 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5127 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5128 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5129 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5130 
5131 
5132 // recording machinery:
5133 
5134 enum { RECORD_SYNCH_LIMIT = 200 };
5135 char* record_synch_name[RECORD_SYNCH_LIMIT];
5136 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5137 bool record_synch_returning[RECORD_SYNCH_LIMIT];
5138 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5139 int record_synch_count = 0;
5140 bool record_synch_enabled = false;
5141 
5142 // in dbx, examine recorded data this way:
5143 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5144 
5145 void record_synch(char* name, bool returning) {
5146   if (record_synch_enabled) {
5147     if (record_synch_count < RECORD_SYNCH_LIMIT) {
5148       record_synch_name[record_synch_count] = name;
5149       record_synch_returning[record_synch_count] = returning;
5150       record_synch_thread[record_synch_count] = thr_self();
5151       record_synch_arg0ptr[record_synch_count] = &name;
5152       record_synch_count++;
5153     }
5154     // put more checking code here:
5155     // ...
5156   }
5157 }
5158 
5159 void record_synch_enable() {
5160   // start collecting trace data, if not already doing so
5161   if (!record_synch_enabled)  record_synch_count = 0;
5162   record_synch_enabled = true;
5163 }
5164 
5165 void record_synch_disable() {
5166   // stop collecting trace data
5167   record_synch_enabled = false;
5168 }
5169 
5170 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5171 #endif // PRODUCT
5172 
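     // Byte offset and length of the CPU-time slice of prusage_t (pr_utime up
     // to, but not including, pr_ttime); used below to pread() just those
     // fields from /proc/<pid>/lwp/<lwpid>/lwpusage.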
5173 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5174 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5175                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5176 
5177 
5178 // JVMTI & JVM monitoring and management support
5179 // The thread_cpu_time() and current_thread_cpu_time() are only
5180 // supported if is_thread_cpu_time_supported() returns true.
5181 // They are not supported on Solaris T1.
5182 
5183 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5184 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5185 // of a thread.
5186 //
5187 // current_thread_cpu_time() and thread_cpu_time(Thread *)
5188 // return the fast estimate available on the platform.
5189 
5190 // hrtime_t gethrvtime() return value includes
5191 // user time but does not include system time
5192 jlong os::current_thread_cpu_time() {
5193   return (jlong) gethrvtime();
5194 }
5195 
5196 jlong os::thread_cpu_time(Thread *thread) {
5197   // return user level CPU time only to be consistent with
5198   // what current_thread_cpu_time returns.
5199   // thread_cpu_time_info() must be changed if this changes
5200   return os::thread_cpu_time(thread, false /* user time only */);
5201 }
5202 
5203 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5204   if (user_sys_cpu_time) {
5205     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5206   } else {
5207     return os::current_thread_cpu_time();
5208   }
5209 }
5210 
5211 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5212   char proc_name[64];
5213   int count;
5214   prusage_t prusage;
5215   jlong lwp_time;
5216   int fd;
5217 
5218   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5219                      getpid(),
5220                      thread->osthread()->lwp_id());
5221   fd = ::open(proc_name, O_RDONLY);
5222   if (fd == -1) return -1;
5223 
5224   do {
5225     count = ::pread(fd,
5226                   (void *)&prusage.pr_utime,
5227                   thr_time_size,
5228                   thr_time_off);
5229   } while (count < 0 && errno == EINTR);
5230   ::close(fd);
5231   if (count < 0) return -1;
5232 
5233   if (user_sys_cpu_time) {
5234     // user + system CPU time
5235     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5236                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5237                  (jlong)prusage.pr_stime.tv_nsec +
5238                  (jlong)prusage.pr_utime.tv_nsec;
5239   } else {
5240     // user level CPU time only
5241     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5242                 (jlong)prusage.pr_utime.tv_nsec;
5243   }
5244 
5245   return (lwp_time);
5246 }
5247 
5248 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5249   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5250   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5251   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5252   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5253 }
5254 
5255 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5256   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5257   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5258   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5259   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5260 }
5261 
5262 bool os::is_thread_cpu_time_supported() {
5263   return true;
5264 }
5265 
5266 // System loadavg support.  Returns -1 if load average cannot be obtained.
5267 // Return the load average for our processor set if the primitive exists
5268 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
5269 int os::loadavg(double loadavg[], int nelem) {
5270   if (pset_getloadavg_ptr != NULL) {
5271     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5272   } else {
5273     return ::getloadavg(loadavg, nelem);
5274   }
5275 }
5276 
5277 //---------------------------------------------------------------------------------
5278 
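     // Best-effort lookup of addr via dladdr(): print the enclosing symbol (or
     // the offset within the containing library) and the library name; in
     // Verbose mode also disassemble a few instructions around addr.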
5279 bool os::find(address addr, outputStream* st) {
5280   Dl_info dlinfo;
5281   memset(&dlinfo, 0, sizeof(dlinfo));
5282   if (dladdr(addr, &dlinfo) != 0) {
5283     st->print(PTR_FORMAT ": ", addr);
5284     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5285       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5286     } else if (dlinfo.dli_fbase != NULL)
5287       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5288     else
5289       st->print("<absolute address>");
5290     if (dlinfo.dli_fname != NULL) {
5291       st->print(" in %s", dlinfo.dli_fname);
5292     }
5293     if (dlinfo.dli_fbase != NULL) {
5294       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5295     }
5296     st->cr();
5297 
5298     if (Verbose) {
5299       // decode some bytes around the PC
5300       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5301       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5302       address       lowest = (address) dlinfo.dli_sname;
5303       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
5304       if (begin < lowest)  begin = lowest;
5305       Dl_info dlinfo2;
5306       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5307           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
5308         end = (address) dlinfo2.dli_saddr;
5309       Disassembler::decode(begin, end, st);
5310     }
5311     return true;
5312   }
5313   return false;
5314 }
5315 
5316 // The following function has been added to support HotSparc's libjvm.so running
5317 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
5318 // src/solaris/hpi/native_threads in the EVM codebase.
5319 //
5320 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5321 // libraries and should thus be removed. We will leave it behind for a while
5322 // until we no longer want to be able to run on top of 1.3.0 Solaris production
5323 // JDK. See 4341971.
5324 
5325 #define STACK_SLACK 0x800
5326 
5327 extern "C" {
5328   intptr_t sysThreadAvailableStackWithSlack() {
5329     stack_t st;
5330     intptr_t retval, stack_top;
5331     retval = thr_stksegment(&st);
5332     assert(retval == 0, "incorrect return value from thr_stksegment");
5333     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5334     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
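         // The stack grows down from ss_sp, so its lowest valid address is
         // ss_sp - ss_size.  The address of a local variable approximates the
         // current stack pointer; the difference, minus STACK_SLACK, is the
         // space still available.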
5335     stack_top = (intptr_t)st.ss_sp - st.ss_size;
5336     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5337   }
5338 }
5339 
5340 // ObjectMonitor park-unpark infrastructure ...
5341 //
5342 // We implement Solaris and Linux PlatformEvents with the
5343 // obvious condvar-mutex-flag triple.
5344 // Another alternative that works quite well is pipes:
5345 // Each PlatformEvent consists of a pipe-pair.
5346 // The thread associated with the PlatformEvent
5347 // calls park(), which reads from the input end of the pipe.
5348 // Unpark() writes into the other end of the pipe.
5349 // The write-side of the pipe must be set NDELAY.
5350 // Unfortunately pipes consume a large # of handles.
5351 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5352 // Using pipes for the 1st few threads might be workable, however.
5353 //
5354 // park() is permitted to return spuriously.
5355 // Callers of park() should wrap the call to park() in
5356 // an appropriate loop.  A litmus test for the correct
5357 // usage of park is the following: if park() were modified
5358 // to immediately return 0 your code should still work,
5359 // albeit degenerating to a spin loop.
5360 //
5361 // An interesting optimization for park() is to use a trylock()
5362 // to attempt to acquire the mutex.  If the trylock() fails
5363 // then we know that a concurrent unpark() operation is in-progress.
5364 // in that case the park() code could simply set _count to 0
5365 // and return immediately.  The subsequent park() operation *might*
5366 // return immediately.  That's harmless as the caller of park() is
5367 // expected to loop.  By using trylock() we will have avoided a
5368 // context switch caused by contention on the per-thread mutex.
5369 //
5370 // TODO-FIXME:
5371 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
5372 //     objectmonitor implementation.
5373 // 2.  Collapse the JSR166 parker event, and the
5374 //     objectmonitor ParkEvent into a single "Event" construct.
5375 // 3.  In park() and unpark() add:
5376 //     assert (Thread::current() == AssociatedWith).
5377 // 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
5378 //     1-out-of-N park() operations will return immediately.
5379 //
5380 // _Event transitions in park()
5381 //   -1 => -1 : illegal
5382 //    1 =>  0 : pass - return immediately
5383 //    0 => -1 : block
5384 //
5385 // _Event serves as a restricted-range semaphore.
5386 //
5387 // Another possible encoding of _Event would be with
5388 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5389 //
5390 // TODO-FIXME: add DTRACE probes for:
5391 // 1.   Tx parks
5392 // 2.   Ty unparks Tx
5393 // 3.   Tx resumes from park
5394 
5395 
5396 // value determined through experimentation
5397 #define ROUNDINGFIX 11
5398 
5399 // utility to compute the abstime argument to timedwait.
5400 // TODO-FIXME: switch from compute_abstime() to unpackTime().
5401 
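     // Worked example (ignoring the LWP rounding fix below): millis == 2500
     // with now == {tv_sec = 1000, tv_usec = 600000} gives seconds == 2 and a
     // microsecond sum of 1,100,000, which carries into the seconds field:
     // abstime == {tv_sec = 1003, tv_nsec = 100,000,000}.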
5402 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5403   // millis is the relative timeout time
5404   // abstime will be the absolute timeout time
5405   if (millis < 0)  millis = 0;
5406   struct timeval now;
5407   int status = gettimeofday(&now, NULL);
5408   assert(status == 0, "gettimeofday");
5409   jlong seconds = millis / 1000;
5410   jlong max_wait_period;
5411 
5412   if (UseLWPSynchronization) {
5413     // forward port of fix for 4275818 (not sleeping long enough)
5414     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5415     // _lwp_cond_timedwait() used a round_down algorithm rather
5416     // than a round_up. For millis less than our roundfactor
5417     // it rounded down to 0 which doesn't meet the spec.
5418     // For millis > roundfactor we may return a bit sooner, but
5419     // since we can not accurately identify the patch level and
5420     // this has already been fixed in Solaris 9 and 8 we will
5421     // leave it alone rather than always rounding down.
5422 
5423     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
5424     // It appears that when we go directly through Solaris _lwp_cond_timedwait()
5425     // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
5426     max_wait_period = 21000000;
5427   } else {
5428     max_wait_period = 50000000;
5429   }
5430   millis %= 1000;
5431   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5432      seconds = max_wait_period;
5433   }
5434   abstime->tv_sec = now.tv_sec  + seconds;
5435   long       usec = now.tv_usec + millis * 1000;
5436   if (usec >= 1000000) {
5437     abstime->tv_sec += 1;
5438     usec -= 1000000;
5439   }
5440   abstime->tv_nsec = usec * 1000;
5441   return abstime;
5442 }
5443 
5444 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
5445 // Conceptually TryPark() should be equivalent to park(0).
5446 
5447 int os::PlatformEvent::TryPark() {
5448   for (;;) {
5449     const int v = _Event;
5450     guarantee((v == 0) || (v == 1), "invariant");
5451     if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
5452   }
5453 }
5454 
5455 void os::PlatformEvent::park() {           // AKA: down()
5456   // Invariant: Only the thread associated with the Event/PlatformEvent
5457   // may call park().
5458   int v;
5459   for (;;) {
5460       v = _Event;
5461       if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5462   }
5463   guarantee(v >= 0, "invariant");
5464   if (v == 0) {
5465      // Do this the hard way by blocking ...
5466      // See http://monaco.sfbay/detail.jsf?cr=5094058.
5467      // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5468      // Only for SPARC >= V8PlusA
5469 #if defined(__sparc) && defined(COMPILER2)
5470      if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5471 #endif
5472      int status = os::Solaris::mutex_lock(_mutex);
5473      assert_status(status == 0, status, "mutex_lock");
5474      guarantee(_nParked == 0, "invariant");
5475      ++_nParked;
5476      while (_Event < 0) {
5477         // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5478         // Treat this the same as if the wait was interrupted
5479         // With usr/lib/lwp going to kernel, always handle ETIME
5480         status = os::Solaris::cond_wait(_cond, _mutex);
5481         if (status == ETIME) status = EINTR;
5482         assert_status(status == 0 || status == EINTR, status, "cond_wait");
5483      }
5484      --_nParked;
5485      _Event = 0;
5486      status = os::Solaris::mutex_unlock(_mutex);
5487      assert_status(status == 0, status, "mutex_unlock");
5488     // Paranoia to ensure our locked and lock-free paths interact
5489     // correctly with each other.
5490     OrderAccess::fence();
5491   }
5492 }
5493 
5494 int os::PlatformEvent::park(jlong millis) {
5495   guarantee(_nParked == 0, "invariant");
5496   int v;
5497   for (;;) {
5498       v = _Event;
5499       if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5500   }
5501   guarantee(v >= 0, "invariant");
5502   if (v != 0) return OS_OK;
5503 
5504   int ret = OS_TIMEOUT;
5505   timestruc_t abst;
5506   compute_abstime(&abst, millis);
5507 
5508   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5509   // For Solaris SPARC set fprs.FEF=0 prior to parking.
5510   // Only for SPARC >= V8PlusA
5511 #if defined(__sparc) && defined(COMPILER2)
5512  if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5513 #endif
5514   int status = os::Solaris::mutex_lock(_mutex);
5515   assert_status(status == 0, status, "mutex_lock");
5516   guarantee(_nParked == 0, "invariant");
5517   ++_nParked;
5518   while (_Event < 0) {
5519      int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5520      assert_status(status == 0 || status == EINTR ||
5521                    status == ETIME || status == ETIMEDOUT,
5522                    status, "cond_timedwait");
5523      if (!FilterSpuriousWakeups) break;                // previous semantics
5524      if (status == ETIME || status == ETIMEDOUT) break;
5525      // We consume and ignore EINTR and spurious wakeups.
5526   }
5527   --_nParked;
5528   if (_Event >= 0) ret = OS_OK;
5529   _Event = 0;
5530   status = os::Solaris::mutex_unlock(_mutex);
5531   assert_status(status == 0, status, "mutex_unlock");
5532   // Paranoia to ensure our locked and lock-free paths interact
5533   // correctly with each other.
5534   OrderAccess::fence();
5535   return ret;
5536 }
5537 
5538 void os::PlatformEvent::unpark() {
5539   // Transitions for _Event:
5540   //    0 :=> 1
5541   //    1 :=> 1
5542   //   -1 :=> either 0 or 1; must signal target thread
5543   //          That is, we can safely transition _Event from -1 to either
5544   //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
5545   //          unpark() calls.
5546   // See also: "Semaphores in Plan 9" by Mullender & Cox
5547   //
5548   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5549   // that it will take two back-to-back park() calls for the owning
5550   // thread to block. This has the benefit of forcing a spurious return
5551   // from the first park() call after an unpark() call which will help
5552   // shake out uses of park() and unpark() without condition variables.
5553 
5554   if (Atomic::xchg(1, &_Event) >= 0) return;
5555 
5556   // If the thread associated with the event was parked, wake it.
5557   // Wait for the thread assoc with the PlatformEvent to vacate.
5558   int status = os::Solaris::mutex_lock(_mutex);
5559   assert_status(status == 0, status, "mutex_lock");
5560   int AnyWaiters = _nParked;
5561   status = os::Solaris::mutex_unlock(_mutex);
5562   assert_status(status == 0, status, "mutex_unlock");
5563   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5564   if (AnyWaiters != 0) {
5565     // We intentionally signal *after* dropping the lock
5566     // to avoid a common class of futile wakeups.
5567     status = os::Solaris::cond_signal(_cond);
5568     assert_status(status == 0, status, "cond_signal");
5569   }
5570 }
5571 
5572 // JSR166
5573 // -------------------------------------------------------
5574 
5575 /*
5576  * The solaris and linux implementations of park/unpark are fairly
5577  * conservative for now, but can be improved. They currently use a
5578  * mutex/condvar pair, plus _counter.
5579  * Park decrements _counter if > 0, else does a condvar wait.  Unpark
5580  * sets count to 1 and signals condvar.  Only one thread ever waits
5581  * on the condvar. Contention seen when trying to park implies that someone
5582  * is unparking you, so don't wait. And spurious returns are fine, so there
5583  * is no need to track notifications.
5584  */
5585 
5586 #define MAX_SECS 100000000
5587 /*
5588  * This code is common to linux and solaris and will be moved to a
5589  * common place in dolphin.
5590  *
5591  * The passed in time value is either a relative time in nanoseconds
5592  * or an absolute time in milliseconds. Either way it has to be unpacked
5593  * into suitable seconds and nanoseconds components and stored in the
5594  * given timespec structure.
5595  * Given time is a 64-bit value and the time_t used in the timespec is only
5596  * a signed-32-bit value (except on 64-bit Linux) we have to watch for
5597  * overflow if times way in the future are given. Further on Solaris versions
5598  * prior to 10 there is a restriction (see cond_timedwait) that the specified
5599  * number of seconds, in abstime, is less than current_time + 100,000,000.
5600  * As it will be 28 years before "now + 100000000" will overflow we can
5601  * ignore overflow and just impose a hard-limit on seconds using the value
5602  * of "now + 100,000,000". This places a limit on the timeout of about 3.17
5603  * years from "now".
5604  */
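     // Worked example (relative time): time == 1,500,000,000 ns with
     // now.tv_usec == 800000 gives secs == 1 and an initial tv_nsec of
     // 500,000,000 + 800,000,000 == 1,300,000,000, which carries:
     // absTime == {now.tv_sec + 2, 300,000,000}.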
5605 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5606   assert(time > 0, "convertTime");
5607 
5608   struct timeval now;
5609   int status = gettimeofday(&now, NULL);
5610   assert(status == 0, "gettimeofday");
5611 
5612   time_t max_secs = now.tv_sec + MAX_SECS;
5613 
5614   if (isAbsolute) {
5615     jlong secs = time / 1000;
5616     if (secs > max_secs) {
5617       absTime->tv_sec = max_secs;
5618     }
5619     else {
5620       absTime->tv_sec = secs;
5621     }
5622     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5623   }
5624   else {
5625     jlong secs = time / NANOSECS_PER_SEC;
5626     if (secs >= MAX_SECS) {
5627       absTime->tv_sec = max_secs;
5628       absTime->tv_nsec = 0;
5629     }
5630     else {
5631       absTime->tv_sec = now.tv_sec + secs;
5632       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5633       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5634         absTime->tv_nsec -= NANOSECS_PER_SEC;
5635         ++absTime->tv_sec; // note: this must be <= max_secs
5636       }
5637     }
5638   }
5639   assert(absTime->tv_sec >= 0, "tv_sec < 0");
5640   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5641   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5642   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5643 }
5644 
5645 void Parker::park(bool isAbsolute, jlong time) {
5646   // Ideally we'd do something useful while spinning, such
5647   // as calling unpackTime().
5648 
5649   // Optional fast-path check:
5650   // Return immediately if a permit is available.
5651   // We depend on Atomic::xchg() having full barrier semantics
5652   // since we are doing a lock-free update to _counter.
5653   if (Atomic::xchg(0, &_counter) > 0) return;
5654 
5655   // Optional fast-exit: Check interrupt before trying to wait
5656   Thread* thread = Thread::current();
5657   assert(thread->is_Java_thread(), "Must be JavaThread");
5658   JavaThread *jt = (JavaThread *)thread;
5659   if (Thread::is_interrupted(thread, false)) {
5660     return;
5661   }
5662 
5663   // First, demultiplex/decode time arguments
5664   timespec absTime;
5665   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
5666     return;
5667   }
5668   if (time > 0) {
5669     // Warning: this code might be exposed to the old Solaris time
5670     // round-down bugs.  Grep "ROUNDINGFIX" for details.
5671     unpackTime(&absTime, isAbsolute, time);
5672   }
5673 
5674   // Enter safepoint region
5675   // Beware of deadlocks such as 6317397.
5676   // The per-thread Parker:: _mutex is a classic leaf-lock.
5677   // In particular a thread must never block on the Threads_lock while
5678   // holding the Parker:: mutex.  If safepoints are pending, both
5679   // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
5680   ThreadBlockInVM tbivm(jt);
5681 
5682   // Don't wait if we cannot get the lock, since interference arises from
5683   // unblocking.  Also, check for interrupt before trying to wait.
5684   if (Thread::is_interrupted(thread, false) ||
5685       os::Solaris::mutex_trylock(_mutex) != 0) {
5686     return;
5687   }
5688 
5689   int status;
5690 
5691   if (_counter > 0)  { // no wait needed
5692     _counter = 0;
5693     status = os::Solaris::mutex_unlock(_mutex);
5694     assert(status == 0, "invariant");
5695     // Paranoia to ensure our locked and lock-free paths interact
5696     // correctly with each other and Java-level accesses.
5697     OrderAccess::fence();
5698     return;
5699   }
5700 
5701 #ifdef ASSERT
5702   // Don't catch signals while blocked; let the running threads have the signals.
5703   // (This allows a debugger to break into the running thread.)
5704   sigset_t oldsigs;
5705   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
5706   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
5707 #endif
5708 
5709   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5710   jt->set_suspend_equivalent();
5711   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5712 
5713   // Do this the hard way by blocking ...
5714   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5715   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5716   // Only for SPARC >= V8PlusA
5717 #if defined(__sparc) && defined(COMPILER2)
5718   if (ClearFPUAtPark) { _mark_fpu_nosave(); }
5719 #endif
5720 
5721   if (time == 0) {
5722     status = os::Solaris::cond_wait(_cond, _mutex);
5723   } else {
5724     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
5725   }
5726   // Note that an untimed cond_wait() can sometimes return ETIME on older
5727   // versions of Solaris.
5728   assert_status(status == 0 || status == EINTR ||
5729                 status == ETIME || status == ETIMEDOUT,
5730                 status, "cond_timedwait");
5731 
5732 #ifdef ASSERT
5733   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
5734 #endif
5735   _counter = 0;
5736   status = os::Solaris::mutex_unlock(_mutex);
5737   assert_status(status == 0, status, "mutex_unlock");
5738   // Paranoia to ensure our locked and lock-free paths interact
5739   // correctly with each other and Java-level accesses.
5740   OrderAccess::fence();
5741 
5742   // If externally suspended while waiting, re-suspend
5743   if (jt->handle_special_suspend_equivalent_condition()) {
5744     jt->java_suspend_self();
5745   }
5746 }
5747 
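     // Grant the permit (set _counter to 1) and, if the owning thread had not
     // already been unparked (_counter was less than 1 while the lock was held),
     // signal the condvar.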
5748 void Parker::unpark() {
5749   int s, status;
5750   status = os::Solaris::mutex_lock(_mutex);
5751   assert(status == 0, "invariant");
5752   s = _counter;
5753   _counter = 1;
5754   status = os::Solaris::mutex_unlock(_mutex);
5755   assert(status == 0, "invariant");
5756 
5757   if (s < 1) {
5758     status = os::Solaris::cond_signal(_cond);
5759     assert(status == 0, "invariant");
5760   }
5761 }
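
// Illustrative sketch (not part of this file's logic): the Parker above backs
// java.util.concurrent.locks.LockSupport.  _counter acts as a one-shot permit,
// so an unpark() issued before park() makes the later park() return at once.
// The accessor name below is an assumption used for illustration only:
//
//   Parker* p = jt->parker();   // hypothetical accessor on the JavaThread
//   p->unpark();                // set the permit (_counter = 1)
//   p->park(false, 0);          // consume the permit; returns without blocking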
5762 
5763 extern char** environ;
5764 
5765 // Run the specified command in a separate process. Return its exit value,
5766 // or -1 on failure (e.g. can't fork a new process).
5767 // Unlike system(), this function can be called from a signal handler. It
5768 // doesn't block SIGINT et al.
5769 int os::fork_and_exec(char* cmd) {
5770   char * argv[4];
5771   argv[0] = (char *)"sh";
5772   argv[1] = (char *)"-c";
5773   argv[2] = cmd;
5774   argv[3] = NULL;
5775 
5776   // fork() is async-signal-safe; fork1() is not, so it can't be used in a signal handler
5777   pid_t pid;
5778   Thread* t = ThreadLocalStorage::get_thread_slow();
5779   if (t != NULL && t->is_inside_signal_handler()) {
5780     pid = fork();
5781   } else {
5782     pid = fork1();
5783   }
5784 
5785   if (pid < 0) {
5786     // fork failed
5787     warning("fork failed: %s", strerror(errno));
5788     return -1;
5789 
5790   } else if (pid == 0) {
5791     // child process
5792 
5793     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
5794     execve("/usr/bin/sh", argv, environ);
5795 
5796     // execve failed
5797     _exit(-1);
5798 
5799   } else  {
5800     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
5801     // care about the actual exit code, for now.
5802 
5803     int status;
5804 
5805     // Wait for the child process to exit.  This returns immediately if
5806     // the child has already exited.
5807     while (waitpid(pid, &status, 0) < 0) {
5808         switch (errno) {
5809         case ECHILD: return 0;
5810         case EINTR: break;
5811         default: return -1;
5812         }
5813     }
5814 
5815     if (WIFEXITED(status)) {
5816        // The child exited normally; get its exit code.
5817        return WEXITSTATUS(status);
5818     } else if (WIFSIGNALED(status)) {
5819        // The child exited because of a signal
5820        // The best value to return is 0x80 + signal number,
5821        // because that is what all Unix shells do, and because
5822        // it allows callers to distinguish between process exit and
5823        // process death by signal.
5824        return 0x80 + WTERMSIG(status);
5825     } else {
5826        // Unknown exit code; pass it through
5827        return status;
5828     }
5829   }
5830 }
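
// Illustrative usage sketch (the command string and caller are hypothetical):
// a diagnostic path can run an external command without the locking and
// allocation hazards of system().  Note the exit-status convention above: a
// child killed by SIGKILL (signal 9) is reported as 0x80 + 9 = 137, matching
// what sh reports in $?.
//
//   char cmd[] = "echo crash diagnostics";   // illustrative command only
//   int rc = os::fork_and_exec(cmd);
//   if (rc == -1) {
//     warning("could not start diagnostic command");
//   }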
5831 
5832 // is_headless_jre()
5833 //
5834 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
5835 // in order to report whether we are running in a headless JRE.
5836 //
5837 // Since JDK 8, xawt/libmawt.so has been moved into the same directory
5838 // as libawt.so and renamed libawt_xawt.so.
5839 //
5840 bool os::is_headless_jre() {
5841     struct stat statbuf;
5842     char buf[MAXPATHLEN];
5843     char libmawtpath[MAXPATHLEN];
5844     const char *xawtstr  = "/xawt/libmawt.so";
5845     const char *new_xawtstr = "/libawt_xawt.so";
5846     char *p;
5847 
5848     // Get path to libjvm.so
5849     os::jvm_path(buf, sizeof(buf));
5850 
5851     // Get rid of libjvm.so
5852     p = strrchr(buf, '/');
5853     if (p == NULL) return false;
5854     else *p = '\0';
5855 
5856     // Get rid of client or server
5857     p = strrchr(buf, '/');
5858     if (p == NULL) return false;
5859     else *p = '\0';
5860 
5861     // check xawt/libmawt.so
5862     strcpy(libmawtpath, buf);
5863     strcat(libmawtpath, xawtstr);
5864     if (::stat(libmawtpath, &statbuf) == 0) return false;
5865 
5866     // check libawt_xawt.so
5867     strcpy(libmawtpath, buf);
5868     strcat(libmawtpath, new_xawtstr);
5869     if (::stat(libmawtpath, &statbuf) == 0) return false;
5870 
5871     return true;
5872 }
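
// Worked example for the path handling above (the path itself is
// illustrative): if os::jvm_path() yields
//   /usr/jdk/jre/lib/sparc/server/libjvm.so
// the two strrchr('/') truncations leave
//   /usr/jdk/jre/lib/sparc
// and the stat() probes then check
//   /usr/jdk/jre/lib/sparc/xawt/libmawt.so   (pre-JDK 8 location)
//   /usr/jdk/jre/lib/sparc/libawt_xawt.so    (JDK 8+ location)
// Headless is reported only if neither library exists.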
5873 
5874 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
5875   size_t res;
5876   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5877           "Assumed _thread_in_native");
5878   RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
5879   return res;
5880 }
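
// For reference, RESTARTABLE retries a system call that failed with EINTR.
// A minimal sketch of the pattern it expands to for the write() above (the
// actual macro definition lives elsewhere and may differ in detail):
//
//   do {
//     res = (size_t) ::write(fd, buf, (size_t) nBytes);
//   } while ((int) res == OS_ERR && errno == EINTR);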
5881 
5882 int os::close(int fd) {
5883   return ::close(fd);
5884 }
5885 
5886 int os::socket_close(int fd) {
5887   return ::close(fd);
5888 }
5889 
5890 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5891   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5892           "Assumed _thread_in_native");
5893   RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
5894 }
5895 
5896 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5897   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5898           "Assumed _thread_in_native");
5899   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5900 }
5901 
5902 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5903   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5904 }
5905 
5906 // As both poll and select can be interrupted by signals, we have to be
5907 // prepared to restart the system call after updating the timeout, unless
5908 // a poll() is done with timeout == -1, in which case we repeat with this
5909 // "wait forever" value.
5910 
5911 int os::timeout(int fd, long timeout) {
5912   int res;
5913   struct timeval t;
5914   julong prevtime, newtime;
5915   static const char* aNull = 0;
5916   struct pollfd pfd;
5917   pfd.fd = fd;
5918   pfd.events = POLLIN;
5919 
5920   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5921           "Assumed _thread_in_native");
5922 
5923   gettimeofday(&t, &aNull);
5924   prevtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;
5925 
5926   for (;;) {
5927     res = ::poll(&pfd, 1, timeout);
5928     if (res == OS_ERR && errno == EINTR) {
5929         if (timeout != -1) {
5930           gettimeofday(&t, &aNull);
5931           newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec /1000;
5932           timeout -= newtime - prevtime;
5933           if (timeout <= 0)
5934             return OS_OK;
5935           prevtime = newtime;
5936         }
5937     } else return res;
5938   }
5939 }
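
// Worked example of the timeout bookkeeping above (values are illustrative):
// with timeout = 1000 ms, a signal arriving after ~300 ms makes poll() fail
// with EINTR; newtime - prevtime is then ~300, so the loop retries with the
// remaining ~700 ms.  Once the accumulated elapsed time reaches the original
// timeout, the remaining value drops to <= 0 and OS_OK is returned.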
5940 
5941 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
5942   int _result;
5943   _result = ::connect(fd, him, len);
5944 
5945   // On Solaris, when a connect() call is interrupted, the connection
5946   // can be established asynchronously (see 6343810). Subsequent calls
5947   // to connect() must check the errno value which has the semantic
5948   // described below (copied from the connect() man page). Handling
5949   // of asynchronously established connections is required for both
5950   // blocking and non-blocking sockets.
5951   //     EINTR            The  connection  attempt  was   interrupted
5952   //                      before  any data arrived by the delivery of
5953   //                      a signal. The connection, however, will  be
5954   //                      established asynchronously.
5955   //
5956   //     EINPROGRESS      The socket is non-blocking, and the connec-
5957   //                      tion  cannot  be completed immediately.
5958   //
5959   //     EALREADY         The socket is non-blocking,  and a previous
5960   //                      connection  attempt  has  not yet been com-
5961   //                      pleted.
5962   //
5963   //     EISCONN          The socket is already connected.
5964   if (_result == OS_ERR && errno == EINTR) {
5965     /* restarting a connect() changes its errno semantics */
5966     RESTARTABLE(::connect(fd, him, len), _result);
5967     /* undo these changes */
5968     if (_result == OS_ERR) {
5969       if (errno == EALREADY) {
5970         errno = EINPROGRESS; /* fall through */
5971       } else if (errno == EISCONN) {
5972         errno = 0;
5973         return OS_OK;
5974       }
5975     }
5976   }
5977   return _result;
5978 }
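
// Caller-visible effect of the EINTR handling above (illustrative): if the
// first connect() is interrupted, an EALREADY from the restarted call is
// rewritten to EINPROGRESS, so a caller can fall through to its usual
// poll()/select() wait for writability, exactly as for an ordinary
// in-progress connect; an EISCONN from the restart means the interrupted
// attempt actually completed, and OS_OK is returned with errno cleared.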
5979 
5980 int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
5981   if (fd < 0) {
5982     return OS_ERR;
5983   }
5984   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5985           "Assumed _thread_in_native");
5986   RESTARTABLE_RETURN_INT((int)::accept(fd, him, len));
5987 }
5988 
5989 int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
5990                  sockaddr* from, socklen_t* fromlen) {
5991   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5992           "Assumed _thread_in_native");
5993   RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
5994 }
5995 
5996 int os::sendto(int fd, char* buf, size_t len, uint flags,
5997                struct sockaddr* to, socklen_t tolen) {
5998   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5999           "Assumed _thread_in_native");
6000   RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
6001 }
6002 
6003 int os::socket_available(int fd, jint *pbytes) {
6004   if (fd < 0) {
6005     return OS_OK;
6006   }
6007   int ret;
6008   RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
6009   // Note: ioctl() can return 0 when successful; JVM_SocketAvailable
6010   // is expected to return 0 on failure and 1 on success to the JDK.
6011   return (ret == OS_ERR) ? 0 : 1;
6012 }
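
// Example of the mapping above: when ioctl() succeeds, *pbytes holds the
// FIONREAD byte count and 1 is returned; when ioctl() fails, 0 is returned,
// which JVM_SocketAvailable treats as failure.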
6013 
6014 int os::bind(int fd, struct sockaddr* him, socklen_t len) {
6015   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
6016           "Assumed _thread_in_native");
6017   return ::bind(fd, him, len);
6018 }
6019 
6020 // Get the default path to the core file
6021 // Returns the length of the string
6022 int os::get_core_path(char* buffer, size_t bufferSize) {
6023   const char* p = get_current_directory(buffer, bufferSize);
6024 
6025   if (p == NULL) {
6026     assert(p != NULL, "failed to get current directory");
6027     return 0;
6028   }
6029 
6030   return strlen(buffer);
6031 }
6032 
6033 #ifndef PRODUCT
6034 void TestReserveMemorySpecial_test() {
6035   // No tests available for this platform
6036 }
6037 #endif