1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "jvm_solaris.h"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/filemap.hpp"
  37 #include "mutex_solaris.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "os_share_solaris.hpp"
  40 #include "prims/jniFastGetField.hpp"
  41 #include "prims/jvm.h"
  42 #include "prims/jvm_misc.hpp"
  43 #include "runtime/arguments.hpp"
  44 #include "runtime/extendedPC.hpp"
  45 #include "runtime/globals.hpp"
  46 #include "runtime/interfaceSupport.hpp"
  47 #include "runtime/java.hpp"
  48 #include "runtime/javaCalls.hpp"
  49 #include "runtime/mutexLocker.hpp"
  50 #include "runtime/objectMonitor.hpp"
  51 #include "runtime/osThread.hpp"
  52 #include "runtime/perfMemory.hpp"
  53 #include "runtime/sharedRuntime.hpp"
  54 #include "runtime/statSampler.hpp"
  55 #include "runtime/stubRoutines.hpp"
  56 #include "runtime/thread.inline.hpp"
  57 #include "runtime/threadCritical.hpp"
  58 #include "runtime/timer.hpp"
  59 #include "services/attachListener.hpp"
  60 #include "services/memTracker.hpp"
  61 #include "services/runtimeService.hpp"
  62 #include "utilities/decoder.hpp"
  63 #include "utilities/defaultStream.hpp"
  64 #include "utilities/events.hpp"
  65 #include "utilities/growableArray.hpp"
  66 #include "utilities/vmError.hpp"
  67 
  68 // put OS-includes here
  69 # include <dlfcn.h>
  70 # include <errno.h>
  71 # include <exception>
  72 # include <link.h>
  73 # include <poll.h>
  74 # include <pthread.h>
  75 # include <pwd.h>
  76 # include <schedctl.h>
  77 # include <setjmp.h>
  78 # include <signal.h>
  79 # include <stdio.h>
  80 # include <alloca.h>
  81 # include <sys/filio.h>
  82 # include <sys/ipc.h>
  83 # include <sys/lwp.h>
  84 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
  85 # include <sys/mman.h>
  86 # include <sys/processor.h>
  87 # include <sys/procset.h>
  88 # include <sys/pset.h>
  89 # include <sys/resource.h>
  90 # include <sys/shm.h>
  91 # include <sys/socket.h>
  92 # include <sys/stat.h>
  93 # include <sys/systeminfo.h>
  94 # include <sys/time.h>
  95 # include <sys/times.h>
  96 # include <sys/types.h>
  97 # include <sys/wait.h>
  98 # include <sys/utsname.h>
  99 # include <thread.h>
 100 # include <unistd.h>
 101 # include <sys/priocntl.h>
 102 # include <sys/rtpriocntl.h>
 103 # include <sys/tspriocntl.h>
 104 # include <sys/iapriocntl.h>
 105 # include <sys/fxpriocntl.h>
 106 # include <sys/loadavg.h>
 107 # include <string.h>
 108 # include <stdio.h>
 109 
 110 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
 111 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
 112 
 113 #define MAX_PATH (2 * K)
 114 
 115 // for timer info max values which include all bits
 116 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 117 
 118 
 119 // Here are some liblgrp types from sys/lgrp_user.h to be able to
 120 // compile on older systems without this header file.
 121 
 122 #ifndef MADV_ACCESS_LWP
 123 # define  MADV_ACCESS_LWP         7       /* next LWP to access heavily */
 124 #endif
 125 #ifndef MADV_ACCESS_MANY
 126 # define  MADV_ACCESS_MANY        8       /* many processes to access heavily */
 127 #endif
 128 
 129 #ifndef LGRP_RSRC_CPU
 130 # define LGRP_RSRC_CPU           0       /* CPU resources */
 131 #endif
 132 #ifndef LGRP_RSRC_MEM
 133 # define LGRP_RSRC_MEM           1       /* memory resources */
 134 #endif
 135 
 136 // see thr_setprio(3T) for the basis of these numbers
 137 #define MinimumPriority 0
 138 #define NormalPriority  64
 139 #define MaximumPriority 127
 140 
 141 // Values for ThreadPriorityPolicy == 1
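     // Index 0 is unused (Java priorities start at 1); entries 1-10 map the ten
     // Java priorities onto the thr_setprio(3T) range defined above, and the
     // final entry covers CriticalPriority.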
 142 int prio_policy1[CriticalPriority+1] = {
 143   -99999,  0, 16,  32,  48,  64,
 144           80, 96, 112, 124, 127, 127 };
 145 
 146 // System parameters used internally
 147 static clock_t clock_tics_per_sec = 100;
 148 
 149 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
 150 static bool enabled_extended_FILE_stdio = false;
 151 
 152 // Flags used by diagnostics that should print a message only once.  See run_periodic_checks.
 153 static bool check_addr0_done = false;
 154 static sigset_t check_signal_done;
 155 static bool check_signals = true;
 156 
 157 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
 158 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
 159 
 160 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
 161 
 162 
 163 // "default" initializers for missing libc APIs
 164 extern "C" {
 165   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 166   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
 167 
 168   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 169   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
 170 }
 171 
 172 // "default" initializers for pthread-based synchronization
 173 extern "C" {
 174   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 175   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 176 }
 177 
 178 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 179 
 180 // Thread Local Storage
 181 // This is common to all Solaris platforms so it is defined here,
 182 // in this common file.
 183 // The declarations are in the os_cpu threadLS*.hpp files.
 184 //
 185 // Static member initialization for TLS
 186 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
 187 
 188 #ifndef PRODUCT
 189 #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
 190 
 191 int ThreadLocalStorage::_tcacheHit = 0;
 192 int ThreadLocalStorage::_tcacheMiss = 0;
 193 
 194 void ThreadLocalStorage::print_statistics() {
 195   int total = _tcacheMiss+_tcacheHit;
 196   tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
 197                 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
 198 }
 199 #undef _PCT
 200 #endif // PRODUCT
 201 
 202 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
 203                                                         int index) {
 204   Thread *thread = get_thread_slow();
 205   if (thread != NULL) {
 206     address sp = os::current_stack_pointer();
 207     guarantee(thread->_stack_base == NULL ||
 208               (sp <= thread->_stack_base &&
 209                  sp >= thread->_stack_base - thread->_stack_size) ||
 210                is_error_reported(),
 211               "sp must be inside of selected thread stack");
 212 
 213     thread->set_self_raw_id(raw_id);  // mark for quick retrieval
 214     _get_thread_cache[ index ] = thread;
 215   }
 216   return thread;
 217 }
 218 
 219 
 220 static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
 221 #define NO_CACHED_THREAD ((Thread*)all_zero)
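     // NO_CACHED_THREAD is a non-NULL sentinel backed by zero-filled storage at
     // least as large as a Thread, so a read through a stale cache slot sees
     // zeroed memory rather than a real (possibly destroyed) Thread.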
 222 
 223 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
 224 
 225   // Store the new value before updating the cache to prevent a race
 226   // between get_thread_via_cache_slowly() and this store operation.
 227   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
 228 
 229   // Update thread cache with new thread if setting on thread create,
 230   // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
 231   uintptr_t raw = pd_raw_thread_id();
 232   int ix = pd_cache_index(raw);
 233   _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
 234 }
 235 
 236 void ThreadLocalStorage::pd_init() {
 237   for (int i = 0; i < _pd_cache_size; i++) {
 238     _get_thread_cache[i] = NO_CACHED_THREAD;
 239   }
 240 }
 241 
 242 // Invalidate all the caches (happens to be the same as pd_init).
 243 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
 244 
 245 #undef NO_CACHED_THREAD
 246 
 247 // END Thread Local Storage
 248 
 249 static inline size_t adjust_stack_size(address base, size_t size) {
 250   if ((ssize_t)size < 0) {
 251     // 4759953: Compensate for ridiculous stack size.
 252     size = max_intx;
 253   }
 254   if (size > (size_t)base) {
 255     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
 256     size = (size_t)base;
 257   }
 258   return size;
 259 }
 260 
 261 static inline stack_t get_stack_info() {
 262   stack_t st;
 263   int retval = thr_stksegment(&st);
 264   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
 265   assert(retval == 0, "incorrect return value from thr_stksegment");
 266   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
 267   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
 268   return st;
 269 }
 270 
 271 address os::current_stack_base() {
 272   int r = thr_main() ;
 273   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
 274   bool is_primordial_thread = r;
 275 
 276   // Workaround 4352906, avoid calls to thr_stksegment by
 277   // thr_main after the first one (it looks like we trash
 278   // some data, causing the value for ss_sp to be incorrect).
 279   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
 280     stack_t st = get_stack_info();
 281     if (is_primordial_thread) {
 282       // cache initial value of stack base
 283       os::Solaris::_main_stack_base = (address)st.ss_sp;
 284     }
 285     return (address)st.ss_sp;
 286   } else {
 287     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
 288     return os::Solaris::_main_stack_base;
 289   }
 290 }
 291 
 292 size_t os::current_stack_size() {
 293   size_t size;
 294 
 295   int r = thr_main() ;
 296   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
 297   if (!r) {
 298     size = get_stack_info().ss_size;
 299   } else {
 300     struct rlimit limits;
 301     getrlimit(RLIMIT_STACK, &limits);
 302     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
 303   }
 304   // base may not be page aligned
 305   address base = current_stack_base();
 306   address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
 307   return (size_t)(base - bottom);
 308 }
 309 
 310 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
 311   return localtime_r(clock, res);
 312 }
 313 
 314 void os::Solaris::try_enable_extended_io() {
 315   typedef int (*enable_extended_FILE_stdio_t)(int, int);
 316 
 317   if (!UseExtendedFileIO) {
 318     return;
 319   }
 320 
 321   enable_extended_FILE_stdio_t enabler =
 322     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
 323                                          "enable_extended_FILE_stdio");
 324   if (enabler) {
 325     enabler(-1, -1);
 326   }
 327 }
 328 
 329 static int _processors_online = 0;
 330 
 331          jint os::Solaris::_os_thread_limit = 0;
 332 volatile jint os::Solaris::_os_thread_count = 0;
 333 
 334 julong os::available_memory() {
 335   return Solaris::available_memory();
 336 }
 337 
 338 julong os::Solaris::available_memory() {
 339   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
 340 }
 341 
 342 julong os::Solaris::_physical_memory = 0;
 343 
 344 julong os::physical_memory() {
 345    return Solaris::physical_memory();
 346 }
 347 
 348 static hrtime_t first_hrtime = 0;
 349 static const hrtime_t hrtime_hz = 1000*1000*1000;
 350 const int LOCK_BUSY = 1;
 351 const int LOCK_FREE = 0;
 352 const int LOCK_INVALID = -1;
 353 static volatile hrtime_t max_hrtime = 0;
 354 static volatile int max_hrtime_lock = LOCK_FREE;     // Update counter with LSB as lock-in-progress
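     // max_hrtime records the largest gethrtime() value observed so far;
     // getTimeNanos() below advances it with a CAS so that the time it returns
     // never appears to move backwards.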
 355 
 356 
 357 void os::Solaris::initialize_system_info() {
 358   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
 359   _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
 360   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
 361 }
 362 
 363 int os::active_processor_count() {
 364   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 365   pid_t pid = getpid();
 366   psetid_t pset = PS_NONE;
 367   // Are we running in a processor set or is there any processor set around?
 368   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
 369     uint_t pset_cpus;
 370     // Query the number of cpus available to us.
 371     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
 372       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
 373       _processors_online = pset_cpus;
 374       return pset_cpus;
 375     }
 376   }
 377   // Otherwise return number of online cpus
 378   return online_cpus;
 379 }
 380 
 381 static bool find_processors_in_pset(psetid_t        pset,
 382                                     processorid_t** id_array,
 383                                     uint_t*         id_length) {
 384   bool result = false;
 385   // Find the number of processors in the processor set.
 386   if (pset_info(pset, NULL, id_length, NULL) == 0) {
 387     // Make up an array to hold their ids.
 388     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 389     // Fill in the array with their processor ids.
 390     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
 391       result = true;
 392     }
 393   }
 394   return result;
 395 }
 396 
 397 // Callers of find_processors_online() must tolerate imprecise results --
 398 // the system configuration can change asynchronously because of DR
 399 // or explicit psradm operations.
 400 //
 401 // We also need to take care that the loop (below) terminates as the
 402 // number of processors online can change between the _SC_NPROCESSORS_ONLN
 403 // request and the loop that builds the list of processor ids.   Unfortunately
 404 // there's no reliable way to determine the maximum valid processor id,
 405 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
 406 // man pages, which claim the processor id set is "sparse, but
 407 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
 408 // exit the loop.
 409 //
 410 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
 411 // not available on S8.0.
 412 
 413 static bool find_processors_online(processorid_t** id_array,
 414                                    uint*           id_length) {
 415   const processorid_t MAX_PROCESSOR_ID = 100000 ;
 416   // Find the number of processors online.
 417   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
 418   // Make up an array to hold their ids.
 419   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 420   // Processors need not be numbered consecutively.
 421   long found = 0;
 422   processorid_t next = 0;
 423   while (found < *id_length && next < MAX_PROCESSOR_ID) {
 424     processor_info_t info;
 425     if (processor_info(next, &info) == 0) {
 426       // NB, PI_NOINTR processors are effectively online ...
 427       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
 428         (*id_array)[found] = next;
 429         found += 1;
 430       }
 431     }
 432     next += 1;
 433   }
 434   if (found < *id_length) {
 435       // The loop above didn't identify the expected number of processors.
 436       // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
 437       // and re-running the loop, above, but there's no guarantee of progress
 438       // if the system configuration is in flux.  Instead, we just return what
 439       // we've got.  Note that in the worst case find_processors_online() could
 440       // return an empty set.  (As a fall-back in the case of the empty set we
 441       // could just return the ID of the current processor).
 442       *id_length = found ;
 443   }
 444 
 445   return true;
 446 }
 447 
 448 static bool assign_distribution(processorid_t* id_array,
 449                                 uint           id_length,
 450                                 uint*          distribution,
 451                                 uint           distribution_length) {
 452   // We assume we can assign processorid_t's to uint's.
 453   assert(sizeof(processorid_t) == sizeof(uint),
 454          "can't convert processorid_t to uint");
 455   // Quick check to see if we won't succeed.
 456   if (id_length < distribution_length) {
 457     return false;
 458   }
 459   // Assign processor ids to the distribution.
 460   // Try to shuffle processors to distribute work across boards,
 461   // assuming 4 processors per board.
 462   const uint processors_per_board = ProcessDistributionStride;
 463   // Find the maximum processor id.
 464   processorid_t max_id = 0;
 465   for (uint m = 0; m < id_length; m += 1) {
 466     max_id = MAX2(max_id, id_array[m]);
 467   }
 468   // The next id, to limit loops.
 469   const processorid_t limit_id = max_id + 1;
 470   // Make up markers for available processors.
 471   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
 472   for (uint c = 0; c < limit_id; c += 1) {
 473     available_id[c] = false;
 474   }
 475   for (uint a = 0; a < id_length; a += 1) {
 476     available_id[id_array[a]] = true;
 477   }
 478   // Step by "boards", then by "slot", copying to "assigned".
 479   // NEEDS_CLEANUP: The assignment of processors should be stateful,
 480   //                remembering which processors have been assigned by
 481   //                previous calls, etc., so as to distribute several
 482   //                independent calls of this method.  It would be nice
 483   //                to have an API that let us ask how many processes
 484   //                are bound to a processor, but we don't have that
 485   //                either.
 486   //                In the short term, "board" is static so that
 487   //                subsequent distributions don't all start at board 0.
 488   static uint board = 0;
 489   uint assigned = 0;
 490   // Until we've found enough processors ....
 491   while (assigned < distribution_length) {
 492     // ... find the next available processor in the board.
 493     for (uint slot = 0; slot < processors_per_board; slot += 1) {
 494       uint try_id = board * processors_per_board + slot;
 495       if ((try_id < limit_id) && (available_id[try_id] == true)) {
 496         distribution[assigned] = try_id;
 497         available_id[try_id] = false;
 498         assigned += 1;
 499         break;
 500       }
 501     }
 502     board += 1;
 503     if (board * processors_per_board + 0 >= limit_id) {
 504       board = 0;
 505     }
 506   }
 507   if (available_id != NULL) {
 508     FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
 509   }
 510   return true;
 511 }
 512 
 513 void os::set_native_thread_name(const char *name) {
 514   // Not yet implemented.
 515   return;
 516 }
 517 
 518 bool os::distribute_processes(uint length, uint* distribution) {
 519   bool result = false;
 520   // Find the processor id's of all the available CPUs.
 521   processorid_t* id_array  = NULL;
 522   uint           id_length = 0;
 523   // There are some races between querying information and using it,
 524   // since processor sets can change dynamically.
 525   psetid_t pset = PS_NONE;
 526   // Are we running in a processor set?
 527   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
 528     result = find_processors_in_pset(pset, &id_array, &id_length);
 529   } else {
 530     result = find_processors_online(&id_array, &id_length);
 531   }
 532   if (result == true) {
 533     if (id_length >= length) {
 534       result = assign_distribution(id_array, id_length, distribution, length);
 535     } else {
 536       result = false;
 537     }
 538   }
 539   if (id_array != NULL) {
 540     FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
 541   }
 542   return result;
 543 }
 544 
 545 bool os::bind_to_processor(uint processor_id) {
 546   // We assume that a processorid_t can be stored in a uint.
 547   assert(sizeof(uint) == sizeof(processorid_t),
 548          "can't convert uint to processorid_t");
 549   int bind_result =
 550     processor_bind(P_LWPID,                       // bind LWP.
 551                    P_MYID,                        // bind current LWP.
 552                    (processorid_t) processor_id,  // id.
 553                    NULL);                         // don't return old binding.
 554   return (bind_result == 0);
 555 }
 556 
 557 bool os::getenv(const char* name, char* buffer, int len) {
 558   char* val = ::getenv( name );
 559   if ( val == NULL
 560   ||   strlen(val) + 1  >  len ) {
 561     if (len > 0)  buffer[0] = 0; // return a null string
 562     return false;
 563   }
 564   strcpy( buffer, val );
 565   return true;
 566 }
 567 
 568 
 569 // Return true if the process is running with special (setuid/setgid) privileges.
 570 
 571 bool os::have_special_privileges() {
 572   static bool init = false;
 573   static bool privileges = false;
 574   if (!init) {
 575     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 576     init = true;
 577   }
 578   return privileges;
 579 }
 580 
 581 
 582 void os::init_system_properties_values() {
 583   // The next steps are taken in the product version:
 584   //
 585   // Obtain the JAVA_HOME value from the location of libjvm.so.
 586   // This library should be located at:
 587   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
 588   //
 589   // If "/jre/lib/" appears at the right place in the path, then we
 590   // assume libjvm.so is installed in a JDK and we use this path.
 591   //
 592   // Otherwise exit with message: "Could not create the Java virtual machine."
 593   //
 594   // The following extra steps are taken in the debugging version:
 595   //
 596   // If "/jre/lib/" does NOT appear at the right place in the path,
 597   // check for the $JAVA_HOME environment variable instead of exiting.
 598   //
 599   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
 600   // then we append a fake suffix "hotspot/libjvm.so" to this path so that
 601   // it looks as if libjvm.so were installed at
 602   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
 603   //
 604   // Otherwise exit.
 605   //
 606   // Important note: if the location of libjvm.so changes this
 607   // code needs to be changed accordingly.
 608 
 609 // Base path of extensions installed on the system.
 610 #define SYS_EXT_DIR     "/usr/jdk/packages"
 611 #define EXTENSIONS_DIR  "/lib/ext"
 612 #define ENDORSED_DIR    "/lib/endorsed"
 613 
 614   char cpu_arch[12];
 615   // Buffer that fits several sprintfs.
 616   // Note that the space for the colon and the trailing null is provided
 617   // by the terminating nulls counted by the sizeof operator.
 618   const size_t bufsize =
 619     MAX4((size_t)MAXPATHLEN,  // For dll_dir & friends.
 620          sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch), // invariant ld_library_path
 621          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
 622          (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
 623   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 624 
 625   // sysclasspath, java_home, dll_dir
 626   {
 627     char *pslash;
 628     os::jvm_path(buf, bufsize);
 629 
 630     // Found the full path to libjvm.so.
 631     // Now cut the path to <java_home>/jre if we can.
 632     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 633     pslash = strrchr(buf, '/');
 634     if (pslash != NULL) {
 635       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 636     }
 637     Arguments::set_dll_dir(buf);
 638 
 639     if (pslash != NULL) {
 640       pslash = strrchr(buf, '/');
 641       if (pslash != NULL) {
 642         *pslash = '\0';          // Get rid of /<arch>.
 643         pslash = strrchr(buf, '/');
 644         if (pslash != NULL) {
 645           *pslash = '\0';        // Get rid of /lib.
 646         }
 647       }
 648     }
 649     Arguments::set_java_home(buf);
 650     set_boot_path('/', ':');
 651   }
 652 
 653   // Where to look for native libraries.
 654   {
 655     // Use dlinfo() to determine the correct java.library.path.
 656     //
 657     // If we're launched by the Java launcher, and the user
 658     // does not set java.library.path explicitly on the commandline,
 659     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
 660     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
 661     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
 662     // /usr/lib), which is exactly what we want.
 663     //
 664     // If the user does set java.library.path, it completely
 665     // overwrites this setting, and always has.
 666     //
 667     // If we're not launched by the Java launcher, we may
 668     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
 669     // settings.  Again, dlinfo does exactly what we want.
 670 
 671     Dl_serinfo     info_sz, *info = &info_sz;
 672     Dl_serpath     *path;
 673     char           *library_path;
 674     char           *common_path = buf;
 675 
 676     // Determine search path count and required buffer size.
 677     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
 678       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
 679       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
 680     }
 681 
 682     // Allocate new buffer and initialize.
 683     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
 684     info->dls_size = info_sz.dls_size;
 685     info->dls_cnt = info_sz.dls_cnt;
 686 
 687     // Obtain search path information.
 688     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
 689       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
 690       FREE_C_HEAP_ARRAY(char, info, mtInternal);
 691       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
 692     }
 693 
 694     path = &info->dls_serpath[0];
 695 
 696     // Note: Due to a legacy implementation, most of the library path
 697     // is set in the launcher. This was to accommodate linking restrictions
 698     // on legacy Solaris implementations (which are no longer supported).
 699     // Eventually, all the library path setting will be done here.
 700     //
 701     // However, to prevent the proliferation of improperly built native
 702     // libraries, the new path component /usr/jdk/packages is added here.
 703 
 704     // Determine the actual CPU architecture.
 705     sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
 706 #ifdef _LP64
 707     // If we are a 64-bit vm, perform the following translations:
 708     //   sparc   -> sparcv9
 709     //   i386    -> amd64
 710     if (strcmp(cpu_arch, "sparc") == 0) {
 711       strcat(cpu_arch, "v9");
 712     } else if (strcmp(cpu_arch, "i386") == 0) {
 713       strcpy(cpu_arch, "amd64");
 714     }
 715 #endif
 716 
 717     // Construct the invariant part of ld_library_path.
 718     sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
 719 
 720     // Struct size is more than sufficient for the path components obtained
 721     // through the dlinfo() call, so only add additional space for the path
 722     // components explicitly added here.
 723     size_t library_path_size = info->dls_size + strlen(common_path);
 724     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
 725     library_path[0] = '\0';
 726 
 727     // Construct the desired Java library path from the linker's library
 728     // search path.
 729     //
 730     // For compatibility, it is optimal that we insert the additional path
 731     // components specific to the Java VM after those components specified
 732     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
 733     // infrastructure.
 734     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
 735       strcpy(library_path, common_path);
 736     } else {
 737       int inserted = 0;
 738       int i;
 739       for (i = 0; i < info->dls_cnt; i++, path++) {
 740         uint_t flags = path->dls_flags & LA_SER_MASK;
 741         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
 742           strcat(library_path, common_path);
 743           strcat(library_path, os::path_separator());
 744           inserted = 1;
 745         }
 746         strcat(library_path, path->dls_name);
 747         strcat(library_path, os::path_separator());
 748       }
 749       // Eliminate trailing path separator.
 750       library_path[strlen(library_path)-1] = '\0';
 751     }
 752 
 753     // happens before argument parsing - can't use a trace flag
 754     // tty->print_raw("init_system_properties_values: native lib path: ");
 755     // tty->print_raw_cr(library_path);
 756 
 757     // Callee copies into its own buffer.
 758     Arguments::set_library_path(library_path);
 759 
 760     FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
 761     FREE_C_HEAP_ARRAY(char, info, mtInternal);
 762   }
 763 
 764   // Extensions directories.
 765   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
 766   Arguments::set_ext_dirs(buf);
 767 
 768   // Endorsed standards default directory.
 769   sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
 770   Arguments::set_endorsed_dirs(buf);
 771 
 772   FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 773 
 774 #undef SYS_EXT_DIR
 775 #undef EXTENSIONS_DIR
 776 #undef ENDORSED_DIR
 777 }
 778 
 779 void os::breakpoint() {
 780   BREAKPOINT;
 781 }
 782 
 783 bool os::obsolete_option(const JavaVMOption *option)
 784 {
 785   if (!strncmp(option->optionString, "-Xt", 3)) {
 786     return true;
 787   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
 788     return true;
 789   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
 790     return true;
 791   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
 792     return true;
 793   }
 794   return false;
 795 }
 796 
 797 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
 798   address  stackStart  = (address)thread->stack_base();
 799   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
 800   if (sp < stackStart && sp >= stackEnd ) return true;
 801   return false;
 802 }
 803 
 804 extern "C" void breakpoint() {
 805   // use debugger to set breakpoint here
 806 }
 807 
 808 static thread_t main_thread;
 809 
 810 // Thread start routine for all new Java threads
 811 extern "C" void* java_start(void* thread_addr) {
 812   // Try to randomize the cache line index of hot stack frames.
 813   // This helps when threads with the same stack traces evict each other's
 814   // cache lines. The threads can be from the same JVM instance or from
 815   // different JVM instances. The benefit is especially noticeable on
 816   // processors with hyperthreading technology.
 817   static int counter = 0;
 818   int pid = os::current_process_id();
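       // (pid ^ counter++) & 7 picks one of eight 128-byte offsets, so hot frames
       // land at different cache line indexes per process and per created thread.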
 819   alloca(((pid ^ counter++) & 7) * 128);
 820 
 821   int prio;
 822   Thread* thread = (Thread*)thread_addr;
 823   OSThread* osthr = thread->osthread();
 824 
 825   osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
 826   thread->_schedctl = (void *) schedctl_init () ;
 827 
 828   if (UseNUMA) {
 829     int lgrp_id = os::numa_get_group_id();
 830     if (lgrp_id != -1) {
 831       thread->set_lgrp_id(lgrp_id);
 832     }
 833   }
 834 
 835   // If the creator called set priority before we started,
 836   // we need to call set_native_priority now that we have an lwp.
 837   // We used to get the priority from thr_getprio (we called
 838   // thr_setprio way back in create_thread) and pass it to
 839   // set_native_priority, but Solaris scales the priority
 840   // in java_to_os_priority, so when we read it back here,
 841   // we pass trash to set_native_priority instead of what's
 842   // in java_to_os_priority. So we save the native priority
 843   // in the osThread and recall it here.
 844 
 845   if ( osthr->thread_id() != -1 ) {
 846     if ( UseThreadPriorities ) {
 847       int prio = osthr->native_priority();
 848       if (ThreadPriorityVerbose) {
 849         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
 850                       INTPTR_FORMAT ", setting priority: %d\n",
 851                       osthr->thread_id(), osthr->lwp_id(), prio);
 852       }
 853       os::set_native_priority(thread, prio);
 854     }
 855   } else if (ThreadPriorityVerbose) {
 856     warning("Can't set priority in _start routine, thread id hasn't been set\n");
 857   }
 858 
 859   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 860 
 861   // initialize signal mask for this thread
 862   os::Solaris::hotspot_sigmask(thread);
 863 
 864   thread->run();
 865 
 866   // One less thread is executing
 867   // When the VMThread gets here, the main thread may have already exited
 868   // which frees the CodeHeap containing the Atomic::dec code
 869   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 870     Atomic::dec(&os::Solaris::_os_thread_count);
 871   }
 872 
 873   if (UseDetachedThreads) {
 874     thr_exit(NULL);
 875     ShouldNotReachHere();
 876   }
 877   return NULL;
 878 }
 879 
 880 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
 881   // Allocate the OSThread object
 882   OSThread* osthread = new OSThread(NULL, NULL);
 883   if (osthread == NULL) return NULL;
 884 
 885   // Store info on the Solaris thread into the OSThread
 886   osthread->set_thread_id(thread_id);
 887   osthread->set_lwp_id(_lwp_self());
 888   thread->_schedctl = (void *) schedctl_init () ;
 889 
 890   if (UseNUMA) {
 891     int lgrp_id = os::numa_get_group_id();
 892     if (lgrp_id != -1) {
 893       thread->set_lgrp_id(lgrp_id);
 894     }
 895   }
 896 
 897   if ( ThreadPriorityVerbose ) {
 898     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
 899                   osthread->thread_id(), osthread->lwp_id() );
 900   }
 901 
 902   // Initial thread state is INITIALIZED, not SUSPENDED
 903   osthread->set_state(INITIALIZED);
 904 
 905   return osthread;
 906 }
 907 
 908 void os::Solaris::hotspot_sigmask(Thread* thread) {
 909 
 910   // Save caller's signal mask
 911   sigset_t sigmask;
 912   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
 913   OSThread *osthread = thread->osthread();
 914   osthread->set_caller_sigmask(sigmask);
 915 
 916   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
 917   if (!ReduceSignalUsage) {
 918     if (thread->is_VM_thread()) {
 919       // Only the VM thread handles BREAK_SIGNAL ...
 920       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
 921     } else {
 922       // ... all other threads block BREAK_SIGNAL
 923       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
 924       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
 925     }
 926   }
 927 }
 928 
 929 bool os::create_attached_thread(JavaThread* thread) {
 930 #ifdef ASSERT
 931   thread->verify_not_published();
 932 #endif
 933   OSThread* osthread = create_os_thread(thread, thr_self());
 934   if (osthread == NULL) {
 935      return false;
 936   }
 937 
 938   // Initial thread state is RUNNABLE
 939   osthread->set_state(RUNNABLE);
 940   thread->set_osthread(osthread);
 941 
 942   // initialize signal mask for this thread
 943   // and save the caller's signal mask
 944   os::Solaris::hotspot_sigmask(thread);
 945 
 946   return true;
 947 }
 948 
 949 bool os::create_main_thread(JavaThread* thread) {
 950 #ifdef ASSERT
 951   thread->verify_not_published();
 952 #endif
 953   if (_starting_thread == NULL) {
 954     _starting_thread = create_os_thread(thread, main_thread);
 955      if (_starting_thread == NULL) {
 956         return false;
 957      }
 958   }
 959 
 960   // The primordial thread is runnable from the start
 961   _starting_thread->set_state(RUNNABLE);
 962 
 963   thread->set_osthread(_starting_thread);
 964 
 965   // initialize signal mask for this thread
 966   // and save the caller's signal mask
 967   os::Solaris::hotspot_sigmask(thread);
 968 
 969   return true;
 970 }
 971 
 972 
 973 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 974   // Allocate the OSThread object
 975   OSThread* osthread = new OSThread(NULL, NULL);
 976   if (osthread == NULL) {
 977     return false;
 978   }
 979 
 980   if ( ThreadPriorityVerbose ) {
 981     char *thrtyp;
 982     switch ( thr_type ) {
 983       case vm_thread:
 984         thrtyp = (char *)"vm";
 985         break;
 986       case cgc_thread:
 987         thrtyp = (char *)"cgc";
 988         break;
 989       case pgc_thread:
 990         thrtyp = (char *)"pgc";
 991         break;
 992       case java_thread:
 993         thrtyp = (char *)"java";
 994         break;
 995       case compiler_thread:
 996         thrtyp = (char *)"compiler";
 997         break;
 998       case watcher_thread:
 999         thrtyp = (char *)"watcher";
1000         break;
1001       default:
1002         thrtyp = (char *)"unknown";
1003         break;
1004     }
1005     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
1006   }
1007 
1008   // Calculate stack size if it's not specified by caller.
1009   if (stack_size == 0) {
1010     // The default stack size is 1M (2M for LP64).
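         // (BytesPerWord >> 2) evaluates to 1 on a 32-bit VM and 2 on a 64-bit VM,
         // so the expression below yields 1M or 2M respectively.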
1011     stack_size = (BytesPerWord >> 2) * K * K;
1012 
1013     switch (thr_type) {
1014     case os::java_thread:
1015       // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
1016       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
1017       break;
1018     case os::compiler_thread:
1019       if (CompilerThreadStackSize > 0) {
1020         stack_size = (size_t)(CompilerThreadStackSize * K);
1021         break;
1022       } // else fall through:
1023         // use VMThreadStackSize if CompilerThreadStackSize is not defined
1024     case os::vm_thread:
1025     case os::pgc_thread:
1026     case os::cgc_thread:
1027     case os::watcher_thread:
1028       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1029       break;
1030     }
1031   }
1032   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1033 
1034   // Initial state is ALLOCATED but not INITIALIZED
1035   osthread->set_state(ALLOCATED);
1036 
1037   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
1038     // We got lots of threads. Check if we still have some address space left.
1039     // There must be enough unreserved address space left (VirtualMemoryBangSize);
1040     // we check by trying to reserve that much and then releasing it again.
1041     const size_t VirtualMemoryBangSize = 20*K*K;
1042     char* mem = os::reserve_memory(VirtualMemoryBangSize);
1043     if (mem == NULL) {
1044       delete osthread;
1045       return false;
1046     } else {
1047       // Release the memory again
1048       os::release_memory(mem, VirtualMemoryBangSize);
1049     }
1050   }
1051 
1052   // Setup osthread because the child thread may need it.
1053   thread->set_osthread(osthread);
1054 
1055   // Create the Solaris thread
1056   thread_t tid = 0;
1057   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
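       // THR_SUSPENDED: the new thread starts suspended; it is released later by
       // os::pd_start_thread() (via thr_continue) once the caller finishes setting it up.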
1058   int      status;
1059 
1060   // Mark that we don't have an lwp or thread id yet, in case we attempt
1061   // to set the priority before the thread starts.
1062   osthread->set_lwp_id(-1);
1063   osthread->set_thread_id(-1);
1064 
1065   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1066   if (status != 0) {
1067     if (PrintMiscellaneous && (Verbose || WizardMode)) {
1068       perror("os::create_thread");
1069     }
1070     thread->set_osthread(NULL);
1071     // Need to clean up stuff we've allocated so far
1072     delete osthread;
1073     return false;
1074   }
1075 
1076   Atomic::inc(&os::Solaris::_os_thread_count);
1077 
1078   // Store info on the Solaris thread into the OSThread
1079   osthread->set_thread_id(tid);
1080 
1081   // Remember that we created this thread so we can set priority on it
1082   osthread->set_vm_created();
1083 
1084   // Initial thread state is INITIALIZED, not SUSPENDED
1085   osthread->set_state(INITIALIZED);
1086 
1087   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1088   return true;
1089 }
1090 
1091 /* SIGJVM1 and SIGJVM2 are defined on Solaris 10 and later. Defining them here
1092  *  allows builds on earlier versions of Solaris to take advantage of the newly
1093  *  reserved Solaris JVM signals: INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is
1094  *  SIGJVM2, and -XX:+UseAltSigs does nothing since these should have no conflict.
1095  */
1096 #if !defined(SIGJVM1)
1097 #define SIGJVM1 39
1098 #define SIGJVM2 40
1099 #endif
1100 
1101 debug_only(static bool signal_sets_initialized = false);
1102 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1103 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1104 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1105 
1106 bool os::Solaris::is_sig_ignored(int sig) {
1107   struct sigaction oact;
1108   sigaction(sig, (struct sigaction*)NULL, &oact);
1109   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
1110                                  : CAST_FROM_FN_PTR(void*, oact.sa_handler);
1111   return ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN);
1115 }
1116 
1117 // Note: SIGRTMIN is a macro that calls sysconf(), so it detects the system's
1118 // SIGRTMIN value dynamically at run time, not at build time.
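     // SIGJVM1/SIGJVM2 can only be used when they lie below SIGRTMIN; otherwise
     // the numbers defined above would collide with the real-time signal range
     // and signal_sets_init() falls back to the older interrupt/async signals.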
1119 static bool isJVM1available() {
1120   return SIGJVM1 < SIGRTMIN;
1121 }
1122 
1123 void os::Solaris::signal_sets_init() {
1124   // Should also have an assertion stating we are still single-threaded.
1125   assert(!signal_sets_initialized, "Already initialized");
1126   // Fill in signals that are necessarily unblocked for all threads in
1127   // the VM. Currently, we unblock the following signals:
1128   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
1129   //                         by -Xrs (=ReduceSignalUsage));
1130   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1131   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1132   // the dispositions or masks wrt these signals.
1133   // Programs embedding the VM that want to use the above signals for their
1134   // own purposes must, at this time, use the "-Xrs" option to prevent
1135   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1136   // (See bug 4345157, and other related bugs).
1137   // In reality, though, unblocking these signals is really a nop, since
1138   // these signals are not blocked by default.
1139   sigemptyset(&unblocked_sigs);
1140   sigemptyset(&allowdebug_blocked_sigs);
1141   sigaddset(&unblocked_sigs, SIGILL);
1142   sigaddset(&unblocked_sigs, SIGSEGV);
1143   sigaddset(&unblocked_sigs, SIGBUS);
1144   sigaddset(&unblocked_sigs, SIGFPE);
1145 
1146   if (isJVM1available()) {
1147     os::Solaris::set_SIGinterrupt(SIGJVM1);
1148     os::Solaris::set_SIGasync(SIGJVM2);
1149   } else if (UseAltSigs) {
1150     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1151     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1152   } else {
1153     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1154     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1155   }
1156 
1157   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1158   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1159 
1160   if (!ReduceSignalUsage) {
1161    if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1162       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1163       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1164    }
1165    if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1166       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1167       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1168    }
1169    if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1170       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1171       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1172    }
1173   }
1174   // Fill in signals that are blocked by all but the VM thread.
1175   sigemptyset(&vm_sigs);
1176   if (!ReduceSignalUsage)
1177     sigaddset(&vm_sigs, BREAK_SIGNAL);
1178   debug_only(signal_sets_initialized = true);
1179 
1180   // For diagnostics only used in run_periodic_checks
1181   sigemptyset(&check_signal_done);
1182 }
1183 
1184 // These are signals that are unblocked while a thread is running Java.
1185 // (For some reason, they get blocked by default.)
1186 sigset_t* os::Solaris::unblocked_signals() {
1187   assert(signal_sets_initialized, "Not initialized");
1188   return &unblocked_sigs;
1189 }
1190 
1191 // These are the signals that are blocked while a (non-VM) thread is
1192 // running Java. Only the VM thread handles these signals.
1193 sigset_t* os::Solaris::vm_signals() {
1194   assert(signal_sets_initialized, "Not initialized");
1195   return &vm_sigs;
1196 }
1197 
1198 // These are signals that are blocked during cond_wait to allow debugger in
1199 sigset_t* os::Solaris::allowdebug_blocked_signals() {
1200   assert(signal_sets_initialized, "Not initialized");
1201   return &allowdebug_blocked_sigs;
1202 }
1203 
1204 
1205 void _handle_uncaught_cxx_exception() {
1206   VMError err("An uncaught C++ exception");
1207   err.report_and_die();
1208 }
1209 
1210 
1211 // First crack at OS-specific initialization, from inside the new thread.
1212 void os::initialize_thread(Thread* thr) {
1213   int r = thr_main() ;
1214   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
1215   if (r) {
1216     JavaThread* jt = (JavaThread *)thr;
1217     assert(jt != NULL,"Sanity check");
1218     size_t stack_size;
1219     address base = jt->stack_base();
1220     if (Arguments::created_by_java_launcher()) {
1221       // Use 2MB to allow for Solaris 7 64 bit mode.
1222       stack_size = JavaThread::stack_size_at_create() == 0
1223         ? 2048*K : JavaThread::stack_size_at_create();
1224 
1225       // There are rare cases when we may have already used more than
1226       // the basic stack size allotment before this method is invoked.
1227       // Attempt to allow for a normally sized java_stack.
1228       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1229       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1230     } else {
1231       // 6269555: If we were not created by a Java launcher, i.e. if we are
1232       // running embedded in a native application, treat the primordial thread
1233       // as much like a native attached thread as possible.  This means using
1234       // the current stack size from thr_stksegment(), unless it is too large
1235       // to reliably setup guard pages.  A reasonable max size is 8MB.
1236       size_t current_size = current_stack_size();
1237       // This should never happen, but just in case....
1238       if (current_size == 0) current_size = 2 * K * K;
1239       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1240     }
1241     address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
1242     stack_size = (size_t)(base - bottom);
1243 
1244     assert(stack_size > 0, "Stack size calculation problem");
1245 
1246     if (stack_size > jt->stack_size()) {
1247       NOT_PRODUCT(
1248         struct rlimit limits;
1249         getrlimit(RLIMIT_STACK, &limits);
1250         size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1251         assert(size >= jt->stack_size(), "Stack size problem in main thread");
1252       )
1253       tty->print_cr(
1254         "Stack size of %d Kb exceeds current limit of %d Kb.\n"
1255         "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1256         "See limit(1) to increase the stack size limit.",
1257         stack_size / K, jt->stack_size() / K);
1258       vm_exit(1);
1259     }
1260     assert(jt->stack_size() >= stack_size,
1261           "Attempt to map more stack than was allocated");
1262     jt->set_stack_size(stack_size);
1263   }
1264 
1265   // With the T2 libthread (T1 is no longer supported), threads are always
1266   // bound, and we use stack banging in all cases.
1267 
1268   os::Solaris::init_thread_fpu_state();
1269   std::set_terminate(_handle_uncaught_cxx_exception);
1270 }
1271 
1272 
1273 
1274 // Free Solaris resources related to the OSThread
1275 void os::free_thread(OSThread* osthread) {
1276   assert(osthread != NULL, "os::free_thread but osthread not set");
1277 
1278 
1279   // We are told to free resources of the argument thread,
1280   // but we can only really operate on the current thread.
1281   // The main thread must take the VMThread down synchronously
1282   // before the main thread exits and frees up CodeHeap
1283   guarantee((Thread::current()->osthread() == osthread
1284      || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1285   if (Thread::current()->osthread() == osthread) {
1286     // Restore caller's signal mask
1287     sigset_t sigmask = osthread->caller_sigmask();
1288     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1289   }
1290   delete osthread;
1291 }
1292 
1293 void os::pd_start_thread(Thread* thread) {
1294   int status = thr_continue(thread->osthread()->thread_id());
1295   assert_status(status == 0, status, "thr_continue failed");
1296 }
1297 
1298 
1299 intx os::current_thread_id() {
1300   return (intx)thr_self();
1301 }
1302 
1303 static pid_t _initial_pid = 0;
1304 
1305 int os::current_process_id() {
1306   return (int)(_initial_pid ? _initial_pid : getpid());
1307 }
1308 
1309 int os::allocate_thread_local_storage() {
1310   // %%%       in Win32 this allocates a memory segment pointed to by a
1311   //           register.  Dan Stein can implement a similar feature in
1312   //           Solaris.  Alternatively, the VM can do the same thing
1313   //           explicitly: malloc some storage and keep the pointer in a
1314   //           register (which is part of the thread's context) (or keep it
1315   //           in TLS).
1316   // %%%       In current versions of Solaris, thr_self and TSD can
1317   //           be accessed via short sequences of displaced indirections.
1318   //           The value of thr_self is available as %g7(36).
1319   //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
1320   //           assuming that the current thread already has a value bound to k.
1321   //           It may be worth experimenting with such access patterns,
1322   //           and later having the parameters formally exported from a Solaris
1323   //           interface.  I think, however, that it will be faster to
1324   //           maintain the invariant that %g2 always contains the
1325   //           JavaThread in Java code, and have stubs simply
1326   //           treat %g2 as a caller-save register, preserving it in a %lN.
1327   thread_key_t tk;
1328   if (thr_keycreate( &tk, NULL ) )
1329     fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
1330                   "(%s)", strerror(errno)));
1331   return int(tk);
1332 }
1333 
1334 void os::free_thread_local_storage(int index) {
1335   // %%% don't think we need anything here
1336   // if ( pthread_key_delete((pthread_key_t) tk) )
1337   //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
1338 }
1339 
1340 #define SMALLINT 32   // libthread's allocation for tsd_common is a version-specific
1341                       // small number - the point is that NO swap space is available
1342 void os::thread_local_storage_at_put(int index, void* value) {
1343   // %%% this is used only in threadLocalStorage.cpp
1344   if (thr_setspecific((thread_key_t)index, value)) {
1345     if (errno == ENOMEM) {
1346        vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
1347                              "thr_setspecific: out of swap space");
1348     } else {
1349       fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1350                     "(%s)", strerror(errno)));
1351     }
1352   } else {
1353       ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
1354   }
1355 }
1356 
1357 // This function could be called before TLS is initialized, for example, when
1358 // VM receives an async signal or when VM causes a fatal error during
1359 // initialization. Return NULL if thr_getspecific() fails.
1360 void* os::thread_local_storage_at(int index) {
1361   // %%% this is used only in threadLocalStorage.cpp
1362   void* r = NULL;
1363   return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1364 }
1365 
1366 
1367 // gethrtime() can move backwards if read on one cpu and then on a different cpu.
1368 // getTimeNanos() is guaranteed not to move backwards on Solaris.
1369 // AssumeMonotonicOSTimers can be used to remove this guarantee.
1370 inline hrtime_t getTimeNanos() {
1371   const hrtime_t now = gethrtime();
1372   if (AssumeMonotonicOSTimers) {
1373     return now;
1374   }
1375 
1376   const hrtime_t prev = max_hrtime;
1377   if (now <= prev) {
1378     return prev;   // same or retrograde time;
1379   }
1380   const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1381   assert(obsv >= prev, "invariant");   // Monotonicity
1382   // If the CAS succeeded then we're done and return "now".
1383   // If the CAS failed and the observed value "obsv" is >= now then
1384   // we should return "obsv".  If the CAS failed and now > obsv > prev then
1385   // some other thread raced this thread and installed a new value, in which case
1386   // we could either (a) retry the entire operation, (b) retry trying to install now
1387   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1388   // we might discard a higher "now" value in deference to a slightly lower but freshly
1389   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1390   // to (a) or (b) -- and greatly reduces coherence traffic.
1391   // We might also condition (c) on the magnitude of the delta between obsv and now.
1392   // Avoiding excessive CAS operations to hot RW locations is critical.
1393   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1394   return (prev == obsv) ? now : obsv;
1395 }
1396 
1397 // Time since start-up in seconds to a fine granularity.
1398 // Used by VMSelfDestructTimer and the MemProfiler.
1399 double os::elapsedTime() {
1400   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1401 }
1402 
1403 jlong os::elapsed_counter() {
1404   return (jlong)(getTimeNanos() - first_hrtime);
1405 }
1406 
1407 jlong os::elapsed_frequency() {
1408    return hrtime_hz;
1409 }
1410 
1411 // Return the real, user, and system times in seconds from an
1412 // arbitrary fixed point in the past.
1413 bool os::getTimesSecs(double* process_real_time,
1414                   double* process_user_time,
1415                   double* process_system_time) {
1416   struct tms ticks;
1417   clock_t real_ticks = times(&ticks);
1418 
1419   if (real_ticks == (clock_t) (-1)) {
1420     return false;
1421   } else {
1422     double ticks_per_second = (double) clock_tics_per_sec;
1423     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1424     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1425     // For consistency return the real time from getTimeNanos()
1426     // converted to seconds.
1427     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1428 
1429     return true;
1430   }
1431 }
1432 
1433 bool os::supports_vtime() { return true; }
1434 
1435 bool os::enable_vtime() {
1436   int fd = ::open("/proc/self/ctl", O_WRONLY);
1437   if (fd == -1)
1438     return false;
1439 
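       // Writing a PCSET control message with the PR_MSACCT flag to /proc/self/ctl
       // turns on microstate accounting for the process; with it enabled,
       // gethrvtime() (used by os::elapsedVTime()) reports accurate per-LWP
       // virtual time.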
1440   long cmd[] = { PCSET, PR_MSACCT };
1441   int res = ::write(fd, cmd, sizeof(long) * 2);
1442   ::close(fd);
1443   if (res != sizeof(long) * 2)
1444     return false;
1445 
1446   return true;
1447 }
1448 
1449 bool os::vtime_enabled() {
1450   int fd = ::open("/proc/self/status", O_RDONLY);
1451   if (fd == -1)
1452     return false;
1453 
1454   pstatus_t status;
1455   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1456   ::close(fd);
1457   if (res != sizeof(pstatus_t))
1458     return false;
1459 
1460   return status.pr_flags & PR_MSACCT;
1461 }
1462 
1463 double os::elapsedVTime() {
1464   return (double)gethrvtime() / (double)hrtime_hz;
1465 }
1466 
1467 // Used internally for comparisons only.
1468 // getTimeMillis is guaranteed not to move backwards on Solaris.
1469 jlong getTimeMillis() {
1470   jlong nanotime = getTimeNanos();
1471   return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1472 }
1473 
1474 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1475 jlong os::javaTimeMillis() {
1476   timeval t;
1477   if (gettimeofday( &t, NULL) == -1)
1478     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1479   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1480 }
1481 
1482 jlong os::javaTimeNanos() {
1483   return (jlong)getTimeNanos();
1484 }
1485 
1486 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1487   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1488   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1489   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1490   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1491 }
1492 
1493 char * os::local_time_string(char *buf, size_t buflen) {
1494   struct tm t;
1495   time_t long_time;
1496   time(&long_time);
1497   localtime_r(&long_time, &t);
1498   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1499                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1500                t.tm_hour, t.tm_min, t.tm_sec);
1501   return buf;
1502 }
1503 
1504 // Note: os::shutdown() might be called very early during initialization, or
1505 // called from signal handler. Before adding something to os::shutdown(), make
1506 // sure it is async-safe and can handle partially initialized VM.
1507 void os::shutdown() {
1508 
1509   // allow PerfMemory to attempt cleanup of any persistent resources
1510   perfMemory_exit();
1511 
1512   // needs to remove object in file system
1513   AttachListener::abort();
1514 
1515   // flush buffered output, finish log files
1516   ostream_abort();
1517 
1518   // Check for abort hook
1519   abort_hook_t abort_hook = Arguments::abort_hook();
1520   if (abort_hook != NULL) {
1521     abort_hook();
1522   }
1523 }
1524 
1525 // Note: os::abort() might be called very early during initialization, or
1526 // called from signal handler. Before adding something to os::abort(), make
1527 // sure it is async-safe and can handle partially initialized VM.
1528 void os::abort(bool dump_core) {
1529   os::shutdown();
1530   if (dump_core) {
1531 #ifndef PRODUCT
1532     fdStream out(defaultStream::output_fd());
1533     out.print_raw("Current thread is ");
1534     char buf[16];
1535     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1536     out.print_raw_cr(buf);
1537     out.print_raw_cr("Dumping core ...");
1538 #endif
1539     ::abort(); // dump core (for debugging)
1540   }
1541 
1542   ::exit(1);
1543 }
1544 
1545 // Die immediately, no exit hook, no abort hook, no cleanup.
1546 void os::die() {
1547   ::abort(); // dump core (for debugging)
1548 }
1549 
1550 // unused
1551 void os::set_error_file(const char *logfile) {}
1552 
1553 // DLL functions
1554 
1555 const char* os::dll_file_extension() { return ".so"; }
1556 
1557 // This must be hard coded because it's the system's temporary
1558 // directory, not the java application's temp directory, a la java.io.tmpdir.
1559 const char* os::get_temp_directory() { return "/tmp"; }
1560 
1561 static bool file_exists(const char* filename) {
1562   struct stat statbuf;
1563   if (filename == NULL || strlen(filename) == 0) {
1564     return false;
1565   }
1566   return os::stat(filename, &statbuf) == 0;
1567 }
1568 
1569 bool os::dll_build_name(char* buffer, size_t buflen,
1570                         const char* pname, const char* fname) {
1571   bool retval = false;
1572   const size_t pnamelen = pname ? strlen(pname) : 0;
1573 
1574   // Return error on buffer overflow.
1575   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1576     return retval;
1577   }
1578 
1579   if (pnamelen == 0) {
1580     snprintf(buffer, buflen, "lib%s.so", fname);
1581     retval = true;
1582   } else if (strchr(pname, *os::path_separator()) != NULL) {
1583     int n;
1584     char** pelements = split_path(pname, &n);
1585     if (pelements == NULL) {
1586       return false;
1587     }
1588     for (int i = 0 ; i < n ; i++) {
1589       // really shouldn't be NULL but what the heck, check can't hurt
1590       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1591         continue; // skip the empty path values
1592       }
1593       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1594       if (file_exists(buffer)) {
1595         retval = true;
1596         break;
1597       }
1598     }
1599     // release the storage
1600     for (int i = 0 ; i < n ; i++) {
1601       if (pelements[i] != NULL) {
1602         FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1603       }
1604     }
1605     if (pelements != NULL) {
1606       FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1607     }
1608   } else {
1609     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1610     retval = true;
1611   }
1612   return retval;
1613 }
1614 
1615 // check if addr is inside libjvm.so
1616 bool os::address_is_in_vm(address addr) {
1617   static address libjvm_base_addr;
1618   Dl_info dlinfo;
1619 
1620   if (libjvm_base_addr == NULL) {
1621     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1622       libjvm_base_addr = (address)dlinfo.dli_fbase;
1623     }
1624     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1625   }
1626 
1627   if (dladdr((void *)addr, &dlinfo) != 0) {
1628     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1629   }
1630 
1631   return false;
1632 }
1633 
1634 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
1635 static dladdr1_func_type dladdr1_func = NULL;
1636 
1637 bool os::dll_address_to_function_name(address addr, char *buf,
1638                                       int buflen, int * offset) {
1639   // buf is not optional, but offset is optional
1640   assert(buf != NULL, "sanity check");
1641 
1642   Dl_info dlinfo;
1643 
1644   // dladdr1_func was initialized in os::init()
1645   if (dladdr1_func != NULL) {
1646     // yes, we have dladdr1
1647 
1648     // Support for dladdr1 is checked at runtime; it may be
1649     // available even if the vm is built on a machine that does
1650     // not have dladdr1 support.  Make sure there is a value for
1651     // RTLD_DL_SYMENT.
1652     #ifndef RTLD_DL_SYMENT
1653     #define RTLD_DL_SYMENT 1
1654     #endif
1655 #ifdef _LP64
1656     Elf64_Sym * info;
1657 #else
1658     Elf32_Sym * info;
1659 #endif
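         // In addition to the Dl_info, dladdr1() with RTLD_DL_SYMENT returns a
         // pointer to the matching ELF symbol table entry; its st_size tells us how
         // far the symbol extends, so we can check below that 'addr' really falls
         // inside it.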
1660     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1661                      RTLD_DL_SYMENT) != 0) {
1662       // see if we have a matching symbol that covers our address
1663       if (dlinfo.dli_saddr != NULL &&
1664           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1665         if (dlinfo.dli_sname != NULL) {
1666           if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1667             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1668           }
1669           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1670           return true;
1671         }
1672       }
1673       // no matching symbol so try for just file info
1674       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1675         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1676                             buf, buflen, offset, dlinfo.dli_fname)) {
1677           return true;
1678         }
1679       }
1680     }
1681     buf[0] = '\0';
1682     if (offset != NULL) *offset  = -1;
1683     return false;
1684   }
1685 
1686   // no, only dladdr is available
1687   if (dladdr((void *)addr, &dlinfo) != 0) {
1688     // see if we have a matching symbol
1689     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1690       if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1691         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1692       }
1693       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1694       return true;
1695     }
1696     // no matching symbol so try for just file info
1697     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1698       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1699                           buf, buflen, offset, dlinfo.dli_fname)) {
1700         return true;
1701       }
1702     }
1703   }
1704   buf[0] = '\0';
1705   if (offset != NULL) *offset  = -1;
1706   return false;
1707 }
1708 
1709 bool os::dll_address_to_library_name(address addr, char* buf,
1710                                      int buflen, int* offset) {
1711   // buf is not optional, but offset is optional
1712   assert(buf != NULL, "sanity check");
1713 
1714   Dl_info dlinfo;
1715 
1716   if (dladdr((void*)addr, &dlinfo) != 0) {
1717     if (dlinfo.dli_fname != NULL) {
1718       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1719     }
1720     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1721       *offset = addr - (address)dlinfo.dli_fbase;
1722     }
1723     return true;
1724   }
1725 
1726   buf[0] = '\0';
1727   if (offset) *offset = -1;
1728   return false;
1729 }
1730 
1731 // Prints the names and full paths of all opened dynamic libraries
1732 // for current process
1733 void os::print_dll_info(outputStream * st) {
1734   Dl_info dli;
1735   void *handle;
1736   Link_map *map;
1737   Link_map *p;
1738 
1739   st->print_cr("Dynamic libraries:"); st->flush();
1740 
1741   if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
1742       dli.dli_fname == NULL) {
1743     st->print_cr("Error: Cannot print dynamic libraries.");
1744     return;
1745   }
1746   handle = dlopen(dli.dli_fname, RTLD_LAZY);
1747   if (handle == NULL) {
1748     st->print_cr("Error: Cannot print dynamic libraries.");
1749     return;
1750   }
1751   dlinfo(handle, RTLD_DI_LINKMAP, &map);
1752   if (map == NULL) {
1753     st->print_cr("Error: Cannot print dynamic libraries.");
1754     return;
1755   }
1756 
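       // Rewind to the head of the link map list, then walk it forward, printing
       // the load address and name of every object mapped into the process.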
1757   while (map->l_prev != NULL)
1758     map = map->l_prev;
1759 
1760   while (map != NULL) {
1761     st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
1762     map = map->l_next;
1763   }
1764 
1765   dlclose(handle);
1766 }
1767 
1768 // Loads a .dll/.so and, in case of error, checks whether the
1769 // .dll/.so was built for the same architecture as HotSpot is running on.
1771 
1772 void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
1773 {
1774   void * result= ::dlopen(filename, RTLD_LAZY);
1775   if (result != NULL) {
1776     // Successful loading
1777     return result;
1778   }
1779 
1780   Elf32_Ehdr elf_head;
1781 
1782   // Read system error message into ebuf
1783   // It may or may not be overwritten below
1784   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1785   ebuf[ebuflen-1]='\0';
1786   int diag_msg_max_length=ebuflen-strlen(ebuf);
1787   char* diag_msg_buf=ebuf+strlen(ebuf);
1788 
1789   if (diag_msg_max_length==0) {
1790     // No more space in ebuf for additional diagnostics message
1791     return NULL;
1792   }
1793 
1794 
1795   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1796 
1797   if (file_descriptor < 0) {
1798     // Can't open library, report dlerror() message
1799     return NULL;
1800   }
1801 
1802   bool failed_to_read_elf_head=
1803     (sizeof(elf_head)!=
1804         (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;
1805 
1806   ::close(file_descriptor);
1807   if (failed_to_read_elf_head) {
1808     // file i/o error - report dlerror() msg
1809     return NULL;
1810   }
1811 
1812   typedef struct {
1813     Elf32_Half  code;         // Actual value as defined in elf.h
1814     Elf32_Half  compat_class; // Compatibility class of the arch, from the VM's point of view
1815     char        elf_class;    // 32 or 64 bit
1816     char        endianess;    // MSB or LSB
1817     char*       name;         // String representation
1818   } arch_t;
1819 
1820   static const arch_t arch_array[]={
1821     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1822     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1823     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1824     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1825     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1826     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1827     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1828     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1829     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1830     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1831   };
1832 
1833   #if  (defined IA32)
1834     static  Elf32_Half running_arch_code=EM_386;
1835   #elif   (defined AMD64)
1836     static  Elf32_Half running_arch_code=EM_X86_64;
1837   #elif  (defined IA64)
1838     static  Elf32_Half running_arch_code=EM_IA_64;
1839   #elif  (defined __sparc) && (defined _LP64)
1840     static  Elf32_Half running_arch_code=EM_SPARCV9;
1841   #elif  (defined __sparc) && (!defined _LP64)
1842     static  Elf32_Half running_arch_code=EM_SPARC;
1843   #elif  (defined __powerpc64__)
1844     static  Elf32_Half running_arch_code=EM_PPC64;
1845   #elif  (defined __powerpc__)
1846     static  Elf32_Half running_arch_code=EM_PPC;
1847   #elif (defined ARM)
1848     static  Elf32_Half running_arch_code=EM_ARM;
1849   #else
1850     #error Method os::dll_load requires that one of the following is defined:\
1851          IA32, AMD64, IA64, __sparc, __powerpc__, ARM
1852   #endif
1853 
1854   // Identify compatibility class for VM's architecture and library's architecture
1855   // Obtain string descriptions for architectures
1856 
1857   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1858   int running_arch_index=-1;
1859 
1860   for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
1861     if (running_arch_code == arch_array[i].code) {
1862       running_arch_index    = i;
1863     }
1864     if (lib_arch.code == arch_array[i].code) {
1865       lib_arch.compat_class = arch_array[i].compat_class;
1866       lib_arch.name         = arch_array[i].name;
1867     }
1868   }
1869 
1870   assert(running_arch_index != -1,
1871     "Didn't find running architecture code (running_arch_code) in arch_array");
1872   if (running_arch_index == -1) {
1873     // Even though running architecture detection failed
1874     // we may still continue with reporting dlerror() message
1875     return NULL;
1876   }
1877 
1878   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1879     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1880     return NULL;
1881   }
1882 
1883   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1884     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1885     return NULL;
1886   }
1887 
1888   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1889     if ( lib_arch.name!=NULL ) {
1890       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1891         " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1892         lib_arch.name, arch_array[running_arch_index].name);
1893     } else {
1894       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1895       " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1896         lib_arch.code,
1897         arch_array[running_arch_index].name);
1898     }
1899   }
1900 
1901   return NULL;
1902 }
1903 
1904 void* os::dll_lookup(void* handle, const char* name) {
1905   return dlsym(handle, name);
1906 }
1907 
1908 void* os::get_default_process_handle() {
1909   return (void*)::dlopen(NULL, RTLD_LAZY);
1910 }
1911 
1912 int os::stat(const char *path, struct stat *sbuf) {
1913   char pathbuf[MAX_PATH];
1914   if (strlen(path) > MAX_PATH - 1) {
1915     errno = ENAMETOOLONG;
1916     return -1;
1917   }
1918   os::native_path(strcpy(pathbuf, path));
1919   return ::stat(pathbuf, sbuf);
1920 }
1921 
1922 static bool _print_ascii_file(const char* filename, outputStream* st) {
1923   int fd = ::open(filename, O_RDONLY);
1924   if (fd == -1) {
1925      return false;
1926   }
1927 
1928   char buf[32];
1929   int bytes;
1930   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
1931     st->print_raw(buf, bytes);
1932   }
1933 
1934   ::close(fd);
1935 
1936   return true;
1937 }
1938 
1939 void os::print_os_info_brief(outputStream* st) {
1940   os::Solaris::print_distro_info(st);
1941 
1942   os::Posix::print_uname_info(st);
1943 
1944   os::Solaris::print_libversion_info(st);
1945 }
1946 
1947 void os::print_os_info(outputStream* st) {
1948   st->print("OS:");
1949 
1950   os::Solaris::print_distro_info(st);
1951 
1952   os::Posix::print_uname_info(st);
1953 
1954   os::Solaris::print_libversion_info(st);
1955 
1956   os::Posix::print_rlimit_info(st);
1957 
1958   os::Posix::print_load_average(st);
1959 }
1960 
1961 void os::Solaris::print_distro_info(outputStream* st) {
1962   if (!_print_ascii_file("/etc/release", st)) {
1963       st->print("Solaris");
1964     }
1965     st->cr();
1966 }
1967 
1968 void os::Solaris::print_libversion_info(outputStream* st) {
1969   st->print("  (T2 libthread)");
1970   st->cr();
1971 }
1972 
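     // Scan /proc/self/map for a mapping at virtual address 0 and warn about it:
     // with page 0 mapped, NULL pointer dereferences silently succeed and can hide
     // bugs. Returns true if such a mapping was found.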
1973 static bool check_addr0(outputStream* st) {
1974   jboolean status = false;
1975   int fd = ::open("/proc/self/map",O_RDONLY);
1976   if (fd >= 0) {
1977     prmap_t p;
1978     while(::read(fd, &p, sizeof(p)) > 0) {
1979       if (p.pr_vaddr == 0x0) {
1980         st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname);
1981         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
1982         st->print("Access:");
1983         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
1984         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
1985         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
1986         st->cr();
1987         status = true;
1988       }
1989     }
1990     ::close(fd);
1991   }
1992   return status;
1993 }
1994 
1995 void os::pd_print_cpu_info(outputStream* st) {
1996   // Nothing to do for now.
1997 }
1998 
1999 void os::print_memory_info(outputStream* st) {
2000   st->print("Memory:");
2001   st->print(" %dk page", os::vm_page_size()>>10);
2002   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
2003   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
2004   st->cr();
2005   (void) check_addr0(st);
2006 }
2007 
2008 void os::print_siginfo(outputStream* st, void* siginfo) {
2009   const siginfo_t* si = (const siginfo_t*)siginfo;
2010 
2011   os::Posix::print_siginfo_brief(st, si);
2012 
2013   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2014       UseSharedSpaces) {
2015     FileMapInfo* mapinfo = FileMapInfo::current_info();
2016     if (mapinfo->is_in_shared_space(si->si_addr)) {
2017       st->print("\n\nError accessing class data sharing archive."   \
2018                 " Mapped file inaccessible during execution, "      \
2019                 " possible disk/network problem.");
2020     }
2021   }
2022   st->cr();
2023 }
2024 
2025 // Moved here from the rest of the signal handling code, because we need them
2026 // for diagnostic prints.
2027 #define OLDMAXSIGNUM 32
2028 static int Maxsignum = 0;
2029 static int *ourSigFlags = NULL;
2030 
2031 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2032 
2033 int os::Solaris::get_our_sigflags(int sig) {
2034   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2035   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2036   return ourSigFlags[sig];
2037 }
2038 
2039 void os::Solaris::set_our_sigflags(int sig, int flags) {
2040   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2041   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2042   ourSigFlags[sig] = flags;
2043 }
2044 
2045 
2046 static const char* get_signal_handler_name(address handler,
2047                                            char* buf, int buflen) {
2048   int offset;
2049   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2050   if (found) {
2051     // skip directory names
2052     const char *p1, *p2;
2053     p1 = buf;
2054     size_t len = strlen(os::file_separator());
2055     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2056     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2057   } else {
2058     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2059   }
2060   return buf;
2061 }
2062 
2063 static void print_signal_handler(outputStream* st, int sig,
2064                                   char* buf, size_t buflen) {
2065   struct sigaction sa;
2066 
2067   sigaction(sig, NULL, &sa);
2068 
2069   st->print("%s: ", os::exception_name(sig, buf, buflen));
2070 
2071   address handler = (sa.sa_flags & SA_SIGINFO)
2072                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2073                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
2074 
2075   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2076     st->print("SIG_DFL");
2077   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2078     st->print("SIG_IGN");
2079   } else {
2080     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2081   }
2082 
2083   st->print(", sa_mask[0]=");
2084   os::Posix::print_signal_set_short(st, &sa.sa_mask);
2085 
2086   address rh = VMError::get_resetted_sighandler(sig);
2087   // Maybe the handler was reset by VMError?
2088   if (rh != NULL) {
2089     handler = rh;
2090     sa.sa_flags = VMError::get_resetted_sigflags(sig);
2091   }
2092 
2093   st->print(", sa_flags=");
2094   os::Posix::print_sa_flags(st, sa.sa_flags);
2095 
2096   // Check: is it our handler?
2097   if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
2098      handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
2099     // It is our signal handler
2100     // check for flags
2101     if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
2102       st->print(
2103         ", flags were changed from " PTR32_FORMAT ", consider using jsig library",
2104         os::Solaris::get_our_sigflags(sig));
2105     }
2106   }
2107   st->cr();
2108 }
2109 
2110 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2111   st->print_cr("Signal Handlers:");
2112   print_signal_handler(st, SIGSEGV, buf, buflen);
2113   print_signal_handler(st, SIGBUS , buf, buflen);
2114   print_signal_handler(st, SIGFPE , buf, buflen);
2115   print_signal_handler(st, SIGPIPE, buf, buflen);
2116   print_signal_handler(st, SIGXFSZ, buf, buflen);
2117   print_signal_handler(st, SIGILL , buf, buflen);
2118   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2119   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2120   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2121   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2122   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2123   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2124   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2125   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2126 }
2127 
2128 static char saved_jvm_path[MAXPATHLEN] = { 0 };
2129 
2130 // Find the full path to the current module, libjvm.so
2131 void os::jvm_path(char *buf, jint buflen) {
2132   // Error checking.
2133   if (buflen < MAXPATHLEN) {
2134     assert(false, "must use a large-enough buffer");
2135     buf[0] = '\0';
2136     return;
2137   }
2138   // Lazy resolve the path to current module.
2139   if (saved_jvm_path[0] != 0) {
2140     strcpy(buf, saved_jvm_path);
2141     return;
2142   }
2143 
2144   Dl_info dlinfo;
2145   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2146   assert(ret != 0, "cannot locate libjvm");
2147   if (ret != 0 && dlinfo.dli_fname != NULL) {
2148     realpath((char *)dlinfo.dli_fname, buf);
2149   } else {
2150     buf[0] = '\0';
2151     return;
2152   }
2153 
2154   if (Arguments::sun_java_launcher_is_altjvm()) {
2155     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
2156     // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
2157     // If "/jre/lib/" appears at the right place in the string, then
2158     // assume we are installed in a JDK and we're done.  Otherwise, check
2159     // for a JAVA_HOME environment variable and fix up the path so it
2160     // looks like libjvm.so is installed there (append a fake suffix
2161     // hotspot/libjvm.so).
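         // Walk p back over the last five path components (libjvm.so, <vmtype>,
         // <arch>, lib, jre), so that p points at "/jre/lib/..." when the path has
         // the expected layout.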
2162     const char *p = buf + strlen(buf) - 1;
2163     for (int count = 0; p > buf && count < 5; ++count) {
2164       for (--p; p > buf && *p != '/'; --p)
2165         /* empty */ ;
2166     }
2167 
2168     if (strncmp(p, "/jre/lib/", 9) != 0) {
2169       // Look for JAVA_HOME in the environment.
2170       char* java_home_var = ::getenv("JAVA_HOME");
2171       if (java_home_var != NULL && java_home_var[0] != 0) {
2172         char cpu_arch[12];
2173         char* jrelib_p;
2174         int   len;
2175         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2176 #ifdef _LP64
2177         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2178         if (strcmp(cpu_arch, "sparc") == 0) {
2179           strcat(cpu_arch, "v9");
2180         } else if (strcmp(cpu_arch, "i386") == 0) {
2181           strcpy(cpu_arch, "amd64");
2182         }
2183 #endif
2184         // Check the current module name "libjvm.so".
2185         p = strrchr(buf, '/');
2186         assert(strstr(p, "/libjvm") == p, "invalid library name");
2187 
2188         realpath(java_home_var, buf);
2189         // determine if this is a legacy image or modules image
2190         // modules image doesn't have "jre" subdirectory
2191         len = strlen(buf);
2192         jrelib_p = buf + len;
2193         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2194         if (0 != access(buf, F_OK)) {
2195           snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2196         }
2197 
2198         if (0 == access(buf, F_OK)) {
2199           // Use current module name "libjvm.so"
2200           len = strlen(buf);
2201           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2202         } else {
2203           // Go back to path of .so
2204           realpath((char *)dlinfo.dli_fname, buf);
2205         }
2206       }
2207     }
2208   }
2209 
2210   strcpy(saved_jvm_path, buf);
2211 }
2212 
2213 
2214 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2215   // no prefix required, not even "_"
2216 }
2217 
2218 
2219 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2220   // no suffix required
2221 }
2222 
2223 // This method is a copy of JDK's sysGetLastErrorString
2224 // from src/solaris/hpi/src/system_md.c
2225 
2226 size_t os::lasterror(char *buf, size_t len) {
2227 
2228   if (errno == 0)  return 0;
2229 
2230   const char *s = ::strerror(errno);
2231   size_t n = ::strlen(s);
2232   if (n >= len) {
2233     n = len - 1;
2234   }
2235   ::strncpy(buf, s, n);
2236   buf[n] = '\0';
2237   return n;
2238 }
2239 
2240 
2241 // sun.misc.Signal
2242 
2243 extern "C" {
2244   static void UserHandler(int sig, void *siginfo, void *context) {
2245     // Ctrl-C is pressed during error reporting, likely because the error
2246     // handler fails to abort. Let VM die immediately.
2247     if (sig == SIGINT && is_error_reported()) {
2248        os::die();
2249     }
2250 
2251     os::signal_notify(sig);
2252     // We do not need to reinstate the signal handler each time...
2253   }
2254 }
2255 
2256 void* os::user_handler() {
2257   return CAST_FROM_FN_PTR(void*, UserHandler);
2258 }
2259 
2260 class Semaphore : public StackObj {
2261   public:
2262     Semaphore();
2263     ~Semaphore();
2264     void signal();
2265     void wait();
2266     bool trywait();
2267     bool timedwait(unsigned int sec, int nsec);
2268   private:
2269     sema_t _semaphore;
2270 };
2271 
2272 
2273 Semaphore::Semaphore() {
2274   sema_init(&_semaphore, 0, NULL, NULL);
2275 }
2276 
2277 Semaphore::~Semaphore() {
2278   sema_destroy(&_semaphore);
2279 }
2280 
2281 void Semaphore::signal() {
2282   sema_post(&_semaphore);
2283 }
2284 
2285 void Semaphore::wait() {
2286   sema_wait(&_semaphore);
2287 }
2288 
2289 bool Semaphore::trywait() {
2290   return sema_trywait(&_semaphore) == 0;
2291 }
2292 
2293 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2294   struct timespec ts;
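       // unpackTime() turns the relative (sec, nsec) timeout into the absolute
       // deadline form that sema_timedwait() expects.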
2295   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2296 
2297   while (1) {
2298     int result = sema_timedwait(&_semaphore, &ts);
2299     if (result == 0) {
2300       return true;
2301     } else if (errno == EINTR) {
2302       continue;
2303     } else if (errno == ETIME) {
2304       return false;
2305     } else {
2306       return false;
2307     }
2308   }
2309 }
2310 
2311 extern "C" {
2312   typedef void (*sa_handler_t)(int);
2313   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2314 }
2315 
2316 void* os::signal(int signal_number, void* handler) {
2317   struct sigaction sigAct, oldSigAct;
2318   sigfillset(&(sigAct.sa_mask));
2319   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2320   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2321 
2322   if (sigaction(signal_number, &sigAct, &oldSigAct))
2323     // -1 means registration failed
2324     return (void *)-1;
2325 
2326   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2327 }
2328 
2329 void os::signal_raise(int signal_number) {
2330   raise(signal_number);
2331 }
2332 
2333 /*
2334  * The following code was moved out of os.cpp to make it platform
2335  * specific, which it is by its very nature.
2336  */
2337 
2338 // a counter for each possible signal value
2339 static int Sigexit = 0;
2340 static int Maxlibjsigsigs;
2341 static jint *pending_signals = NULL;
2342 static int *preinstalled_sigs = NULL;
2343 static struct sigaction *chainedsigactions = NULL;
2344 static sema_t sig_sem;
2345 typedef int (*version_getting_t)();
2346 version_getting_t os::Solaris::get_libjsig_version = NULL;
2347 static int libjsigversion = 0;
2348 
2349 int os::sigexitnum_pd() {
2350   assert(Sigexit > 0, "signal memory not yet initialized");
2351   return Sigexit;
2352 }
2353 
2354 void os::Solaris::init_signal_mem() {
2355   // Initialize signal structures
2356   Maxsignum = SIGRTMAX;
2357   Sigexit = Maxsignum+1;
2358   assert(Maxsignum >0, "Unable to obtain max signal number");
2359 
2360   Maxlibjsigsigs = Maxsignum;
2361 
2362   // pending_signals has one int per signal
2363   // The additional signal is for SIGEXIT - exit signal to signal_thread
2364   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2365   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2366 
2367   if (UseSignalChaining) {
2368      chainedsigactions = (struct sigaction *)os::malloc(sizeof(struct sigaction)
2369        * (Maxsignum + 1), mtInternal);
2370      memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2371      preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2372      memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2373   }
2374   ourSigFlags = (int*)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2375   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2376 }
2377 
2378 void os::signal_init_pd() {
2379   int ret;
2380 
2381   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2382   assert(ret == 0, "sema_init() failed");
2383 }
2384 
2385 void os::signal_notify(int signal_number) {
2386   int ret;
2387 
2388   Atomic::inc(&pending_signals[signal_number]);
2389   ret = ::sema_post(&sig_sem);
2390   assert(ret == 0, "sema_post() failed");
2391 }
2392 
2393 static int check_pending_signals(bool wait_for_signal) {
2394   int ret;
2395   while (true) {
2396     for (int i = 0; i < Sigexit + 1; i++) {
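           // Atomically claim one pending occurrence of signal i: the cmpxchg
           // decrements the counter only if no other thread changed it since we
           // read it.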
2397       jint n = pending_signals[i];
2398       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2399         return i;
2400       }
2401     }
2402     if (!wait_for_signal) {
2403       return -1;
2404     }
2405     JavaThread *thread = JavaThread::current();
2406     ThreadBlockInVM tbivm(thread);
2407 
2408     bool threadIsSuspended;
2409     do {
2410       thread->set_suspend_equivalent();
2411       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2412       while((ret = ::sema_wait(&sig_sem)) == EINTR)
2413           ;
2414       assert(ret == 0, "sema_wait() failed");
2415 
2416       // were we externally suspended while we were waiting?
2417       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2418       if (threadIsSuspended) {
2419         //
2420         // The semaphore has been incremented, but while we were waiting
2421         // another thread suspended us. We don't want to continue running
2422         // while suspended because that would surprise the thread that
2423         // suspended us.
2424         //
2425         ret = ::sema_post(&sig_sem);
2426         assert(ret == 0, "sema_post() failed");
2427 
2428         thread->java_suspend_self();
2429       }
2430     } while (threadIsSuspended);
2431   }
2432 }
2433 
2434 int os::signal_lookup() {
2435   return check_pending_signals(false);
2436 }
2437 
2438 int os::signal_wait() {
2439   return check_pending_signals(true);
2440 }
2441 
2442 ////////////////////////////////////////////////////////////////////////////////
2443 // Virtual Memory
2444 
2445 static int page_size = -1;
2446 
2447 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2448 // clear this var if support is not available.
2449 static bool has_map_align = true;
2450 
2451 int os::vm_page_size() {
2452   assert(page_size != -1, "must call os::init");
2453   return page_size;
2454 }
2455 
2456 // Solaris allocates memory by pages.
2457 int os::vm_allocation_granularity() {
2458   assert(page_size != -1, "must call os::init");
2459   return page_size;
2460 }
2461 
2462 static bool recoverable_mmap_error(int err) {
2463   // See if the error is one we can let the caller handle. This
2464   // list of errno values comes from the Solaris mmap(2) man page.
2465   switch (err) {
2466   case EBADF:
2467   case EINVAL:
2468   case ENOTSUP:
2469     // let the caller deal with these errors
2470     return true;
2471 
2472   default:
2473     // Any remaining errors on this OS can cause our reserved mapping
2474     // to be lost. That can cause confusion where different data
2475     // structures think they have the same memory mapped. The worst
2476     // scenario is if both the VM and a library think they have the
2477     // same memory mapped.
2478     return false;
2479   }
2480 }
2481 
2482 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2483                                     int err) {
2484   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2485           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2486           strerror(err), err);
2487 }
2488 
2489 static void warn_fail_commit_memory(char* addr, size_t bytes,
2490                                     size_t alignment_hint, bool exec,
2491                                     int err) {
2492   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2493           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2494           alignment_hint, exec, strerror(err), err);
2495 }
2496 
2497 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2498   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2499   size_t size = bytes;
2500   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2501   if (res != NULL) {
2502     if (UseNUMAInterleaving) {
2503       numa_make_global(addr, bytes);
2504     }
2505     return 0;
2506   }
2507 
2508   int err = errno;  // save errno from mmap() call in mmap_chunk()
2509 
2510   if (!recoverable_mmap_error(err)) {
2511     warn_fail_commit_memory(addr, bytes, exec, err);
2512     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2513   }
2514 
2515   return err;
2516 }
2517 
2518 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2519   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2520 }
2521 
2522 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2523                                   const char* mesg) {
2524   assert(mesg != NULL, "mesg must be specified");
2525   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2526   if (err != 0) {
2527     // the caller wants all commit errors to exit with the specified mesg:
2528     warn_fail_commit_memory(addr, bytes, exec, err);
2529     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2530   }
2531 }
2532 
2533 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2534                                     size_t alignment_hint, bool exec) {
2535   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2536   if (err == 0) {
2537     if (UseLargePages && (alignment_hint > (size_t)vm_page_size())) {
2538       // If the large page size has been set and the VM
2539       // is using large pages, use the large page size
2540       // if it is smaller than the alignment hint. This is
2541       // a case where the VM wants to use a larger alignment size
2542       // for its own reasons but still wants to use large pages
2543       // (which is what matters for setting the mpss range).
2544       size_t page_size = 0;
2545       if (large_page_size() < alignment_hint) {
2546         assert(UseLargePages, "Expected to be here for large page use only");
2547         page_size = large_page_size();
2548       } else {
2549         // If the alignment hint is less than the large page
2550         // size, the VM wants a particular alignment (thus the hint)
2551         // for internal reasons.  Try to set the mpss range using
2552         // the alignment_hint.
2553         page_size = alignment_hint;
2554       }
2555       // Since this is a hint, ignore any failures.
2556       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2557     }
2558   }
2559   return err;
2560 }
2561 
2562 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2563                           bool exec) {
2564   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2565 }
2566 
2567 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2568                                   size_t alignment_hint, bool exec,
2569                                   const char* mesg) {
2570   assert(mesg != NULL, "mesg must be specified");
2571   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2572   if (err != 0) {
2573     // the caller wants all commit errors to exit with the specified mesg:
2574     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2575     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2576   }
2577 }
2578 
2579 // Uncommit the pages in a specified region.
2580 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2581   if (madvise(addr, bytes, MADV_FREE) < 0) {
2582     debug_only(warning("MADV_FREE failed."));
2583     return;
2584   }
2585 }
2586 
2587 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2588   return os::commit_memory(addr, size, !ExecMem);
2589 }
2590 
2591 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2592   return os::uncommit_memory(addr, size);
2593 }
2594 
2595 // Change the page size in a given range.
2596 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2597   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2598   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2599   if (UseLargePages) {
2600     Solaris::setup_large_pages(addr, bytes, alignment_hint);
2601   }
2602 }
2603 
2604 // Tell the OS to make the range local to the first-touching LWP
2605 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2606   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2607   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2608     debug_only(warning("MADV_ACCESS_LWP failed."));
2609   }
2610 }
2611 
2612 // Tell the OS that this range would be accessed from different LWPs.
2613 void os::numa_make_global(char *addr, size_t bytes) {
2614   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2615   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2616     debug_only(warning("MADV_ACCESS_MANY failed."));
2617   }
2618 }
2619 
2620 // Get the number of the locality groups.
2621 size_t os::numa_get_groups_num() {
2622   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2623   return n != -1 ? n : 1;
2624 }
2625 
2626 // Get a list of leaf locality groups. A leaf lgroup is a group that
2627 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
2628 // board. An LWP is assigned to one of these groups upon creation.
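     // The ids array is used both as a breadth-first work queue (elements
     // [cur, top) still need to be expanded) and as the result array (elements
     // [0, bottom) are the leaf groups with memory found so far).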
2629 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2630    if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2631      ids[0] = 0;
2632      return 1;
2633    }
2634    int result_size = 0, top = 1, bottom = 0, cur = 0;
2635    for (int k = 0; k < size; k++) {
2636      int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2637                                     (Solaris::lgrp_id_t*)&ids[top], size - top);
2638      if (r == -1) {
2639        ids[0] = 0;
2640        return 1;
2641      }
2642      if (!r) {
2643        // That's a leaf node.
2644        assert (bottom <= cur, "Sanity check");
2645        // Check if the node has memory
2646        if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2647                                    NULL, 0, LGRP_RSRC_MEM) > 0) {
2648          ids[bottom++] = ids[cur];
2649        }
2650      }
2651      top += r;
2652      cur++;
2653    }
2654    if (bottom == 0) {
2655      // Handle the situation where the OS reports no memory available.
2656      // Assume UMA architecture.
2657      ids[0] = 0;
2658      return 1;
2659    }
2660    return bottom;
2661 }
2662 
2663 // Detect a topology change. This typically happens during CPU plugging/unplugging.
2664 bool os::numa_topology_changed() {
2665   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2666   if (is_stale != -1 && is_stale) {
2667     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2668     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2669     assert(c != 0, "Failure to initialize LGRP API");
2670     Solaris::set_lgrp_cookie(c);
2671     return true;
2672   }
2673   return false;
2674 }
2675 
2676 // Get the group id of the current LWP.
2677 int os::numa_get_group_id() {
2678   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2679   if (lgrp_id == -1) {
2680     return 0;
2681   }
2682   const int size = os::numa_get_groups_num();
2683   int *ids = (int*)alloca(size * sizeof(int));
2684 
2685   // Get the ids of all lgroups with memory; r is the count.
2686   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2687                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2688   if (r <= 0) {
2689     return 0;
2690   }
2691   return ids[os::random() % r];
2692 }
2693 
2694 // Request information about the page.
2695 bool os::get_page_info(char *start, page_info* info) {
2696   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2697   uint64_t addr = (uintptr_t)start;
2698   uint64_t outdata[2];
2699   uint_t validity = 0;
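       // meminfo() sets one validity bit per lookup: bit 0 for the address itself,
       // and bit i+1 for the i-th requested info type (here MEMINFO_VLGRP and
       // MEMINFO_VPAGESIZE), which is why the code below tests 1, 2 and 4.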
2700 
2701   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2702     return false;
2703   }
2704 
2705   info->size = 0;
2706   info->lgrp_id = -1;
2707 
2708   if ((validity & 1) != 0) {
2709     if ((validity & 2) != 0) {
2710       info->lgrp_id = outdata[0];
2711     }
2712     if ((validity & 4) != 0) {
2713       info->size = outdata[1];
2714     }
2715     return true;
2716   }
2717   return false;
2718 }
2719 
2720 // Scan the pages from start to end until a page different than
2721 // the one described in the info parameter is encountered.
2722 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2723   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2724   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2725   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2726   uint_t validity[MAX_MEMINFO_CNT];
2727 
2728   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2729   uint64_t p = (uint64_t)start;
2730   while (p < (uint64_t)end) {
2731     addrs[0] = p;
2732     size_t addrs_count = 1;
2733     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2734       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2735       addrs_count++;
2736     }
2737 
2738     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2739       return NULL;
2740     }
2741 
2742     size_t i = 0;
2743     for (; i < addrs_count; i++) {
2744       if ((validity[i] & 1) != 0) {
2745         if ((validity[i] & 4) != 0) {
2746           if (outdata[types * i + 1] != page_expected->size) {
2747             break;
2748           }
2749         } else
2750           if (page_expected->size != 0) {
2751             break;
2752           }
2753 
2754         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2755           if (outdata[types * i] != page_expected->lgrp_id) {
2756             break;
2757           }
2758         }
2759       } else {
2760         return NULL;
2761       }
2762     }
2763 
2764     if (i < addrs_count) {
2765       if ((validity[i] & 2) != 0) {
2766         page_found->lgrp_id = outdata[types * i];
2767       } else {
2768         page_found->lgrp_id = -1;
2769       }
2770       if ((validity[i] & 4) != 0) {
2771         page_found->size = outdata[types * i + 1];
2772       } else {
2773         page_found->size = 0;
2774       }
2775       return (char*)addrs[i];
2776     }
2777 
2778     p = addrs[addrs_count - 1] + page_size;
2779   }
2780   return end;
2781 }
2782 
2783 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2784   size_t size = bytes;
2785   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2786   // uncommitted page. Otherwise, the read/write might succeed if we
2787   // have enough swap space to back the physical page.
2788   return
2789     NULL != Solaris::mmap_chunk(addr, size,
2790                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2791                                 PROT_NONE);
2792 }
2793 
2794 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2795   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2796 
2797   if (b == MAP_FAILED) {
2798     return NULL;
2799   }
2800   return b;
2801 }
2802 
2803 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
2804   char* addr = requested_addr;
2805   int flags = MAP_PRIVATE | MAP_NORESERVE;
2806 
2807   assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
2808 
2809   if (fixed) {
2810     flags |= MAP_FIXED;
2811   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
2812     flags |= MAP_ALIGN;
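         // With MAP_ALIGN, mmap() interprets the addr argument as the required
         // alignment of the mapping rather than as a requested address.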
2813     addr = (char*) alignment_hint;
2814   }
2815 
2816   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2817   // uncommitted page. Otherwise, the read/write might succeed if we
2818   // have enough swap space to back the physical page.
2819   return mmap_chunk(addr, bytes, flags, PROT_NONE);
2820 }
2821 
2822 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2823   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
2824 
2825   guarantee(requested_addr == NULL || requested_addr == addr,
2826             "OS failed to return requested mmap address.");
2827   return addr;
2828 }
2829 
2830 // Reserve memory at an arbitrary address, only if that area is
2831 // available (and not reserved for something else).
2832 
2833 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2834   const int max_tries = 10;
2835   char* base[max_tries];
2836   size_t size[max_tries];
2837 
2838   // Solaris adds a gap between mmap'ed regions.  The size of the gap
2839   // is dependent on the requested size and the MMU.  Our initial gap
2840   // value here is just a guess and will be corrected later.
2841   bool had_top_overlap = false;
2842   bool have_adjusted_gap = false;
2843   size_t gap = 0x400000;
2844 
2845   // Assert only that the size is a multiple of the page size, since
2846   // that's all that mmap requires, and since that's all we really know
2847   // about at this low abstraction level.  If we need higher alignment,
2848   // we can either pass an alignment to this method or verify alignment
2849   // in one of the methods further up the call chain.  See bug 5044738.
2850   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2851 
2852   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2853   // Give it a try, if the kernel honors the hint we can return immediately.
2854   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2855 
2856   volatile int err = errno;
2857   if (addr == requested_addr) {
2858     return addr;
2859   } else if (addr != NULL) {
2860     pd_unmap_memory(addr, bytes);
2861   }
2862 
2863   if (PrintMiscellaneous && Verbose) {
2864     char buf[256];
2865     buf[0] = '\0';
2866     if (addr == NULL) {
2867       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
2868     }
2869     warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2870             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2871             "%s", bytes, requested_addr, addr, buf);
2872   }
2873 
2874   // Address hint method didn't work.  Fall back to the old method.
2875   // In theory, once SNV becomes our oldest supported platform, this
2876   // code will no longer be needed.
2877   //
2878   // Repeatedly allocate blocks until the block is allocated at the
2879   // right spot. Give up after max_tries.
2880   int i;
2881   for (i = 0; i < max_tries; ++i) {
2882     base[i] = reserve_memory(bytes);
2883 
2884     if (base[i] != NULL) {
2885       // Is this the block we wanted?
2886       if (base[i] == requested_addr) {
2887         size[i] = bytes;
2888         break;
2889       }
2890 
2891       // check that the gap value is right
2892       if (had_top_overlap && !have_adjusted_gap) {
2893         size_t actual_gap = base[i-1] - base[i] - bytes;
2894         if (gap != actual_gap) {
2895           // adjust the gap value and retry the last 2 allocations
2896           assert(i > 0, "gap adjustment code problem");
2897           have_adjusted_gap = true;  // adjust the gap only once, just in case
2898           gap = actual_gap;
2899           if (PrintMiscellaneous && Verbose) {
2900             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2901           }
2902           unmap_memory(base[i], bytes);
2903           unmap_memory(base[i-1], size[i-1]);
2904           i-=2;
2905           continue;
2906         }
2907       }
2908 
2909       // Does this overlap the block we wanted? Give back the overlapped
2910       // parts and try again.
2911       //
2912       // There is still a bug in this code: if top_overlap == bytes,
2913       // the overlap is offset from requested region by the value of gap.
2914       // In this case giving back the overlapped part will not work,
2915       // because we'll give back the entire block at base[i] and
2916       // therefore the subsequent allocation will not generate a new gap.
2917       // This could be fixed with a new algorithm that used larger
2918       // or variable size chunks to find the requested region -
2919       // but such a change would introduce additional complications.
2920       // It's rare enough that the planets align for this bug,
2921       // so we'll just wait for a fix for 6204603/5003415 which
2922       // will provide a mmap flag to allow us to avoid this business.
2923 
2924       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2925       if (top_overlap >= 0 && top_overlap < bytes) {
2926         had_top_overlap = true;
2927         unmap_memory(base[i], top_overlap);
2928         base[i] += top_overlap;
2929         size[i] = bytes - top_overlap;
2930       } else {
2931         size_t bottom_overlap = base[i] + bytes - requested_addr;
2932         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2933           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
2934             warning("attempt_reserve_memory_at: possible alignment bug");
2935           }
2936           unmap_memory(requested_addr, bottom_overlap);
2937           size[i] = bytes - bottom_overlap;
2938         } else {
2939           size[i] = bytes;
2940         }
2941       }
2942     }
2943   }
2944 
2945   // Give back the unused reserved pieces.
2946 
2947   for (int j = 0; j < i; ++j) {
2948     if (base[j] != NULL) {
2949       unmap_memory(base[j], size[j]);
2950     }
2951   }
2952 
2953   return (i < max_tries) ? requested_addr : NULL;
2954 }
2955 
2956 bool os::pd_release_memory(char* addr, size_t bytes) {
2957   size_t size = bytes;
2958   return munmap(addr, size) == 0;
2959 }
2960 
2961 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
2962   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
2963          "addr must be page aligned");
2964   int retVal = mprotect(addr, bytes, prot);
2965   return retVal == 0;
2966 }
2967 
2968 // Protect memory (Used to pass readonly pages through
2969 // JNI GetArray<type>Elements with empty arrays.)
2970 // Also, used for serialization page and for compressed oops null pointer
2971 // checking.
2972 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2973                         bool is_committed) {
2974   unsigned int p = 0;
2975   switch (prot) {
2976   case MEM_PROT_NONE: p = PROT_NONE; break;
2977   case MEM_PROT_READ: p = PROT_READ; break;
2978   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2979   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2980   default:
2981     ShouldNotReachHere();
2982   }
2983   // is_committed is unused.
2984   return solaris_mprotect(addr, bytes, p);
2985 }
2986 
2987 // guard_memory and unguard_memory only happen within stack guard pages.
2988 // Since ISM pertains only to the heap, guard and unguard memory should not
2989 // happen with an ISM region.
2990 bool os::guard_memory(char* addr, size_t bytes) {
2991   return solaris_mprotect(addr, bytes, PROT_NONE);
2992 }
2993 
2994 bool os::unguard_memory(char* addr, size_t bytes) {
2995   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
2996 }
2997 
2998 // Large page support
2999 static size_t _large_page_size = 0;
3000 
3001 // Insertion sort for small arrays (descending order).
3002 static void insertion_sort_descending(size_t* array, int len) {
3003   for (int i = 0; i < len; i++) {
3004     size_t val = array[i];
3005     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
3006       size_t tmp = array[key];
3007       array[key] = array[key - 1];
3008       array[key - 1] = tmp;
3009     }
3010   }
3011 }
3012 
3013 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
3014   const unsigned int usable_count = VM_Version::page_size_count();
3015   if (usable_count == 1) {
3016     return false;
3017   }
3018 
3019   // Find the right getpagesizes interface.  When Solaris 11 is the minimum
3020   // build platform, getpagesizes() (without the '2') can be called directly.
3021   typedef int (*gps_t)(size_t[], int);
3022   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3023   if (gps_func == NULL) {
3024     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3025     if (gps_func == NULL) {
3026       if (warn) {
3027         warning("MPSS is not supported by the operating system.");
3028       }
3029       return false;
3030     }
3031   }
3032 
3033   // Fill the array of page sizes.
3034   int n = (*gps_func)(_page_sizes, page_sizes_max);
3035   assert(n > 0, "Solaris bug?");
3036 
3037   if (n == page_sizes_max) {
3038     // Add a sentinel.  This is needed only if the array was completely filled;
3039     // otherwise the static (zero-initialized) array already ends in a 0.
3040     _page_sizes[--n] = 0;
3041     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3042   }
3043   assert(_page_sizes[n] == 0, "missing sentinel");
3044   trace_page_sizes("available page sizes", _page_sizes, n);
3045 
3046   if (n == 1) return false;     // Only one page size available.
3047 
3048   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3049   // select up to usable_count elements.  First sort the array, find the first
3050   // acceptable value, then copy the usable sizes to the top of the array and
3051   // trim the rest.  Make sure to include the default page size :-).
3052   //
3053   // A better policy could get rid of the 4M limit by taking the sizes of the
3054   // important VM memory regions (java heap and possibly the code cache) into
3055   // account.
3056   insertion_sort_descending(_page_sizes, n);
3057   const size_t size_limit =
3058     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3059   int beg;
3060   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
3061   const int end = MIN2((int)usable_count, n) - 1;
3062   for (int cur = 0; cur < end; ++cur, ++beg) {
3063     _page_sizes[cur] = _page_sizes[beg];
3064   }
3065   _page_sizes[end] = vm_page_size();
3066   _page_sizes[end + 1] = 0;
3067 
3068   if (_page_sizes[end] > _page_sizes[end - 1]) {
3069     // Default page size is not the smallest; sort again.
3070     insertion_sort_descending(_page_sizes, end + 1);
3071   }
3072   *page_size = _page_sizes[0];
3073 
3074   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3075   return true;
3076 }
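
// Worked example for the selection above (hypothetical sizes): suppose
// getpagesizes() reports { 8K, 64K, 512K, 4M, 32M, 256M }, usable_count is 4,
// and LargePageSizeInBytes is at its default (size_limit = 4M).  After the
// descending sort the array is { 256M, 32M, 4M, 512K, 64K, 8K }; 'beg' skips
// past 256M and 32M, the copy loop moves { 4M, 512K, 64K } to the front,
// vm_page_size() (8K) is stored at _page_sizes[end], and a 0 sentinel follows,
// yielding { 4M, 512K, 64K, 8K, 0, ... } with *page_size = 4M.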
3077 
3078 void os::large_page_init() {
3079   if (UseLargePages) {
3080     // print a warning if any large page related flag is specified on command line
3081     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
3082                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3083 
3084     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3085   }
3086 }
3087 
3088 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3089   // Signal to OS that we want large pages for addresses
3090   // from addr, addr + bytes
3091   struct memcntl_mha mpss_struct;
3092   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3093   mpss_struct.mha_pagesize = align;
3094   mpss_struct.mha_flags = 0;
3095   // Upon successful completion, memcntl() returns 0
3096   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3097     debug_only(warning("Attempt to use MPSS failed."));
3098     return false;
3099   }
3100   return true;
3101 }
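
// Usage sketch (illustrative only; 'addr' and 'region_bytes' are hypothetical
// names): a caller that has reserved an appropriately aligned region would
// typically advise MPSS for it like this:
//
//   if (UseLargePages && os::large_page_size() > 0) {
//     os::Solaris::setup_large_pages((caddr_t)addr, region_bytes,
//                                    os::large_page_size());
//   }
//
// The call merely advises the HAT layer via memcntl(MC_HAT_ADVISE); pages are
// still committed on demand, which is why can_commit_large_page_memory()
// below returns true.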
3102 
3103 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3104   fatal("os::reserve_memory_special should not be called on Solaris.");
3105   return NULL;
3106 }
3107 
3108 bool os::release_memory_special(char* base, size_t bytes) {
3109   fatal("os::release_memory_special should not be called on Solaris.");
3110   return false;
3111 }
3112 
3113 size_t os::large_page_size() {
3114   return _large_page_size;
3115 }
3116 
3117 // MPSS allows the application to commit large page memory on demand; with ISM
3118 // the entire memory region must be allocated as shared memory.
3119 bool os::can_commit_large_page_memory() {
3120   return true;
3121 }
3122 
3123 bool os::can_execute_large_page_memory() {
3124   return true;
3125 }
3126 
3127 // Read calls from inside the vm need to perform state transitions
3128 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3129   size_t res;
3130   JavaThread* thread = (JavaThread*)Thread::current();
3131   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
3132   ThreadBlockInVM tbiv(thread);
3133   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3134   return res;
3135 }
3136 
3137 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3138   size_t res;
3139   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
3140           "Assumed _thread_in_native");
3141   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3142   return res;
3143 }
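
// For reference, RESTARTABLE (defined earlier in this file) wraps the system
// call in a retry loop roughly equivalent to the sketch below, so that a
// signal-interrupted read is simply reissued instead of surfacing EINTR:
//
//   do {
//     res = ::read(fd, buf, (size_t) nBytes);
//   } while ((int)res == OS_ERR && errno == EINTR);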
3144 
3145 void os::naked_short_sleep(jlong ms) {
3146   assert(ms < 1000, "Un-interruptable sleep, short time use only");
3147 
3148   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3149   // Solaris requires -lrt for this.
3150   usleep((ms * 1000));
3151 
3152   return;
3153 }
3154 
3155 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3156 void os::infinite_sleep() {
3157   while (true) {    // sleep forever ...
3158     ::sleep(100);   // ... 100 seconds at a time
3159   }
3160 }
3161 
3162 // Used to convert frequent JVM_Yield() to nops
3163 bool os::dont_yield() {
3164   if (DontYieldALot) {
3165     static hrtime_t last_time = 0;
3166     hrtime_t diff = getTimeNanos() - last_time;
3167 
3168     if (diff < DontYieldALotInterval * 1000000)
3169       return true;
3170 
3171     last_time += diff;
3172 
3173     return false;
3174   }
3175   else {
3176     return false;
3177   }
3178 }
3179 
3180 // Caveat: Solaris os::yield() causes a thread-state transition whereas
3181 // the linux and win32 implementations do not.  This should be checked.
3182 
3183 void os::yield() {
3184   // Yields to all threads with same or greater priority
3185   os::sleep(Thread::current(), 0, false);
3186 }
3187 
3188 // Note that yield semantics are defined by the scheduling class to which
3189 // the thread currently belongs.  Typically, yield will _not_ yield to
3190 // other equal or higher priority threads that reside on the dispatch queues
3191 // of other CPUs.
3192 
3193 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
3194 
3195 void os::yield_all() {
3196   // Yields to all threads, including threads with lower priorities
3197   os::sleep(Thread::current(), 1, false);
3198 }
3199 
3200 // Interface for setting lwp priorities.  If we are using T2 libthread,
3201 // which forces the use of BoundThreads, or we manually set UseBoundThreads,
3202 // all of our threads will be assigned to real lwp's.  Using the thr_setprio
3203 // function is meaningless in this mode, so we must adjust the real lwp's priority.
3204 // The routines below implement the getting and setting of lwp priorities.
3205 //
3206 // Note: T2 is now the only supported libthread.  The UseBoundThreads flag is
3207 //       being deprecated and all threads are now BoundThreads.
3208 //
3209 // Note: There are three priority scales used on Solaris.  Java priorities,
3210 //       which range from 1 to 10, the libthread "thr_setprio" scale, which
3211 //       ranges from 0 to 127, and the scale of the current scheduling class
3212 //       of the process we are running in, typically from -60 to +60.
3213 //       The setting of the lwp priorities is done after a call to thr_setprio,
3214 //       so Java priorities are mapped to libthread priorities and we map from
3215 //       the latter to lwp priorities.  We don't keep priorities stored in
3216 //       Java priorities since some of our worker threads want to set priorities
3217 //       higher than all Java threads.
3218 //
3219 // For related information:
3220 // (1)  man -s 2 priocntl
3221 // (2)  man -s 4 priocntl
3222 // (3)  man dispadmin
3223 // =    librt.so
3224 // =    libthread/common/rtsched.c - thrp_setlwpprio().
3225 // =    ps -cL <pid> ... to validate priority.
3226 // =    sched_get_priority_min and _max
3227 //              pthread_create
3228 //              sched_setparam
3229 //              pthread_setschedparam
3230 //
3231 // Assumptions:
3232 // +    We assume that all threads in the process belong to the same
3233 //              scheduling class, i.e., a homogeneous process.
3234 // +    Must be root or in the IA group to change the "interactive" attribute.
3235 //              Priocntl() will fail silently.  The only indication of failure is when
3236 //              we read back the value and notice that it hasn't changed.
3237 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3238 // +    For RT, change timeslice as well.  Invariant:
3239 //              constant "priority integral"
3240 //              Konst == TimeSlice * (60-Priority)
3241 //              Given a priority, compute appropriate timeslice.
3242 // +    Higher numerical values have higher priority.
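
// Worked example (hypothetical values, assuming no ia/ts_uprilim clamping):
// with the default priority table below, a Java thread at NormPriority (5)
// maps to libthread priority 127.  If the process runs in the TS class with a
// user priority range of [-60 .. +60], set_lwp_class_and_priority() then
// scales that to the lwp value scale_to_lwp_priority(-60, 60, 127) == 60,
// i.e. the class maximum.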
3243 
3244 // sched class attributes
3245 typedef struct {
3246         int   schedPolicy;              // classID
3247         int   maxPrio;
3248         int   minPrio;
3249 } SchedInfo;
3250 
3251 
3252 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3253 
3254 #ifdef ASSERT
3255 static int  ReadBackValidate = 1;
3256 #endif
3257 static int  myClass     = 0;
3258 static int  myMin       = 0;
3259 static int  myMax       = 0;
3260 static int  myCur       = 0;
3261 static bool priocntl_enable = false;
3262 
3263 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
3264 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3265 
3266 
3267 // lwp_priocntl_init
3268 //
3269 // Try to determine the priority scale for our process.
3270 //
3271 // Return errno or 0 if OK.
3272 //
3273 static int lwp_priocntl_init () {
3274   int rslt;
3275   pcinfo_t ClassInfo;
3276   pcparms_t ParmInfo;
3277   int i;
3278 
3279   if (!UseThreadPriorities) return 0;
3280 
3281   // If ThreadPriorityPolicy is 1, switch tables
3282   if (ThreadPriorityPolicy == 1) {
3283     for (i = 0 ; i < CriticalPriority+1; i++)
3284       os::java_to_os_priority[i] = prio_policy1[i];
3285   }
3286   if (UseCriticalJavaThreadPriority) {
3287     // MaxPriority always maps to the FX scheduling class and criticalPrio.
3288     // See set_native_priority() and set_lwp_class_and_priority().
3289     // Save original MaxPriority mapping in case attempt to
3290     // use critical priority fails.
3291     java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3292     // Set negative to distinguish from other priorities
3293     os::java_to_os_priority[MaxPriority] = -criticalPrio;
3294   }
3295 
3296   // Get IDs for a set of well-known scheduling classes.
3297   // TODO-FIXME: GETCLINFO returns the current # of classes in the
3298   // system.  We should have a loop that iterates over the
3299   // classID values, which are known to be "small" integers.
3300 
3301   strcpy(ClassInfo.pc_clname, "TS");
3302   ClassInfo.pc_cid = -1;
3303   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3304   if (rslt < 0) return errno;
3305   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3306   tsLimits.schedPolicy = ClassInfo.pc_cid;
3307   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3308   tsLimits.minPrio = -tsLimits.maxPrio;
3309 
3310   strcpy(ClassInfo.pc_clname, "IA");
3311   ClassInfo.pc_cid = -1;
3312   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3313   if (rslt < 0) return errno;
3314   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3315   iaLimits.schedPolicy = ClassInfo.pc_cid;
3316   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3317   iaLimits.minPrio = -iaLimits.maxPrio;
3318 
3319   strcpy(ClassInfo.pc_clname, "RT");
3320   ClassInfo.pc_cid = -1;
3321   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3322   if (rslt < 0) return errno;
3323   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3324   rtLimits.schedPolicy = ClassInfo.pc_cid;
3325   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3326   rtLimits.minPrio = 0;
3327 
3328   strcpy(ClassInfo.pc_clname, "FX");
3329   ClassInfo.pc_cid = -1;
3330   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3331   if (rslt < 0) return errno;
3332   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3333   fxLimits.schedPolicy = ClassInfo.pc_cid;
3334   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3335   fxLimits.minPrio = 0;
3336 
3337   // Query our "current" scheduling class.
3338   // This will normally be IA, TS or, rarely, FX or RT.
3339   memset(&ParmInfo, 0, sizeof(ParmInfo));
3340   ParmInfo.pc_cid = PC_CLNULL;
3341   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3342   if (rslt < 0) return errno;
3343   myClass = ParmInfo.pc_cid;
3344 
3345   // We now know our scheduling classId, get specific information
3346   // about the class.
3347   ClassInfo.pc_cid = myClass;
3348   ClassInfo.pc_clname[0] = 0;
3349   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3350   if (rslt < 0) return errno;
3351 
3352   if (ThreadPriorityVerbose) {
3353     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3354   }
3355 
3356   memset(&ParmInfo, 0, sizeof(pcparms_t));
3357   ParmInfo.pc_cid = PC_CLNULL;
3358   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3359   if (rslt < 0) return errno;
3360 
3361   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3362     myMin = rtLimits.minPrio;
3363     myMax = rtLimits.maxPrio;
3364   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3365     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3366     myMin = iaLimits.minPrio;
3367     myMax = iaLimits.maxPrio;
3368     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3369   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3370     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3371     myMin = tsLimits.minPrio;
3372     myMax = tsLimits.maxPrio;
3373     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3374   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3375     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3376     myMin = fxLimits.minPrio;
3377     myMax = fxLimits.maxPrio;
3378     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3379   } else {
3380     // No clue - punt
3381     if (ThreadPriorityVerbose)
3382       tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
3383     return EINVAL;      // no clue, punt
3384   }
3385 
3386   if (ThreadPriorityVerbose) {
3387     tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
3388   }
3389 
3390   priocntl_enable = true;  // Enable changing priorities
3391   return 0;
3392 }
3393 
3394 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3395 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3396 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3397 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3398 
3399 
3400 // scale_to_lwp_priority
3401 //
3402 // Convert from the libthread "thr_setprio" scale to our current
3403 // lwp scheduling class scale.
3404 //
3405 static
3406 int     scale_to_lwp_priority (int rMin, int rMax, int x)
3407 {
3408   int v;
3409 
3410   if (x == 127) return rMax;            // avoid round-down
3411   v = (((x * (rMax - rMin))) / 128) + rMin;
3412   return v;
3413 }
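
// A few sample values of the mapping above, assuming an lwp priority range of
// [rMin, rMax] = [-60, 60]:
//
//   scale_to_lwp_priority(-60, 60,   0) == -60   // libthread minimum -> class minimum
//   scale_to_lwp_priority(-60, 60,  64) ==   0   // mid-scale -> middle of the range
//   scale_to_lwp_priority(-60, 60, 127) ==  60   // special-cased to avoid round-down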
3414 
3415 
3416 // set_lwp_class_and_priority
3417 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3418                                int newPrio, int new_class, bool scale) {
3419   int rslt;
3420   int Actual, Expected, prv;
3421   pcparms_t ParmInfo;                   // for GET-SET
3422 #ifdef ASSERT
3423   pcparms_t ReadBack;                   // for readback
3424 #endif
3425 
3426   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3427   // Query current values.
3428   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3429   // Cache "pcparms_t" in global ParmCache.
3430   // TODO: elide set-to-same-value
3431 
3432   // If something went wrong on init, don't change priorities.
3433   if ( !priocntl_enable ) {
3434     if (ThreadPriorityVerbose)
3435       tty->print_cr("Trying to set priority but init failed, ignoring");
3436     return EINVAL;
3437   }
3438 
3439   // If the lwp hasn't started yet, just return;
3440   // the _start routine will call us again.
3441   if ( lwpid <= 0 ) {
3442     if (ThreadPriorityVerbose) {
3443       tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
3444                      INTPTR_FORMAT " to %d, lwpid not set",
3445                      ThreadID, newPrio);
3446     }
3447     return 0;
3448   }
3449 
3450   if (ThreadPriorityVerbose) {
3451     tty->print_cr ("set_lwp_class_and_priority("
3452                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3453                    ThreadID, lwpid, newPrio);
3454   }
3455 
3456   memset(&ParmInfo, 0, sizeof(pcparms_t));
3457   ParmInfo.pc_cid = PC_CLNULL;
3458   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3459   if (rslt < 0) return errno;
3460 
3461   int cur_class = ParmInfo.pc_cid;
3462   ParmInfo.pc_cid = (id_t)new_class;
3463 
3464   if (new_class == rtLimits.schedPolicy) {
3465     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3466     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3467                                                        rtLimits.maxPrio, newPrio)
3468                                : newPrio;
3469     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3470     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3471     if (ThreadPriorityVerbose) {
3472       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3473     }
3474   } else if (new_class == iaLimits.schedPolicy) {
3475     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3476     int maxClamped     = MIN2(iaLimits.maxPrio,
3477                               cur_class == new_class
3478                                 ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3479     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3480                                                        maxClamped, newPrio)
3481                                : newPrio;
3482     iaInfo->ia_uprilim = cur_class == new_class
3483                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3484     iaInfo->ia_mode    = IA_NOCHANGE;
3485     if (ThreadPriorityVerbose) {
3486       tty->print_cr("IA: [%d...%d] %d->%d\n",
3487                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3488     }
3489   } else if (new_class == tsLimits.schedPolicy) {
3490     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3491     int maxClamped     = MIN2(tsLimits.maxPrio,
3492                               cur_class == new_class
3493                                 ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3494     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3495                                                        maxClamped, newPrio)
3496                                : newPrio;
3497     tsInfo->ts_uprilim = cur_class == new_class
3498                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3499     if (ThreadPriorityVerbose) {
3500       tty->print_cr("TS: [%d...%d] %d->%d\n",
3501                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3502     }
3503   } else if (new_class == fxLimits.schedPolicy) {
3504     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3505     int maxClamped     = MIN2(fxLimits.maxPrio,
3506                               cur_class == new_class
3507                                 ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3508     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3509                                                        maxClamped, newPrio)
3510                                : newPrio;
3511     fxInfo->fx_uprilim = cur_class == new_class
3512                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3513     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3514     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3515     if (ThreadPriorityVerbose) {
3516       tty->print_cr("FX: [%d...%d] %d->%d\n",
3517                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3518     }
3519   } else {
3520     if (ThreadPriorityVerbose) {
3521       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3522     }
3523     return EINVAL;    // no clue, punt
3524   }
3525 
3526   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3527   if (ThreadPriorityVerbose && rslt) {
3528     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3529   }
3530   if (rslt < 0) return errno;
3531 
3532 #ifdef ASSERT
3533   // Sanity check: read back what we just attempted to set.
3534   // In theory it could have changed in the interim ...
3535   //
3536   // The priocntl system call is tricky.
3537   // Sometimes it'll validate the priority value argument and
3538   // return EINVAL if unhappy.  At other times it fails silently.
3539   // Readbacks are prudent.
3540 
3541   if (!ReadBackValidate) return 0;
3542 
3543   memset(&ReadBack, 0, sizeof(pcparms_t));
3544   ReadBack.pc_cid = PC_CLNULL;
3545   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3546   assert(rslt >= 0, "priocntl failed");
3547   Actual = Expected = 0xBAD;
3548   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3549   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3550     Actual   = RTPRI(ReadBack)->rt_pri;
3551     Expected = RTPRI(ParmInfo)->rt_pri;
3552   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3553     Actual   = IAPRI(ReadBack)->ia_upri;
3554     Expected = IAPRI(ParmInfo)->ia_upri;
3555   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3556     Actual   = TSPRI(ReadBack)->ts_upri;
3557     Expected = TSPRI(ParmInfo)->ts_upri;
3558   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3559     Actual   = FXPRI(ReadBack)->fx_upri;
3560     Expected = FXPRI(ParmInfo)->fx_upri;
3561   } else {
3562     if (ThreadPriorityVerbose) {
3563       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3564                     ParmInfo.pc_cid);
3565     }
3566   }
3567 
3568   if (Actual != Expected) {
3569     if (ThreadPriorityVerbose) {
3570       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3571                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3572     }
3573   }
3574 #endif
3575 
3576   return 0;
3577 }
3578 
3579 // Solaris only gives access to 128 real priorities at a time,
3580 // so we expand Java's ten to fill this range.  This would be better
3581 // if we dynamically adjusted relative priorities.
3582 //
3583 // The ThreadPriorityPolicy option allows us to select 2 different
3584 // priority scales.
3585 //
3586 // ThreadPriorityPolicy=0
3587 // Since Solaris' default priority is MaximumPriority, we do not
3588 // set a priority lower than Max unless a priority lower than
3589 // NormPriority is requested.
3590 //
3591 // ThreadPriorityPolicy=1
3592 // This mode causes the priority table to be filled with
3593 // linear values.  NormPriority gets mapped to 50% of the
3594 // maximum priority and so on.  This will cause VM threads
3595 // to get unfair treatment against other Solaris processes
3596 // which do not explicitly alter their thread priorities.
3597 //
3598 
3599 int os::java_to_os_priority[CriticalPriority + 1] = {
3600   -99999,         // 0 Entry should never be used
3601 
3602   0,              // 1 MinPriority
3603   32,             // 2
3604   64,             // 3
3605 
3606   96,             // 4
3607   127,            // 5 NormPriority
3608   127,            // 6
3609 
3610   127,            // 7
3611   127,            // 8
3612   127,            // 9 NearMaxPriority
3613 
3614   127,            // 10 MaxPriority
3615 
3616   -criticalPrio   // 11 CriticalPriority
3617 };
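
// Illustrative call sequence (hypothetical thread 't'): with
// -XX:+UseCriticalJavaThreadPriority in effect, a request such as
//
//   os::set_native_priority(t, os::java_to_os_priority[CriticalPriority]);  // == -criticalPrio
//
// takes the fxcritical path below: thr_setprio() is skipped and
// set_lwp_class_and_priority() is asked to move the lwp into the FX class at
// priority 60, falling back to the saved java_MaxPriority_to_os_priority
// mapping in the original class if that fails.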
3618 
3619 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3620   OSThread* osthread = thread->osthread();
3621 
3622   // Save requested priority in case the thread hasn't been started
3623   osthread->set_native_priority(newpri);
3624 
3625   // Check for critical priority request
3626   bool fxcritical = false;
3627   if (newpri == -criticalPrio) {
3628     fxcritical = true;
3629     newpri = criticalPrio;
3630   }
3631 
3632   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3633   if (!UseThreadPriorities) return OS_OK;
3634 
3635   int status = 0;
3636 
3637   if (!fxcritical) {
3638     // Use thr_setprio only if we have a priority that thr_setprio understands
3639     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3640   }
3641 
3642   int lwp_status =
3643           set_lwp_class_and_priority(osthread->thread_id(),
3644           osthread->lwp_id(),
3645           newpri,
3646           fxcritical ? fxLimits.schedPolicy : myClass,
3647           !fxcritical);
3648   if (lwp_status != 0 && fxcritical) {
3649     // Try again, this time without changing the scheduling class
3650     newpri = java_MaxPriority_to_os_priority;
3651     lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3652             osthread->lwp_id(),
3653             newpri, myClass, false);
3654   }
3655   status |= lwp_status;
3656   return (status == 0) ? OS_OK : OS_ERR;
3657 }
3658 
3659 
3660 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
3661   int p;
3662   if ( !UseThreadPriorities ) {
3663     *priority_ptr = NormalPriority;
3664     return OS_OK;
3665   }
3666   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3667   if (status != 0) {
3668     return OS_ERR;
3669   }
3670   *priority_ptr = p;
3671   return OS_OK;
3672 }
3673 
3674 
3675 // Hint to the underlying OS that a task switch would not be good.
3676 // Void return because it's a hint and can fail.
3677 void os::hint_no_preempt() {
3678   schedctl_start(schedctl_init());
3679 }
3680 
3681 static void resume_clear_context(OSThread *osthread) {
3682   osthread->set_ucontext(NULL);
3683 }
3684 
3685 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3686   osthread->set_ucontext(context);
3687 }
3688 
3689 static Semaphore sr_semaphore;
3690 
3691 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
3692   // Save and restore errno to avoid confusing native code with EINTR
3693   // after sigsuspend.
3694   int old_errno = errno;
3695 
3696   OSThread* osthread = thread->osthread();
3697   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3698 
3699   os::SuspendResume::State current = osthread->sr.state();
3700   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3701     suspend_save_context(osthread, uc);
3702 
3703     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3704     os::SuspendResume::State state = osthread->sr.suspended();
3705     if (state == os::SuspendResume::SR_SUSPENDED) {
3706       sigset_t suspend_set;  // signals for sigsuspend()
3707 
3708       // get current set of blocked signals and unblock resume signal
3709       thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
3710       sigdelset(&suspend_set, os::Solaris::SIGasync());
3711 
3712       sr_semaphore.signal();
3713       // wait here until we are resumed
3714       while (1) {
3715         sigsuspend(&suspend_set);
3716 
3717         os::SuspendResume::State result = osthread->sr.running();
3718         if (result == os::SuspendResume::SR_RUNNING) {
3719           sr_semaphore.signal();
3720           break;
3721         }
3722       }
3723 
3724     } else if (state == os::SuspendResume::SR_RUNNING) {
3725       // request was cancelled, continue
3726     } else {
3727       ShouldNotReachHere();
3728     }
3729 
3730     resume_clear_context(osthread);
3731   } else if (current == os::SuspendResume::SR_RUNNING) {
3732     // request was cancelled, continue
3733   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3734     // ignore
3735   } else {
3736     // ignore
3737   }
3738 
3739   errno = old_errno;
3740 }
3741 
3742 void os::print_statistics() {
3743 }
3744 
3745 int os::message_box(const char* title, const char* message) {
3746   int i;
3747   fdStream err(defaultStream::error_fd());
3748   for (i = 0; i < 78; i++) err.print_raw("=");
3749   err.cr();
3750   err.print_raw_cr(title);
3751   for (i = 0; i < 78; i++) err.print_raw("-");
3752   err.cr();
3753   err.print_raw_cr(message);
3754   for (i = 0; i < 78; i++) err.print_raw("=");
3755   err.cr();
3756 
3757   char buf[16];
3758   // Prevent process from exiting upon "read error" without consuming all CPU
3759   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3760 
3761   return buf[0] == 'y' || buf[0] == 'Y';
3762 }
3763 
3764 static int sr_notify(OSThread* osthread) {
3765   int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
3766   assert_status(status == 0, status, "thr_kill");
3767   return status;
3768 }
3769 
3770 // "Randomly" selected value for how long we want to spin
3771 // before bailing out on suspending a thread, also how often
3772 // we send a signal to a thread we want to resume
3773 static const int RANDOMLY_LARGE_INTEGER = 1000000;
3774 static const int RANDOMLY_LARGE_INTEGER2 = 100;
3775 
3776 static bool do_suspend(OSThread* osthread) {
3777   assert(osthread->sr.is_running(), "thread should be running");
3778   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
3779 
3780   // mark as suspended and send signal
3781   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3782     // failed to switch, state wasn't running?
3783     ShouldNotReachHere();
3784     return false;
3785   }
3786 
3787   if (sr_notify(osthread) != 0) {
3788     ShouldNotReachHere();
3789   }
3790 
3791   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3792   while (true) {
3793     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
3794       break;
3795     } else {
3796       // timeout
3797       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3798       if (cancelled == os::SuspendResume::SR_RUNNING) {
3799         return false;
3800       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3801         // make sure that we consume the signal on the semaphore as well
3802         sr_semaphore.wait();
3803         break;
3804       } else {
3805         ShouldNotReachHere();
3806         return false;
3807       }
3808     }
3809   }
3810 
3811   guarantee(osthread->sr.is_suspended(), "Must be suspended");
3812   return true;
3813 }
3814 
3815 static void do_resume(OSThread* osthread) {
3816   assert(osthread->sr.is_suspended(), "thread should be suspended");
3817   assert(!sr_semaphore.trywait(), "invalid semaphore state");
3818 
3819   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3820     // failed to switch to WAKEUP_REQUEST
3821     ShouldNotReachHere();
3822     return;
3823   }
3824 
3825   while (true) {
3826     if (sr_notify(osthread) == 0) {
3827       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
3828         if (osthread->sr.is_running()) {
3829           return;
3830         }
3831       }
3832     } else {
3833       ShouldNotReachHere();
3834     }
3835   }
3836 
3837   guarantee(osthread->sr.is_running(), "Must be running!");
3838 }
3839 
3840 void os::SuspendedThreadTask::internal_do_task() {
3841   if (do_suspend(_thread->osthread())) {
3842     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3843     do_task(context);
3844     do_resume(_thread->osthread());
3845   }
3846 }
3847 
3848 class PcFetcher : public os::SuspendedThreadTask {
3849 public:
3850   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3851   ExtendedPC result();
3852 protected:
3853   void do_task(const os::SuspendedThreadTaskContext& context);
3854 private:
3855   ExtendedPC _epc;
3856 };
3857 
3858 ExtendedPC PcFetcher::result() {
3859   guarantee(is_done(), "task is not done yet.");
3860   return _epc;
3861 }
3862 
3863 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3864   Thread* thread = context.thread();
3865   OSThread* osthread = thread->osthread();
3866   if (osthread->ucontext() != NULL) {
3867     _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
3868   } else {
3869     // NULL context is unexpected, double-check this is the VMThread
3870     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3871   }
3872 }
3873 
3874 // Fetches the pc of the given thread by briefly suspending it; the result is
3875 // only a hint. Used for profiling only!
3876 ExtendedPC os::get_thread_pc(Thread* thread) {
3877   // Make sure that it is called by the watcher and the Threads lock is owned.
3878   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
3879   // For now, is only used to profile the VM Thread
3880   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3881   PcFetcher fetcher(thread);
3882   fetcher.run();
3883   return fetcher.result();
3884 }
3885 
3886 
3887 // This does not do anything on Solaris. This is basically a hook for being
3888 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
3889 void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
3890   f(value, method, args, thread);
3891 }
3892 
3893 // This routine may be used by user applications as a "hook" to catch signals.
3894 // The user-defined signal handler must pass unrecognized signals to this
3895 // routine, and if it returns true (non-zero), then the signal handler must
3896 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
3897 // routine will never return false (zero), but will instead execute a VM panic
3898 // routine to kill the process.
3899 //
3900 // If this routine returns false, it is OK to call it again.  This allows
3901 // the user-defined signal handler to perform checks either before or after
3902 // the VM performs its own checks.  Naturally, the user code would be making
3903 // a serious error if it tried to handle an exception (such as a null check
3904 // or breakpoint) that the VM was generating for its own correct operation.
3905 //
3906 // This routine may recognize any of the following kinds of signals:
3907 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
3908 // os::Solaris::SIGasync
3909 // It should be consulted by handlers for any of those signals.
3910 // It explicitly does not recognize os::Solaris::SIGinterrupt
3911 //
3912 // The caller of this routine must pass in the three arguments supplied
3913 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
3914 // field of the structure passed to sigaction().  This routine assumes that
3915 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3916 //
3917 // Note that the VM will print warnings if it detects conflicting signal
3918 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3919 //
3920 extern "C" JNIEXPORT int
3921 JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
3922                           int abort_if_unrecognized);
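
// Sketch of a user-defined handler that cooperates with the hook described
// above (illustrative only; not installed by the VM):
//
//   extern "C" void my_app_handler(int sig, siginfo_t* info, void* uc) {
//     if (JVM_handle_solaris_signal(sig, info, uc, /*abort_if_unrecognized*/ 0)) {
//       return;  // the VM recognized and handled the signal
//     }
//     // ... application-specific handling of the unrecognized signal ...
//   }
//
// Such a handler must be installed via sa_sigaction with SA_SIGINFO and
// SA_RESTART set, as noted above.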
3923 
3924 
3925 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
3926   int orig_errno = errno;  // Preserve errno value over signal handler.
3927   JVM_handle_solaris_signal(sig, info, ucVoid, true);
3928   errno = orig_errno;
3929 }
3930 
3931 /* Do not delete - even if the guarantee is ever removed, a signal handler
3932    (even an empty one) is needed to provoke threads blocked on IO to return EINTR.
3933    Note: this explicitly does NOT call JVM_handle_solaris_signal and
3934    does NOT participate in signal chaining, because SA_RESTART must NOT
3935    be set in order for EINTR to work. */
3936 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
3937    if (UseSignalChaining) {
3938       struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
3939       if (actp && actp->sa_handler) {
3940         vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
3941       }
3942    }
3943 }
3944 
3945 // This boolean allows users to forward their own non-matching signals
3946 // to JVM_handle_solaris_signal, harmlessly.
3947 bool os::Solaris::signal_handlers_are_installed = false;
3948 
3949 // For signal-chaining
3950 bool os::Solaris::libjsig_is_loaded = false;
3951 typedef struct sigaction *(*get_signal_t)(int);
3952 get_signal_t os::Solaris::get_signal_action = NULL;
3953 
3954 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
3955   struct sigaction *actp = NULL;
3956 
3957   if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
3958     // Retrieve the old signal handler from libjsig
3959     actp = (*get_signal_action)(sig);
3960   }
3961   if (actp == NULL) {
3962     // Retrieve the preinstalled signal handler from jvm
3963     actp = get_preinstalled_handler(sig);
3964   }
3965 
3966   return actp;
3967 }
3968 
3969 static bool call_chained_handler(struct sigaction *actp, int sig,
3970                                  siginfo_t *siginfo, void *context) {
3971   // Call the old signal handler
3972   if (actp->sa_handler == SIG_DFL) {
3973     // It's more reasonable to let jvm treat it as an unexpected exception
3974     // instead of taking the default action.
3975     return false;
3976   } else if (actp->sa_handler != SIG_IGN) {
3977     if ((actp->sa_flags & SA_NODEFER) == 0) {
3978       // automatically block the signal
3979       sigaddset(&(actp->sa_mask), sig);
3980     }
3981 
3982     sa_handler_t hand;
3983     sa_sigaction_t sa;
3984     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3985     // retrieve the chained handler
3986     if (siginfo_flag_set) {
3987       sa = actp->sa_sigaction;
3988     } else {
3989       hand = actp->sa_handler;
3990     }
3991 
3992     if ((actp->sa_flags & SA_RESETHAND) != 0) {
3993       actp->sa_handler = SIG_DFL;
3994     }
3995 
3996     // try to honor the signal mask
3997     sigset_t oset;
3998     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3999 
4000     // call into the chained handler
4001     if (siginfo_flag_set) {
4002       (*sa)(sig, siginfo, context);
4003     } else {
4004       (*hand)(sig);
4005     }
4006 
4007     // restore the signal mask
4008     thr_sigsetmask(SIG_SETMASK, &oset, 0);
4009   }
4010   // Tell jvm's signal handler the signal is taken care of.
4011   return true;
4012 }
4013 
4014 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4015   bool chained = false;
4016   // signal-chaining
4017   if (UseSignalChaining) {
4018     struct sigaction *actp = get_chained_signal_action(sig);
4019     if (actp != NULL) {
4020       chained = call_chained_handler(actp, sig, siginfo, context);
4021     }
4022   }
4023   return chained;
4024 }
4025 
4026 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4027   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4028   if (preinstalled_sigs[sig] != 0) {
4029     return &chainedsigactions[sig];
4030   }
4031   return NULL;
4032 }
4033 
4034 void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
4035 
4036   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4037   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4038   chainedsigactions[sig] = oldAct;
4039   preinstalled_sigs[sig] = 1;
4040 }
4041 
4042 void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
4043   // Check for overwrite.
4044   struct sigaction oldAct;
4045   sigaction(sig, (struct sigaction*)NULL, &oldAct);
4046   void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
4047                                       : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
4048   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4049       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4050       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4051     if (AllowUserSignalHandlers || !set_installed) {
4052       // Do not overwrite; user takes responsibility to forward to us.
4053       return;
4054     } else if (UseSignalChaining) {
4055       if (oktochain) {
4056         // save the old handler in jvm
4057         save_preinstalled_handler(sig, oldAct);
4058       } else {
4059         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4060       }
4061       // libjsig also interposes the sigaction() call below and saves the
4062       // old sigaction on its own.
4063     } else {
4064       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4065                     "%#lx for signal %d.", (long)oldhand, sig));
4066     }
4067   }
4068 
4069   struct sigaction sigAct;
4070   sigfillset(&(sigAct.sa_mask));
4071   sigAct.sa_handler = SIG_DFL;
4072 
4073   sigAct.sa_sigaction = signalHandler;
4074   // Handle SIGSEGV on alternate signal stack if
4075   // not using stack banging
4076   if (!UseStackBanging && sig == SIGSEGV) {
4077     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4078   // Interruptible i/o requires SA_RESTART cleared so EINTR
4079   // is returned instead of restarting system calls
4080   } else if (sig == os::Solaris::SIGinterrupt()) {
4081     sigemptyset(&sigAct.sa_mask);
4082     sigAct.sa_handler = NULL;
4083     sigAct.sa_flags = SA_SIGINFO;
4084     sigAct.sa_sigaction = sigINTRHandler;
4085   } else {
4086     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4087   }
4088   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4089 
4090   sigaction(sig, &sigAct, &oldAct);
4091 
4092   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4093                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4094   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4095 }
4096 
4097 
4098 #define DO_SIGNAL_CHECK(sig) \
4099   if (!sigismember(&check_signal_done, sig)) \
4100     os::Solaris::check_signal_handler(sig)
4101 
4102 // This method is a periodic task used to check for misbehaving JNI applications
4103 // under CheckJNI; we can add any other periodic checks here.
4104 
4105 void os::run_periodic_checks() {
4106   // A big source of grief is hijacking virtual address 0x0 on Solaris,
4107   // thereby preventing NULL checks.
4108   if(!check_addr0_done) check_addr0_done = check_addr0(tty);
4109 
4110   if (check_signals == false) return;
4111 
4112   // SEGV and BUS, if overridden, could potentially prevent the
4113   // generation of hs*.log in the event of a crash.  Debugging
4114   // such a case can be very challenging, so we absolutely
4115   // check the following for good measure:
4116   DO_SIGNAL_CHECK(SIGSEGV);
4117   DO_SIGNAL_CHECK(SIGILL);
4118   DO_SIGNAL_CHECK(SIGFPE);
4119   DO_SIGNAL_CHECK(SIGBUS);
4120   DO_SIGNAL_CHECK(SIGPIPE);
4121   DO_SIGNAL_CHECK(SIGXFSZ);
4122 
4123   // ReduceSignalUsage allows the user to override these handlers
4124   // see comments at the very top and jvm_solaris.h
4125   if (!ReduceSignalUsage) {
4126     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4127     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4128     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4129     DO_SIGNAL_CHECK(BREAK_SIGNAL);
4130   }
4131 
4132   // See comments above for using JVM1/JVM2 and UseAltSigs
4133   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4134   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4135 
4136 }
4137 
4138 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4139 
4140 static os_sigaction_t os_sigaction = NULL;
4141 
4142 void os::Solaris::check_signal_handler(int sig) {
4143   char buf[O_BUFLEN];
4144   address jvmHandler = NULL;
4145 
4146   struct sigaction act;
4147   if (os_sigaction == NULL) {
4148     // only trust the default sigaction, in case it has been interposed
4149     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4150     if (os_sigaction == NULL) return;
4151   }
4152 
4153   os_sigaction(sig, (struct sigaction*)NULL, &act);
4154 
4155   address thisHandler = (act.sa_flags & SA_SIGINFO)
4156     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4157     : CAST_FROM_FN_PTR(address, act.sa_handler) ;
4158 
4159 
4160   switch(sig) {
4161     case SIGSEGV:
4162     case SIGBUS:
4163     case SIGFPE:
4164     case SIGPIPE:
4165     case SIGXFSZ:
4166     case SIGILL:
4167       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4168       break;
4169 
4170     case SHUTDOWN1_SIGNAL:
4171     case SHUTDOWN2_SIGNAL:
4172     case SHUTDOWN3_SIGNAL:
4173     case BREAK_SIGNAL:
4174       jvmHandler = (address)user_handler();
4175       break;
4176 
4177     default:
4178       int intrsig = os::Solaris::SIGinterrupt();
4179       int asynsig = os::Solaris::SIGasync();
4180 
4181       if (sig == intrsig) {
4182         jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4183       } else if (sig == asynsig) {
4184         jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4185       } else {
4186         return;
4187       }
4188       break;
4189   }
4190 
4191 
4192   if (thisHandler != jvmHandler) {
4193     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4194     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4195     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4196     // No need to check this sig any longer
4197     sigaddset(&check_signal_done, sig);
4198     // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
4199     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
4200       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
4201                     exception_name(sig, buf, O_BUFLEN));
4202     }
4203   } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4204     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4205     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4206     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
4207     // No need to check this sig any longer
4208     sigaddset(&check_signal_done, sig);
4209   }
4210 
4211   // Print all the signal handler state
4212   if (sigismember(&check_signal_done, sig)) {
4213     print_signal_handlers(tty, buf, O_BUFLEN);
4214   }
4215 
4216 }
4217 
4218 void os::Solaris::install_signal_handlers() {
4219   bool libjsigdone = false;
4220   signal_handlers_are_installed = true;
4221 
4222   // signal-chaining
4223   typedef void (*signal_setting_t)();
4224   signal_setting_t begin_signal_setting = NULL;
4225   signal_setting_t end_signal_setting = NULL;
4226   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4227                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4228   if (begin_signal_setting != NULL) {
4229     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4230                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4231     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4232                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4233     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4234                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4235     libjsig_is_loaded = true;
4236     if (os::Solaris::get_libjsig_version != NULL) {
4237       libjsigversion =  (*os::Solaris::get_libjsig_version)();
4238     }
4239     assert(UseSignalChaining, "should enable signal-chaining");
4240   }
4241   if (libjsig_is_loaded) {
4242     // Tell libjsig jvm is setting signal handlers
4243     (*begin_signal_setting)();
4244   }
4245 
4246   set_signal_handler(SIGSEGV, true, true);
4247   set_signal_handler(SIGPIPE, true, true);
4248   set_signal_handler(SIGXFSZ, true, true);
4249   set_signal_handler(SIGBUS, true, true);
4250   set_signal_handler(SIGILL, true, true);
4251   set_signal_handler(SIGFPE, true, true);
4252 
4253 
4254   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4255 
4256     // Pre-1.4.1 libjsig limited signal chaining to signals <= 32, so it
4257     // cannot register overridable signals, which might be > 32.
4258     if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
4259       // Tell libjsig the jvm has finished setting signal handlers
4260       (*end_signal_setting)();
4261       libjsigdone = true;
4262     }
4263   }
4264 
4265   // Never ok to chain our SIGinterrupt
4266   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4267   set_signal_handler(os::Solaris::SIGasync(), true, true);
4268 
4269   if (libjsig_is_loaded && !libjsigdone) {
4270     // Tell libjsig the jvm has finished setting signal handlers
4271     (*end_signal_setting)();
4272   }
4273 
4274   // We don't activate the signal checker if libjsig is in place, since we trust
4275   // ourselves; and if a UserSignalHandler is installed, all bets are off.
4276   // Log that signal checking is off only if -verbose:jni is specified.
4277   if (CheckJNICalls) {
4278     if (libjsig_is_loaded) {
4279       if (PrintJNIResolving) {
4280         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4281       }
4282       check_signals = false;
4283     }
4284     if (AllowUserSignalHandlers) {
4285       if (PrintJNIResolving) {
4286         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4287       }
4288       check_signals = false;
4289     }
4290   }
4291 }
4292 
4293 
4294 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
4295 
4296 const char * signames[] = {
4297   "SIG0",
4298   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4299   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4300   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4301   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4302   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4303   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4304   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4305   "SIGCANCEL", "SIGLOST"
4306 };
4307 
4308 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4309   if (0 < exception_code && exception_code <= SIGRTMAX) {
4310     // signal
4311     if (exception_code < sizeof(signames)/sizeof(const char*)) {
4312        jio_snprintf(buf, size, "%s", signames[exception_code]);
4313     } else {
4314        jio_snprintf(buf, size, "SIG%d", exception_code);
4315     }
4316     return buf;
4317   } else {
4318     return NULL;
4319   }
4320 }
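
// For example, SIGSEGV is signal 11 on Solaris, so
// os::exception_name(SIGSEGV, buf, sizeof(buf)) fills 'buf' with "SIGSEGV";
// a signal beyond the table (but still <= SIGRTMAX) is formatted as
// "SIG<number>" instead.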
4321 
4322 // (Static) wrapper for getisax(2) call.
4323 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4324 
4325 // (Static) wrappers for the liblgrp API
4326 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4327 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4328 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4329 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4330 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4331 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4332 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4333 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4334 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4335 
4336 // (Static) wrapper for meminfo() call.
4337 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4338 
4339 static address resolve_symbol_lazy(const char* name) {
4340   address addr = (address) dlsym(RTLD_DEFAULT, name);
4341   if(addr == NULL) {
4342     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4343     addr = (address) dlsym(RTLD_NEXT, name);
4344   }
4345   return addr;
4346 }
4347 
4348 static address resolve_symbol(const char* name) {
4349   address addr = resolve_symbol_lazy(name);
4350   if(addr == NULL) {
4351     fatal(dlerror());
4352   }
4353   return addr;
4354 }
4355 
4356 void os::Solaris::libthread_init() {
4357   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4358 
4359   lwp_priocntl_init();
4360 
4361   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4362   if(func == NULL) {
4363     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
4364     // Guarantee that this VM is running on a new enough OS (5.6 or
4365     // later) that it will have a new enough libthread.so.
4366     guarantee(func != NULL, "libthread.so is too old.");
4367   }
4368 
4369   int size;
4370   void (*handler_info_func)(address *, int *);
4371   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4372   handler_info_func(&handler_start, &size);
4373   handler_end = handler_start + size;
4374 }
4375 
4376 
4377 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4378 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4379 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4380 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4381 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4382 int os::Solaris::_mutex_scope = USYNC_THREAD;
4383 
4384 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4385 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4386 int_fnP_cond_tP os::Solaris::_cond_signal;
4387 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4388 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4389 int_fnP_cond_tP os::Solaris::_cond_destroy;
4390 int os::Solaris::_cond_scope = USYNC_THREAD;
4391 
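     // Pick the synchronization primitives the VM will use: the raw LWP
     // primitives when UseLWPSynchronization is set, otherwise either the
     // pthread or the default libthread/libc implementations.  The chosen
     // functions are installed via the os::Solaris::set_* accessors.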
4392 void os::Solaris::synchronization_init() {
4393   if (UseLWPSynchronization) {
4394     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4395     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4396     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4397     os::Solaris::set_mutex_init(lwp_mutex_init);
4398     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4399     os::Solaris::set_mutex_scope(USYNC_THREAD);
4400 
4401     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4402     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4403     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4404     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4405     os::Solaris::set_cond_init(lwp_cond_init);
4406     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4407     os::Solaris::set_cond_scope(USYNC_THREAD);
4408   }
4409   else {
4410     os::Solaris::set_mutex_scope(USYNC_THREAD);
4411     os::Solaris::set_cond_scope(USYNC_THREAD);
4412 
4413     if (UsePthreads) {
4414       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4415       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4416       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4417       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4418       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4419 
4420       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4421       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4422       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4423       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4424       os::Solaris::set_cond_init(pthread_cond_default_init);
4425       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4426     }
4427     else {
4428       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4429       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4430       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4431       os::Solaris::set_mutex_init(::mutex_init);
4432       os::Solaris::set_mutex_destroy(::mutex_destroy);
4433 
4434       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4435       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4436       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4437       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4438       os::Solaris::set_cond_init(::cond_init);
4439       os::Solaris::set_cond_destroy(::cond_destroy);
4440     }
4441   }
4442 }
4443 
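     // Load liblgrp.so.1 and resolve the locality group entry points used for
     // NUMA support.  Returns false if the library is not available, in which
     // case the caller disables UseNUMA.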
4444 bool os::Solaris::liblgrp_init() {
4445   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4446   if (handle != NULL) {
4447     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4448     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4449     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4450     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4451     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4452     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4453     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4454     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4455                                        dlsym(handle, "lgrp_cookie_stale")));
4456 
4457     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4458     set_lgrp_cookie(c);
4459     return true;
4460   }
4461   return false;
4462 }
4463 
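     // Resolve optional symbols that may be absent on older Solaris releases;
     // callers must check the corresponding function pointers for NULL before
     // use.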
4464 void os::Solaris::misc_sym_init() {
4465   address func;
4466 
4467   // getisax
4468   func = resolve_symbol_lazy("getisax");
4469   if (func != NULL) {
4470     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4471   }
4472 
4473   // meminfo
4474   func = resolve_symbol_lazy("meminfo");
4475   if (func != NULL) {
4476     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4477   }
4478 }
4479 
4480 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4481   assert(_getisax != NULL, "_getisax not set");
4482   return _getisax(array, n);
4483 }
4484 
4485 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4486 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4487 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4488 
4489 void init_pset_getloadavg_ptr(void) {
4490   pset_getloadavg_ptr =
4491     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4492   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4493     warning("pset_getloadavg function not found");
4494   }
4495 }
4496 
4497 int os::Solaris::_dev_zero_fd = -1;
4498 
4499 // this is called _before_ the global arguments have been parsed
4500 void os::init(void) {
4501   _initial_pid = getpid();
4502 
4503   max_hrtime = first_hrtime = gethrtime();
4504 
4505   init_random(1234567);
4506 
4507   page_size = sysconf(_SC_PAGESIZE);
4508   if (page_size == -1)
4509     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4510                   strerror(errno)));
4511   init_page_sizes((size_t) page_size);
4512 
4513   Solaris::initialize_system_info();
4514 
4515   // Initialize misc. symbols as soon as possible, so we can use them
4516   // if we need them.
4517   Solaris::misc_sym_init();
4518 
4519   int fd = ::open("/dev/zero", O_RDWR);
4520   if (fd < 0) {
4521     fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4522   } else {
4523     Solaris::set_dev_zero_fd(fd);
4524 
4525     // Close on exec, child won't inherit.
4526     fcntl(fd, F_SETFD, FD_CLOEXEC);
4527   }
4528 
4529   clock_tics_per_sec = CLK_TCK;
4530 
4531   // check if dladdr1() exists; dladdr1 can provide more information than
4532   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4533   // and is available on linker patches for 5.7 and 5.8.
4534   // libdl.so must have been loaded, this call is just an entry lookup
4535   void * hdl = dlopen("libdl.so", RTLD_NOW);
4536   if (hdl)
4537     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4538 
4539   // (Solaris only) this switches to calls that actually do locking.
4540   ThreadCritical::initialize();
4541 
4542   main_thread = thr_self();
4543 
4544   // Constant minimum stack size allowed. It must be at least
4545   // the minimum of what the OS supports (thr_min_stack()), and
4546   // enough to allow the thread to get to user bytecode execution.
4547   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4548   // If the pagesize of the VM is greater than 8K determine the appropriate
4549   // number of initial guard pages.  The user can change this with the
4550   // command line arguments, if needed.
4551   if (vm_page_size() > 8*K) {
4552     StackYellowPages = 1;
4553     StackRedPages = 1;
4554     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4555   }
4556 }
4557 
4558 // To install functions for atexit system call
4559 extern "C" {
4560   static void perfMemory_exit_helper() {
4561     perfMemory_exit();
4562   }
4563 }
4564 
4565 // this is called _after_ the global arguments have been parsed
4566 jint os::init_2(void) {
4567   // try to enable extended file IO ASAP, see 6431278
4568   os::Solaris::try_enable_extended_io();
4569 
4570   // Allocate a single page and mark it as readable for safepoint polling.  Also
4571   // use this first mmap call to check support for MAP_ALIGN.
4572   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4573                                                       page_size,
4574                                                       MAP_PRIVATE | MAP_ALIGN,
4575                                                       PROT_READ);
4576   if (polling_page == NULL) {
4577     has_map_align = false;
4578     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
4579                                                 PROT_READ);
4580   }
4581 
4582   os::set_polling_page(polling_page);
4583 
4584 #ifndef PRODUCT
4585   if (Verbose && PrintMiscellaneous)
4586     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
4587 #endif
4588 
4589   if (!UseMembar) {
4590     address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
4591     guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
4592     os::set_memory_serialize_page( mem_serialize_page );
4593 
4594 #ifndef PRODUCT
4595     if (Verbose && PrintMiscellaneous)
4596       tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
4597 #endif
4598   }
4599 
4600   // Check minimum allowable stack size for thread creation and to initialize
4601   // the java system classes, including StackOverflowError - depends on page
4602   // size.  Add a page for compiler2 recursion in main thread.
4603   // Add in 2*BytesPerWord times page size to account for VM stack during
4604   // class initialization depending on 32 or 64 bit VM.
4605   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
4606             (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
4607                     2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
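       // For example (illustrative numbers only): with 8K pages, a total of 24
       // yellow/red/shadow pages and a 64-bit compiler2 VM (BytesPerWord == 8),
       // this evaluates to (24 + 2*8 + 1) * 8K = 328K.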
4608 
4609   size_t threadStackSizeInBytes = ThreadStackSize * K;
4610   if (threadStackSizeInBytes != 0 &&
4611     threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
4612     tty->print_cr("\nThe stack size specified is too small. Specify at least %dk",
4613                   os::Solaris::min_stack_allowed/K);
4614     return JNI_ERR;
4615   }
4616 
4617   // On systems with a 64kb page size, the usable default stack size is
4618   // quite a bit less, so increase the stack for 64kb (or any larger
4619   // than 8kb) pages.  This increases virtual memory fragmentation
4620   // (since we're not creating the stack on a power of 2 boundary).
4621   // The real fix for this should be to fix the guard page
4622   // mechanism.
4623 
4624   if (vm_page_size() > 8*K) {
4625       threadStackSizeInBytes = (threadStackSizeInBytes != 0)
4626          ? threadStackSizeInBytes +
4627            ((StackYellowPages + StackRedPages) * vm_page_size())
4628          : 0;
4629       ThreadStackSize = threadStackSizeInBytes/K;
4630   }
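       // For example (illustrative numbers only): with 64K pages and one yellow
       // plus one red page, a requested 512K stack grows to 512K + 2*64K = 640K.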
4631 
4632   // Make the stack size a multiple of the page size so that
4633   // the yellow/red zones can be guarded.
4634   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
4635         vm_page_size()));
4636 
4637   Solaris::libthread_init();
4638 
4639   if (UseNUMA) {
4640     if (!Solaris::liblgrp_init()) {
4641       UseNUMA = false;
4642     } else {
4643       size_t lgrp_limit = os::numa_get_groups_num();
4644       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
4645       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
4646       FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
4647       if (lgrp_num < 2) {
4648         // There's only one locality group, disable NUMA.
4649         UseNUMA = false;
4650       }
4651     }
4652     if (!UseNUMA && ForceNUMA) {
4653       UseNUMA = true;
4654     }
4655   }
4656 
4657   Solaris::signal_sets_init();
4658   Solaris::init_signal_mem();
4659   Solaris::install_signal_handlers();
4660 
4661   if (libjsigversion < JSIG_VERSION_1_4_1) {
4662     Maxlibjsigsigs = OLDMAXSIGNUM;
4663   }
4664 
4665   // initialize synchronization primitives to use either thread or
4666   // lwp synchronization (controlled by UseLWPSynchronization)
4667   Solaris::synchronization_init();
4668 
4669   if (MaxFDLimit) {
4670     // set the number of file descriptors to max. print out error
4671     // if getrlimit/setrlimit fails but continue regardless.
4672     struct rlimit nbr_files;
4673     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4674     if (status != 0) {
4675       if (PrintMiscellaneous && (Verbose || WizardMode))
4676         perror("os::init_2 getrlimit failed");
4677     } else {
4678       nbr_files.rlim_cur = nbr_files.rlim_max;
4679       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4680       if (status != 0) {
4681         if (PrintMiscellaneous && (Verbose || WizardMode))
4682           perror("os::init_2 setrlimit failed");
4683       }
4684     }
4685   }
4686 
4687   // Calculate the theoretical max. number of Threads to guard against
4688   // artificial out-of-memory situations, where all available address
4689   // space has been reserved by thread stacks. Default stack size is 1Mb.
4690   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
4691     JavaThread::stack_size_at_create() : (1*K*K);
4692   assert(pre_thread_stack_size != 0, "Must have a stack");
4693   // Solaris has a maximum of 4Gb of user address space. Calculate the thread limit
4694   // at which we should start doing virtual memory banging. Currently that is when
4695   // the threads will have used all but 200Mb of space.
4696   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
4697   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
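       // For example (illustrative numbers only): with the default 1Mb stacks
       // this limit is (4096Mb - 200Mb) / 1Mb = 3896 threads.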
4698 
4699   // at-exit methods are called in the reverse order of their registration.
4700   // In Solaris 7 and earlier, atexit functions are called on return from
4701   // main or as a result of a call to exit(3C). There can be only 32 of
4702   // these functions registered and atexit() does not set errno. In Solaris
4703   // 8 and later, there is no limit to the number of functions registered
4704   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
4705   // functions are called upon dlclose(3DL) in addition to return from main
4706   // and exit(3C).
4707 
4708   if (PerfAllowAtExitRegistration) {
4709     // only register atexit functions if PerfAllowAtExitRegistration is set.
4710     // atexit functions can be delayed until process exit time, which
4711     // can be problematic for embedded VM situations. Embedded VMs should
4712     // call DestroyJavaVM() to assure that VM resources are released.
4713 
4714     // note: perfMemory_exit_helper atexit function may be removed in
4715     // the future if the appropriate cleanup code can be added to the
4716     // VM_Exit VMOperation's doit method.
4717     if (atexit(perfMemory_exit_helper) != 0) {
4718       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4719     }
4720   }
4721 
4722   // Init pset_loadavg function pointer
4723   init_pset_getloadavg_ptr();
4724 
4725   return JNI_OK;
4726 }
4727 
4728 void os::init_3(void) {
4729   return;
4730 }
4731 
4732 // Mark the polling page as unreadable
4733 void os::make_polling_page_unreadable(void) {
4734   if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0)
4735     fatal("Could not disable polling page");
4736 }
4737 
4738 // Mark the polling page as readable
4739 void os::make_polling_page_readable(void) {
4740   if (mprotect((char *)_polling_page, page_size, PROT_READ) != 0)
4741     fatal("Could not enable polling page");
4742 }
4743 
4744 // OS interface.
4745 
4746 bool os::check_heap(bool force) { return true; }
4747 
4748 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
4749 static vsnprintf_t sol_vsnprintf = NULL;
4750 
4751 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
4752   if (!sol_vsnprintf) {
4753     // search for the named symbol in the objects that were loaded after libjvm
4754     void* where = RTLD_NEXT;
4755     if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
4756         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
4757     if (!sol_vsnprintf) {
4758       // search for the named symbol in the objects that were loaded before libjvm
4759       where = RTLD_DEFAULT;
4760       if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
4761         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
4762       assert(sol_vsnprintf != NULL, "vsnprintf not found");
4763     }
4764   }
4765   return (*sol_vsnprintf)(buf, count, fmt, argptr);
4766 }
4767 
4768 
4769 // Is a (classpath) directory empty?
4770 bool os::dir_is_empty(const char* path) {
4771   DIR *dir = NULL;
4772   struct dirent *ptr;
4773 
4774   dir = opendir(path);
4775   if (dir == NULL) return true;
4776 
4777   /* Scan the directory */
4778   bool result = true;
4779   char buf[sizeof(struct dirent) + MAX_PATH];
4780   struct dirent *dbuf = (struct dirent *) buf;
4781   while (result && (ptr = readdir(dir, dbuf)) != NULL) {
4782     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4783       result = false;
4784     }
4785   }
4786   closedir(dir);
4787   return result;
4788 }
4789 
4790 // This code originates from JDK's sysOpen and open64_w
4791 // from src/solaris/hpi/src/system_md.c
4792 
4793 #ifndef O_DELETE
4794 #define O_DELETE 0x10000
4795 #endif
4796 
4797 // Open a file. Unlink the file immediately after open returns
4798 // if the specified oflag has the O_DELETE flag set.
4799 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
4800 
4801 int os::open(const char *path, int oflag, int mode) {
4802   if (strlen(path) > MAX_PATH - 1) {
4803     errno = ENAMETOOLONG;
4804     return -1;
4805   }
4806   int fd;
4807   int o_delete = (oflag & O_DELETE);
4808   oflag = oflag & ~O_DELETE;
4809 
4810   fd = ::open64(path, oflag, mode);
4811   if (fd == -1) return -1;
4812 
4813   //If the open succeeded, the file might still be a directory
4814   {
4815     struct stat64 buf64;
4816     int ret = ::fstat64(fd, &buf64);
4817     int st_mode = buf64.st_mode;
4818 
4819     if (ret != -1) {
4820       if ((st_mode & S_IFMT) == S_IFDIR) {
4821         errno = EISDIR;
4822         ::close(fd);
4823         return -1;
4824       }
4825     } else {
4826       ::close(fd);
4827       return -1;
4828     }
4829   }
4830     /*
4831      * 32-bit Solaris systems suffer from:
4832      *
4833      * - an historical default soft limit of 256 per-process file
4834      *   descriptors that is too low for many Java programs.
4835      *
4836      * - a design flaw where file descriptors created using stdio
4837      *   fopen must be less than 256, _even_ when the first limit above
4838      *   has been raised.  This can cause calls to fopen (but not calls to
4839      *   open, for example) to fail mysteriously, perhaps in 3rd party
4840      *   native code (although the JDK itself uses fopen).  One can hardly
4841      *   criticize them for using this most standard of all functions.
4842      *
4843      * We attempt to make everything work anyway by:
4844      *
4845      * - raising the soft limit on per-process file descriptors beyond
4846      *   256
4847      *
4848      * - As of Solaris 10u4, we can request that Solaris raise the 256
4849      *   stdio fopen limit by calling function enable_extended_FILE_stdio.
4850      *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
4851      *
4852      * - If we are stuck on an old (pre 10u4) Solaris system, we can
4853      *   work around the bug by remapping non-stdio file descriptors below
4854      *   256 to ones beyond 256, which is done below.
4855      *
4856      * See:
4857      * 1085341: 32-bit stdio routines should support file descriptors >255
4858      * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
4859      * 6431278: Netbeans crash on 32 bit Solaris: need to call
4860      *          enable_extended_FILE_stdio() in VM initialisation
4861      * Giri Mandalika's blog
4862      * http://technopark02.blogspot.com/2005_05_01_archive.html
4863      */
4864 #ifndef  _LP64
4865      if ((!enabled_extended_FILE_stdio) && fd < 256) {
4866          int newfd = ::fcntl(fd, F_DUPFD, 256);
4867          if (newfd != -1) {
4868              ::close(fd);
4869              fd = newfd;
4870          }
4871      }
4872 #endif // 32-bit Solaris
4873     /*
4874      * All file descriptors that are opened in the JVM and not
4875      * specifically destined for a subprocess should have the
4876      * close-on-exec flag set.  If we don't set it, then careless 3rd
4877      * party native code might fork and exec without closing all
4878      * appropriate file descriptors (e.g. as we do in closeDescriptors in
4879      * UNIXProcess.c), and this in turn might:
4880      *
4881      * - cause end-of-file to fail to be detected on some file
4882      *   descriptors, resulting in mysterious hangs, or
4883      *
4884      * - might cause an fopen in the subprocess to fail on a system
4885      *   suffering from bug 1085341.
4886      *
4887      * (Yes, the default setting of the close-on-exec flag is a Unix
4888      * design flaw)
4889      *
4890      * See:
4891      * 1085341: 32-bit stdio routines should support file descriptors >255
4892      * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4893      * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4894      */
4895 #ifdef FD_CLOEXEC
4896     {
4897         int flags = ::fcntl(fd, F_GETFD);
4898         if (flags != -1)
4899             ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4900     }
4901 #endif
4902 
4903   if (o_delete != 0) {
4904     ::unlink(path);
4905   }
4906   return fd;
4907 }
4908 
4909 // create binary file, rewriting existing file if required
4910 int os::create_binary_file(const char* path, bool rewrite_existing) {
4911   int oflags = O_WRONLY | O_CREAT;
4912   if (!rewrite_existing) {
4913     oflags |= O_EXCL;
4914   }
4915   return ::open64(path, oflags, S_IREAD | S_IWRITE);
4916 }
4917 
4918 // return current position of file pointer
4919 jlong os::current_file_offset(int fd) {
4920   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4921 }
4922 
4923 // move file pointer to the specified offset
4924 jlong os::seek_to_file_offset(int fd, jlong offset) {
4925   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4926 }
4927 
4928 jlong os::lseek(int fd, jlong offset, int whence) {
4929   return (jlong) ::lseek64(fd, offset, whence);
4930 }
4931 
4932 char * os::native_path(char *path) {
4933   return path;
4934 }
4935 
4936 int os::ftruncate(int fd, jlong length) {
4937   return ::ftruncate64(fd, length);
4938 }
4939 
4940 int os::fsync(int fd)  {
4941   RESTARTABLE_RETURN_INT(::fsync(fd));
4942 }
4943 
4944 int os::available(int fd, jlong *bytes) {
4945   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
4946           "Assumed _thread_in_native");
4947   jlong cur, end;
4948   int mode;
4949   struct stat64 buf64;
4950 
4951   if (::fstat64(fd, &buf64) >= 0) {
4952     mode = buf64.st_mode;
4953     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4954       int n, ioctl_return;
4955 
4956       RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
4957       if (ioctl_return >= 0) {
4958         *bytes = n;
4959         return 1;
4960       }
4961     }
4962   }
4963   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4964     return 0;
4965   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4966     return 0;
4967   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4968     return 0;
4969   }
4970   *bytes = end - cur;
4971   return 1;
4972 }
4973 
4974 // Map a block of memory.
4975 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4976                      char *addr, size_t bytes, bool read_only,
4977                      bool allow_exec) {
4978   int prot;
4979   int flags;
4980 
4981   if (read_only) {
4982     prot = PROT_READ;
4983     flags = MAP_SHARED;
4984   } else {
4985     prot = PROT_READ | PROT_WRITE;
4986     flags = MAP_PRIVATE;
4987   }
4988 
4989   if (allow_exec) {
4990     prot |= PROT_EXEC;
4991   }
4992 
4993   if (addr != NULL) {
4994     flags |= MAP_FIXED;
4995   }
4996 
4997   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
4998                                      fd, file_offset);
4999   if (mapped_address == MAP_FAILED) {
5000     return NULL;
5001   }
5002   return mapped_address;
5003 }
5004 
5005 
5006 // Remap a block of memory.
5007 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5008                        char *addr, size_t bytes, bool read_only,
5009                        bool allow_exec) {
5010   // same as map_memory() on this OS
5011   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5012                         allow_exec);
5013 }
5014 
5015 
5016 // Unmap a block of memory.
5017 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5018   return munmap(addr, bytes) == 0;
5019 }
5020 
5021 void os::pause() {
5022   char filename[MAX_PATH];
5023   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5024     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
5025   } else {
5026     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5027   }
5028 
5029   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5030   if (fd != -1) {
5031     struct stat buf;
5032     ::close(fd);
5033     while (::stat(filename, &buf) == 0) {
5034       (void)::poll(NULL, 0, 100);
5035     }
5036   } else {
5037     jio_fprintf(stderr,
5038       "Could not open pause file '%s', continuing immediately.\n", filename);
5039   }
5040 }
5041 
5042 #ifndef PRODUCT
5043 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5044 // Turn this on if you need to trace synch operations.
5045 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5046 // and call record_synch_enable and record_synch_disable
5047 // around the computation of interest.
5048 
5049 void record_synch(char* name, bool returning);  // defined below
5050 
5051 class RecordSynch {
5052   char* _name;
5053  public:
5054   RecordSynch(char* name) :_name(name)
5055                  { record_synch(_name, false); }
5056   ~RecordSynch() { record_synch(_name,   true);  }
5057 };
5058 
5059 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
5060 extern "C" ret name params {                                    \
5061   typedef ret name##_t params;                                  \
5062   static name##_t* implem = NULL;                               \
5063   static int callcount = 0;                                     \
5064   if (implem == NULL) {                                         \
5065     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
5066     if (implem == NULL)  fatal(dlerror());                      \
5067   }                                                             \
5068   ++callcount;                                                  \
5069   RecordSynch _rs(#name);                                       \
5070   inner;                                                        \
5071   return implem args;                                           \
5072 }
5073 // in dbx, examine callcounts this way:
5074 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
5075 
5076 #define CHECK_POINTER_OK(p) \
5077   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
5078 #define CHECK_MU \
5079   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5080 #define CHECK_CV \
5081   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5082 #define CHECK_P(p) \
5083   if (!CHECK_POINTER_OK(p))  fatal("Pointer must be in C heap only.");
5084 
5085 #define CHECK_MUTEX(mutex_op) \
5086 CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5087 
5088 CHECK_MUTEX(   mutex_lock)
5089 CHECK_MUTEX(  _mutex_lock)
5090 CHECK_MUTEX( mutex_unlock)
5091 CHECK_MUTEX(_mutex_unlock)
5092 CHECK_MUTEX( mutex_trylock)
5093 CHECK_MUTEX(_mutex_trylock)
5094 
5095 #define CHECK_COND(cond_op) \
5096 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);
5097 
5098 CHECK_COND( cond_wait);
5099 CHECK_COND(_cond_wait);
5100 CHECK_COND(_cond_wait_cancel);
5101 
5102 #define CHECK_COND2(cond_op) \
5103 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);
5104 
5105 CHECK_COND2( cond_timedwait);
5106 CHECK_COND2(_cond_timedwait);
5107 CHECK_COND2(_cond_timedwait_cancel);
5108 
5109 // do the _lwp_* versions too
5110 #define mutex_t lwp_mutex_t
5111 #define cond_t  lwp_cond_t
5112 CHECK_MUTEX(  _lwp_mutex_lock)
5113 CHECK_MUTEX(  _lwp_mutex_unlock)
5114 CHECK_MUTEX(  _lwp_mutex_trylock)
5115 CHECK_MUTEX( __lwp_mutex_lock)
5116 CHECK_MUTEX( __lwp_mutex_unlock)
5117 CHECK_MUTEX( __lwp_mutex_trylock)
5118 CHECK_MUTEX(___lwp_mutex_lock)
5119 CHECK_MUTEX(___lwp_mutex_unlock)
5120 
5121 CHECK_COND(  _lwp_cond_wait);
5122 CHECK_COND( __lwp_cond_wait);
5123 CHECK_COND(___lwp_cond_wait);
5124 
5125 CHECK_COND2(  _lwp_cond_timedwait);
5126 CHECK_COND2( __lwp_cond_timedwait);
5127 #undef mutex_t
5128 #undef cond_t
5129 
5130 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5131 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5132 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5133 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5134 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5135 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5136 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5137 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5138 
5139 
5140 // recording machinery:
5141 
5142 enum { RECORD_SYNCH_LIMIT = 200 };
5143 char* record_synch_name[RECORD_SYNCH_LIMIT];
5144 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5145 bool record_synch_returning[RECORD_SYNCH_LIMIT];
5146 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5147 int record_synch_count = 0;
5148 bool record_synch_enabled = false;
5149 
5150 // in dbx, examine recorded data this way:
5151 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5152 
5153 void record_synch(char* name, bool returning) {
5154   if (record_synch_enabled) {
5155     if (record_synch_count < RECORD_SYNCH_LIMIT) {
5156       record_synch_name[record_synch_count] = name;
5157       record_synch_returning[record_synch_count] = returning;
5158       record_synch_thread[record_synch_count] = thr_self();
5159       record_synch_arg0ptr[record_synch_count] = &name;
5160       record_synch_count++;
5161     }
5162     // put more checking code here:
5163     // ...
5164   }
5165 }
5166 
5167 void record_synch_enable() {
5168   // start collecting trace data, if not already doing so
5169   if (!record_synch_enabled)  record_synch_count = 0;
5170   record_synch_enabled = true;
5171 }
5172 
5173 void record_synch_disable() {
5174   // stop collecting trace data
5175   record_synch_enabled = false;
5176 }
5177 
5178 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5179 #endif // PRODUCT
5180 
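     // Byte offset and length of the pr_utime/pr_stime slice of a prusage_t.
     // They let thread_cpu_time() below pread() just the user and system CPU
     // time fields from /proc/<pid>/lwp/<lwpid>/lwpusage.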
5181 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5182 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5183                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5184 
5185 
5186 // JVMTI & JVM monitoring and management support
5187 // The thread_cpu_time() and current_thread_cpu_time() are only
5188 // supported if is_thread_cpu_time_supported() returns true.
5189 // They are not supported on Solaris T1.
5190 
5191 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5192 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5193 // of a thread.
5194 //
5195 // current_thread_cpu_time() and thread_cpu_time(Thread *)
5196 // returns the fast estimate available on the platform.
5197 
5198 // hrtime_t gethrvtime() return value includes
5199 // user time but does not include system time
5200 jlong os::current_thread_cpu_time() {
5201   return (jlong) gethrvtime();
5202 }
5203 
5204 jlong os::thread_cpu_time(Thread *thread) {
5205   // return user level CPU time only to be consistent with
5206   // what current_thread_cpu_time returns.
5207   // thread_cpu_time_info() must be changed if this changes
5208   return os::thread_cpu_time(thread, false /* user time only */);
5209 }
5210 
5211 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5212   if (user_sys_cpu_time) {
5213     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5214   } else {
5215     return os::current_thread_cpu_time();
5216   }
5217 }
5218 
5219 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5220   char proc_name[64];
5221   int count;
5222   prusage_t prusage;
5223   jlong lwp_time;
5224   int fd;
5225 
5226   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5227                      getpid(),
5228                      thread->osthread()->lwp_id());
5229   fd = ::open(proc_name, O_RDONLY);
5230   if (fd == -1) return -1;
5231 
5232   do {
5233     count = ::pread(fd,
5234                   (void *)&prusage.pr_utime,
5235                   thr_time_size,
5236                   thr_time_off);
5237   } while (count < 0 && errno == EINTR);
5238   ::close(fd);
5239   if (count < 0) return -1;
5240 
5241   if (user_sys_cpu_time) {
5242     // user + system CPU time
5243     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5244                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5245                  (jlong)prusage.pr_stime.tv_nsec +
5246                  (jlong)prusage.pr_utime.tv_nsec;
5247   } else {
5248     // user level CPU time only
5249     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5250                 (jlong)prusage.pr_utime.tv_nsec;
5251   }
5252 
5253   return(lwp_time);
5254 }
5255 
5256 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5257   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5258   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5259   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5260   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5261 }
5262 
5263 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5264   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5265   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5266   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5267   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5268 }
5269 
5270 bool os::is_thread_cpu_time_supported() {
5271   return true;
5272 }
5273 
5274 // System loadavg support.  Returns -1 if load average cannot be obtained.
5275 // Return the load average for our processor set if the primitive exists
5276 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
5277 int os::loadavg(double loadavg[], int nelem) {
5278   if (pset_getloadavg_ptr != NULL) {
5279     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5280   } else {
5281     return ::getloadavg(loadavg, nelem);
5282   }
5283 }
5284 
5285 //---------------------------------------------------------------------------------
5286 
5287 bool os::find(address addr, outputStream* st) {
5288   Dl_info dlinfo;
5289   memset(&dlinfo, 0, sizeof(dlinfo));
5290   if (dladdr(addr, &dlinfo) != 0) {
5291     st->print(PTR_FORMAT ": ", addr);
5292     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5293       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5294     } else if (dlinfo.dli_fbase != NULL)
5295       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5296     else
5297       st->print("<absolute address>");
5298     if (dlinfo.dli_fname != NULL) {
5299       st->print(" in %s", dlinfo.dli_fname);
5300     }
5301     if (dlinfo.dli_fbase != NULL) {
5302       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5303     }
5304     st->cr();
5305 
5306     if (Verbose) {
5307       // decode some bytes around the PC
5308       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5309       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5310       address       lowest = (address) dlinfo.dli_sname;
5311       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
5312       if (begin < lowest)  begin = lowest;
5313       Dl_info dlinfo2;
5314       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5315           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
5316         end = (address) dlinfo2.dli_saddr;
5317       Disassembler::decode(begin, end, st);
5318     }
5319     return true;
5320   }
5321   return false;
5322 }
5323 
5324 // Following function has been added to support HotSparc's libjvm.so running
5325 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
5326 // src/solaris/hpi/native_threads in the EVM codebase.
5327 //
5328 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5329 // libraries and should thus be removed. We will leave it behind for a while
5330 // until we no longer want to be able to run on top of 1.3.0 Solaris production
5331 // JDK. See 4341971.
5332 
5333 #define STACK_SLACK 0x800
5334 
5335 extern "C" {
5336   intptr_t sysThreadAvailableStackWithSlack() {
5337     stack_t st;
5338     intptr_t retval, stack_top;
5339     retval = thr_stksegment(&st);
5340     assert(retval == 0, "incorrect return value from thr_stksegment");
5341     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5342     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
5343     stack_top=(intptr_t)st.ss_sp-st.ss_size;
5344     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5345   }
5346 }
5347 
5348 // ObjectMonitor park-unpark infrastructure ...
5349 //
5350 // We implement Solaris and Linux PlatformEvents with the
5351 // obvious condvar-mutex-flag triple.
5352 // Another alternative that works quite well is pipes:
5353 // Each PlatformEvent consists of a pipe-pair.
5354 // The thread associated with the PlatformEvent
5355 // calls park(), which reads from the input end of the pipe.
5356 // Unpark() writes into the other end of the pipe.
5357 // The write-side of the pipe must be set NDELAY.
5358 // Unfortunately pipes consume a large # of handles.
5359 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5360 // Using pipes for the 1st few threads might be workable, however.
5361 //
5362 // park() is permitted to return spuriously.
5363 // Callers of park() should wrap the call to park() in
5364 // an appropriate loop.  A litmus test for the correct
5365 // usage of park is the following: if park() were modified
5366 // to immediately return 0 your code should still work,
5367 // albeit degenerating to a spin loop.
5368 //
5369 // An interesting optimization for park() is to use a trylock()
5370 // to attempt to acquire the mutex.  If the trylock() fails
5371 // then we know that a concurrent unpark() operation is in-progress.
5372 // In that case the park() code could simply set _count to 0
5373 // and return immediately.  The subsequent park() operation *might*
5374 // return immediately.  That's harmless as the caller of park() is
5375 // expected to loop.  By using trylock() we will have avoided a
5376 // context switch caused by contention on the per-thread mutex.
5377 //
5378 // TODO-FIXME:
5379 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
5380 //     objectmonitor implementation.
5381 // 2.  Collapse the JSR166 parker event, and the
5382 //     objectmonitor ParkEvent into a single "Event" construct.
5383 // 3.  In park() and unpark() add:
5384 //     assert (Thread::current() == AssociatedWith).
5385 // 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
5386 //     1-out-of-N park() operations will return immediately.
5387 //
5388 // _Event transitions in park()
5389 //   -1 => -1 : illegal
5390 //    1 =>  0 : pass - return immediately
5391 //    0 => -1 : block
5392 //
5393 // _Event serves as a restricted-range semaphore.
5394 //
5395 // Another possible encoding of _Event would be with
5396 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5397 //
5398 // TODO-FIXME: add DTRACE probes for:
5399 // 1.   Tx parks
5400 // 2.   Ty unparks Tx
5401 // 3.   Tx resumes from park
5402 
5403 
5404 // value determined through experimentation
5405 #define ROUNDINGFIX 11
5406 
5407 // utility to compute the abstime argument to timedwait.
5408 // TODO-FIXME: switch from compute_abstime() to unpackTime().
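     // Worked example (illustrative): for millis == 2500, seconds == 2 and the
     // residual 500 ms is added as 500000 microseconds, so abstime ends up at
     // now + 2 seconds plus that residue, with a carry into tv_sec on overflow.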
5409 
5410 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5411   // millis is the relative timeout time
5412   // abstime will be the absolute timeout time
5413   if (millis < 0)  millis = 0;
5414   struct timeval now;
5415   int status = gettimeofday(&now, NULL);
5416   assert(status == 0, "gettimeofday");
5417   jlong seconds = millis / 1000;
5418   jlong max_wait_period;
5419 
5420   if (UseLWPSynchronization) {
5421     // forward port of fix for 4275818 (not sleeping long enough)
5422     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5423     // _lwp_cond_timedwait() used a round_down algorithm rather
5424     // than a round_up. For millis less than our roundfactor
5425     // it rounded down to 0 which doesn't meet the spec.
5426     // For millis > roundfactor we may return a bit sooner, but
5427     // since we can not accurately identify the patch level and
5428     // this has already been fixed in Solaris 9 and 8 we will
5429     // leave it alone rather than always rounding down.
5430 
5431     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
5432     // It appears that when we go directly through Solaris _lwp_cond_timedwait()
5433     // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
5434     max_wait_period = 21000000;
5435   } else {
5436     max_wait_period = 50000000;
5437   }
5438   millis %= 1000;
5439   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5440      seconds = max_wait_period;
5441   }
5442   abstime->tv_sec = now.tv_sec  + seconds;
5443   long       usec = now.tv_usec + millis * 1000;
5444   if (usec >= 1000000) {
5445     abstime->tv_sec += 1;
5446     usec -= 1000000;
5447   }
5448   abstime->tv_nsec = usec * 1000;
5449   return abstime;
5450 }
5451 
5452 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
5453 // Conceptually TryPark() should be equivalent to park(0).
5454 
5455 int os::PlatformEvent::TryPark() {
5456   for (;;) {
5457     const int v = _Event ;
5458     guarantee ((v == 0) || (v == 1), "invariant") ;
5459     if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
5460   }
5461 }
5462 
5463 void os::PlatformEvent::park() {           // AKA: down()
5464   // Invariant: Only the thread associated with the Event/PlatformEvent
5465   // may call park().
5466   int v ;
5467   for (;;) {
5468       v = _Event ;
5469       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5470   }
5471   guarantee (v >= 0, "invariant") ;
5472   if (v == 0) {
5473      // Do this the hard way by blocking ...
5474      // See http://monaco.sfbay/detail.jsf?cr=5094058.
5475      // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5476      // Only for SPARC >= V8PlusA
5477 #if defined(__sparc) && defined(COMPILER2)
5478      if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5479 #endif
5480      int status = os::Solaris::mutex_lock(_mutex);
5481      assert_status(status == 0, status,  "mutex_lock");
5482      guarantee (_nParked == 0, "invariant") ;
5483      ++ _nParked ;
5484      while (_Event < 0) {
5485         // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5486         // Treat this the same as if the wait was interrupted
5487         // With usr/lib/lwp going to kernel, always handle ETIME
5488         status = os::Solaris::cond_wait(_cond, _mutex);
5489         if (status == ETIME) status = EINTR ;
5490         assert_status(status == 0 || status == EINTR, status, "cond_wait");
5491      }
5492      -- _nParked ;
5493      _Event = 0 ;
5494      status = os::Solaris::mutex_unlock(_mutex);
5495      assert_status(status == 0, status, "mutex_unlock");
5496     // Paranoia to ensure our locked and lock-free paths interact
5497     // correctly with each other.
5498     OrderAccess::fence();
5499   }
5500 }
5501 
5502 int os::PlatformEvent::park(jlong millis) {
5503   guarantee (_nParked == 0, "invariant") ;
5504   int v ;
5505   for (;;) {
5506       v = _Event ;
5507       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5508   }
5509   guarantee (v >= 0, "invariant") ;
5510   if (v != 0) return OS_OK ;
5511 
5512   int ret = OS_TIMEOUT;
5513   timestruc_t abst;
5514   compute_abstime (&abst, millis);
5515 
5516   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5517   // For Solaris SPARC set fprs.FEF=0 prior to parking.
5518   // Only for SPARC >= V8PlusA
5519 #if defined(__sparc) && defined(COMPILER2)
5520  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5521 #endif
5522   int status = os::Solaris::mutex_lock(_mutex);
5523   assert_status(status == 0, status, "mutex_lock");
5524   guarantee (_nParked == 0, "invariant") ;
5525   ++ _nParked ;
5526   while (_Event < 0) {
5527      int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5528      assert_status(status == 0 || status == EINTR ||
5529                    status == ETIME || status == ETIMEDOUT,
5530                    status, "cond_timedwait");
5531      if (!FilterSpuriousWakeups) break ;                // previous semantics
5532      if (status == ETIME || status == ETIMEDOUT) break ;
5533      // We consume and ignore EINTR and spurious wakeups.
5534   }
5535   -- _nParked ;
5536   if (_Event >= 0) ret = OS_OK ;
5537   _Event = 0 ;
5538   status = os::Solaris::mutex_unlock(_mutex);
5539   assert_status(status == 0, status, "mutex_unlock");
5540   // Paranoia to ensure our locked and lock-free paths interact
5541   // correctly with each other.
5542   OrderAccess::fence();
5543   return ret;
5544 }
5545 
5546 void os::PlatformEvent::unpark() {
5547   // Transitions for _Event:
5548   //    0 :=> 1
5549   //    1 :=> 1
5550   //   -1 :=> either 0 or 1; must signal target thread
5551   //          That is, we can safely transition _Event from -1 to either
5552   //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
5553   //          unpark() calls.
5554   // See also: "Semaphores in Plan 9" by Mullender & Cox
5555   //
5556   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5557   // that it will take two back-to-back park() calls for the owning
5558   // thread to block. This has the benefit of forcing a spurious return
5559   // from the first park() call after an unpark() call which will help
5560   // shake out uses of park() and unpark() without condition variables.
5561 
5562   if (Atomic::xchg(1, &_Event) >= 0) return;
5563 
5564   // If the thread associated with the event was parked, wake it.
5565   // Wait for the thread assoc with the PlatformEvent to vacate.
5566   int status = os::Solaris::mutex_lock(_mutex);
5567   assert_status(status == 0, status, "mutex_lock");
5568   int AnyWaiters = _nParked;
5569   status = os::Solaris::mutex_unlock(_mutex);
5570   assert_status(status == 0, status, "mutex_unlock");
5571   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5572   if (AnyWaiters != 0) {
5573     // We intentionally signal *after* dropping the lock
5574     // to avoid a common class of futile wakeups.
5575     status = os::Solaris::cond_signal(_cond);
5576     assert_status(status == 0, status, "cond_signal");
5577   }
5578 }
5579 
5580 // JSR166
5581 // -------------------------------------------------------
5582 
5583 /*
5584  * The solaris and linux implementations of park/unpark are fairly
5585  * conservative for now, but can be improved. They currently use a
5586  * mutex/condvar pair, plus _counter.
5587  * Park decrements _counter if > 0, else does a condvar wait.  Unpark
5588  * sets count to 1 and signals condvar.  Only one thread ever waits
5589  * on the condvar. Contention seen when trying to park implies that someone
5590  * is unparking you, so don't wait. And spurious returns are fine, so there
5591  * is no need to track notifications.
5592  */
5593 
5594 #define MAX_SECS 100000000
5595 /*
5596  * This code is common to linux and solaris and will be moved to a
5597  * common place in dolphin.
5598  *
5599  * The passed in time value is either a relative time in nanoseconds
5600  * or an absolute time in milliseconds. Either way it has to be unpacked
5601  * into suitable seconds and nanoseconds components and stored in the
5602  * given timespec structure.
5603  * Given time is a 64-bit value and the time_t used in the timespec is only
5604  * a signed-32-bit value (except on 64-bit Linux) we have to watch for
5605  * overflow if times way in the future are given. Further, on Solaris versions
5606  * prior to 10 there is a restriction (see cond_timedwait) that the specified
5607  * number of seconds, in abstime, is less than current_time + 100,000,000.
5608  * As it will be 28 years before "now + 100000000" will overflow we can
5609  * ignore overflow and just impose a hard-limit on seconds using the value
5610  * of "now + 100,000,000". This places a limit on the timeout of about 3.17
5611  * years from "now".
5612  */
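     // Worked example (illustrative): a relative timeout of 2500000000 ns gives
     // secs == 2, so absTime becomes now + 2 seconds with the remaining
     // 500000000 ns folded into tv_nsec, carrying into tv_sec if needed.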
5613 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5614   assert (time > 0, "convertTime");
5615 
5616   struct timeval now;
5617   int status = gettimeofday(&now, NULL);
5618   assert(status == 0, "gettimeofday");
5619 
5620   time_t max_secs = now.tv_sec + MAX_SECS;
5621 
5622   if (isAbsolute) {
5623     jlong secs = time / 1000;
5624     if (secs > max_secs) {
5625       absTime->tv_sec = max_secs;
5626     }
5627     else {
5628       absTime->tv_sec = secs;
5629     }
5630     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5631   }
5632   else {
5633     jlong secs = time / NANOSECS_PER_SEC;
5634     if (secs >= MAX_SECS) {
5635       absTime->tv_sec = max_secs;
5636       absTime->tv_nsec = 0;
5637     }
5638     else {
5639       absTime->tv_sec = now.tv_sec + secs;
5640       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5641       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5642         absTime->tv_nsec -= NANOSECS_PER_SEC;
5643         ++absTime->tv_sec; // note: this must be <= max_secs
5644       }
5645     }
5646   }
5647   assert(absTime->tv_sec >= 0, "tv_sec < 0");
5648   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5649   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5650   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5651 }
5652 
5653 void Parker::park(bool isAbsolute, jlong time) {
5654   // Ideally we'd do something useful while spinning, such
5655   // as calling unpackTime().
5656 
5657   // Optional fast-path check:
5658   // Return immediately if a permit is available.
5659   // We depend on Atomic::xchg() having full barrier semantics
5660   // since we are doing a lock-free update to _counter.
5661   if (Atomic::xchg(0, &_counter) > 0) return;
5662 
5663   // Optional fast-exit: Check interrupt before trying to wait
5664   Thread* thread = Thread::current();
5665   assert(thread->is_Java_thread(), "Must be JavaThread");
5666   JavaThread *jt = (JavaThread *)thread;
5667   if (Thread::is_interrupted(thread, false)) {
5668     return;
5669   }
5670 
5671   // First, demultiplex/decode time arguments
5672   timespec absTime;
5673   if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
5674     return;
5675   }
5676   if (time > 0) {
5677     // Warning: this code might be exposed to the old Solaris time
5678     // round-down bugs.  Grep "roundingFix" for details.
5679     unpackTime(&absTime, isAbsolute, time);
5680   }
5681 
5682   // Enter safepoint region
5683   // Beware of deadlocks such as 6317397.
5684   // The per-thread Parker:: _mutex is a classic leaf-lock.
5685   // In particular a thread must never block on the Threads_lock while
5686   // holding the Parker:: mutex.  If safepoints are pending both the
5687   // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
5688   ThreadBlockInVM tbivm(jt);
5689 
5690   // Don't wait if we cannot get the lock since interference arises from
5691   // unblocking.  Also, check interrupt before trying to wait.
5692   if (Thread::is_interrupted(thread, false) ||
5693       os::Solaris::mutex_trylock(_mutex) != 0) {
5694     return;
5695   }
5696 
5697   int status ;
5698 
5699   if (_counter > 0)  { // no wait needed
5700     _counter = 0;
5701     status = os::Solaris::mutex_unlock(_mutex);
5702     assert (status == 0, "invariant") ;
5703     // Paranoia to ensure our locked and lock-free paths interact
5704     // correctly with each other and Java-level accesses.
5705     OrderAccess::fence();
5706     return;
5707   }
5708 
5709 #ifdef ASSERT
5710   // Don't catch signals while blocked; let the running threads have the signals.
5711   // (This allows a debugger to break into the running thread.)
5712   sigset_t oldsigs;
5713   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
5714   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
5715 #endif
5716 
5717   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5718   jt->set_suspend_equivalent();
5719   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5720 
5721   // Do this the hard way by blocking ...
5722   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5723   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5724   // Only for SPARC >= V8PlusA
5725 #if defined(__sparc) && defined(COMPILER2)
5726   if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5727 #endif
5728 
5729   if (time == 0) {
5730     status = os::Solaris::cond_wait (_cond, _mutex) ;
5731   } else {
5732     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
5733   }
5734   // Note that an untimed cond_wait() can sometimes return ETIME on older
5735   // versions of Solaris.
5736   assert_status(status == 0 || status == EINTR ||
5737                 status == ETIME || status == ETIMEDOUT,
5738                 status, "cond_timedwait");
5739 
5740 #ifdef ASSERT
5741   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
5742 #endif
5743   _counter = 0 ;
5744   status = os::Solaris::mutex_unlock(_mutex);
5745   assert_status(status == 0, status, "mutex_unlock") ;
5746   // Paranoia to ensure our locked and lock-free paths interact
5747   // correctly with each other and Java-level accesses.
5748   OrderAccess::fence();
5749 
5750   // If externally suspended while waiting, re-suspend
5751   if (jt->handle_special_suspend_equivalent_condition()) {
5752     jt->java_suspend_self();
5753   }
5754 }
5755 
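// Parker::park() / unpark() back java.util.concurrent.locks.LockSupport:
// _counter acts as a single binary permit.  unpark() makes the permit
// available under _mutex and signals _cond only if no permit was pending;
// park() consumes the permit or blocks on _cond until one is granted.
// Illustrative Java-level usage (sketch only):
//
//   LockSupport.unpark(t);   // eventually reaches Parker::unpark() for t
//   LockSupport.park();      // eventually reaches Parker::park() above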
5756 void Parker::unpark() {
5757   int s, status ;
5758   status = os::Solaris::mutex_lock (_mutex) ;
5759   assert (status == 0, "invariant") ;
5760   s = _counter;
5761   _counter = 1;
5762   status = os::Solaris::mutex_unlock (_mutex) ;
5763   assert (status == 0, "invariant") ;
5764 
5765   if (s < 1) {
5766     status = os::Solaris::cond_signal (_cond) ;
5767     assert (status == 0, "invariant") ;
5768   }
5769 }
5770 
5771 extern char** environ;
5772 
5773 // Run the specified command in a separate process. Return its exit value,
5774 // or -1 on failure (e.g. can't fork a new process).
5775 // Unlike system(), this function can be called from a signal handler. It
5776 // doesn't block SIGINT et al.
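// Note: within HotSpot this helper is used, for example, by the error
// reporting code to run OnError and OnOutOfMemoryError commands.  A
// minimal (hypothetical) call and its result:
//
//   int rc = os::fork_and_exec((char*)"ls /tmp");
//   // rc is the shell's exit status, or -1 if the fork itself failed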
5777 int os::fork_and_exec(char* cmd) {
5778   char * argv[4];
5779   argv[0] = (char *)"sh";
5780   argv[1] = (char *)"-c";
5781   argv[2] = cmd;
5782   argv[3] = NULL;
5783 
5784   // fork() is async-signal-safe; fork1() is not, so it can't be used in a signal handler.
5785   pid_t pid;
5786   Thread* t = ThreadLocalStorage::get_thread_slow();
5787   if (t != NULL && t->is_inside_signal_handler()) {
5788     pid = fork();
5789   } else {
5790     pid = fork1();
5791   }
5792 
5793   if (pid < 0) {
5794     // fork failed
5795     warning("fork failed: %s", strerror(errno));
5796     return -1;
5797 
5798   } else if (pid == 0) {
5799     // child process
5800 
5801     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
5802     execve("/usr/bin/sh", argv, environ);
5803 
5804     // execve failed
5805     _exit(-1);
5806 
5807   } else  {
5808     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
5809     // care about the actual exit code, for now.
5810 
5811     int status;
5812 
5813     // Wait for the child process to exit.  This returns immediately if
5814     // the child has already exited.
5815     while (waitpid(pid, &status, 0) < 0) {
5816         switch (errno) {
5817         case ECHILD: return 0;
5818         case EINTR: break;
5819         default: return -1;
5820         }
5821     }
5822 
5823     if (WIFEXITED(status)) {
5824        // The child exited normally; get its exit code.
5825        return WEXITSTATUS(status);
5826     } else if (WIFSIGNALED(status)) {
5827        // The child exited because of a signal
5828        // The best value to return is 0x80 + signal number,
5829        // because that is what all Unix shells do, and because
5830        // it allows callers to distinguish between process exit and
5831        // process death by signal.
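       // For example (illustrative arithmetic): a child terminated by
       // SIGKILL (signal 9) yields 0x80 + 9 = 137, the same value an
       // interactive shell would report in $? for that child.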
5832        return 0x80 + WTERMSIG(status);
5833     } else {
5834        // Unknown exit code; pass it through
5835        return status;
5836     }
5837   }
5838 }
5839 
5840 // is_headless_jre()
5841 //
5842 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
5843 // in order to report whether we are running in a headless JRE.
5844 //
5845 // Since JDK8, xawt/libmawt.so has been moved into the same directory
5846 // as libawt.so and renamed libawt_xawt.so.
5847 //
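// As a sketch (hypothetical install path): if libjvm.so resolves to
//     <java_home>/lib/sparcv9/server/libjvm.so
// the code below strips the last two path components and probes
//     <java_home>/lib/sparcv9/xawt/libmawt.so
//     <java_home>/lib/sparcv9/libawt_xawt.so
// reporting a headless JRE only if neither library is present.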
5848 bool os::is_headless_jre() {
5849     struct stat statbuf;
5850     char buf[MAXPATHLEN];
5851     char libmawtpath[MAXPATHLEN];
5852     const char *xawtstr  = "/xawt/libmawt.so";
5853     const char *new_xawtstr = "/libawt_xawt.so";
5854     char *p;
5855 
5856     // Get path to libjvm.so
5857     os::jvm_path(buf, sizeof(buf));
5858 
5859     // Get rid of libjvm.so
5860     p = strrchr(buf, '/');
5861     if (p == NULL) return false;
5862     else *p = '\0';
5863 
5864     // Get rid of client or server
5865     p = strrchr(buf, '/');
5866     if (p == NULL) return false;
5867     else *p = '\0';
5868 
5869     // check xawt/libmawt.so
5870     strcpy(libmawtpath, buf);
5871     strcat(libmawtpath, xawtstr);
5872     if (::stat(libmawtpath, &statbuf) == 0) return false;
5873 
5874     // check libawt_xawt.so
5875     strcpy(libmawtpath, buf);
5876     strcat(libmawtpath, new_xawtstr);
5877     if (::stat(libmawtpath, &statbuf) == 0) return false;
5878 
5879     return true;
5880 }
5881 
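// The I/O and socket wrappers below rely on the RESTARTABLE and
// RESTARTABLE_RETURN_INT macros (defined earlier in this file) to retry a
// system call that fails with EINTR.  Conceptually (sketch only):
//
//   do {
//     res = ::write(fd, buf, nBytes);
//   } while (res == OS_ERR && errno == EINTR);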
5882 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
5883   size_t res;
5884   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5885           "Assumed _thread_in_native");
5886   RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
5887   return res;
5888 }
5889 
5890 int os::close(int fd) {
5891   return ::close(fd);
5892 }
5893 
5894 int os::socket_close(int fd) {
5895   return ::close(fd);
5896 }
5897 
5898 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5899   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5900           "Assumed _thread_in_native");
5901   RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
5902 }
5903 
5904 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5905   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5906           "Assumed _thread_in_native");
5907   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5908 }
5909 
5910 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5911   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5912 }
5913 
5914 // As both poll and select can be interrupted by signals, we have to be
5915 // prepared to restart the system call after updating the timeout, unless
5916 // a poll() is done with timeout == -1, in which case we repeat with this
5917 // "wait forever" value.
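// For example (illustrative numbers): a poll() with timeout == 1000 ms that
// is interrupted by a signal after roughly 300 ms is restarted with the
// remaining ~700 ms, computed from the gettimeofday() deltas below.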
5918 
5919 int os::timeout(int fd, long timeout) {
5920   int res;
5921   struct timeval t;
5922   julong prevtime, newtime;
5923   static const char* aNull = 0;
5924   struct pollfd pfd;
5925   pfd.fd = fd;
5926   pfd.events = POLLIN;
5927 
5928   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5929           "Assumed _thread_in_native");
5930 
5931   gettimeofday(&t, &aNull);
5932   prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
5933 
5934   for (;;) {
5935     res = ::poll(&pfd, 1, timeout);
5936     if (res == OS_ERR && errno == EINTR) {
5937       if (timeout != -1) {
5938         gettimeofday(&t, &aNull);
5939         newtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
5940         timeout -= newtime - prevtime;
5941         if (timeout <= 0)
5942           return OS_OK;
5943         prevtime = newtime;
5944       }
5945     } else return res;
5946   }
5947 }
5948 
5949 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
5950   int _result;
5951   _result = ::connect(fd, him, len);
5952 
5953   // On Solaris, when a connect() call is interrupted, the connection
5954   // can be established asynchronously (see 6343810). Subsequent calls
5955   // to connect() must check the errno value which has the semantic
5956   // described below (copied from the connect() man page). Handling
5957   // of asynchronously established connections is required for both
5958   // blocking and non-blocking sockets.
5959   //     EINTR            The  connection  attempt  was   interrupted
5960   //                      before  any data arrived by the delivery of
5961   //                      a signal. The connection, however, will  be
5962   //                      established asynchronously.
5963   //
5964   //     EINPROGRESS      The socket is non-blocking, and the connec-
5965   //                      tion  cannot  be completed immediately.
5966   //
5967   //     EALREADY         The socket is non-blocking,  and a previous
5968   //                      connection  attempt  has  not yet been com-
5969   //                      pleted.
5970   //
5971   //     EISCONN          The socket is already connected.
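  // In short: after a restarted connect(), EALREADY is mapped back to
  // EINPROGRESS and EISCONN is treated as success, so callers observe the
  // errno semantics of a single, uninterrupted connect() call.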
5972   if (_result == OS_ERR && errno == EINTR) {
5973     /* restarting a connect() changes its errno semantics */
5974     RESTARTABLE(::connect(fd, him, len), _result);
5975     /* undo these changes */
5976     if (_result == OS_ERR) {
5977       if (errno == EALREADY) {
5978         errno = EINPROGRESS; /* fall through */
5979       } else if (errno == EISCONN) {
5980         errno = 0;
5981         return OS_OK;
5982       }
5983     }
5984   }
5985   return _result;
5986 }
5987 
5988 int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
5989   if (fd < 0) {
5990     return OS_ERR;
5991   }
5992   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5993           "Assumed _thread_in_native");
5994   RESTARTABLE_RETURN_INT((int)::accept(fd, him, len));
5995 }
5996 
5997 int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
5998                  sockaddr* from, socklen_t* fromlen) {
5999   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
6000           "Assumed _thread_in_native");
6001   RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
6002 }
6003 
6004 int os::sendto(int fd, char* buf, size_t len, uint flags,
6005                struct sockaddr* to, socklen_t tolen) {
6006   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
6007           "Assumed _thread_in_native");
6008   RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
6009 }
6010 
6011 int os::socket_available(int fd, jint *pbytes) {
6012   if (fd < 0) {
6013     return OS_OK;
6014   }
6015   int ret;
6016   RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
6017   // Note: ioctl() can return 0 when successful; JVM_SocketAvailable
6018   // is expected to return 0 on failure and 1 on success to the JDK.
6019   return (ret == OS_ERR) ? 0 : 1;
6020 }
6021 
6022 int os::bind(int fd, struct sockaddr* him, socklen_t len) {
6023   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
6024           "Assumed _thread_in_native");
6025    return ::bind(fd, him, len);
6026 }
6027 
6028 // Get the default path to the core file
6029 // Returns the length of the string
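// On Solaris the core file is typically written as "core" in the process's
// current working directory (the pattern is configurable via coreadm(1M)),
// so the current directory is returned here.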
6030 int os::get_core_path(char* buffer, size_t bufferSize) {
6031   const char* p = get_current_directory(buffer, bufferSize);
6032 
6033   if (p == NULL) {
6034     assert(p != NULL, "failed to get current directory");
6035     return 0;
6036   }
6037 
6038   return strlen(buffer);
6039 }
6040 
6041 #ifndef PRODUCT
6042 void TestReserveMemorySpecial_test() {
6043   // No tests available for this platform
6044 }
6045 #endif