1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "jvm_solaris.h"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/filemap.hpp"
  37 #include "mutex_solaris.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "os_share_solaris.hpp"
  40 #include "prims/jniFastGetField.hpp"
  41 #include "prims/jvm.h"
  42 #include "prims/jvm_misc.hpp"
  43 #include "runtime/arguments.hpp"
  44 #include "runtime/extendedPC.hpp"
  45 #include "runtime/globals.hpp"
  46 #include "runtime/interfaceSupport.hpp"
  47 #include "runtime/java.hpp"
  48 #include "runtime/javaCalls.hpp"
  49 #include "runtime/mutexLocker.hpp"
  50 #include "runtime/objectMonitor.hpp"
  51 #include "runtime/osThread.hpp"
  52 #include "runtime/perfMemory.hpp"
  53 #include "runtime/sharedRuntime.hpp"
  54 #include "runtime/statSampler.hpp"
  55 #include "runtime/stubRoutines.hpp"
  56 #include "runtime/thread.inline.hpp"
  57 #include "runtime/threadCritical.hpp"
  58 #include "runtime/timer.hpp"
  59 #include "services/attachListener.hpp"
  60 #include "services/memTracker.hpp"
  61 #include "services/runtimeService.hpp"
  62 #include "utilities/decoder.hpp"
  63 #include "utilities/defaultStream.hpp"
  64 #include "utilities/events.hpp"
  65 #include "utilities/growableArray.hpp"
  66 #include "utilities/vmError.hpp"
  67 
  68 // put OS-includes here
  69 # include <dlfcn.h>
  70 # include <errno.h>
  71 # include <exception>
  72 # include <link.h>
  73 # include <poll.h>
  74 # include <pthread.h>
  75 # include <pwd.h>
  76 # include <schedctl.h>
  77 # include <setjmp.h>
  78 # include <signal.h>
  79 # include <stdio.h>
  80 # include <alloca.h>
  81 # include <sys/filio.h>
  82 # include <sys/ipc.h>
  83 # include <sys/lwp.h>
  84 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
  85 # include <sys/mman.h>
  86 # include <sys/processor.h>
  87 # include <sys/procset.h>
  88 # include <sys/pset.h>
  89 # include <sys/resource.h>
  90 # include <sys/shm.h>
  91 # include <sys/socket.h>
  92 # include <sys/stat.h>
  93 # include <sys/systeminfo.h>
  94 # include <sys/time.h>
  95 # include <sys/times.h>
  96 # include <sys/types.h>
  97 # include <sys/wait.h>
  98 # include <sys/utsname.h>
  99 # include <thread.h>
 100 # include <unistd.h>
 101 # include <sys/priocntl.h>
 102 # include <sys/rtpriocntl.h>
 103 # include <sys/tspriocntl.h>
 104 # include <sys/iapriocntl.h>
 105 # include <sys/fxpriocntl.h>
 106 # include <sys/loadavg.h>
 107 # include <string.h>
 108 # include <stdio.h>
 109 
 110 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
 111 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
 112 
 113 #define MAX_PATH (2 * K)
 114 
 115 // for timer info max values which include all bits
 116 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 117 
 118 
 119 // Here are some liblgrp types from sys/lgrp_user.h to be able to
 120 // compile on older systems without this header file.
 121 
 122 #ifndef MADV_ACCESS_LWP
 123 # define  MADV_ACCESS_LWP         7       /* next LWP to access heavily */
 124 #endif
 125 #ifndef MADV_ACCESS_MANY
 126 # define  MADV_ACCESS_MANY        8       /* many processes to access heavily */
 127 #endif
 128 
 129 #ifndef LGRP_RSRC_CPU
 130 # define LGRP_RSRC_CPU           0       /* CPU resources */
 131 #endif
 132 #ifndef LGRP_RSRC_MEM
 133 # define LGRP_RSRC_MEM           1       /* memory resources */
 134 #endif
 135 
 136 // see thr_setprio(3T) for the basis of these numbers
 137 #define MinimumPriority 0
 138 #define NormalPriority  64
 139 #define MaximumPriority 127
 140 
 141 // Values for ThreadPriorityPolicy == 1
 142 int prio_policy1[CriticalPriority+1] = {
 143   -99999,  0, 16,  32,  48,  64,
 144           80, 96, 112, 124, 127, 127 };
 145 
 146 // System parameters used internally
 147 static clock_t clock_tics_per_sec = 100;
 148 
 149 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
 150 static bool enabled_extended_FILE_stdio = false;
 151 
 152 // For diagnostics to print a message once. see run_periodic_checks
 153 static bool check_addr0_done = false;
 154 static sigset_t check_signal_done;
 155 static bool check_signals = true;
 156 
 157 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
 158 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
 159 
 160 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
 161 
 162 
 163 // "default" initializers for missing libc APIs
 164 extern "C" {
 165   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 166   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
 167 
 168   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 169   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
 170 }
 171 
 172 // "default" initializers for pthread-based synchronization
 173 extern "C" {
 174   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 175   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 176 }
 177 
 178 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 179 
 180 // Thread Local Storage
 181 // This is common to all Solaris platforms so it is defined here,
 182 // in this common file.
 183 // The declarations are in the os_cpu threadLS*.hpp files.
 184 //
 185 // Static member initialization for TLS
 186 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
 187 
 188 #ifndef PRODUCT
 189 #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
 190 
 191 int ThreadLocalStorage::_tcacheHit = 0;
 192 int ThreadLocalStorage::_tcacheMiss = 0;
 193 
 194 void ThreadLocalStorage::print_statistics() {
 195   int total = _tcacheMiss+_tcacheHit;
 196   tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
 197                 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
 198 }
 199 #undef _PCT
 200 #endif // PRODUCT
 201 
 202 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
 203                                                         int index) {
 204   Thread *thread = get_thread_slow();
 205   if (thread != NULL) {
 206     address sp = os::current_stack_pointer();
 207     guarantee(thread->_stack_base == NULL ||
 208               (sp <= thread->_stack_base &&
 209                  sp >= thread->_stack_base - thread->_stack_size) ||
 210                is_error_reported(),
 211               "sp must be inside of selected thread stack");
 212 
 213     thread->set_self_raw_id(raw_id);  // mark for quick retrieval
 214     _get_thread_cache[ index ] = thread;
 215   }
 216   return thread;
 217 }
 218 
 219 
 220 static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
 221 #define NO_CACHED_THREAD ((Thread*)all_zero)
 222 
 223 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
 224 
 225   // Store the new value before updating the cache to prevent a race
 226   // between get_thread_via_cache_slowly() and this store operation.
 227   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
 228 
 229   // Update thread cache with new thread if setting on thread create,
 230   // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
 231   uintptr_t raw = pd_raw_thread_id();
 232   int ix = pd_cache_index(raw);
 233   _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
 234 }
 235 
 236 void ThreadLocalStorage::pd_init() {
 237   for (int i = 0; i < _pd_cache_size; i++) {
 238     _get_thread_cache[i] = NO_CACHED_THREAD;
 239   }
 240 }
 241 
 242 // Invalidate all the caches (happens to be the same as pd_init).
 243 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
 244 
 245 #undef NO_CACHED_THREAD
 246 
 247 // END Thread Local Storage
 248 
 249 static inline size_t adjust_stack_size(address base, size_t size) {
 250   if ((ssize_t)size < 0) {
 251     // 4759953: Compensate for ridiculous stack size.
 252     size = max_intx;
 253   }
 254   if (size > (size_t)base) {
 255     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
 256     size = (size_t)base;
 257   }
 258   return size;
 259 }
 260 
 261 static inline stack_t get_stack_info() {
 262   stack_t st;
 263   int retval = thr_stksegment(&st);
 264   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
 265   assert(retval == 0, "incorrect return value from thr_stksegment");
 266   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
 267   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
 268   return st;
 269 }
 270 
 271 address os::current_stack_base() {
 272   int r = thr_main() ;
 273   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
 274   bool is_primordial_thread = r;
 275 
 276   // Workaround 4352906, avoid calls to thr_stksegment by
 277   // thr_main after the first one (it looks like we trash
 278   // some data, causing the value for ss_sp to be incorrect).
 279   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
 280     stack_t st = get_stack_info();
 281     if (is_primordial_thread) {
 282       // cache initial value of stack base
 283       os::Solaris::_main_stack_base = (address)st.ss_sp;
 284     }
 285     return (address)st.ss_sp;
 286   } else {
 287     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
 288     return os::Solaris::_main_stack_base;
 289   }
 290 }
 291 
 292 size_t os::current_stack_size() {
 293   size_t size;
 294 
 295   int r = thr_main() ;
 296   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
 297   if(!r) {
 298     size = get_stack_info().ss_size;
 299   } else {
 300     struct rlimit limits;
 301     getrlimit(RLIMIT_STACK, &limits);
 302     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
 303   }
 304   // base may not be page aligned
 305   address base = current_stack_base();
 306   address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());;
 307   return (size_t)(base - bottom);
 308 }
 309 
 310 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
 311   return localtime_r(clock, res);
 312 }
 313 
 314 void os::Solaris::try_enable_extended_io() {
 315   typedef int (*enable_extended_FILE_stdio_t)(int, int);
 316 
 317   if (!UseExtendedFileIO) {
 318     return;
 319   }
 320 
 321   enable_extended_FILE_stdio_t enabler =
 322     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
 323                                          "enable_extended_FILE_stdio");
 324   if (enabler) {
 325     enabler(-1, -1);
 326   }
 327 }
 328 
 329 static int _processors_online = 0;
 330 
 331          jint os::Solaris::_os_thread_limit = 0;
 332 volatile jint os::Solaris::_os_thread_count = 0;
 333 
 334 julong os::available_memory() {
 335   return Solaris::available_memory();
 336 }
 337 
 338 julong os::Solaris::available_memory() {
 339   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
 340 }
 341 
 342 julong os::Solaris::_physical_memory = 0;
 343 
 344 julong os::physical_memory() {
 345    return Solaris::physical_memory();
 346 }
 347 
 348 static hrtime_t first_hrtime = 0;
 349 static const hrtime_t hrtime_hz = 1000*1000*1000;
 350 static volatile hrtime_t max_hrtime = 0;
 351 
 352 
 353 void os::Solaris::initialize_system_info() {
 354   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
 355   _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
 356   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
 357 }
 358 
 359 int os::active_processor_count() {
 360   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 361   pid_t pid = getpid();
 362   psetid_t pset = PS_NONE;
 363   // Are we running in a processor set or is there any processor set around?
 364   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
 365     uint_t pset_cpus;
 366     // Query the number of cpus available to us.
 367     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
 368       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
 369       _processors_online = pset_cpus;
 370       return pset_cpus;
 371     }
 372   }
 373   // Otherwise return number of online cpus
 374   return online_cpus;
 375 }
 376 
 377 static bool find_processors_in_pset(psetid_t        pset,
 378                                     processorid_t** id_array,
 379                                     uint_t*         id_length) {
 380   bool result = false;
 381   // Find the number of processors in the processor set.
 382   if (pset_info(pset, NULL, id_length, NULL) == 0) {
 383     // Make up an array to hold their ids.
 384     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 385     // Fill in the array with their processor ids.
 386     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
 387       result = true;
 388     }
 389   }
 390   return result;
 391 }
 392 
 393 // Callers of find_processors_online() must tolerate imprecise results --
 394 // the system configuration can change asynchronously because of DR
 395 // or explicit psradm operations.
 396 //
 397 // We also need to take care that the loop (below) terminates as the
 398 // number of processors online can change between the _SC_NPROCESSORS_ONLN
 399 // request and the loop that builds the list of processor ids.   Unfortunately
 400 // there's no reliable way to determine the maximum valid processor id,
 401 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
 402 // man pages, which claim the processor id set is "sparse, but
 403 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
 404 // exit the loop.
 405 //
 406 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
 407 // not available on S8.0.
 408 
 409 static bool find_processors_online(processorid_t** id_array,
 410                                    uint*           id_length) {
 411   const processorid_t MAX_PROCESSOR_ID = 100000 ;
 412   // Find the number of processors online.
 413   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
 414   // Make up an array to hold their ids.
 415   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 416   // Processors need not be numbered consecutively.
 417   long found = 0;
 418   processorid_t next = 0;
 419   while (found < *id_length && next < MAX_PROCESSOR_ID) {
 420     processor_info_t info;
 421     if (processor_info(next, &info) == 0) {
 422       // NB, PI_NOINTR processors are effectively online ...
 423       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
 424         (*id_array)[found] = next;
 425         found += 1;
 426       }
 427     }
 428     next += 1;
 429   }
 430   if (found < *id_length) {
 431       // The loop above didn't identify the expected number of processors.
 432       // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
 433       // and re-running the loop, above, but there's no guarantee of progress
 434       // if the system configuration is in flux.  Instead, we just return what
 435       // we've got.  Note that in the worst case find_processors_online() could
 436       // return an empty set.  (As a fall-back in the case of the empty set we
 437       // could just return the ID of the current processor).
 438       *id_length = found ;
 439   }
 440 
 441   return true;
 442 }
 443 
 444 static bool assign_distribution(processorid_t* id_array,
 445                                 uint           id_length,
 446                                 uint*          distribution,
 447                                 uint           distribution_length) {
 448   // We assume we can assign processorid_t's to uint's.
 449   assert(sizeof(processorid_t) == sizeof(uint),
 450          "can't convert processorid_t to uint");
 451   // Quick check to see if we won't succeed.
 452   if (id_length < distribution_length) {
 453     return false;
 454   }
 455   // Assign processor ids to the distribution.
 456   // Try to shuffle processors to distribute work across boards,
 457   // assuming 4 processors per board.
 458   const uint processors_per_board = ProcessDistributionStride;
 459   // Find the maximum processor id.
 460   processorid_t max_id = 0;
 461   for (uint m = 0; m < id_length; m += 1) {
 462     max_id = MAX2(max_id, id_array[m]);
 463   }
 464   // The next id, to limit loops.
 465   const processorid_t limit_id = max_id + 1;
 466   // Make up markers for available processors.
 467   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
 468   for (uint c = 0; c < limit_id; c += 1) {
 469     available_id[c] = false;
 470   }
 471   for (uint a = 0; a < id_length; a += 1) {
 472     available_id[id_array[a]] = true;
 473   }
 474   // Step by "boards", then by "slot", copying to "assigned".
 475   // NEEDS_CLEANUP: The assignment of processors should be stateful,
 476   //                remembering which processors have been assigned by
 477   //                previous calls, etc., so as to distribute several
 478   //                independent calls of this method.  What we'd like is
 479   //                It would be nice to have an API that let us ask
 480   //                how many processes are bound to a processor,
 481   //                but we don't have that, either.
 482   //                In the short term, "board" is static so that
 483   //                subsequent distributions don't all start at board 0.
 484   static uint board = 0;
 485   uint assigned = 0;
 486   // Until we've found enough processors ....
 487   while (assigned < distribution_length) {
 488     // ... find the next available processor in the board.
 489     for (uint slot = 0; slot < processors_per_board; slot += 1) {
 490       uint try_id = board * processors_per_board + slot;
 491       if ((try_id < limit_id) && (available_id[try_id] == true)) {
 492         distribution[assigned] = try_id;
 493         available_id[try_id] = false;
 494         assigned += 1;
 495         break;
 496       }
 497     }
 498     board += 1;
 499     if (board * processors_per_board + 0 >= limit_id) {
 500       board = 0;
 501     }
 502   }
 503   if (available_id != NULL) {
 504     FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
 505   }
 506   return true;
 507 }
 508 
 509 void os::set_native_thread_name(const char *name) {
 510   // Not yet implemented.
 511   return;
 512 }
 513 
 514 bool os::distribute_processes(uint length, uint* distribution) {
 515   bool result = false;
 516   // Find the processor id's of all the available CPUs.
 517   processorid_t* id_array  = NULL;
 518   uint           id_length = 0;
 519   // There are some races between querying information and using it,
 520   // since processor sets can change dynamically.
 521   psetid_t pset = PS_NONE;
 522   // Are we running in a processor set?
 523   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
 524     result = find_processors_in_pset(pset, &id_array, &id_length);
 525   } else {
 526     result = find_processors_online(&id_array, &id_length);
 527   }
 528   if (result == true) {
 529     if (id_length >= length) {
 530       result = assign_distribution(id_array, id_length, distribution, length);
 531     } else {
 532       result = false;
 533     }
 534   }
 535   if (id_array != NULL) {
 536     FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
 537   }
 538   return result;
 539 }
 540 
 541 bool os::bind_to_processor(uint processor_id) {
 542   // We assume that a processorid_t can be stored in a uint.
 543   assert(sizeof(uint) == sizeof(processorid_t),
 544          "can't convert uint to processorid_t");
 545   int bind_result =
 546     processor_bind(P_LWPID,                       // bind LWP.
 547                    P_MYID,                        // bind current LWP.
 548                    (processorid_t) processor_id,  // id.
 549                    NULL);                         // don't return old binding.
 550   return (bind_result == 0);
 551 }
 552 
 553 bool os::getenv(const char* name, char* buffer, int len) {
 554   char* val = ::getenv( name );
 555   if ( val == NULL
 556   ||   strlen(val) + 1  >  len ) {
 557     if (len > 0)  buffer[0] = 0; // return a null string
 558     return false;
 559   }
 560   strcpy( buffer, val );
 561   return true;
 562 }
 563 
 564 
 565 // Return true if user is running as root.
 566 
 567 bool os::have_special_privileges() {
 568   static bool init = false;
 569   static bool privileges = false;
 570   if (!init) {
 571     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 572     init = true;
 573   }
 574   return privileges;
 575 }
 576 
 577 
 578 void os::init_system_properties_values() {
 579   // The next steps are taken in the product version:
 580   //
 581   // Obtain the JAVA_HOME value from the location of libjvm.so.
 582   // This library should be located at:
 583   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
 584   //
 585   // If "/jre/lib/" appears at the right place in the path, then we
 586   // assume libjvm.so is installed in a JDK and we use this path.
 587   //
 588   // Otherwise exit with message: "Could not create the Java virtual machine."
 589   //
 590   // The following extra steps are taken in the debugging version:
 591   //
 592   // If "/jre/lib/" does NOT appear at the right place in the path
 593   // instead of exit check for $JAVA_HOME environment variable.
 594   //
 595   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
 596   // then we append a fake suffix "hotspot/libjvm.so" to this path so
 597   // it looks like libjvm.so is installed there
 598   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
 599   //
 600   // Otherwise exit.
 601   //
 602   // Important note: if the location of libjvm.so changes this
 603   // code needs to be changed accordingly.
 604 
 605 // Base path of extensions installed on the system.
 606 #define SYS_EXT_DIR     "/usr/jdk/packages"
 607 #define EXTENSIONS_DIR  "/lib/ext"
 608 #define ENDORSED_DIR    "/lib/endorsed"
 609 
 610   char cpu_arch[12];
 611   // Buffer that fits several sprintfs.
 612   // Note that the space for the colon and the trailing null are provided
 613   // by the nulls included by the sizeof operator.
 614   const size_t bufsize =
 615     MAX4((size_t)MAXPATHLEN,  // For dll_dir & friends.
 616          sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch), // invariant ld_library_path
 617          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
 618          (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
 619   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 620 
 621   // sysclasspath, java_home, dll_dir
 622   {
 623     char *pslash;
 624     os::jvm_path(buf, bufsize);
 625 
 626     // Found the full path to libjvm.so.
 627     // Now cut the path to <java_home>/jre if we can.
 628     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 629     pslash = strrchr(buf, '/');
 630     if (pslash != NULL) {
 631       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 632     }
 633     Arguments::set_dll_dir(buf);
 634 
 635     if (pslash != NULL) {
 636       pslash = strrchr(buf, '/');
 637       if (pslash != NULL) {
 638         *pslash = '\0';          // Get rid of /<arch>.
 639         pslash = strrchr(buf, '/');
 640         if (pslash != NULL) {
 641           *pslash = '\0';        // Get rid of /lib.
 642         }
 643       }
 644     }
 645     Arguments::set_java_home(buf);
 646     set_boot_path('/', ':');
 647   }
 648 
 649   // Where to look for native libraries.
 650   {
 651     // Use dlinfo() to determine the correct java.library.path.
 652     //
 653     // If we're launched by the Java launcher, and the user
 654     // does not set java.library.path explicitly on the commandline,
 655     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
 656     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
 657     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
 658     // /usr/lib), which is exactly what we want.
 659     //
 660     // If the user does set java.library.path, it completely
 661     // overwrites this setting, and always has.
 662     //
 663     // If we're not launched by the Java launcher, we may
 664     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
 665     // settings.  Again, dlinfo does exactly what we want.
 666 
 667     Dl_serinfo     info_sz, *info = &info_sz;
 668     Dl_serpath     *path;
 669     char           *library_path;
 670     char           *common_path = buf;
 671 
 672     // Determine search path count and required buffer size.
 673     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
 674       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
 675       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
 676     }
 677 
 678     // Allocate new buffer and initialize.
 679     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
 680     info->dls_size = info_sz.dls_size;
 681     info->dls_cnt = info_sz.dls_cnt;
 682 
 683     // Obtain search path information.
 684     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
 685       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
 686       FREE_C_HEAP_ARRAY(char, info, mtInternal);
 687       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
 688     }
 689 
 690     path = &info->dls_serpath[0];
 691 
 692     // Note: Due to a legacy implementation, most of the library path
 693     // is set in the launcher. This was to accomodate linking restrictions
 694     // on legacy Solaris implementations (which are no longer supported).
 695     // Eventually, all the library path setting will be done here.
 696     //
 697     // However, to prevent the proliferation of improperly built native
 698     // libraries, the new path component /usr/jdk/packages is added here.
 699 
 700     // Determine the actual CPU architecture.
 701     sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
 702 #ifdef _LP64
 703     // If we are a 64-bit vm, perform the following translations:
 704     //   sparc   -> sparcv9
 705     //   i386    -> amd64
 706     if (strcmp(cpu_arch, "sparc") == 0) {
 707       strcat(cpu_arch, "v9");
 708     } else if (strcmp(cpu_arch, "i386") == 0) {
 709       strcpy(cpu_arch, "amd64");
 710     }
 711 #endif
 712 
 713     // Construct the invariant part of ld_library_path.
 714     sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
 715 
 716     // Struct size is more than sufficient for the path components obtained
 717     // through the dlinfo() call, so only add additional space for the path
 718     // components explicitly added here.
 719     size_t library_path_size = info->dls_size + strlen(common_path);
 720     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
 721     library_path[0] = '\0';
 722 
 723     // Construct the desired Java library path from the linker's library
 724     // search path.
 725     //
 726     // For compatibility, it is optimal that we insert the additional path
 727     // components specific to the Java VM after those components specified
 728     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
 729     // infrastructure.
 730     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
 731       strcpy(library_path, common_path);
 732     } else {
 733       int inserted = 0;
 734       int i;
 735       for (i = 0; i < info->dls_cnt; i++, path++) {
 736         uint_t flags = path->dls_flags & LA_SER_MASK;
 737         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
 738           strcat(library_path, common_path);
 739           strcat(library_path, os::path_separator());
 740           inserted = 1;
 741         }
 742         strcat(library_path, path->dls_name);
 743         strcat(library_path, os::path_separator());
 744       }
 745       // Eliminate trailing path separator.
 746       library_path[strlen(library_path)-1] = '\0';
 747     }
 748 
 749     // happens before argument parsing - can't use a trace flag
 750     // tty->print_raw("init_system_properties_values: native lib path: ");
 751     // tty->print_raw_cr(library_path);
 752 
 753     // Callee copies into its own buffer.
 754     Arguments::set_library_path(library_path);
 755 
 756     FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
 757     FREE_C_HEAP_ARRAY(char, info, mtInternal);
 758   }
 759 
 760   // Extensions directories.
 761   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
 762   Arguments::set_ext_dirs(buf);
 763 
 764   // Endorsed standards default directory.
 765   sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
 766   Arguments::set_endorsed_dirs(buf);
 767 
 768   FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 769 
 770 #undef SYS_EXT_DIR
 771 #undef EXTENSIONS_DIR
 772 #undef ENDORSED_DIR
 773 }
 774 
 775 void os::breakpoint() {
 776   BREAKPOINT;
 777 }
 778 
 779 bool os::obsolete_option(const JavaVMOption *option)
 780 {
 781   if (!strncmp(option->optionString, "-Xt", 3)) {
 782     return true;
 783   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
 784     return true;
 785   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
 786     return true;
 787   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
 788     return true;
 789   }
 790   return false;
 791 }
 792 
 793 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
 794   address  stackStart  = (address)thread->stack_base();
 795   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
 796   if (sp < stackStart && sp >= stackEnd ) return true;
 797   return false;
 798 }
 799 
 800 extern "C" void breakpoint() {
 801   // use debugger to set breakpoint here
 802 }
 803 
 804 static thread_t main_thread;
 805 
 806 // Thread start routine for all new Java threads
 807 extern "C" void* java_start(void* thread_addr) {
 808   // Try to randomize the cache line index of hot stack frames.
 809   // This helps when threads of the same stack traces evict each other's
 810   // cache lines. The threads can be either from the same JVM instance, or
 811   // from different JVM instances. The benefit is especially true for
 812   // processors with hyperthreading technology.
 813   static int counter = 0;
 814   int pid = os::current_process_id();
 815   alloca(((pid ^ counter++) & 7) * 128);
 816 
 817   int prio;
 818   Thread* thread = (Thread*)thread_addr;
 819   OSThread* osthr = thread->osthread();
 820 
 821   osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
 822   thread->_schedctl = (void *) schedctl_init () ;
 823 
 824   if (UseNUMA) {
 825     int lgrp_id = os::numa_get_group_id();
 826     if (lgrp_id != -1) {
 827       thread->set_lgrp_id(lgrp_id);
 828     }
 829   }
 830 
 831   // If the creator called set priority before we started,
 832   // we need to call set_native_priority now that we have an lwp.
 833   // We used to get the priority from thr_getprio (we called
 834   // thr_setprio way back in create_thread) and pass it to
 835   // set_native_priority, but Solaris scales the priority
 836   // in java_to_os_priority, so when we read it back here,
 837   // we pass trash to set_native_priority instead of what's
 838   // in java_to_os_priority. So we save the native priority
 839   // in the osThread and recall it here.
 840 
 841   if ( osthr->thread_id() != -1 ) {
 842     if ( UseThreadPriorities ) {
 843       int prio = osthr->native_priority();
 844       if (ThreadPriorityVerbose) {
 845         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
 846                       INTPTR_FORMAT ", setting priority: %d\n",
 847                       osthr->thread_id(), osthr->lwp_id(), prio);
 848       }
 849       os::set_native_priority(thread, prio);
 850     }
 851   } else if (ThreadPriorityVerbose) {
 852     warning("Can't set priority in _start routine, thread id hasn't been set\n");
 853   }
 854 
 855   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 856 
 857   // initialize signal mask for this thread
 858   os::Solaris::hotspot_sigmask(thread);
 859 
 860   thread->run();
 861 
 862   // One less thread is executing
 863   // When the VMThread gets here, the main thread may have already exited
 864   // which frees the CodeHeap containing the Atomic::dec code
 865   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 866     Atomic::dec(&os::Solaris::_os_thread_count);
 867   }
 868 
 869   if (UseDetachedThreads) {
 870     thr_exit(NULL);
 871     ShouldNotReachHere();
 872   }
 873   return NULL;
 874 }
 875 
 876 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
 877   // Allocate the OSThread object
 878   OSThread* osthread = new OSThread(NULL, NULL);
 879   if (osthread == NULL) return NULL;
 880 
 881   // Store info on the Solaris thread into the OSThread
 882   osthread->set_thread_id(thread_id);
 883   osthread->set_lwp_id(_lwp_self());
 884   thread->_schedctl = (void *) schedctl_init () ;
 885 
 886   if (UseNUMA) {
 887     int lgrp_id = os::numa_get_group_id();
 888     if (lgrp_id != -1) {
 889       thread->set_lgrp_id(lgrp_id);
 890     }
 891   }
 892 
 893   if ( ThreadPriorityVerbose ) {
 894     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
 895                   osthread->thread_id(), osthread->lwp_id() );
 896   }
 897 
 898   // Initial thread state is INITIALIZED, not SUSPENDED
 899   osthread->set_state(INITIALIZED);
 900 
 901   return osthread;
 902 }
 903 
 904 void os::Solaris::hotspot_sigmask(Thread* thread) {
 905 
 906   //Save caller's signal mask
 907   sigset_t sigmask;
 908   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
 909   OSThread *osthread = thread->osthread();
 910   osthread->set_caller_sigmask(sigmask);
 911 
 912   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
 913   if (!ReduceSignalUsage) {
 914     if (thread->is_VM_thread()) {
 915       // Only the VM thread handles BREAK_SIGNAL ...
 916       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
 917     } else {
 918       // ... all other threads block BREAK_SIGNAL
 919       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
 920       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
 921     }
 922   }
 923 }
 924 
 925 bool os::create_attached_thread(JavaThread* thread) {
 926 #ifdef ASSERT
 927   thread->verify_not_published();
 928 #endif
 929   OSThread* osthread = create_os_thread(thread, thr_self());
 930   if (osthread == NULL) {
 931      return false;
 932   }
 933 
 934   // Initial thread state is RUNNABLE
 935   osthread->set_state(RUNNABLE);
 936   thread->set_osthread(osthread);
 937 
 938   // initialize signal mask for this thread
 939   // and save the caller's signal mask
 940   os::Solaris::hotspot_sigmask(thread);
 941 
 942   return true;
 943 }
 944 
 945 bool os::create_main_thread(JavaThread* thread) {
 946 #ifdef ASSERT
 947   thread->verify_not_published();
 948 #endif
 949   if (_starting_thread == NULL) {
 950     _starting_thread = create_os_thread(thread, main_thread);
 951      if (_starting_thread == NULL) {
 952         return false;
 953      }
 954   }
 955 
 956   // The primodial thread is runnable from the start
 957   _starting_thread->set_state(RUNNABLE);
 958 
 959   thread->set_osthread(_starting_thread);
 960 
 961   // initialize signal mask for this thread
 962   // and save the caller's signal mask
 963   os::Solaris::hotspot_sigmask(thread);
 964 
 965   return true;
 966 }
 967 
 968 
 969 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 970   // Allocate the OSThread object
 971   OSThread* osthread = new OSThread(NULL, NULL);
 972   if (osthread == NULL) {
 973     return false;
 974   }
 975 
 976   if ( ThreadPriorityVerbose ) {
 977     char *thrtyp;
 978     switch ( thr_type ) {
 979       case vm_thread:
 980         thrtyp = (char *)"vm";
 981         break;
 982       case cgc_thread:
 983         thrtyp = (char *)"cgc";
 984         break;
 985       case pgc_thread:
 986         thrtyp = (char *)"pgc";
 987         break;
 988       case java_thread:
 989         thrtyp = (char *)"java";
 990         break;
 991       case compiler_thread:
 992         thrtyp = (char *)"compiler";
 993         break;
 994       case watcher_thread:
 995         thrtyp = (char *)"watcher";
 996         break;
 997       default:
 998         thrtyp = (char *)"unknown";
 999         break;
1000     }
1001     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
1002   }
1003 
1004   // Calculate stack size if it's not specified by caller.
1005   if (stack_size == 0) {
1006     // The default stack size 1M (2M for LP64).
1007     stack_size = (BytesPerWord >> 2) * K * K;
1008 
1009     switch (thr_type) {
1010     case os::java_thread:
1011       // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
1012       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
1013       break;
1014     case os::compiler_thread:
1015       if (CompilerThreadStackSize > 0) {
1016         stack_size = (size_t)(CompilerThreadStackSize * K);
1017         break;
1018       } // else fall through:
1019         // use VMThreadStackSize if CompilerThreadStackSize is not defined
1020     case os::vm_thread:
1021     case os::pgc_thread:
1022     case os::cgc_thread:
1023     case os::watcher_thread:
1024       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1025       break;
1026     }
1027   }
1028   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1029 
1030   // Initial state is ALLOCATED but not INITIALIZED
1031   osthread->set_state(ALLOCATED);
1032 
1033   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
1034     // We got lots of threads. Check if we still have some address space left.
1035     // Need to be at least 5Mb of unreserved address space. We do check by
1036     // trying to reserve some.
1037     const size_t VirtualMemoryBangSize = 20*K*K;
1038     char* mem = os::reserve_memory(VirtualMemoryBangSize);
1039     if (mem == NULL) {
1040       delete osthread;
1041       return false;
1042     } else {
1043       // Release the memory again
1044       os::release_memory(mem, VirtualMemoryBangSize);
1045     }
1046   }
1047 
1048   // Setup osthread because the child thread may need it.
1049   thread->set_osthread(osthread);
1050 
1051   // Create the Solaris thread
1052   thread_t tid = 0;
1053   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
1054   int      status;
1055 
1056   // Mark that we don't have an lwp or thread id yet.
1057   // In case we attempt to set the priority before the thread starts.
1058   osthread->set_lwp_id(-1);
1059   osthread->set_thread_id(-1);
1060 
1061   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1062   if (status != 0) {
1063     if (PrintMiscellaneous && (Verbose || WizardMode)) {
1064       perror("os::create_thread");
1065     }
1066     thread->set_osthread(NULL);
1067     // Need to clean up stuff we've allocated so far
1068     delete osthread;
1069     return false;
1070   }
1071 
1072   Atomic::inc(&os::Solaris::_os_thread_count);
1073 
1074   // Store info on the Solaris thread into the OSThread
1075   osthread->set_thread_id(tid);
1076 
1077   // Remember that we created this thread so we can set priority on it
1078   osthread->set_vm_created();
1079 
1080   // Initial thread state is INITIALIZED, not SUSPENDED
1081   osthread->set_state(INITIALIZED);
1082 
1083   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1084   return true;
1085 }
1086 
1087 /* defined for >= Solaris 10. This allows builds on earlier versions
1088  *  of Solaris to take advantage of the newly reserved Solaris JVM signals
1089  *  With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2
1090  *  and -XX:+UseAltSigs does nothing since these should have no conflict
1091  */
1092 #if !defined(SIGJVM1)
1093 #define SIGJVM1 39
1094 #define SIGJVM2 40
1095 #endif
1096 
1097 debug_only(static bool signal_sets_initialized = false);
1098 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1099 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1100 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1101 
1102 bool os::Solaris::is_sig_ignored(int sig) {
1103       struct sigaction oact;
1104       sigaction(sig, (struct sigaction*)NULL, &oact);
1105       void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
1106                                      : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
1107       if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
1108            return true;
1109       else
1110            return false;
1111 }
1112 
1113 // Note: SIGRTMIN is a macro that calls sysconf() so it will
1114 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime
1115 static bool isJVM1available() {
1116   return SIGJVM1 < SIGRTMIN;
1117 }
1118 
1119 void os::Solaris::signal_sets_init() {
1120   // Should also have an assertion stating we are still single-threaded.
1121   assert(!signal_sets_initialized, "Already initialized");
1122   // Fill in signals that are necessarily unblocked for all threads in
1123   // the VM. Currently, we unblock the following signals:
1124   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1125   //                         by -Xrs (=ReduceSignalUsage));
1126   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1127   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1128   // the dispositions or masks wrt these signals.
1129   // Programs embedding the VM that want to use the above signals for their
1130   // own purposes must, at this time, use the "-Xrs" option to prevent
1131   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1132   // (See bug 4345157, and other related bugs).
1133   // In reality, though, unblocking these signals is really a nop, since
1134   // these signals are not blocked by default.
1135   sigemptyset(&unblocked_sigs);
1136   sigemptyset(&allowdebug_blocked_sigs);
1137   sigaddset(&unblocked_sigs, SIGILL);
1138   sigaddset(&unblocked_sigs, SIGSEGV);
1139   sigaddset(&unblocked_sigs, SIGBUS);
1140   sigaddset(&unblocked_sigs, SIGFPE);
1141 
1142   if (isJVM1available) {
1143     os::Solaris::set_SIGinterrupt(SIGJVM1);
1144     os::Solaris::set_SIGasync(SIGJVM2);
1145   } else if (UseAltSigs) {
1146     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1147     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1148   } else {
1149     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1150     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1151   }
1152 
1153   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1154   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1155 
1156   if (!ReduceSignalUsage) {
1157    if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1158       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1159       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1160    }
1161    if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1162       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1163       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1164    }
1165    if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1166       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1167       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1168    }
1169   }
1170   // Fill in signals that are blocked by all but the VM thread.
1171   sigemptyset(&vm_sigs);
1172   if (!ReduceSignalUsage)
1173     sigaddset(&vm_sigs, BREAK_SIGNAL);
1174   debug_only(signal_sets_initialized = true);
1175 
1176   // For diagnostics only used in run_periodic_checks
1177   sigemptyset(&check_signal_done);
1178 }
1179 
1180 // These are signals that are unblocked while a thread is running Java.
1181 // (For some reason, they get blocked by default.)
1182 sigset_t* os::Solaris::unblocked_signals() {
1183   assert(signal_sets_initialized, "Not initialized");
1184   return &unblocked_sigs;
1185 }
1186 
1187 // These are the signals that are blocked while a (non-VM) thread is
1188 // running Java. Only the VM thread handles these signals.
1189 sigset_t* os::Solaris::vm_signals() {
1190   assert(signal_sets_initialized, "Not initialized");
1191   return &vm_sigs;
1192 }
1193 
1194 // These are signals that are blocked during cond_wait to allow debugger in
1195 sigset_t* os::Solaris::allowdebug_blocked_signals() {
1196   assert(signal_sets_initialized, "Not initialized");
1197   return &allowdebug_blocked_sigs;
1198 }
1199 
1200 
1201 void _handle_uncaught_cxx_exception() {
1202   VMError err("An uncaught C++ exception");
1203   err.report_and_die();
1204 }
1205 
1206 
1207 // First crack at OS-specific initialization, from inside the new thread.
1208 void os::initialize_thread(Thread* thr) {
1209   int r = thr_main() ;
1210   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
1211   if (r) {
1212     JavaThread* jt = (JavaThread *)thr;
1213     assert(jt != NULL,"Sanity check");
1214     size_t stack_size;
1215     address base = jt->stack_base();
1216     if (Arguments::created_by_java_launcher()) {
1217       // Use 2MB to allow for Solaris 7 64 bit mode.
1218       stack_size = JavaThread::stack_size_at_create() == 0
1219         ? 2048*K : JavaThread::stack_size_at_create();
1220 
1221       // There are rare cases when we may have already used more than
1222       // the basic stack size allotment before this method is invoked.
1223       // Attempt to allow for a normally sized java_stack.
1224       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1225       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1226     } else {
1227       // 6269555: If we were not created by a Java launcher, i.e. if we are
1228       // running embedded in a native application, treat the primordial thread
1229       // as much like a native attached thread as possible.  This means using
1230       // the current stack size from thr_stksegment(), unless it is too large
1231       // to reliably setup guard pages.  A reasonable max size is 8MB.
1232       size_t current_size = current_stack_size();
1233       // This should never happen, but just in case....
1234       if (current_size == 0) current_size = 2 * K * K;
1235       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1236     }
1237     address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());;
1238     stack_size = (size_t)(base - bottom);
1239 
1240     assert(stack_size > 0, "Stack size calculation problem");
1241 
1242     if (stack_size > jt->stack_size()) {
1243       NOT_PRODUCT(
1244         struct rlimit limits;
1245         getrlimit(RLIMIT_STACK, &limits);
1246         size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1247         assert(size >= jt->stack_size(), "Stack size problem in main thread");
1248       )
1249       tty->print_cr(
1250         "Stack size of %d Kb exceeds current limit of %d Kb.\n"
1251         "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1252         "See limit(1) to increase the stack size limit.",
1253         stack_size / K, jt->stack_size() / K);
1254       vm_exit(1);
1255     }
1256     assert(jt->stack_size() >= stack_size,
1257           "Attempt to map more stack than was allocated");
1258     jt->set_stack_size(stack_size);
1259   }
1260 
1261   // With the T2 libthread (T1 is no longer supported) threads are always bound
1262   // and we use stackbanging in all cases.
1263 
1264   os::Solaris::init_thread_fpu_state();
1265   std::set_terminate(_handle_uncaught_cxx_exception);
1266 }
1267 
1268 
1269 
1270 // Free Solaris resources related to the OSThread
1271 void os::free_thread(OSThread* osthread) {
1272   assert(osthread != NULL, "os::free_thread but osthread not set");
1273 
1274 
1275   // We are told to free resources of the argument thread,
1276   // but we can only really operate on the current thread.
1277   // The main thread must take the VMThread down synchronously
1278   // before the main thread exits and frees up CodeHeap
1279   guarantee((Thread::current()->osthread() == osthread
1280      || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1281   if (Thread::current()->osthread() == osthread) {
1282     // Restore caller's signal mask
1283     sigset_t sigmask = osthread->caller_sigmask();
1284     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1285   }
1286   delete osthread;
1287 }
1288 
1289 void os::pd_start_thread(Thread* thread) {
1290   int status = thr_continue(thread->osthread()->thread_id());
1291   assert_status(status == 0, status, "thr_continue failed");
1292 }
1293 
1294 
1295 intx os::current_thread_id() {
1296   return (intx)thr_self();
1297 }
1298 
1299 static pid_t _initial_pid = 0;
1300 
1301 int os::current_process_id() {
1302   return (int)(_initial_pid ? _initial_pid : getpid());
1303 }
1304 
1305 int os::allocate_thread_local_storage() {
1306   // %%%       in Win32 this allocates a memory segment pointed to by a
1307   //           register.  Dan Stein can implement a similar feature in
1308   //           Solaris.  Alternatively, the VM can do the same thing
1309   //           explicitly: malloc some storage and keep the pointer in a
1310   //           register (which is part of the thread's context) (or keep it
1311   //           in TLS).
1312   // %%%       In current versions of Solaris, thr_self and TSD can
1313   //           be accessed via short sequences of displaced indirections.
1314   //           The value of thr_self is available as %g7(36).
1315   //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
1316   //           assuming that the current thread already has a value bound to k.
1317   //           It may be worth experimenting with such access patterns,
1318   //           and later having the parameters formally exported from a Solaris
1319   //           interface.  I think, however, that it will be faster to
1320   //           maintain the invariant that %g2 always contains the
1321   //           JavaThread in Java code, and have stubs simply
1322   //           treat %g2 as a caller-save register, preserving it in a %lN.
1323   thread_key_t tk;
1324   if (thr_keycreate( &tk, NULL ) )
1325     fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
1326                   "(%s)", strerror(errno)));
1327   return int(tk);
1328 }
1329 
1330 void os::free_thread_local_storage(int index) {
1331   // %%% don't think we need anything here
1332   // if ( pthread_key_delete((pthread_key_t) tk) )
1333   //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
1334 }
1335 
1336 #define SMALLINT 32   // libthread allocate for tsd_common is a version specific
1337                       // small number - point is NO swap space available
1338 void os::thread_local_storage_at_put(int index, void* value) {
1339   // %%% this is used only in threadLocalStorage.cpp
1340   if (thr_setspecific((thread_key_t)index, value)) {
1341     if (errno == ENOMEM) {
1342        vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
1343                              "thr_setspecific: out of swap space");
1344     } else {
1345       fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1346                     "(%s)", strerror(errno)));
1347     }
1348   } else {
1349       ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
1350   }
1351 }
1352 
1353 // This function could be called before TLS is initialized, for example, when
1354 // VM receives an async signal or when VM causes a fatal error during
1355 // initialization. Return NULL if thr_getspecific() fails.
1356 void* os::thread_local_storage_at(int index) {
1357   // %%% this is used only in threadLocalStorage.cpp
1358   void* r = NULL;
1359   return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1360 }
1361 
1362 
1363 // gethrtime() should be monotonic according to the documentation,
1364 // but is known not to guarantee this on virtualized platforms.
1365 // getTimeNanos() must be guaranteed not to move backwards, so we
1366 // are forced to add a check here.
1367 inline hrtime_t getTimeNanos() {
1368   const hrtime_t now = gethrtime();
1369   const hrtime_t prev = max_hrtime;
1370   if (now <= prev) {
1371     return prev;   // same or retrograde time;
1372   }
1373   const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1374   assert(obsv >= prev, "invariant");   // Monotonicity
1375   // If the CAS succeeded then we're done and return "now".
1376   // If the CAS failed and the observed value "obsv" is >= now then
1377   // we should return "obsv".  If the CAS failed and now > obsv > prv then
1378   // some other thread raced this thread and installed a new value, in which case
1379   // we could either (a) retry the entire operation, (b) retry trying to install now
1380   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1381   // we might discard a higher "now" value in deference to a slightly lower but freshly
1382   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1383   // to (a) or (b) -- and greatly reduces coherence traffic.
1384   // We might also condition (c) on the magnitude of the delta between obsv and now.
1385   // Avoiding excessive CAS operations to hot RW locations is critical.
1386   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1387   return (prev == obsv) ? now : obsv;
1388 }
1389 
1390 // Time since start-up in seconds to a fine granularity.
1391 // Used by VMSelfDestructTimer and the MemProfiler.
1392 double os::elapsedTime() {
1393   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1394 }
1395 
1396 jlong os::elapsed_counter() {
1397   return (jlong)(getTimeNanos() - first_hrtime);
1398 }
1399 
1400 jlong os::elapsed_frequency() {
1401    return hrtime_hz;
1402 }
1403 
1404 // Return the real, user, and system times in seconds from an
1405 // arbitrary fixed point in the past.
1406 bool os::getTimesSecs(double* process_real_time,
1407                       double* process_user_time,
1408                       double* process_system_time) {
1409   struct tms ticks;
1410   clock_t real_ticks = times(&ticks);
1411 
1412   if (real_ticks == (clock_t) (-1)) {
1413     return false;
1414   } else {
1415     double ticks_per_second = (double) clock_tics_per_sec;
1416     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1417     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1418     // For consistency return the real time from getTimeNanos()
1419     // converted to seconds.
1420     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1421 
1422     return true;
1423   }
1424 }
1425 
1426 bool os::supports_vtime() { return true; }
1427 
1428 bool os::enable_vtime() {
1429   int fd = ::open("/proc/self/ctl", O_WRONLY);
1430   if (fd == -1)
1431     return false;
1432 
1433   long cmd[] = { PCSET, PR_MSACCT };
1434   int res = ::write(fd, cmd, sizeof(long) * 2);
1435   ::close(fd);
1436   if (res != sizeof(long) * 2)
1437     return false;
1438 
1439   return true;
1440 }
1441 
1442 bool os::vtime_enabled() {
1443   int fd = ::open("/proc/self/status", O_RDONLY);
1444   if (fd == -1)
1445     return false;
1446 
1447   pstatus_t status;
1448   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1449   ::close(fd);
1450   if (res != sizeof(pstatus_t))
1451     return false;
1452 
1453   return status.pr_flags & PR_MSACCT;
1454 }
1455 
1456 double os::elapsedVTime() {
1457   return (double)gethrvtime() / (double)hrtime_hz;
1458 }
1459 
1460 // Used internally for comparisons only
1461 // getTimeMillis is guaranteed not to move backwards on Solaris
1462 jlong getTimeMillis() {
1463   jlong nanotime = getTimeNanos();
1464   return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1465 }
1466 
1467 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1468 jlong os::javaTimeMillis() {
1469   timeval t;
1470   if (gettimeofday( &t, NULL) == -1)
1471     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1472   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1473 }
1474 
1475 jlong os::javaTimeNanos() {
1476   return (jlong)getTimeNanos();
1477 }
1478 
1479 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1480   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1481   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1482   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1483   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1484 }
1485 
1486 char * os::local_time_string(char *buf, size_t buflen) {
1487   struct tm t;
1488   time_t long_time;
1489   time(&long_time);
1490   localtime_r(&long_time, &t);
1491   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1492                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1493                t.tm_hour, t.tm_min, t.tm_sec);
1494   return buf;
1495 }
1496 
1497 // Note: os::shutdown() might be called very early during initialization, or
1498 // called from signal handler. Before adding something to os::shutdown(), make
1499 // sure it is async-safe and can handle partially initialized VM.
1500 void os::shutdown() {
1501 
1502   // allow PerfMemory to attempt cleanup of any persistent resources
1503   perfMemory_exit();
1504 
1505   // needs to remove object in file system
1506   AttachListener::abort();
1507 
1508   // flush buffered output, finish log files
1509   ostream_abort();
1510 
1511   // Check for abort hook
1512   abort_hook_t abort_hook = Arguments::abort_hook();
1513   if (abort_hook != NULL) {
1514     abort_hook();
1515   }
1516 }
1517 
1518 // Note: os::abort() might be called very early during initialization, or
1519 // called from signal handler. Before adding something to os::abort(), make
1520 // sure it is async-safe and can handle partially initialized VM.
1521 void os::abort(bool dump_core) {
1522   os::shutdown();
1523   if (dump_core) {
1524 #ifndef PRODUCT
1525     fdStream out(defaultStream::output_fd());
1526     out.print_raw("Current thread is ");
1527     char buf[16];
1528     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1529     out.print_raw_cr(buf);
1530     out.print_raw_cr("Dumping core ...");
1531 #endif
1532     ::abort(); // dump core (for debugging)
1533   }
1534 
1535   ::exit(1);
1536 }
1537 
1538 // Die immediately, no exit hook, no abort hook, no cleanup.
1539 void os::die() {
1540   ::abort(); // dump core (for debugging)
1541 }
1542 
1543 // unused
1544 void os::set_error_file(const char *logfile) {}
1545 
1546 // DLL functions
1547 
1548 const char* os::dll_file_extension() { return ".so"; }
1549 
1550 // This must be hard-coded because it's the system's temporary
1551 // directory, not the java application's temp directory (java.io.tmpdir).
1552 const char* os::get_temp_directory() { return "/tmp"; }
1553 
1554 static bool file_exists(const char* filename) {
1555   struct stat statbuf;
1556   if (filename == NULL || strlen(filename) == 0) {
1557     return false;
1558   }
1559   return os::stat(filename, &statbuf) == 0;
1560 }
1561 
1562 bool os::dll_build_name(char* buffer, size_t buflen,
1563                         const char* pname, const char* fname) {
1564   bool retval = false;
1565   const size_t pnamelen = pname ? strlen(pname) : 0;
1566 
1567   // Return error on buffer overflow.
1568   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1569     return retval;
1570   }
1571 
1572   if (pnamelen == 0) {
1573     snprintf(buffer, buflen, "lib%s.so", fname);
1574     retval = true;
1575   } else if (strchr(pname, *os::path_separator()) != NULL) {
1576     int n;
1577     char** pelements = split_path(pname, &n);
1578     if (pelements == NULL) {
1579       return false;
1580     }
1581     for (int i = 0 ; i < n ; i++) {
1582       // really shouldn't be NULL but what the heck, check can't hurt
1583       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1584         continue; // skip the empty path values
1585       }
1586       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1587       if (file_exists(buffer)) {
1588         retval = true;
1589         break;
1590       }
1591     }
1592     // release the storage
1593     for (int i = 0 ; i < n ; i++) {
1594       if (pelements[i] != NULL) {
1595         FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1596       }
1597     }
1598     if (pelements != NULL) {
1599       FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1600     }
1601   } else {
1602     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1603     retval = true;
1604   }
1605   return retval;
1606 }
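// Illustrative use only (hypothetical caller and arguments):
//   char buf[JVM_MAXPATHLEN];
//   if (os::dll_build_name(buf, sizeof(buf), Arguments::get_dll_dir(), "verify")) {
//     // buf now holds "<dll dir>/libverify.so", ready to pass to os::dll_load().
//   }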
1607 
1608 // check if addr is inside libjvm.so
1609 bool os::address_is_in_vm(address addr) {
1610   static address libjvm_base_addr;
1611   Dl_info dlinfo;
1612 
1613   if (libjvm_base_addr == NULL) {
1614     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1615       libjvm_base_addr = (address)dlinfo.dli_fbase;
1616     }
1617     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1618   }
1619 
1620   if (dladdr((void *)addr, &dlinfo) != 0) {
1621     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1622   }
1623 
1624   return false;
1625 }
1626 
1627 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
1628 static dladdr1_func_type dladdr1_func = NULL;
1629 
1630 bool os::dll_address_to_function_name(address addr, char *buf,
1631                                       int buflen, int * offset) {
1632   // buf is not optional, but offset is optional
1633   assert(buf != NULL, "sanity check");
1634 
1635   Dl_info dlinfo;
1636 
1637   // dladdr1_func was initialized in os::init()
1638   if (dladdr1_func != NULL) {
1639     // yes, we have dladdr1
1640 
1641     // Support for dladdr1 is checked at runtime; it may be
1642     // available even if the vm is built on a machine that does
1643     // not have dladdr1 support.  Make sure there is a value for
1644     // RTLD_DL_SYMENT.
1645     #ifndef RTLD_DL_SYMENT
1646     #define RTLD_DL_SYMENT 1
1647     #endif
1648 #ifdef _LP64
1649     Elf64_Sym * info;
1650 #else
1651     Elf32_Sym * info;
1652 #endif
1653     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1654                      RTLD_DL_SYMENT) != 0) {
1655       // see if we have a matching symbol that covers our address
1656       if (dlinfo.dli_saddr != NULL &&
1657           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1658         if (dlinfo.dli_sname != NULL) {
1659           if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1660             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1661           }
1662           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1663           return true;
1664         }
1665       }
1666       // no matching symbol so try for just file info
1667       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1668         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1669                             buf, buflen, offset, dlinfo.dli_fname)) {
1670           return true;
1671         }
1672       }
1673     }
1674     buf[0] = '\0';
1675     if (offset != NULL) *offset  = -1;
1676     return false;
1677   }
1678 
1679   // no, only dladdr is available
1680   if (dladdr((void *)addr, &dlinfo) != 0) {
1681     // see if we have a matching symbol
1682     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1683       if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1684         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1685       }
1686       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1687       return true;
1688     }
1689     // no matching symbol so try for just file info
1690     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1691       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1692                           buf, buflen, offset, dlinfo.dli_fname)) {
1693         return true;
1694       }
1695     }
1696   }
1697   buf[0] = '\0';
1698   if (offset != NULL) *offset  = -1;
1699   return false;
1700 }
1701 
1702 bool os::dll_address_to_library_name(address addr, char* buf,
1703                                      int buflen, int* offset) {
1704   // buf is not optional, but offset is optional
1705   assert(buf != NULL, "sanity check");
1706 
1707   Dl_info dlinfo;
1708 
1709   if (dladdr((void*)addr, &dlinfo) != 0) {
1710     if (dlinfo.dli_fname != NULL) {
1711       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1712     }
1713     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1714       *offset = addr - (address)dlinfo.dli_fbase;
1715     }
1716     return true;
1717   }
1718 
1719   buf[0] = '\0';
1720   if (offset) *offset = -1;
1721   return false;
1722 }
1723 
1724 // Prints the names and full paths of all opened dynamic libraries
1725 // for current process
1726 void os::print_dll_info(outputStream * st) {
1727   Dl_info dli;
1728   void *handle;
1729   Link_map *map;
1730   Link_map *p;
1731 
1732   st->print_cr("Dynamic libraries:"); st->flush();
1733 
1734   if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
1735       dli.dli_fname == NULL) {
1736     st->print_cr("Error: Cannot print dynamic libraries.");
1737     return;
1738   }
1739   handle = dlopen(dli.dli_fname, RTLD_LAZY);
1740   if (handle == NULL) {
1741     st->print_cr("Error: Cannot print dynamic libraries.");
1742     return;
1743   }
1744   if (dlinfo(handle, RTLD_DI_LINKMAP, &map) == -1 || map == NULL) {
1745     st->print_cr("Error: Cannot print dynamic libraries.");
1746     dlclose(handle);  // don't leak the handle on the error path
1747     return;
1748   }
1749 
1750   while (map->l_prev != NULL)
1751     map = map->l_prev;
1752 
1753   while (map != NULL) {
1754     st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
1755     map = map->l_next;
1756   }
1757 
1758   dlclose(handle);
1759 }
1760 
1761 // Loads a .dll/.so and, in case of error, checks whether
1762 // the .dll/.so was built for the same architecture
1763 // as HotSpot is running on.
1764 
1765 void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
1766 {
1767   void * result= ::dlopen(filename, RTLD_LAZY);
1768   if (result != NULL) {
1769     // Successful loading
1770     return result;
1771   }
1772 
1773   Elf32_Ehdr elf_head;
1774 
1775   // Read system error message into ebuf
1776   // It may or may not be overwritten below
1777   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1778   ebuf[ebuflen-1]='\0';
1779   int diag_msg_max_length=ebuflen-strlen(ebuf);
1780   char* diag_msg_buf=ebuf+strlen(ebuf);
1781 
1782   if (diag_msg_max_length==0) {
1783     // No more space in ebuf for additional diagnostics message
1784     return NULL;
1785   }
1786 
1787 
1788   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1789 
1790   if (file_descriptor < 0) {
1791     // Can't open library, report dlerror() message
1792     return NULL;
1793   }
1794 
1795   bool failed_to_read_elf_head=
1796     (sizeof(elf_head)!=
1797         (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;
1798 
1799   ::close(file_descriptor);
1800   if (failed_to_read_elf_head) {
1801     // file i/o error - report dlerror() msg
1802     return NULL;
1803   }
1804 
1805   typedef struct {
1806     Elf32_Half  code;         // Actual value as defined in elf.h
1807     Elf32_Half  compat_class; // Compatibility of archs as seen by the VM
1808     char        elf_class;    // 32 or 64 bit
1809     char        endianess;    // MSB or LSB
1810     char*       name;         // String representation
1811   } arch_t;
1812 
1813   static const arch_t arch_array[]={
1814     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1815     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1816     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1817     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1818     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1819     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1820     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1821     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1822     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1823     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1824   };
1825 
1826   #if  (defined IA32)
1827     static  Elf32_Half running_arch_code=EM_386;
1828   #elif   (defined AMD64)
1829     static  Elf32_Half running_arch_code=EM_X86_64;
1830   #elif  (defined IA64)
1831     static  Elf32_Half running_arch_code=EM_IA_64;
1832   #elif  (defined __sparc) && (defined _LP64)
1833     static  Elf32_Half running_arch_code=EM_SPARCV9;
1834   #elif  (defined __sparc) && (!defined _LP64)
1835     static  Elf32_Half running_arch_code=EM_SPARC;
1836   #elif  (defined __powerpc64__)
1837     static  Elf32_Half running_arch_code=EM_PPC64;
1838   #elif  (defined __powerpc__)
1839     static  Elf32_Half running_arch_code=EM_PPC;
1840   #elif (defined ARM)
1841     static  Elf32_Half running_arch_code=EM_ARM;
1842   #else
1843     #error Method os::dll_load requires that one of the following is defined:\
1844          IA32, AMD64, IA64, __sparc, __powerpc__, ARM
1845   #endif
1846 
1847   // Identify compatibility class for VM's architecture and library's architecture
1848   // Obtain string descriptions for architectures
1849 
1850   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1851   int running_arch_index=-1;
1852 
1853   for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
1854     if (running_arch_code == arch_array[i].code) {
1855       running_arch_index    = i;
1856     }
1857     if (lib_arch.code == arch_array[i].code) {
1858       lib_arch.compat_class = arch_array[i].compat_class;
1859       lib_arch.name         = arch_array[i].name;
1860     }
1861   }
1862 
1863   assert(running_arch_index != -1,
1864     "Didn't find running architecture code (running_arch_code) in arch_array");
1865   if (running_arch_index == -1) {
1866     // Even though running architecture detection failed
1867     // we may still continue with reporting dlerror() message
1868     return NULL;
1869   }
1870 
1871   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1872     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1873     return NULL;
1874   }
1875 
1876   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1877     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1878     return NULL;
1879   }
1880 
1881   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1882     if ( lib_arch.name!=NULL ) {
1883       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1884         " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1885         lib_arch.name, arch_array[running_arch_index].name);
1886     } else {
1887       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1888       " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1889         lib_arch.code,
1890         arch_array[running_arch_index].name);
1891     }
1892   }
1893 
1894   return NULL;
1895 }
1896 
1897 void* os::dll_lookup(void* handle, const char* name) {
1898   return dlsym(handle, name);
1899 }
1900 
1901 void* os::get_default_process_handle() {
1902   return (void*)::dlopen(NULL, RTLD_LAZY);
1903 }
1904 
1905 int os::stat(const char *path, struct stat *sbuf) {
1906   char pathbuf[MAX_PATH];
1907   if (strlen(path) > MAX_PATH - 1) {
1908     errno = ENAMETOOLONG;
1909     return -1;
1910   }
1911   os::native_path(strcpy(pathbuf, path));
1912   return ::stat(pathbuf, sbuf);
1913 }
1914 
1915 static bool _print_ascii_file(const char* filename, outputStream* st) {
1916   int fd = ::open(filename, O_RDONLY);
1917   if (fd == -1) {
1918     return false;
1919   }
1920 
1921   char buf[32];
1922   int bytes;
1923   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
1924     st->print_raw(buf, bytes);
1925   }
1926 
1927   ::close(fd);
1928 
1929   return true;
1930 }
1931 
1932 void os::print_os_info_brief(outputStream* st) {
1933   os::Solaris::print_distro_info(st);
1934 
1935   os::Posix::print_uname_info(st);
1936 
1937   os::Solaris::print_libversion_info(st);
1938 }
1939 
1940 void os::print_os_info(outputStream* st) {
1941   st->print("OS:");
1942 
1943   os::Solaris::print_distro_info(st);
1944 
1945   os::Posix::print_uname_info(st);
1946 
1947   os::Solaris::print_libversion_info(st);
1948 
1949   os::Posix::print_rlimit_info(st);
1950 
1951   os::Posix::print_load_average(st);
1952 }
1953 
1954 void os::Solaris::print_distro_info(outputStream* st) {
1955   if (!_print_ascii_file("/etc/release", st)) {
1956     st->print("Solaris");
1957   }
1958   st->cr();
1959 }
1960 
1961 void os::Solaris::print_libversion_info(outputStream* st) {
1962   st->print("  (T2 libthread)");
1963   st->cr();
1964 }
1965 
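// Check whether anything is mapped at virtual address 0 and, if so, print a
// warning describing the mapping. A mapping at address 0 can mask null
// pointer dereferences that would otherwise fault.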
1966 static bool check_addr0(outputStream* st) {
1967   jboolean status = false;
1968   int fd = ::open("/proc/self/map",O_RDONLY);
1969   if (fd >= 0) {
1970     prmap_t p;
1971     while (::read(fd, &p, sizeof(p)) > 0) {
1972       if (p.pr_vaddr == 0x0) {
1973         st->print("Warning: Address: 0x%x, Size: %dK, ", p.pr_vaddr, p.pr_size/1024);
1974         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
1975         st->print("Access:");
1976         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
1977         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
1978         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
1979         st->cr();
1980         status = true;
1981       }
1982     }
1983     ::close(fd);
1984   }
1985   return status;
1986 }
1987 
1988 void os::pd_print_cpu_info(outputStream* st) {
1989   // Nothing to do for now.
1990 }
1991 
1992 void os::print_memory_info(outputStream* st) {
1993   st->print("Memory:");
1994   st->print(" %dk page", os::vm_page_size()>>10);
1995   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
1996   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
1997   st->cr();
1998   (void) check_addr0(st);
1999 }
2000 
2001 void os::print_siginfo(outputStream* st, void* siginfo) {
2002   const siginfo_t* si = (const siginfo_t*)siginfo;
2003 
2004   os::Posix::print_siginfo_brief(st, si);
2005 
2006   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2007       UseSharedSpaces) {
2008     FileMapInfo* mapinfo = FileMapInfo::current_info();
2009     if (mapinfo->is_in_shared_space(si->si_addr)) {
2010       st->print("\n\nError accessing class data sharing archive."   \
2011                 " Mapped file inaccessible during execution, "      \
2012                 "possible disk/network problem.");
2013     }
2014   }
2015   st->cr();
2016 }
2017 
2018 // Moved from whole group, because we need them here for diagnostic
2019 // prints.
2020 #define OLDMAXSIGNUM 32
2021 static int Maxsignum = 0;
2022 static int *ourSigFlags = NULL;
2023 
2024 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2025 
2026 int os::Solaris::get_our_sigflags(int sig) {
2027   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2028   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2029   return ourSigFlags[sig];
2030 }
2031 
2032 void os::Solaris::set_our_sigflags(int sig, int flags) {
2033   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2034   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2035   ourSigFlags[sig] = flags;
2036 }
2037 
2038 
2039 static const char* get_signal_handler_name(address handler,
2040                                            char* buf, int buflen) {
2041   int offset;
2042   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2043   if (found) {
2044     // skip directory names
2045     const char *p1, *p2;
2046     p1 = buf;
2047     size_t len = strlen(os::file_separator());
2048     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2049     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2050   } else {
2051     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2052   }
2053   return buf;
2054 }
2055 
2056 static void print_signal_handler(outputStream* st, int sig,
2057                                   char* buf, size_t buflen) {
2058   struct sigaction sa;
2059 
2060   sigaction(sig, NULL, &sa);
2061 
2062   st->print("%s: ", os::exception_name(sig, buf, buflen));
2063 
2064   address handler = (sa.sa_flags & SA_SIGINFO)
2065                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2066                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
2067 
2068   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2069     st->print("SIG_DFL");
2070   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2071     st->print("SIG_IGN");
2072   } else {
2073     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2074   }
2075 
2076   st->print(", sa_mask[0]=");
2077   os::Posix::print_signal_set_short(st, &sa.sa_mask);
2078 
2079   address rh = VMError::get_resetted_sighandler(sig);
2080   // Maybe the handler was reset by VMError?
2081   if (rh != NULL) {
2082     handler = rh;
2083     sa.sa_flags = VMError::get_resetted_sigflags(sig);
2084   }
2085 
2086   st->print(", sa_flags=");
2087   os::Posix::print_sa_flags(st, sa.sa_flags);
2088 
2089   // Check: is it our handler?
2090   if (handler == CAST_FROM_FN_PTR(address, signalHandler) ||
2091       handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
2092     // It is our signal handler
2093     // check for flags
2094     if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
2095       st->print(
2096         ", flags were changed from " PTR32_FORMAT ", consider using jsig library",
2097         os::Solaris::get_our_sigflags(sig));
2098     }
2099   }
2100   st->cr();
2101 }
2102 
2103 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2104   st->print_cr("Signal Handlers:");
2105   print_signal_handler(st, SIGSEGV, buf, buflen);
2106   print_signal_handler(st, SIGBUS , buf, buflen);
2107   print_signal_handler(st, SIGFPE , buf, buflen);
2108   print_signal_handler(st, SIGPIPE, buf, buflen);
2109   print_signal_handler(st, SIGXFSZ, buf, buflen);
2110   print_signal_handler(st, SIGILL , buf, buflen);
2111   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2112   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2113   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2114   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2115   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2116   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2117   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2118   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2119 }
2120 
2121 static char saved_jvm_path[MAXPATHLEN] = { 0 };
2122 
2123 // Find the full path to the current module, libjvm.so
2124 void os::jvm_path(char *buf, jint buflen) {
2125   // Error checking.
2126   if (buflen < MAXPATHLEN) {
2127     assert(false, "must use a large-enough buffer");
2128     buf[0] = '\0';
2129     return;
2130   }
2131   // Lazy resolve the path to current module.
2132   if (saved_jvm_path[0] != 0) {
2133     strcpy(buf, saved_jvm_path);
2134     return;
2135   }
2136 
2137   Dl_info dlinfo;
2138   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2139   assert(ret != 0, "cannot locate libjvm");
2140   if (ret != 0 && dlinfo.dli_fname != NULL) {
2141     realpath((char *)dlinfo.dli_fname, buf);
2142   } else {
2143     buf[0] = '\0';
2144     return;
2145   }
2146 
2147   if (Arguments::sun_java_launcher_is_altjvm()) {
2148     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
2149     // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
2150     // If "/jre/lib/" appears at the right place in the string, then
2151     // assume we are installed in a JDK and we're done.  Otherwise, check
2152     // for a JAVA_HOME environment variable and fix up the path so it
2153     // looks like libjvm.so is installed there (append a fake suffix
2154     // hotspot/libjvm.so).
2155     const char *p = buf + strlen(buf) - 1;
2156     for (int count = 0; p > buf && count < 5; ++count) {
2157       for (--p; p > buf && *p != '/'; --p)
2158         /* empty */ ;
2159     }
2160 
2161     if (strncmp(p, "/jre/lib/", 9) != 0) {
2162       // Look for JAVA_HOME in the environment.
2163       char* java_home_var = ::getenv("JAVA_HOME");
2164       if (java_home_var != NULL && java_home_var[0] != 0) {
2165         char cpu_arch[12];
2166         char* jrelib_p;
2167         int   len;
2168         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2169 #ifdef _LP64
2170         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2171         if (strcmp(cpu_arch, "sparc") == 0) {
2172           strcat(cpu_arch, "v9");
2173         } else if (strcmp(cpu_arch, "i386") == 0) {
2174           strcpy(cpu_arch, "amd64");
2175         }
2176 #endif
2177         // Check the current module name "libjvm.so".
2178         p = strrchr(buf, '/');
2179         assert(strstr(p, "/libjvm") == p, "invalid library name");
2180 
2181         realpath(java_home_var, buf);
2182         // determine if this is a legacy image or modules image
2183         // modules image doesn't have "jre" subdirectory
2184         len = strlen(buf);
2185         jrelib_p = buf + len;
2186         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2187         if (0 != access(buf, F_OK)) {
2188           snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2189         }
2190 
2191         if (0 == access(buf, F_OK)) {
2192           // Use current module name "libjvm.so"
2193           len = strlen(buf);
2194           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2195         } else {
2196           // Go back to path of .so
2197           realpath((char *)dlinfo.dli_fname, buf);
2198         }
2199       }
2200     }
2201   }
2202 
2203   strcpy(saved_jvm_path, buf);
2204 }
2205 
2206 
2207 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2208   // no prefix required, not even "_"
2209 }
2210 
2211 
2212 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2213   // no suffix required
2214 }
2215 
2216 // This method is a copy of JDK's sysGetLastErrorString
2217 // from src/solaris/hpi/src/system_md.c
2218 
2219 size_t os::lasterror(char *buf, size_t len) {
2220 
2221   if (errno == 0)  return 0;
2222 
2223   const char *s = ::strerror(errno);
2224   size_t n = ::strlen(s);
2225   if (n >= len) {
2226     n = len - 1;
2227   }
2228   ::strncpy(buf, s, n);
2229   buf[n] = '\0';
2230   return n;
2231 }
2232 
2233 
2234 // sun.misc.Signal
2235 
2236 extern "C" {
2237   static void UserHandler(int sig, void *siginfo, void *context) {
2238     // Ctrl-C is pressed during error reporting, likely because the error
2239     // handler fails to abort. Let VM die immediately.
2240     if (sig == SIGINT && is_error_reported()) {
2241        os::die();
2242     }
2243 
2244     os::signal_notify(sig);
2245     // We do not need to reinstate the signal handler each time...
2246   }
2247 }
2248 
2249 void* os::user_handler() {
2250   return CAST_FROM_FN_PTR(void*, UserHandler);
2251 }
2252 
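// Simple semaphore wrapper around the Solaris sema_t API (sema_init,
// sema_post, sema_wait, sema_trywait, sema_timedwait).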
2253 class Semaphore : public StackObj {
2254   public:
2255     Semaphore();
2256     ~Semaphore();
2257     void signal();
2258     void wait();
2259     bool trywait();
2260     bool timedwait(unsigned int sec, int nsec);
2261   private:
2262     sema_t _semaphore;
2263 };
2264 
2265 
2266 Semaphore::Semaphore() {
2267   sema_init(&_semaphore, 0, NULL, NULL);
2268 }
2269 
2270 Semaphore::~Semaphore() {
2271   sema_destroy(&_semaphore);
2272 }
2273 
2274 void Semaphore::signal() {
2275   sema_post(&_semaphore);
2276 }
2277 
2278 void Semaphore::wait() {
2279   sema_wait(&_semaphore);
2280 }
2281 
2282 bool Semaphore::trywait() {
2283   return sema_trywait(&_semaphore) == 0;
2284 }
2285 
2286 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2287   struct timespec ts;
2288   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2289 
2290   while (1) {
2291     int result = sema_timedwait(&_semaphore, &ts);
2292     if (result == 0) {
2293       return true;
2294     } else if (errno == EINTR) {
2295       continue;
2296     } else if (errno == ETIME) {
2297       return false;
2298     } else {
2299       return false;
2300     }
2301   }
2302 }
2303 
2304 extern "C" {
2305   typedef void (*sa_handler_t)(int);
2306   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2307 }
2308 
2309 void* os::signal(int signal_number, void* handler) {
2310   struct sigaction sigAct, oldSigAct;
2311   sigfillset(&(sigAct.sa_mask));
2312   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2313   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2314 
2315   if (sigaction(signal_number, &sigAct, &oldSigAct))
2316     // -1 means registration failed
2317     return (void *)-1;
2318 
2319   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2320 }
2321 
2322 void os::signal_raise(int signal_number) {
2323   raise(signal_number);
2324 }
2325 
2326 /*
2327  * The following code is moved from os.cpp for making this
2328  * code platform specific, which it is by its very nature.
2329  */
2330 
2331 // a counter for each possible signal value
2332 static int Sigexit = 0;
2333 static int Maxlibjsigsigs;
2334 static jint *pending_signals = NULL;
2335 static int *preinstalled_sigs = NULL;
2336 static struct sigaction *chainedsigactions = NULL;
2337 static sema_t sig_sem;
2338 typedef int (*version_getting_t)();
2339 version_getting_t os::Solaris::get_libjsig_version = NULL;
2340 static int libjsigversion = 0;
2341 
2342 int os::sigexitnum_pd() {
2343   assert(Sigexit > 0, "signal memory not yet initialized");
2344   return Sigexit;
2345 }
2346 
2347 void os::Solaris::init_signal_mem() {
2348   // Initialize signal structures
2349   Maxsignum = SIGRTMAX;
2350   Sigexit = Maxsignum + 1;
2351   assert(Maxsignum > 0, "Unable to obtain max signal number");
2352 
2353   Maxlibjsigsigs = Maxsignum;
2354 
2355   // pending_signals has one int per signal
2356   // The additional signal is for SIGEXIT - exit signal to signal_thread
2357   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2358   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2359 
2360   if (UseSignalChaining) {
2361      chainedsigactions = (struct sigaction *)os::malloc(sizeof(struct sigaction)
2362        * (Maxsignum + 1), mtInternal);
2363      memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2364      preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2365      memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2366   }
2367   ourSigFlags = (int*)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2368   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2369 }
2370 
2371 void os::signal_init_pd() {
2372   int ret;
2373 
2374   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2375   assert(ret == 0, "sema_init() failed");
2376 }
2377 
2378 void os::signal_notify(int signal_number) {
2379   int ret;
2380 
2381   Atomic::inc(&pending_signals[signal_number]);
2382   ret = ::sema_post(&sig_sem);
2383   assert(ret == 0, "sema_post() failed");
2384 }
2385 
2386 static int check_pending_signals(bool wait_for_signal) {
2387   int ret;
2388   while (true) {
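    // Atomically claim one pending occurrence of signal i: decrement the
    // counter only if it still holds the value we just observed; otherwise
    // another thread got there first and we re-examine the slot.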
2389     for (int i = 0; i < Sigexit + 1; i++) {
2390       jint n = pending_signals[i];
2391       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2392         return i;
2393       }
2394     }
2395     if (!wait_for_signal) {
2396       return -1;
2397     }
2398     JavaThread *thread = JavaThread::current();
2399     ThreadBlockInVM tbivm(thread);
2400 
2401     bool threadIsSuspended;
2402     do {
2403       thread->set_suspend_equivalent();
2404       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2405       while ((ret = ::sema_wait(&sig_sem)) == EINTR)
2406           ;
2407       assert(ret == 0, "sema_wait() failed");
2408 
2409       // were we externally suspended while we were waiting?
2410       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2411       if (threadIsSuspended) {
2412         //
2413         // The semaphore has been incremented, but while we were waiting
2414         // another thread suspended us. We don't want to continue running
2415         // while suspended because that would surprise the thread that
2416         // suspended us.
2417         //
2418         ret = ::sema_post(&sig_sem);
2419         assert(ret == 0, "sema_post() failed");
2420 
2421         thread->java_suspend_self();
2422       }
2423     } while (threadIsSuspended);
2424   }
2425 }
2426 
2427 int os::signal_lookup() {
2428   return check_pending_signals(false);
2429 }
2430 
2431 int os::signal_wait() {
2432   return check_pending_signals(true);
2433 }
2434 
2435 ////////////////////////////////////////////////////////////////////////////////
2436 // Virtual Memory
2437 
2438 static int page_size = -1;
2439 
2440 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2441 // clear this var if support is not available.
2442 static bool has_map_align = true;
2443 
2444 int os::vm_page_size() {
2445   assert(page_size != -1, "must call os::init");
2446   return page_size;
2447 }
2448 
2449 // Solaris allocates memory by pages.
2450 int os::vm_allocation_granularity() {
2451   assert(page_size != -1, "must call os::init");
2452   return page_size;
2453 }
2454 
2455 static bool recoverable_mmap_error(int err) {
2456   // See if the error is one we can let the caller handle. This
2457   // list of errno values comes from the Solaris mmap(2) man page.
2458   switch (err) {
2459   case EBADF:
2460   case EINVAL:
2461   case ENOTSUP:
2462     // let the caller deal with these errors
2463     return true;
2464 
2465   default:
2466     // Any remaining errors on this OS can cause our reserved mapping
2467     // to be lost. That can cause confusion where different data
2468     // structures think they have the same memory mapped. The worst
2469     // scenario is if both the VM and a library think they have the
2470     // same memory mapped.
2471     return false;
2472   }
2473 }
2474 
2475 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2476                                     int err) {
2477   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2478           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2479           strerror(err), err);
2480 }
2481 
2482 static void warn_fail_commit_memory(char* addr, size_t bytes,
2483                                     size_t alignment_hint, bool exec,
2484                                     int err) {
2485   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2486           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2487           alignment_hint, exec, strerror(err), err);
2488 }
2489 
2490 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2491   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2492   size_t size = bytes;
2493   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2494   if (res != NULL) {
2495     if (UseNUMAInterleaving) {
2496       numa_make_global(addr, bytes);
2497     }
2498     return 0;
2499   }
2500 
2501   int err = errno;  // save errno from mmap() call in mmap_chunk()
2502 
2503   if (!recoverable_mmap_error(err)) {
2504     warn_fail_commit_memory(addr, bytes, exec, err);
2505     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2506   }
2507 
2508   return err;
2509 }
2510 
2511 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2512   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2513 }
2514 
2515 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2516                                   const char* mesg) {
2517   assert(mesg != NULL, "mesg must be specified");
2518   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2519   if (err != 0) {
2520     // the caller wants all commit errors to exit with the specified mesg:
2521     warn_fail_commit_memory(addr, bytes, exec, err);
2522     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2523   }
2524 }
2525 
2526 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2527                                     size_t alignment_hint, bool exec) {
2528   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2529   if (err == 0) {
2530     if (UseLargePages && (alignment_hint > (size_t)vm_page_size())) {
2531       // If the large page size has been set and the VM
2532       // is using large pages, use the large page size
2533       // if it is smaller than the alignment hint. This is
2534       // a case where the VM wants to use a larger alignment size
2535       // for its own reasons but still want to use large pages
2536       // (which is what matters for setting the mpss range).
2537       size_t page_size = 0;
2538       if (large_page_size() < alignment_hint) {
2539         assert(UseLargePages, "Expected to be here for large page use only");
2540         page_size = large_page_size();
2541       } else {
2542         // If the alignment hint is less than the large page
2543         // size, the VM wants a particular alignment (thus the hint)
2544         // for internal reasons.  Try to set the mpss range using
2545         // the alignment_hint.
2546         page_size = alignment_hint;
2547       }
2548       // Since this is a hint, ignore any failures.
2549       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2550     }
2551   }
2552   return err;
2553 }
2554 
2555 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2556                           bool exec) {
2557   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2558 }
2559 
2560 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2561                                   size_t alignment_hint, bool exec,
2562                                   const char* mesg) {
2563   assert(mesg != NULL, "mesg must be specified");
2564   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2565   if (err != 0) {
2566     // the caller wants all commit errors to exit with the specified mesg:
2567     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2568     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2569   }
2570 }
2571 
2572 // Uncommit the pages in a specified region.
2573 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2574   if (madvise(addr, bytes, MADV_FREE) < 0) {
2575     debug_only(warning("MADV_FREE failed."));
2576     return;
2577   }
2578 }
2579 
2580 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2581   return os::commit_memory(addr, size, !ExecMem);
2582 }
2583 
2584 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2585   return os::uncommit_memory(addr, size);
2586 }
2587 
2588 // Change the page size in a given range.
2589 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2590   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2591   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2592   if (UseLargePages) {
2593     Solaris::setup_large_pages(addr, bytes, alignment_hint);
2594   }
2595 }
2596 
2597 // Tell the OS to make the range local to the first-touching LWP
2598 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2599   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2600   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2601     debug_only(warning("MADV_ACCESS_LWP failed."));
2602   }
2603 }
2604 
2605 // Tell the OS that this range would be accessed from different LWPs.
2606 void os::numa_make_global(char *addr, size_t bytes) {
2607   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2608   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2609     debug_only(warning("MADV_ACCESS_MANY failed."));
2610   }
2611 }
2612 
2613 // Get the number of the locality groups.
2614 size_t os::numa_get_groups_num() {
2615   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2616   return n != -1 ? n : 1;
2617 }
2618 
2619 // Get a list of leaf locality groups. A leaf lgroup is a group that
2620 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
2621 // board. An LWP is assigned to one of these groups upon creation.
2622 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2623    if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2624      ids[0] = 0;
2625      return 1;
2626    }
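   // Breadth-first walk of the lgroup hierarchy: ids[0..bottom) holds the
   // confirmed leaf groups that have memory, ids[cur..top) is the queue of
   // groups still to be expanded.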
2627    int result_size = 0, top = 1, bottom = 0, cur = 0;
2628    for (int k = 0; k < size; k++) {
2629      int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2630                                     (Solaris::lgrp_id_t*)&ids[top], size - top);
2631      if (r == -1) {
2632        ids[0] = 0;
2633        return 1;
2634      }
2635      if (!r) {
2636        // That's a leaf node.
2637        assert (bottom <= cur, "Sanity check");
2638        // Check if the node has memory
2639        if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2640                                    NULL, 0, LGRP_RSRC_MEM) > 0) {
2641          ids[bottom++] = ids[cur];
2642        }
2643      }
2644      top += r;
2645      cur++;
2646    }
2647    if (bottom == 0) {
2648      // Handle the situation when the OS reports no memory available.
2649      // Assume UMA architecture.
2650      ids[0] = 0;
2651      return 1;
2652    }
2653    return bottom;
2654 }
2655 
2656 // Detect the topology change. Typically happens during CPU plugging-unplugging.
2657 bool os::numa_topology_changed() {
2658   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2659   if (is_stale != -1 && is_stale) {
2660     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2661     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2662     assert(c != 0, "Failure to initialize LGRP API");
2663     Solaris::set_lgrp_cookie(c);
2664     return true;
2665   }
2666   return false;
2667 }
2668 
2669 // Get the group id of the current LWP.
2670 int os::numa_get_group_id() {
2671   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2672   if (lgrp_id == -1) {
2673     return 0;
2674   }
2675   const int size = os::numa_get_groups_num();
2676   int *ids = (int*)alloca(size * sizeof(int));
2677 
2678   // Get the ids of all lgroups with memory; r is the count.
2679   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2680                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2681   if (r <= 0) {
2682     return 0;
2683   }
2684   return ids[os::random() % r];
2685 }
2686 
2687 // Request information about the page.
2688 bool os::get_page_info(char *start, page_info* info) {
2689   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2690   uint64_t addr = (uintptr_t)start;
2691   uint64_t outdata[2];
2692   uint_t validity = 0;
2693 
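  // meminfo() sets bit 0 of the validity mask when the address itself was
  // valid, and bit (i+1) when the data requested by info_types[i] is valid
  // (here: MEMINFO_VLGRP in outdata[0], MEMINFO_VPAGESIZE in outdata[1]).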
2694   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2695     return false;
2696   }
2697 
2698   info->size = 0;
2699   info->lgrp_id = -1;
2700 
2701   if ((validity & 1) != 0) {
2702     if ((validity & 2) != 0) {
2703       info->lgrp_id = outdata[0];
2704     }
2705     if ((validity & 4) != 0) {
2706       info->size = outdata[1];
2707     }
2708     return true;
2709   }
2710   return false;
2711 }
2712 
2713 // Scan the pages from start to end until a page different than
2714 // the one described in the info parameter is encountered.
2715 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2716   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2717   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2718   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2719   uint_t validity[MAX_MEMINFO_CNT];
2720 
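  // Probe the range in batches of up to MAX_MEMINFO_CNT addresses per
  // meminfo() call, stepping by the expected page size (never less than the
  // base page size), and stop at the first page whose size or lgroup differs
  // from what the caller expects.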
2721   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2722   uint64_t p = (uint64_t)start;
2723   while (p < (uint64_t)end) {
2724     addrs[0] = p;
2725     size_t addrs_count = 1;
2726     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2727       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2728       addrs_count++;
2729     }
2730 
2731     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2732       return NULL;
2733     }
2734 
2735     size_t i = 0;
2736     for (; i < addrs_count; i++) {
2737       if ((validity[i] & 1) != 0) {
2738         if ((validity[i] & 4) != 0) {
2739           if (outdata[types * i + 1] != page_expected->size) {
2740             break;
2741           }
2742         } else
2743           if (page_expected->size != 0) {
2744             break;
2745           }
2746 
2747         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2748           if (outdata[types * i] != page_expected->lgrp_id) {
2749             break;
2750           }
2751         }
2752       } else {
2753         return NULL;
2754       }
2755     }
2756 
2757     if (i < addrs_count) {
2758       if ((validity[i] & 2) != 0) {
2759         page_found->lgrp_id = outdata[types * i];
2760       } else {
2761         page_found->lgrp_id = -1;
2762       }
2763       if ((validity[i] & 4) != 0) {
2764         page_found->size = outdata[types * i + 1];
2765       } else {
2766         page_found->size = 0;
2767       }
2768       return (char*)addrs[i];
2769     }
2770 
2771     p = addrs[addrs_count - 1] + page_size;
2772   }
2773   return end;
2774 }
2775 
2776 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2777   size_t size = bytes;
2778   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2779   // uncommitted page. Otherwise, the read/write might succeed if we
2780   // have enough swap space to back the physical page.
2781   return
2782     NULL != Solaris::mmap_chunk(addr, size,
2783                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2784                                 PROT_NONE);
2785 }
2786 
2787 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2788   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2789 
2790   if (b == MAP_FAILED) {
2791     return NULL;
2792   }
2793   return b;
2794 }
2795 
2796 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
2797   char* addr = requested_addr;
2798   int flags = MAP_PRIVATE | MAP_NORESERVE;
2799 
2800   assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
2801 
2802   if (fixed) {
2803     flags |= MAP_FIXED;
2804   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
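    // With MAP_ALIGN, mmap interprets the address argument as the required
    // alignment of the mapping rather than as a placement hint.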
2805     flags |= MAP_ALIGN;
2806     addr = (char*) alignment_hint;
2807   }
2808 
2809   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2810   // uncommitted page. Otherwise, the read/write might succeed if we
2811   // have enough swap space to back the physical page.
2812   return mmap_chunk(addr, bytes, flags, PROT_NONE);
2813 }
2814 
2815 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2816   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
2817 
2818   guarantee(requested_addr == NULL || requested_addr == addr,
2819             "OS failed to return requested mmap address.");
2820   return addr;
2821 }
2822 
2823 // Reserve memory at an arbitrary address, only if that area is
2824 // available (and not reserved for something else).
2825 
2826 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2827   const int max_tries = 10;
2828   char* base[max_tries];
2829   size_t size[max_tries];
2830 
2831   // Solaris adds a gap between mmap'ed regions.  The size of the gap
2832   // is dependent on the requested size and the MMU.  Our initial gap
2833   // value here is just a guess and will be corrected later.
2834   bool had_top_overlap = false;
2835   bool have_adjusted_gap = false;
2836   size_t gap = 0x400000;
2837 
2838   // Assert only that the size is a multiple of the page size, since
2839   // that's all that mmap requires, and since that's all we really know
2840   // about at this low abstraction level.  If we need higher alignment,
2841   // we can either pass an alignment to this method or verify alignment
2842   // in one of the methods further up the call chain.  See bug 5044738.
2843   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2844 
2845   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2846   // Give it a try, if the kernel honors the hint we can return immediately.
2847   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2848 
2849   volatile int err = errno;
2850   if (addr == requested_addr) {
2851     return addr;
2852   } else if (addr != NULL) {
2853     pd_unmap_memory(addr, bytes);
2854   }
2855 
2856   if (PrintMiscellaneous && Verbose) {
2857     char buf[256];
2858     buf[0] = '\0';
2859     if (addr == NULL) {
2860       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
2861     }
2862     warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2863             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2864             "%s", bytes, requested_addr, addr, buf);
2865   }
2866 
2867   // Address hint method didn't work.  Fall back to the old method.
2868   // In theory, once SNV becomes our oldest supported platform, this
2869   // code will no longer be needed.
2870   //
2871   // Repeatedly allocate blocks until the block is allocated at the
2872   // right spot. Give up after max_tries.
2873   int i;
2874   for (i = 0; i < max_tries; ++i) {
2875     base[i] = reserve_memory(bytes);
2876 
2877     if (base[i] != NULL) {
2878       // Is this the block we wanted?
2879       if (base[i] == requested_addr) {
2880         size[i] = bytes;
2881         break;
2882       }
2883 
2884       // check that the gap value is right
2885       if (had_top_overlap && !have_adjusted_gap) {
2886         size_t actual_gap = base[i-1] - base[i] - bytes;
2887         if (gap != actual_gap) {
2888           // adjust the gap value and retry the last 2 allocations
2889           assert(i > 0, "gap adjustment code problem");
2890           have_adjusted_gap = true;  // adjust the gap only once, just in case
2891           gap = actual_gap;
2892           if (PrintMiscellaneous && Verbose) {
2893             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2894           }
2895           unmap_memory(base[i], bytes);
2896           unmap_memory(base[i-1], size[i-1]);
2897           i-=2;
2898           continue;
2899         }
2900       }
2901 
2902       // Does this overlap the block we wanted? Give back the overlapped
2903       // parts and try again.
2904       //
2905       // There is still a bug in this code: if top_overlap == bytes,
2906       // the overlap is offset from requested region by the value of gap.
2907       // In this case giving back the overlapped part will not work,
2908       // because we'll give back the entire block at base[i] and
2909       // therefore the subsequent allocation will not generate a new gap.
2910       // This could be fixed with a new algorithm that used larger
2911       // or variable size chunks to find the requested region -
2912       // but such a change would introduce additional complications.
2913       // It's rare enough that the planets align for this bug,
2914       // so we'll just wait for a fix for 6204603/5003415 which
2915       // will provide a mmap flag to allow us to avoid this business.
2916 
2917       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2918       if (top_overlap >= 0 && top_overlap < bytes) {
2919         had_top_overlap = true;
2920         unmap_memory(base[i], top_overlap);
2921         base[i] += top_overlap;
2922         size[i] = bytes - top_overlap;
2923       } else {
2924         size_t bottom_overlap = base[i] + bytes - requested_addr;
2925         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2926           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
2927             warning("attempt_reserve_memory_at: possible alignment bug");
2928           }
2929           unmap_memory(requested_addr, bottom_overlap);
2930           size[i] = bytes - bottom_overlap;
2931         } else {
2932           size[i] = bytes;
2933         }
2934       }
2935     }
2936   }
2937 
2938   // Give back the unused reserved pieces.
2939 
2940   for (int j = 0; j < i; ++j) {
2941     if (base[j] != NULL) {
2942       unmap_memory(base[j], size[j]);
2943     }
2944   }
2945 
2946   return (i < max_tries) ? requested_addr : NULL;
2947 }
2948 
2949 bool os::pd_release_memory(char* addr, size_t bytes) {
2950   size_t size = bytes;
2951   return munmap(addr, size) == 0;
2952 }
2953 
2954 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
2955   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
2956          "addr must be page aligned");
2957   int retVal = mprotect(addr, bytes, prot);
2958   return retVal == 0;
2959 }
2960 
2961 // Protect memory (Used to pass readonly pages through
2962 // JNI GetArray<type>Elements with empty arrays.)
2963 // Also, used for serialization page and for compressed oops null pointer
2964 // checking.
2965 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2966                         bool is_committed) {
2967   unsigned int p = 0;
2968   switch (prot) {
2969   case MEM_PROT_NONE: p = PROT_NONE; break;
2970   case MEM_PROT_READ: p = PROT_READ; break;
2971   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2972   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2973   default:
2974     ShouldNotReachHere();
2975   }
2976   // is_committed is unused.
2977   return solaris_mprotect(addr, bytes, p);
2978 }
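
     // For example (illustrative call; the page must already be committed),
     // making a page read-only, as done for the serialization page, would be:
     //   os::protect_memory(page_addr, os::vm_page_size(), os::MEM_PROT_READ, true);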
2979 
2980 // guard_memory and unguard_memory only happens within stack guard pages.
2981 // Since ISM pertains only to the heap, guard and unguard memory should not
2982 // happen with an ISM region.
2983 bool os::guard_memory(char* addr, size_t bytes) {
2984   return solaris_mprotect(addr, bytes, PROT_NONE);
2985 }
2986 
2987 bool os::unguard_memory(char* addr, size_t bytes) {
2988   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
2989 }
2990 
2991 // Large page support
2992 static size_t _large_page_size = 0;
2993 
2994 // Insertion sort for small arrays (descending order).
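     // For example, an input of { 8K, 4M, 64K, 2M } is sorted in place to
     // { 4M, 2M, 64K, 8K }.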
2995 static void insertion_sort_descending(size_t* array, int len) {
2996   for (int i = 0; i < len; i++) {
2997     size_t val = array[i];
2998     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
2999       size_t tmp = array[key];
3000       array[key] = array[key - 1];
3001       array[key - 1] = tmp;
3002     }
3003   }
3004 }
3005 
3006 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
3007   const unsigned int usable_count = VM_Version::page_size_count();
3008   if (usable_count == 1) {
3009     return false;
3010   }
3011 
3012   // Find the right getpagesizes interface.  When Solaris 11 is the minimum
3013   // build platform, getpagesizes() (without the '2') can be called directly.
3014   typedef int (*gps_t)(size_t[], int);
3015   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3016   if (gps_func == NULL) {
3017     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3018     if (gps_func == NULL) {
3019       if (warn) {
3020         warning("MPSS is not supported by the operating system.");
3021       }
3022       return false;
3023     }
3024   }
3025 
3026   // Fill the array of page sizes.
3027   int n = (*gps_func)(_page_sizes, page_sizes_max);
3028   assert(n > 0, "Solaris bug?");
3029 
3030   if (n == page_sizes_max) {
3031     // Add a sentinel value; this is necessary only if the array was completely
3032     // filled, since the array is static and therefore zeroed at initialization.
3033     _page_sizes[--n] = 0;
3034     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3035   }
3036   assert(_page_sizes[n] == 0, "missing sentinel");
3037   trace_page_sizes("available page sizes", _page_sizes, n);
3038 
3039   if (n == 1) return false;     // Only one page size available.
3040 
3041   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3042   // select up to usable_count elements.  First sort the array, find the first
3043   // acceptable value, then copy the usable sizes to the top of the array and
3044   // trim the rest.  Make sure to include the default page size :-).
3045   //
3046   // A better policy could get rid of the 4M limit by taking the sizes of the
3047   // important VM memory regions (java heap and possibly the code cache) into
3048   // account.
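       //
       // Worked example (hypothetical sizes, assuming vm_page_size() == 8K and
       // usable_count >= 4): with available sizes { 256M, 4M, 512K, 64K } and
       // the default 4M limit, beg skips 256M, the copy keeps { 4M, 512K, 64K },
       // the default page size is appended, and the usable array becomes
       // { 4M, 512K, 64K, 8K } with *page_size == 4M.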
3049   insertion_sort_descending(_page_sizes, n);
3050   const size_t size_limit =
3051     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3052   int beg;
3053   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
3054   const int end = MIN2((int)usable_count, n) - 1;
3055   for (int cur = 0; cur < end; ++cur, ++beg) {
3056     _page_sizes[cur] = _page_sizes[beg];
3057   }
3058   _page_sizes[end] = vm_page_size();
3059   _page_sizes[end + 1] = 0;
3060 
3061   if (_page_sizes[end] > _page_sizes[end - 1]) {
3062     // Default page size is not the smallest; sort again.
3063     insertion_sort_descending(_page_sizes, end + 1);
3064   }
3065   *page_size = _page_sizes[0];
3066 
3067   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3068   return true;
3069 }
3070 
3071 void os::large_page_init() {
3072   if (UseLargePages) {
3073     // print a warning if any large page related flag is specified on command line
3074     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
3075                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3076 
3077     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3078   }
3079 }
3080 
3081 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3082   // Signal to OS that we want large pages for addresses
3083   // from addr, addr + bytes
3084   struct memcntl_mha mpss_struct;
3085   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3086   mpss_struct.mha_pagesize = align;
3087   mpss_struct.mha_flags = 0;
3088   // Upon successful completion, memcntl() returns 0
3089   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3090     debug_only(warning("Attempt to use MPSS failed."));
3091     return false;
3092   }
3093   return true;
3094 }
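
     // Illustrative use (hypothetical names): after reserving a range for the
     // Java heap, a caller could advise large pages for it with
     //   os::Solaris::setup_large_pages(heap_base, heap_bytes, os::large_page_size());
     // subject to suitable alignment of heap_base and heap_bytes.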
3095 
3096 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3097   fatal("os::reserve_memory_special should not be called on Solaris.");
3098   return NULL;
3099 }
3100 
3101 bool os::release_memory_special(char* base, size_t bytes) {
3102   fatal("os::release_memory_special should not be called on Solaris.");
3103   return false;
3104 }
3105 
3106 size_t os::large_page_size() {
3107   return _large_page_size;
3108 }
3109 
3110 // MPSS allows application to commit large page memory on demand; with ISM
3111 // the entire memory region must be allocated as shared memory.
3112 bool os::can_commit_large_page_memory() {
3113   return true;
3114 }
3115 
3116 bool os::can_execute_large_page_memory() {
3117   return true;
3118 }
3119 
3120 // Read calls from inside the vm need to perform state transitions
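     // The RESTARTABLE macro used below retries the wrapped call while it fails
     // with EINTR, roughly:
     //   do { res = ::read(fd, buf, nBytes); } while (res == OS_ERR && errno == EINTR);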
3121 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3122   size_t res;
3123   JavaThread* thread = (JavaThread*)Thread::current();
3124   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
3125   ThreadBlockInVM tbiv(thread);
3126   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3127   return res;
3128 }
3129 
3130 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3131   size_t res;
3132   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
3133           "Assumed _thread_in_native");
3134   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3135   return res;
3136 }
3137 
3138 void os::naked_short_sleep(jlong ms) {
3139   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3140 
3141   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3142   // Solaris requires -lrt for this.
3143   usleep((ms * 1000));
3144 
3145   return;
3146 }
3147 
3148 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3149 void os::infinite_sleep() {
3150   while (true) {    // sleep forever ...
3151     ::sleep(100);   // ... 100 seconds at a time
3152   }
3153 }
3154 
3155 // Used to convert frequent JVM_Yield() to nops
3156 bool os::dont_yield() {
3157   if (DontYieldALot) {
3158     static hrtime_t last_time = 0;
3159     hrtime_t diff = getTimeNanos() - last_time;
3160 
3161     if (diff < DontYieldALotInterval * 1000000)
3162       return true;
3163 
3164     last_time += diff;
3165 
3166     return false;
3167   }
3168   else {
3169     return false;
3170   }
3171 }
3172 
3173 // Caveat: Solaris os::yield() causes a thread-state transition whereas
3174 // the linux and win32 implementations do not.  This should be checked.
3175 
3176 void os::yield() {
3177   // Yields to all threads with same or greater priority
3178   os::sleep(Thread::current(), 0, false);
3179 }
3180 
3181 // Note that yield semantics are defined by the scheduling class to which
3182 // the thread currently belongs.  Typically, yield will _not_ yield to
3183 // other equal or higher priority threads that reside on the dispatch queues
3184 // of other CPUs.
3185 
3186 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
3187 
3188 void os::yield_all() {
3189   // Yields to all threads, including threads with lower priorities
3190   os::sleep(Thread::current(), 1, false);
3191 }
3192 
3193 // Interface for setting lwp priorities.  If we are using T2 libthread,
3194 // which forces the use of BoundThreads or we manually set UseBoundThreads,
3195 // all of our threads will be assigned to real lwp's.  Using the thr_setprio
3196 // function is meaningless in this mode, so we must adjust the real lwp's priority.
3197 // The routines below implement the getting and setting of lwp priorities.
3198 //
3199 // Note: T2 is now the only supported libthread. UseBoundThreads flag is
3200 //       being deprecated and all threads are now BoundThreads
3201 //
3202 // Note: There are three priority scales used on Solaris.  Java priorities,
3203 //       which range from 1 to 10, the libthread "thr_setprio" scale, which
3204 //       ranges from 0 to 127, and the current scheduling class of the process
3205 //       we are running in.  This is typically from -60 to +60.
3206 //       The setting of the lwp priorities is done after a call to thr_setprio
3207 //       so Java priorities are mapped to libthread priorities and we map from
3208 //       the latter to lwp priorities.  We don't keep priorities stored in
3209 //       Java priorities since some of our worker threads want to set priorities
3210 //       higher than all Java threads.
3211 //
3212 // For related information:
3213 // (1)  man -s 2 priocntl
3214 // (2)  man -s 4 priocntl
3215 // (3)  man dispadmin
3216 // =    librt.so
3217 // =    libthread/common/rtsched.c - thrp_setlwpprio().
3218 // =    ps -cL <pid> ... to validate priority.
3219 // =    sched_get_priority_min and _max
3220 //              pthread_create
3221 //              sched_setparam
3222 //              pthread_setschedparam
3223 //
3224 // Assumptions:
3225 // +    We assume that all threads in the process belong to the same
3226 //              scheduling class, i.e., a homogeneous process.
3227 // +    Must be root or in the IA group to change the "interactive" attribute.
3228 //              Priocntl() will fail silently.  The only indication of failure is when
3229 //              we read-back the value and notice that it hasn't changed.
3230 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3231 // +    For RT, change timeslice as well.  Invariant:
3232 //              constant "priority integral"
3233 //              Konst == TimeSlice * (60-Priority)
3234 //              Given a priority, compute appropriate timeslice.
3235 // +    Higher numerical values have higher priority.
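     //
     // Worked example of the two-stage mapping (class range is illustrative;
     // the real range is queried via priocntl below):  Java NormPriority (5)
     // maps to 127 in java_to_os_priority[], and for a class with user
     // priority range [-60..60] scale_to_lwp_priority(-60, 60, 127) returns
     // 60, the class maximum.  Java MinPriority (1) maps to 0, which scales
     // to -60, the class minimum.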
3236 
3237 // sched class attributes
3238 typedef struct {
3239         int   schedPolicy;              // classID
3240         int   maxPrio;
3241         int   minPrio;
3242 } SchedInfo;
3243 
3244 
3245 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3246 
3247 #ifdef ASSERT
3248 static int  ReadBackValidate = 1;
3249 #endif
3250 static int  myClass     = 0;
3251 static int  myMin       = 0;
3252 static int  myMax       = 0;
3253 static int  myCur       = 0;
3254 static bool priocntl_enable = false;
3255 
3256 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
3257 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3258 
3259 
3260 // lwp_priocntl_init
3261 //
3262 // Try to determine the priority scale for our process.
3263 //
3264 // Return errno or 0 if OK.
3265 //
3266 static int lwp_priocntl_init () {
3267   int rslt;
3268   pcinfo_t ClassInfo;
3269   pcparms_t ParmInfo;
3270   int i;
3271 
3272   if (!UseThreadPriorities) return 0;
3273 
3274   // If ThreadPriorityPolicy is 1, switch tables
3275   if (ThreadPriorityPolicy == 1) {
3276     for (i = 0 ; i < CriticalPriority+1; i++)
3277       os::java_to_os_priority[i] = prio_policy1[i];
3278   }
3279   if (UseCriticalJavaThreadPriority) {
3280     // MaxPriority always maps to the FX scheduling class and criticalPrio.
3281     // See set_native_priority() and set_lwp_class_and_priority().
3282     // Save original MaxPriority mapping in case attempt to
3283     // use critical priority fails.
3284     java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3285     // Set negative to distinguish from other priorities
3286     os::java_to_os_priority[MaxPriority] = -criticalPrio;
3287   }
3288 
3289   // Get IDs for a set of well-known scheduling classes.
3290   // TODO-FIXME: GETCLINFO returns the current # of classes in the
3291   // system.  We should have a loop that iterates over the
3292   // classID values, which are known to be "small" integers.
3293 
3294   strcpy(ClassInfo.pc_clname, "TS");
3295   ClassInfo.pc_cid = -1;
3296   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3297   if (rslt < 0) return errno;
3298   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3299   tsLimits.schedPolicy = ClassInfo.pc_cid;
3300   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3301   tsLimits.minPrio = -tsLimits.maxPrio;
3302 
3303   strcpy(ClassInfo.pc_clname, "IA");
3304   ClassInfo.pc_cid = -1;
3305   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3306   if (rslt < 0) return errno;
3307   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3308   iaLimits.schedPolicy = ClassInfo.pc_cid;
3309   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3310   iaLimits.minPrio = -iaLimits.maxPrio;
3311 
3312   strcpy(ClassInfo.pc_clname, "RT");
3313   ClassInfo.pc_cid = -1;
3314   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3315   if (rslt < 0) return errno;
3316   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3317   rtLimits.schedPolicy = ClassInfo.pc_cid;
3318   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3319   rtLimits.minPrio = 0;
3320 
3321   strcpy(ClassInfo.pc_clname, "FX");
3322   ClassInfo.pc_cid = -1;
3323   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3324   if (rslt < 0) return errno;
3325   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3326   fxLimits.schedPolicy = ClassInfo.pc_cid;
3327   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3328   fxLimits.minPrio = 0;
3329 
3330   // Query our "current" scheduling class.
3331   // This will normally be IA, TS or, rarely, FX or RT.
3332   memset(&ParmInfo, 0, sizeof(ParmInfo));
3333   ParmInfo.pc_cid = PC_CLNULL;
3334   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3335   if (rslt < 0) return errno;
3336   myClass = ParmInfo.pc_cid;
3337 
3338   // We now know our scheduling classId, get specific information
3339   // about the class.
3340   ClassInfo.pc_cid = myClass;
3341   ClassInfo.pc_clname[0] = 0;
3342   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3343   if (rslt < 0) return errno;
3344 
3345   if (ThreadPriorityVerbose) {
3346     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3347   }
3348 
3349   memset(&ParmInfo, 0, sizeof(pcparms_t));
3350   ParmInfo.pc_cid = PC_CLNULL;
3351   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3352   if (rslt < 0) return errno;
3353 
3354   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3355     myMin = rtLimits.minPrio;
3356     myMax = rtLimits.maxPrio;
3357   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3358     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3359     myMin = iaLimits.minPrio;
3360     myMax = iaLimits.maxPrio;
3361     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3362   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3363     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3364     myMin = tsLimits.minPrio;
3365     myMax = tsLimits.maxPrio;
3366     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3367   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3368     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3369     myMin = fxLimits.minPrio;
3370     myMax = fxLimits.maxPrio;
3371     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3372   } else {
3373     // No clue - punt
3374     if (ThreadPriorityVerbose)
3375       tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
3376     return EINVAL;      // no clue, punt
3377   }
3378 
3379   if (ThreadPriorityVerbose) {
3380     tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
3381   }
3382 
3383   priocntl_enable = true;  // Enable changing priorities
3384   return 0;
3385 }
3386 
3387 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3388 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3389 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3390 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3391 
3392 
3393 // scale_to_lwp_priority
3394 //
3395 // Convert from the libthread "thr_setprio" scale to our current
3396 // lwp scheduling class scale.
3397 //
3398 static
3399 int     scale_to_lwp_priority (int rMin, int rMax, int x)
3400 {
3401   int v;
3402 
3403   if (x == 127) return rMax;            // avoid round-down
3404   v = (((x*(rMax-rMin)))/128)+rMin;
3405   return v;
3406 }
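
     // Example: scale_to_lwp_priority(-60, 60, 96) == ((96 * (60 - (-60))) / 128) + (-60)
     //          == (11520 / 128) - 60 == 30.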
3407 
3408 
3409 // set_lwp_class_and_priority
3410 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3411                                int newPrio, int new_class, bool scale) {
3412   int rslt;
3413   int Actual, Expected, prv;
3414   pcparms_t ParmInfo;                   // for GET-SET
3415 #ifdef ASSERT
3416   pcparms_t ReadBack;                   // for readback
3417 #endif
3418 
3419   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3420   // Query current values.
3421   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3422   // Cache "pcparms_t" in global ParmCache.
3423   // TODO: elide set-to-same-value
3424 
3425   // If something went wrong on init, don't change priorities.
3426   if ( !priocntl_enable ) {
3427     if (ThreadPriorityVerbose)
3428       tty->print_cr("Trying to set priority but init failed, ignoring");
3429     return EINVAL;
3430   }
3431 
3432   // If lwp hasn't started yet, just return
3433   // the _start routine will call us again.
3434   if ( lwpid <= 0 ) {
3435     if (ThreadPriorityVerbose) {
3436       tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
3437                      INTPTR_FORMAT " to %d, lwpid not set",
3438                      ThreadID, newPrio);
3439     }
3440     return 0;
3441   }
3442 
3443   if (ThreadPriorityVerbose) {
3444     tty->print_cr ("set_lwp_class_and_priority("
3445                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3446                    ThreadID, lwpid, newPrio);
3447   }
3448 
3449   memset(&ParmInfo, 0, sizeof(pcparms_t));
3450   ParmInfo.pc_cid = PC_CLNULL;
3451   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3452   if (rslt < 0) return errno;
3453 
3454   int cur_class = ParmInfo.pc_cid;
3455   ParmInfo.pc_cid = (id_t)new_class;
3456 
3457   if (new_class == rtLimits.schedPolicy) {
3458     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3459     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3460                                                        rtLimits.maxPrio, newPrio)
3461                                : newPrio;
3462     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3463     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3464     if (ThreadPriorityVerbose) {
3465       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3466     }
3467   } else if (new_class == iaLimits.schedPolicy) {
3468     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3469     int maxClamped     = MIN2(iaLimits.maxPrio,
3470                               cur_class == new_class
3471                                 ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3472     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3473                                                        maxClamped, newPrio)
3474                                : newPrio;
3475     iaInfo->ia_uprilim = cur_class == new_class
3476                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3477     iaInfo->ia_mode    = IA_NOCHANGE;
3478     if (ThreadPriorityVerbose) {
3479       tty->print_cr("IA: [%d...%d] %d->%d\n",
3480                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3481     }
3482   } else if (new_class == tsLimits.schedPolicy) {
3483     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3484     int maxClamped     = MIN2(tsLimits.maxPrio,
3485                               cur_class == new_class
3486                                 ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3487     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3488                                                        maxClamped, newPrio)
3489                                : newPrio;
3490     tsInfo->ts_uprilim = cur_class == new_class
3491                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3492     if (ThreadPriorityVerbose) {
3493       tty->print_cr("TS: [%d...%d] %d->%d\n",
3494                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3495     }
3496   } else if (new_class == fxLimits.schedPolicy) {
3497     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3498     int maxClamped     = MIN2(fxLimits.maxPrio,
3499                               cur_class == new_class
3500                                 ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3501     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3502                                                        maxClamped, newPrio)
3503                                : newPrio;
3504     fxInfo->fx_uprilim = cur_class == new_class
3505                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3506     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3507     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3508     if (ThreadPriorityVerbose) {
3509       tty->print_cr("FX: [%d...%d] %d->%d\n",
3510                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3511     }
3512   } else {
3513     if (ThreadPriorityVerbose) {
3514       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3515     }
3516     return EINVAL;    // no clue, punt
3517   }
3518 
3519   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3520   if (ThreadPriorityVerbose && rslt) {
3521     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3522   }
3523   if (rslt < 0) return errno;
3524 
3525 #ifdef ASSERT
3526   // Sanity check: read back what we just attempted to set.
3527   // In theory it could have changed in the interim ...
3528   //
3529   // The priocntl system call is tricky.
3530   // Sometimes it'll validate the priority value argument and
3531   // return EINVAL if unhappy.  At other times it fails silently.
3532   // Readbacks are prudent.
3533 
3534   if (!ReadBackValidate) return 0;
3535 
3536   memset(&ReadBack, 0, sizeof(pcparms_t));
3537   ReadBack.pc_cid = PC_CLNULL;
3538   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3539   assert(rslt >= 0, "priocntl failed");
3540   Actual = Expected = 0xBAD;
3541   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3542   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3543     Actual   = RTPRI(ReadBack)->rt_pri;
3544     Expected = RTPRI(ParmInfo)->rt_pri;
3545   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3546     Actual   = IAPRI(ReadBack)->ia_upri;
3547     Expected = IAPRI(ParmInfo)->ia_upri;
3548   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3549     Actual   = TSPRI(ReadBack)->ts_upri;
3550     Expected = TSPRI(ParmInfo)->ts_upri;
3551   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3552     Actual   = FXPRI(ReadBack)->fx_upri;
3553     Expected = FXPRI(ParmInfo)->fx_upri;
3554   } else {
3555     if (ThreadPriorityVerbose) {
3556       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3557                     ParmInfo.pc_cid);
3558     }
3559   }
3560 
3561   if (Actual != Expected) {
3562     if (ThreadPriorityVerbose) {
3563       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3564                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3565     }
3566   }
3567 #endif
3568 
3569   return 0;
3570 }
3571 
3572 // Solaris only gives access to 128 real priorities at a time,
3573 // so we expand Java's ten to fill this range.  This would be better
3574 // if we dynamically adjusted relative priorities.
3575 //
3576 // The ThreadPriorityPolicy option allows us to select 2 different
3577 // priority scales.
3578 //
3579 // ThreadPriorityPolicy=0
3580 // Since the Solaris default priority is MaximumPriority, we do not
3581 // set a priority lower than Max unless a priority lower than
3582 // NormPriority is requested.
3583 //
3584 // ThreadPriorityPolicy=1
3585 // This mode causes the priority table to get filled with
3586 // linear values.  NormPriority gets mapped to 50% of the
3587 // maximum priority and so on.  This will cause VM threads
3588 // to get unfair treatment against other Solaris processes
3589 // which do not explicitly alter their thread priorities.
3590 //
3591 
3592 int os::java_to_os_priority[CriticalPriority + 1] = {
3593   -99999,         // 0 Entry should never be used
3594 
3595   0,              // 1 MinPriority
3596   32,             // 2
3597   64,             // 3
3598 
3599   96,             // 4
3600   127,            // 5 NormPriority
3601   127,            // 6
3602 
3603   127,            // 7
3604   127,            // 8
3605   127,            // 9 NearMaxPriority
3606 
3607   127,            // 10 MaxPriority
3608 
3609   -criticalPrio   // 11 CriticalPriority
3610 };
3611 
3612 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3613   OSThread* osthread = thread->osthread();
3614 
3615   // Save requested priority in case the thread hasn't been started
3616   osthread->set_native_priority(newpri);
3617 
3618   // Check for critical priority request
3619   bool fxcritical = false;
3620   if (newpri == -criticalPrio) {
3621     fxcritical = true;
3622     newpri = criticalPrio;
3623   }
3624 
3625   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3626   if (!UseThreadPriorities) return OS_OK;
3627 
3628   int status = 0;
3629 
3630   if (!fxcritical) {
3631     // Use thr_setprio only if we have a priority that thr_setprio understands
3632     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3633   }
3634 
3635   int lwp_status =
3636           set_lwp_class_and_priority(osthread->thread_id(),
3637           osthread->lwp_id(),
3638           newpri,
3639           fxcritical ? fxLimits.schedPolicy : myClass,
3640           !fxcritical);
3641   if (lwp_status != 0 && fxcritical) {
3642     // Try again, this time without changing the scheduling class
3643     newpri = java_MaxPriority_to_os_priority;
3644     lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3645             osthread->lwp_id(),
3646             newpri, myClass, false);
3647   }
3648   status |= lwp_status;
3649   return (status == 0) ? OS_OK : OS_ERR;
3650 }
3651 
3652 
3653 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
3654   int p;
3655   if ( !UseThreadPriorities ) {
3656     *priority_ptr = NormalPriority;
3657     return OS_OK;
3658   }
3659   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3660   if (status != 0) {
3661     return OS_ERR;
3662   }
3663   *priority_ptr = p;
3664   return OS_OK;
3665 }
3666 
3667 
3668 // Hint to the underlying OS that a task switch would not be good.
3669 // Void return because it's a hint and can fail.
3670 void os::hint_no_preempt() {
3671   schedctl_start(schedctl_init());
3672 }
3673 
3674 static void resume_clear_context(OSThread *osthread) {
3675   osthread->set_ucontext(NULL);
3676 }
3677 
3678 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3679   osthread->set_ucontext(context);
3680 }
3681 
3682 static Semaphore sr_semaphore;
3683 
3684 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
3685   // Save and restore errno to avoid confusing native code with EINTR
3686   // after sigsuspend.
3687   int old_errno = errno;
3688 
3689   OSThread* osthread = thread->osthread();
3690   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3691 
3692   os::SuspendResume::State current = osthread->sr.state();
3693   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3694     suspend_save_context(osthread, uc);
3695 
3696     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3697     os::SuspendResume::State state = osthread->sr.suspended();
3698     if (state == os::SuspendResume::SR_SUSPENDED) {
3699       sigset_t suspend_set;  // signals for sigsuspend()
3700 
3701       // get current set of blocked signals and unblock resume signal
3702       thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
3703       sigdelset(&suspend_set, os::Solaris::SIGasync());
3704 
3705       sr_semaphore.signal();
3706       // wait here until we are resumed
3707       while (1) {
3708         sigsuspend(&suspend_set);
3709 
3710         os::SuspendResume::State result = osthread->sr.running();
3711         if (result == os::SuspendResume::SR_RUNNING) {
3712           sr_semaphore.signal();
3713           break;
3714         }
3715       }
3716 
3717     } else if (state == os::SuspendResume::SR_RUNNING) {
3718       // request was cancelled, continue
3719     } else {
3720       ShouldNotReachHere();
3721     }
3722 
3723     resume_clear_context(osthread);
3724   } else if (current == os::SuspendResume::SR_RUNNING) {
3725     // request was cancelled, continue
3726   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3727     // ignore
3728   } else {
3729     // ignore
3730   }
3731 
3732   errno = old_errno;
3733 }
3734 
3735 void os::print_statistics() {
3736 }
3737 
3738 int os::message_box(const char* title, const char* message) {
3739   int i;
3740   fdStream err(defaultStream::error_fd());
3741   for (i = 0; i < 78; i++) err.print_raw("=");
3742   err.cr();
3743   err.print_raw_cr(title);
3744   for (i = 0; i < 78; i++) err.print_raw("-");
3745   err.cr();
3746   err.print_raw_cr(message);
3747   for (i = 0; i < 78; i++) err.print_raw("=");
3748   err.cr();
3749 
3750   char buf[16];
3751   // Prevent process from exiting upon "read error" without consuming all CPU
3752   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3753 
3754   return buf[0] == 'y' || buf[0] == 'Y';
3755 }
3756 
3757 static int sr_notify(OSThread* osthread) {
3758   int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
3759   assert_status(status == 0, status, "thr_kill");
3760   return status;
3761 }
3762 
3763 // "Randomly" selected value for how long we want to spin
3764 // before bailing out on suspending a thread, also how often
3765 // we send a signal to a thread we want to resume
3766 static const int RANDOMLY_LARGE_INTEGER = 1000000;
3767 static const int RANDOMLY_LARGE_INTEGER2 = 100;
3768 
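     // Sketch of the suspend/resume handshake implemented by
     // os::Solaris::SR_handler() above and do_suspend()/do_resume() below:
     //   requester: request_suspend() -> SR_SUSPEND_REQUEST, sr_notify() sends
     //              SIGasync, then waits on sr_semaphore (with a timeout) until
     //              the target reports SR_SUSPENDED.
     //   target:    SR_handler() saves its ucontext, switches to SR_SUSPENDED,
     //              signals sr_semaphore and blocks in sigsuspend() until the
     //              requester posts SR_WAKEUP_REQUEST and signals it again.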
3769 static bool do_suspend(OSThread* osthread) {
3770   assert(osthread->sr.is_running(), "thread should be running");
3771   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
3772 
3773   // mark as suspended and send signal
3774   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3775     // failed to switch, state wasn't running?
3776     ShouldNotReachHere();
3777     return false;
3778   }
3779 
3780   if (sr_notify(osthread) != 0) {
3781     ShouldNotReachHere();
3782   }
3783 
3784   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3785   while (true) {
3786     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
3787       break;
3788     } else {
3789       // timeout
3790       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3791       if (cancelled == os::SuspendResume::SR_RUNNING) {
3792         return false;
3793       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3794         // make sure that we consume the signal on the semaphore as well
3795         sr_semaphore.wait();
3796         break;
3797       } else {
3798         ShouldNotReachHere();
3799         return false;
3800       }
3801     }
3802   }
3803 
3804   guarantee(osthread->sr.is_suspended(), "Must be suspended");
3805   return true;
3806 }
3807 
3808 static void do_resume(OSThread* osthread) {
3809   assert(osthread->sr.is_suspended(), "thread should be suspended");
3810   assert(!sr_semaphore.trywait(), "invalid semaphore state");
3811 
3812   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3813     // failed to switch to WAKEUP_REQUEST
3814     ShouldNotReachHere();
3815     return;
3816   }
3817 
3818   while (true) {
3819     if (sr_notify(osthread) == 0) {
3820       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
3821         if (osthread->sr.is_running()) {
3822           return;
3823         }
3824       }
3825     } else {
3826       ShouldNotReachHere();
3827     }
3828   }
3829 
3830   guarantee(osthread->sr.is_running(), "Must be running!");
3831 }
3832 
3833 void os::SuspendedThreadTask::internal_do_task() {
3834   if (do_suspend(_thread->osthread())) {
3835     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3836     do_task(context);
3837     do_resume(_thread->osthread());
3838   }
3839 }
3840 
3841 class PcFetcher : public os::SuspendedThreadTask {
3842 public:
3843   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3844   ExtendedPC result();
3845 protected:
3846   void do_task(const os::SuspendedThreadTaskContext& context);
3847 private:
3848   ExtendedPC _epc;
3849 };
3850 
3851 ExtendedPC PcFetcher::result() {
3852   guarantee(is_done(), "task is not done yet.");
3853   return _epc;
3854 }
3855 
3856 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3857   Thread* thread = context.thread();
3858   OSThread* osthread = thread->osthread();
3859   if (osthread->ucontext() != NULL) {
3860     _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
3861   } else {
3862     // NULL context is unexpected, double-check this is the VMThread
3863     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3864   }
3865 }
3866 
3867 // A lightweight implementation that does not suspend the target thread and
3868 // thus returns only a hint. Used for profiling only!
3869 ExtendedPC os::get_thread_pc(Thread* thread) {
3870   // Make sure that it is called by the watcher and the Threads lock is owned.
3871   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
3872   // For now, is only used to profile the VM Thread
3873   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3874   PcFetcher fetcher(thread);
3875   fetcher.run();
3876   return fetcher.result();
3877 }
3878 
3879 
3880 // This does not do anything on Solaris. This is basically a hook for being
3881 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
3882 void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
3883   f(value, method, args, thread);
3884 }
3885 
3886 // This routine may be used by user applications as a "hook" to catch signals.
3887 // The user-defined signal handler must pass unrecognized signals to this
3888 // routine, and if it returns true (non-zero), then the signal handler must
3889 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
3890 // routine will never return false (zero), but will instead execute a VM panic
3891 // routine that kills the process.
3892 //
3893 // If this routine returns false, it is OK to call it again.  This allows
3894 // the user-defined signal handler to perform checks either before or after
3895 // the VM performs its own checks.  Naturally, the user code would be making
3896 // a serious error if it tried to handle an exception (such as a null check
3897 // or breakpoint) that the VM was generating for its own correct operation.
3898 //
3899 // This routine may recognize any of the following kinds of signals:
3900 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
3901 // os::Solaris::SIGasync
3902 // It should be consulted by handlers for any of those signals.
3903 // It explicitly does not recognize os::Solaris::SIGinterrupt
3904 //
3905 // The caller of this routine must pass in the three arguments supplied
3906 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
3907 // field of the structure passed to sigaction().  This routine assumes that
3908 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3909 //
3910 // Note that the VM will print warnings if it detects conflicting signal
3911 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3912 //
3913 extern "C" JNIEXPORT int
3914 JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
3915                           int abort_if_unrecognized);
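
     // Illustrative sketch of such a user-defined handler (hypothetical names,
     // not part of the VM):
     //
     //   static void my_app_handler(int sig, siginfo_t* info, void* uc) {
     //     if (JVM_handle_solaris_signal(sig, info, uc, /*abort_if_unrecognized=*/ 0)) {
     //       return;  // the VM recognized and handled the signal
     //     }
     //     // ... application-specific handling for signals the VM did not claim ...
     //   }
     //
     // installed via sigaction() with SA_SIGINFO and SA_RESTART, as described above.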
3916 
3917 
3918 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
3919   int orig_errno = errno;  // Preserve errno value over signal handler.
3920   JVM_handle_solaris_signal(sig, info, ucVoid, true);
3921   errno = orig_errno;
3922 }
3923 
3924 /* Do not delete - if guarantee is ever removed, a signal handler (even an
3925    empty one) is needed to provoke threads blocked on IO to return with EINTR.
3926    Note: this explicitly does NOT call JVM_handle_solaris_signal and
3927    does NOT participate in signal chaining, due to the requirement of
3928    NOT setting SA_RESTART in order to make EINTR work. */
3929 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
3930    if (UseSignalChaining) {
3931       struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
3932       if (actp && actp->sa_handler) {
3933         vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
3934       }
3935    }
3936 }
3937 
3938 // This boolean allows users to forward their own non-matching signals
3939 // to JVM_handle_solaris_signal, harmlessly.
3940 bool os::Solaris::signal_handlers_are_installed = false;
3941 
3942 // For signal-chaining
3943 bool os::Solaris::libjsig_is_loaded = false;
3944 typedef struct sigaction *(*get_signal_t)(int);
3945 get_signal_t os::Solaris::get_signal_action = NULL;
3946 
3947 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
3948   struct sigaction *actp = NULL;
3949 
3950   if ((libjsig_is_loaded) && (sig <= Maxlibjsigsigs)) {
3951     // Retrieve the old signal handler from libjsig
3952     actp = (*get_signal_action)(sig);
3953   }
3954   if (actp == NULL) {
3955     // Retrieve the preinstalled signal handler from jvm
3956     actp = get_preinstalled_handler(sig);
3957   }
3958 
3959   return actp;
3960 }
3961 
3962 static bool call_chained_handler(struct sigaction *actp, int sig,
3963                                  siginfo_t *siginfo, void *context) {
3964   // Call the old signal handler
3965   if (actp->sa_handler == SIG_DFL) {
3966     // It's more reasonable to let jvm treat it as an unexpected exception
3967     // instead of taking the default action.
3968     return false;
3969   } else if (actp->sa_handler != SIG_IGN) {
3970     if ((actp->sa_flags & SA_NODEFER) == 0) {
3971       // automatically block the signal
3972       sigaddset(&(actp->sa_mask), sig);
3973     }
3974 
3975     sa_handler_t hand;
3976     sa_sigaction_t sa;
3977     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3978     // retrieve the chained handler
3979     if (siginfo_flag_set) {
3980       sa = actp->sa_sigaction;
3981     } else {
3982       hand = actp->sa_handler;
3983     }
3984 
3985     if ((actp->sa_flags & SA_RESETHAND) != 0) {
3986       actp->sa_handler = SIG_DFL;
3987     }
3988 
3989     // try to honor the signal mask
3990     sigset_t oset;
3991     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3992 
3993     // call into the chained handler
3994     if (siginfo_flag_set) {
3995       (*sa)(sig, siginfo, context);
3996     } else {
3997       (*hand)(sig);
3998     }
3999 
4000     // restore the signal mask
4001     thr_sigsetmask(SIG_SETMASK, &oset, 0);
4002   }
4003   // Tell jvm's signal handler the signal is taken care of.
4004   return true;
4005 }
4006 
4007 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4008   bool chained = false;
4009   // signal-chaining
4010   if (UseSignalChaining) {
4011     struct sigaction *actp = get_chained_signal_action(sig);
4012     if (actp != NULL) {
4013       chained = call_chained_handler(actp, sig, siginfo, context);
4014     }
4015   }
4016   return chained;
4017 }
4018 
4019 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4020   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4021   if (preinstalled_sigs[sig] != 0) {
4022     return &chainedsigactions[sig];
4023   }
4024   return NULL;
4025 }
4026 
4027 void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
4028 
4029   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4030   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4031   chainedsigactions[sig] = oldAct;
4032   preinstalled_sigs[sig] = 1;
4033 }
4034 
4035 void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
4036   // Check for overwrite.
4037   struct sigaction oldAct;
4038   sigaction(sig, (struct sigaction*)NULL, &oldAct);
4039   void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
4040                                       : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
4041   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4042       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4043       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4044     if (AllowUserSignalHandlers || !set_installed) {
4045       // Do not overwrite; user takes responsibility to forward to us.
4046       return;
4047     } else if (UseSignalChaining) {
4048       if (oktochain) {
4049         // save the old handler in jvm
4050         save_preinstalled_handler(sig, oldAct);
4051       } else {
4052         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4053       }
4054       // libjsig also interposes the sigaction() call below and saves the
4055       // old sigaction on its own.
4056     } else {
4057       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4058                     "%#lx for signal %d.", (long)oldhand, sig));
4059     }
4060   }
4061 
4062   struct sigaction sigAct;
4063   sigfillset(&(sigAct.sa_mask));
4064   sigAct.sa_handler = SIG_DFL;
4065 
4066   sigAct.sa_sigaction = signalHandler;
4067   // Handle SIGSEGV on alternate signal stack if
4068   // not using stack banging
4069   if (!UseStackBanging && sig == SIGSEGV) {
4070     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4071   // Interruptible i/o requires SA_RESTART cleared so EINTR
4072   // is returned instead of restarting system calls
4073   } else if (sig == os::Solaris::SIGinterrupt()) {
4074     sigemptyset(&sigAct.sa_mask);
4075     sigAct.sa_handler = NULL;
4076     sigAct.sa_flags = SA_SIGINFO;
4077     sigAct.sa_sigaction = sigINTRHandler;
4078   } else {
4079     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4080   }
4081   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4082 
4083   sigaction(sig, &sigAct, &oldAct);
4084 
4085   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4086                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4087   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4088 }
4089 
4090 
4091 #define DO_SIGNAL_CHECK(sig) \
4092   if (!sigismember(&check_signal_done, sig)) \
4093     os::Solaris::check_signal_handler(sig)
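
     // For example, DO_SIGNAL_CHECK(SIGSEGV) expands to
     //   if (!sigismember(&check_signal_done, SIGSEGV))
     //     os::Solaris::check_signal_handler(SIGSEGV)
     // with the trailing semicolon supplied at the use site.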
4094 
4095 // This method is a periodic task to check for misbehaving JNI applications
4096 // under CheckJNI; we can add any other periodic checks here.
4097 
4098 void os::run_periodic_checks() {
4099   // A big source of grief is hijacking virt. addr 0x0 on Solaris,
4100   // thereby preventing NULL checks.
4101   if(!check_addr0_done) check_addr0_done = check_addr0(tty);
4102 
4103   if (check_signals == false) return;
4104 
4105   // SEGV and BUS if overridden could potentially prevent
4106   // generation of hs*.log in the event of a crash, and debugging
4107   // such a case can be very challenging, so we absolutely
4108   // check for the following for good measure:
4109   DO_SIGNAL_CHECK(SIGSEGV);
4110   DO_SIGNAL_CHECK(SIGILL);
4111   DO_SIGNAL_CHECK(SIGFPE);
4112   DO_SIGNAL_CHECK(SIGBUS);
4113   DO_SIGNAL_CHECK(SIGPIPE);
4114   DO_SIGNAL_CHECK(SIGXFSZ);
4115 
4116   // ReduceSignalUsage allows the user to override these handlers
4117   // see comments at the very top and jvm_solaris.h
4118   if (!ReduceSignalUsage) {
4119     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4120     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4121     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4122     DO_SIGNAL_CHECK(BREAK_SIGNAL);
4123   }
4124 
4125   // See comments above for using JVM1/JVM2 and UseAltSigs
4126   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4127   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4128 
4129 }
4130 
4131 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4132 
4133 static os_sigaction_t os_sigaction = NULL;
4134 
4135 void os::Solaris::check_signal_handler(int sig) {
4136   char buf[O_BUFLEN];
4137   address jvmHandler = NULL;
4138 
4139   struct sigaction act;
4140   if (os_sigaction == NULL) {
4141     // only trust the default sigaction, in case it has been interposed
4142     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4143     if (os_sigaction == NULL) return;
4144   }
4145 
4146   os_sigaction(sig, (struct sigaction*)NULL, &act);
4147 
4148   address thisHandler = (act.sa_flags & SA_SIGINFO)
4149     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4150     : CAST_FROM_FN_PTR(address, act.sa_handler) ;
4151 
4152 
4153   switch(sig) {
4154     case SIGSEGV:
4155     case SIGBUS:
4156     case SIGFPE:
4157     case SIGPIPE:
4158     case SIGXFSZ:
4159     case SIGILL:
4160       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4161       break;
4162 
4163     case SHUTDOWN1_SIGNAL:
4164     case SHUTDOWN2_SIGNAL:
4165     case SHUTDOWN3_SIGNAL:
4166     case BREAK_SIGNAL:
4167       jvmHandler = (address)user_handler();
4168       break;
4169 
4170     default:
4171       int intrsig = os::Solaris::SIGinterrupt();
4172       int asynsig = os::Solaris::SIGasync();
4173 
4174       if (sig == intrsig) {
4175         jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4176       } else if (sig == asynsig) {
4177         jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4178       } else {
4179         return;
4180       }
4181       break;
4182   }
4183 
4184 
4185   if (thisHandler != jvmHandler) {
4186     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4187     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4188     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4189     // No need to check this sig any longer
4190     sigaddset(&check_signal_done, sig);
4191     // When running under a non-interactive shell, SHUTDOWN2_SIGNAL is reassigned to SIG_IGN
4192     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
4193       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
4194                     exception_name(sig, buf, O_BUFLEN));
4195     }
4196   } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4197     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4198     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4199     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
4200     // No need to check this sig any longer
4201     sigaddset(&check_signal_done, sig);
4202   }
4203 
4204   // Print all the signal handler state
4205   if (sigismember(&check_signal_done, sig)) {
4206     print_signal_handlers(tty, buf, O_BUFLEN);
4207   }
4208 
4209 }
4210 
4211 void os::Solaris::install_signal_handlers() {
4212   bool libjsigdone = false;
4213   signal_handlers_are_installed = true;
4214 
4215   // signal-chaining
4216   typedef void (*signal_setting_t)();
4217   signal_setting_t begin_signal_setting = NULL;
4218   signal_setting_t end_signal_setting = NULL;
4219   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4220                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4221   if (begin_signal_setting != NULL) {
4222     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4223                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4224     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4225                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4226     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4227                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4228     libjsig_is_loaded = true;
4229     if (os::Solaris::get_libjsig_version != NULL) {
4230       libjsigversion =  (*os::Solaris::get_libjsig_version)();
4231     }
4232     assert(UseSignalChaining, "should enable signal-chaining");
4233   }
4234   if (libjsig_is_loaded) {
4235     // Tell libjsig jvm is setting signal handlers
4236     (*begin_signal_setting)();
4237   }
4238 
4239   set_signal_handler(SIGSEGV, true, true);
4240   set_signal_handler(SIGPIPE, true, true);
4241   set_signal_handler(SIGXFSZ, true, true);
4242   set_signal_handler(SIGBUS, true, true);
4243   set_signal_handler(SIGILL, true, true);
4244   set_signal_handler(SIGFPE, true, true);
4245 
4246 
4247   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4248 
4249     // Pre-1.4.1 libjsig is limited to chaining signals <= 32, so it
4250     // cannot register overridable signals, which might be > 32.
4251     if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
4252       // Tell libjsig the jvm has finished setting signal handlers
4253       (*end_signal_setting)();
4254       libjsigdone = true;
4255     }
4256   }
4257 
4258   // Never ok to chain our SIGinterrupt
4259   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4260   set_signal_handler(os::Solaris::SIGasync(), true, true);
4261 
4262   if (libjsig_is_loaded && !libjsigdone) {
4263     // Tell libjsig the jvm has finished setting signal handlers
4264     (*end_signal_setting)();
4265   }
4266 
4267   // We don't activate the signal checker if libjsig is in place; we trust ourselves.
4268   // And if AllowUserSignalHandlers is set, all bets are off.
4269   // Log that signal checking is off only if -verbose:jni is specified.
4270   if (CheckJNICalls) {
4271     if (libjsig_is_loaded) {
4272       if (PrintJNIResolving) {
4273         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4274       }
4275       check_signals = false;
4276     }
4277     if (AllowUserSignalHandlers) {
4278       if (PrintJNIResolving) {
4279         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4280       }
4281       check_signals = false;
4282     }
4283   }
4284 }
4285 
4286 
4287 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
4288 
4289 const char * signames[] = {
4290   "SIG0",
4291   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4292   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4293   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4294   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4295   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4296   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4297   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4298   "SIGCANCEL", "SIGLOST"
4299 };
4300 
4301 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4302   if (0 < exception_code && exception_code <= SIGRTMAX) {
4303     // signal
4304     if (exception_code < sizeof(signames)/sizeof(const char*)) {
4305        jio_snprintf(buf, size, "%s", signames[exception_code]);
4306     } else {
4307        jio_snprintf(buf, size, "SIG%d", exception_code);
4308     }
4309     return buf;
4310   } else {
4311     return NULL;
4312   }
4313 }
4314 
4315 // (Static) wrapper for getisax(2) call.
4316 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4317 
4318 // (Static) wrappers for the liblgrp API
4319 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4320 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4321 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4322 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4323 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4324 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4325 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4326 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4327 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4328 
4329 // (Static) wrapper for meminfo() call.
4330 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4331 
4332 static address resolve_symbol_lazy(const char* name) {
4333   address addr = (address) dlsym(RTLD_DEFAULT, name);
4334   if(addr == NULL) {
4335     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4336     addr = (address) dlsym(RTLD_NEXT, name);
4337   }
4338   return addr;
4339 }
4340 
4341 static address resolve_symbol(const char* name) {
4342   address addr = resolve_symbol_lazy(name);
4343   if(addr == NULL) {
4344     fatal(dlerror());
4345   }
4346   return addr;
4347 }
4348 
4349 void os::Solaris::libthread_init() {
4350   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4351 
4352   lwp_priocntl_init();
4353 
4354   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4355   if(func == NULL) {
4356     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
4357     // Guarantee that this VM is running on a new enough OS (5.6 or
4358     // later) that it will have a new enough libthread.so.
4359     guarantee(func != NULL, "libthread.so is too old.");
4360   }
4361 
4362   int size;
4363   void (*handler_info_func)(address *, int *);
4364   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4365   handler_info_func(&handler_start, &size);
4366   handler_end = handler_start + size;
4367 }
4368 
4369 
4370 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4371 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4372 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4373 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4374 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4375 int os::Solaris::_mutex_scope = USYNC_THREAD;
4376 
4377 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4378 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4379 int_fnP_cond_tP os::Solaris::_cond_signal;
4380 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4381 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4382 int_fnP_cond_tP os::Solaris::_cond_destroy;
4383 int os::Solaris::_cond_scope = USYNC_THREAD;
4384 
4385 void os::Solaris::synchronization_init() {
4386   if(UseLWPSynchronization) {
4387     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4388     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4389     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4390     os::Solaris::set_mutex_init(lwp_mutex_init);
4391     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4392     os::Solaris::set_mutex_scope(USYNC_THREAD);
4393 
4394     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4395     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4396     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4397     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4398     os::Solaris::set_cond_init(lwp_cond_init);
4399     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4400     os::Solaris::set_cond_scope(USYNC_THREAD);
4401   }
4402   else {
4403     os::Solaris::set_mutex_scope(USYNC_THREAD);
4404     os::Solaris::set_cond_scope(USYNC_THREAD);
4405 
    if (UsePthreads) {
4407       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4408       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4409       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4410       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4411       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4412 
4413       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4414       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4415       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4416       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4417       os::Solaris::set_cond_init(pthread_cond_default_init);
4418       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4419     }
4420     else {
4421       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4422       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4423       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4424       os::Solaris::set_mutex_init(::mutex_init);
4425       os::Solaris::set_mutex_destroy(::mutex_destroy);
4426 
4427       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4428       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4429       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4430       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4431       os::Solaris::set_cond_init(::cond_init);
4432       os::Solaris::set_cond_destroy(::cond_destroy);
4433     }
4434   }
4435 }
4436 
4437 bool os::Solaris::liblgrp_init() {
4438   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4439   if (handle != NULL) {
4440     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4441     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4442     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4443     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4444     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4445     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4446     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4447     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4448                                        dlsym(handle, "lgrp_cookie_stale")));
4449 
4450     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4451     set_lgrp_cookie(c);
4452     return true;
4453   }
4454   return false;
4455 }
4456 
4457 void os::Solaris::misc_sym_init() {
4458   address func;
4459 
4460   // getisax
4461   func = resolve_symbol_lazy("getisax");
4462   if (func != NULL) {
4463     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4464   }
4465 
4466   // meminfo
4467   func = resolve_symbol_lazy("meminfo");
4468   if (func != NULL) {
4469     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4470   }
4471 }
4472 
4473 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4474   assert(_getisax != NULL, "_getisax not set");
4475   return _getisax(array, n);
4476 }
4477 
4478 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4479 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4480 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4481 
4482 void init_pset_getloadavg_ptr(void) {
4483   pset_getloadavg_ptr =
4484     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4485   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4486     warning("pset_getloadavg function not found");
4487   }
4488 }
4489 
4490 int os::Solaris::_dev_zero_fd = -1;
4491 
4492 // this is called _before_ the global arguments have been parsed
4493 void os::init(void) {
4494   _initial_pid = getpid();
4495 
4496   max_hrtime = first_hrtime = gethrtime();
4497 
4498   init_random(1234567);
4499 
4500   page_size = sysconf(_SC_PAGESIZE);
4501   if (page_size == -1)
4502     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4503                   strerror(errno)));
4504   init_page_sizes((size_t) page_size);
4505 
4506   Solaris::initialize_system_info();
4507 
4508   // Initialize misc. symbols as soon as possible, so we can use them
4509   // if we need them.
4510   Solaris::misc_sym_init();
4511 
4512   int fd = ::open("/dev/zero", O_RDWR);
4513   if (fd < 0) {
4514     fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4515   } else {
4516     Solaris::set_dev_zero_fd(fd);
4517 
4518     // Close on exec, child won't inherit.
4519     fcntl(fd, F_SETFD, FD_CLOEXEC);
4520   }
4521 
4522   clock_tics_per_sec = CLK_TCK;
4523 
4524   // check if dladdr1() exists; dladdr1 can provide more information than
4525   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4526   // and is available on linker patches for 5.7 and 5.8.
4527   // libdl.so must have been loaded, this call is just an entry lookup
4528   void * hdl = dlopen("libdl.so", RTLD_NOW);
4529   if (hdl)
4530     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4531 
4532   // (Solaris only) this switches to calls that actually do locking.
4533   ThreadCritical::initialize();
4534 
4535   main_thread = thr_self();
4536 
4537   // Constant minimum stack size allowed. It must be at least
4538   // the minimum of what the OS supports (thr_min_stack()), and
4539   // enough to allow the thread to get to user bytecode execution.
4540   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4541   // If the pagesize of the VM is greater than 8K determine the appropriate
4542   // number of initial guard pages.  The user can change this with the
4543   // command line arguments, if needed.
4544   if (vm_page_size() > 8*K) {
4545     StackYellowPages = 1;
4546     StackRedPages = 1;
4547     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4548   }
4549 }
4550 
4551 // To install functions for atexit system call
4552 extern "C" {
4553   static void perfMemory_exit_helper() {
4554     perfMemory_exit();
4555   }
4556 }
4557 
4558 // this is called _after_ the global arguments have been parsed
4559 jint os::init_2(void) {
4560   // try to enable extended file IO ASAP, see 6431278
4561   os::Solaris::try_enable_extended_io();
4562 
4563   // Allocate a single page and mark it as readable for safepoint polling.  Also
4564   // use this first mmap call to check support for MAP_ALIGN.
4565   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4566                                                       page_size,
4567                                                       MAP_PRIVATE | MAP_ALIGN,
4568                                                       PROT_READ);
4569   if (polling_page == NULL) {
4570     has_map_align = false;
4571     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
4572                                                 PROT_READ);
4573   }
4574 
4575   os::set_polling_page(polling_page);
4576 
4577 #ifndef PRODUCT
  if (Verbose && PrintMiscellaneous)
4579     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
4580 #endif
4581 
4582   if (!UseMembar) {
4583     address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
4584     guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
4585     os::set_memory_serialize_page( mem_serialize_page );
4586 
4587 #ifndef PRODUCT
    if (Verbose && PrintMiscellaneous)
4589       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
4590 #endif
4591   }
4592 
4593   // Check minimum allowable stack size for thread creation and to initialize
4594   // the java system classes, including StackOverflowError - depends on page
4595   // size.  Add a page for compiler2 recursion in main thread.
4596   // Add in 2*BytesPerWord times page size to account for VM stack during
4597   // class initialization depending on 32 or 64 bit VM.
4598   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
4599             (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
4600                     2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
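  // For example (illustrative, hypothetical values rather than the actual
  // defaults): with an 8K page size, StackYellowPages=2, StackRedPages=1,
  // StackShadowPages=20, a 64-bit VM (BytesPerWord=8) and COMPILER2 present,
  // the floor computed above would be (2 + 1 + 20 + 16 + 1) * 8K = 320K.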
4601 
4602   size_t threadStackSizeInBytes = ThreadStackSize * K;
4603   if (threadStackSizeInBytes != 0 &&
4604     threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
4605     tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
4606                   os::Solaris::min_stack_allowed/K);
4607     return JNI_ERR;
4608   }
4609 
  // On systems with a 64kb page size the usable default stack size is
  // quite a bit less.  Increase the stack for 64kb (or any larger than
  // 8kb) pages; this increases virtual memory fragmentation (since we're
  // not creating the stack on a power of 2 boundary).  The real fix for
  // this should be to fix the guard page mechanism.
4616 
4617   if (vm_page_size() > 8*K) {
4618       threadStackSizeInBytes = (threadStackSizeInBytes != 0)
4619          ? threadStackSizeInBytes +
4620            ((StackYellowPages + StackRedPages) * vm_page_size())
4621          : 0;
4622       ThreadStackSize = threadStackSizeInBytes/K;
4623   }
4624 
4625   // Make the stack size a multiple of the page size so that
4626   // the yellow/red zones can be guarded.
4627   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
4628         vm_page_size()));
4629 
4630   Solaris::libthread_init();
4631 
4632   if (UseNUMA) {
4633     if (!Solaris::liblgrp_init()) {
4634       UseNUMA = false;
4635     } else {
4636       size_t lgrp_limit = os::numa_get_groups_num();
4637       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
4638       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
4639       FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
4640       if (lgrp_num < 2) {
4641         // There's only one locality group, disable NUMA.
4642         UseNUMA = false;
4643       }
4644     }
4645     if (!UseNUMA && ForceNUMA) {
4646       UseNUMA = true;
4647     }
4648   }
4649 
4650   Solaris::signal_sets_init();
4651   Solaris::init_signal_mem();
4652   Solaris::install_signal_handlers();
4653 
4654   if (libjsigversion < JSIG_VERSION_1_4_1) {
4655     Maxlibjsigsigs = OLDMAXSIGNUM;
4656   }
4657 
4658   // initialize synchronization primitives to use either thread or
4659   // lwp synchronization (controlled by UseLWPSynchronization)
4660   Solaris::synchronization_init();
4661 
4662   if (MaxFDLimit) {
4663     // set the number of file descriptors to max. print out error
4664     // if getrlimit/setrlimit fails but continue regardless.
4665     struct rlimit nbr_files;
4666     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4667     if (status != 0) {
4668       if (PrintMiscellaneous && (Verbose || WizardMode))
4669         perror("os::init_2 getrlimit failed");
4670     } else {
4671       nbr_files.rlim_cur = nbr_files.rlim_max;
4672       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4673       if (status != 0) {
4674         if (PrintMiscellaneous && (Verbose || WizardMode))
4675           perror("os::init_2 setrlimit failed");
4676       }
4677     }
4678   }
4679 
  // Calculate theoretical max. size of Threads to guard against
  // artificial out-of-memory situations, where all available address
  // space has been reserved by thread stacks. Default stack size is 1Mb.
4683   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
4684     JavaThread::stack_size_at_create() : (1*K*K);
4685   assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris has a maximum of 4Gb of address space for user programs.
  // Calculate the thread limit at which we should start doing Virtual
  // Memory banging. Currently that is when the threads will have used
  // all but 200Mb of space.
4689   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
4690   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
4691 
4692   // at-exit methods are called in the reverse order of their registration.
4693   // In Solaris 7 and earlier, atexit functions are called on return from
4694   // main or as a result of a call to exit(3C). There can be only 32 of
4695   // these functions registered and atexit() does not set errno. In Solaris
4696   // 8 and later, there is no limit to the number of functions registered
4697   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
4698   // functions are called upon dlclose(3DL) in addition to return from main
4699   // and exit(3C).
4700 
4701   if (PerfAllowAtExitRegistration) {
4702     // only register atexit functions if PerfAllowAtExitRegistration is set.
4703     // atexit functions can be delayed until process exit time, which
4704     // can be problematic for embedded VM situations. Embedded VMs should
4705     // call DestroyJavaVM() to assure that VM resources are released.
4706 
4707     // note: perfMemory_exit_helper atexit function may be removed in
4708     // the future if the appropriate cleanup code can be added to the
4709     // VM_Exit VMOperation's doit method.
4710     if (atexit(perfMemory_exit_helper) != 0) {
4711       warning("os::init2 atexit(perfMemory_exit_helper) failed");
4712     }
4713   }
4714 
4715   // Init pset_loadavg function pointer
4716   init_pset_getloadavg_ptr();
4717 
4718   return JNI_OK;
4719 }
4720 
4721 void os::init_3(void) {
4722   return;
4723 }
4724 
4725 // Mark the polling page as unreadable
4726 void os::make_polling_page_unreadable(void) {
4727   if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
4728     fatal("Could not disable polling page");
4729 };
4730 
4731 // Mark the polling page as readable
4732 void os::make_polling_page_readable(void) {
4733   if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
4734     fatal("Could not enable polling page");
4735 };
4736 
4737 // OS interface.
4738 
4739 bool os::check_heap(bool force) { return true; }
4740 
4741 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
4742 static vsnprintf_t sol_vsnprintf = NULL;
4743 
4744 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
4745   if (!sol_vsnprintf) {
    // search for the named symbol in the objects that were loaded after libjvm
4747     void* where = RTLD_NEXT;
4748     if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
4749         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
    if (!sol_vsnprintf) {
      // search for the named symbol in the objects that were loaded before libjvm
4752       where = RTLD_DEFAULT;
4753       if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
4754         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
4755       assert(sol_vsnprintf != NULL, "vsnprintf not found");
4756     }
4757   }
4758   return (*sol_vsnprintf)(buf, count, fmt, argptr);
4759 }
4760 
4761 
4762 // Is a (classpath) directory empty?
4763 bool os::dir_is_empty(const char* path) {
4764   DIR *dir = NULL;
4765   struct dirent *ptr;
4766 
4767   dir = opendir(path);
4768   if (dir == NULL) return true;
4769 
4770   /* Scan the directory */
4771   bool result = true;
4772   char buf[sizeof(struct dirent) + MAX_PATH];
4773   struct dirent *dbuf = (struct dirent *) buf;
  while (result && (ptr = os::readdir(dir, dbuf)) != NULL) {
4775     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4776       result = false;
4777     }
4778   }
4779   closedir(dir);
4780   return result;
4781 }
4782 
4783 // This code originates from JDK's sysOpen and open64_w
4784 // from src/solaris/hpi/src/system_md.c
4785 
4786 #ifndef O_DELETE
4787 #define O_DELETE 0x10000
4788 #endif
4789 
4790 // Open a file. Unlink the file immediately after open returns
4791 // if the specified oflag has the O_DELETE flag set.
4792 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
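//
// Illustrative (hypothetical) use: opening a scratch file that disappears
// from the filesystem namespace as soon as it is opened, e.g.
//   int fd = os::open("/tmp/scratch.tmp", O_RDWR | O_CREAT | O_DELETE, 0666);
// The descriptor remains usable even though the directory entry is unlinked.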
4793 
4794 int os::open(const char *path, int oflag, int mode) {
4795   if (strlen(path) > MAX_PATH - 1) {
4796     errno = ENAMETOOLONG;
4797     return -1;
4798   }
4799   int fd;
4800   int o_delete = (oflag & O_DELETE);
4801   oflag = oflag & ~O_DELETE;
4802 
4803   fd = ::open64(path, oflag, mode);
4804   if (fd == -1) return -1;
4805 
4806   //If the open succeeded, the file might still be a directory
4807   {
4808     struct stat64 buf64;
4809     int ret = ::fstat64(fd, &buf64);
4810     int st_mode = buf64.st_mode;
4811 
4812     if (ret != -1) {
4813       if ((st_mode & S_IFMT) == S_IFDIR) {
4814         errno = EISDIR;
4815         ::close(fd);
4816         return -1;
4817       }
4818     } else {
4819       ::close(fd);
4820       return -1;
4821     }
4822   }
4823     /*
4824      * 32-bit Solaris systems suffer from:
4825      *
4826      * - an historical default soft limit of 256 per-process file
4827      *   descriptors that is too low for many Java programs.
4828      *
4829      * - a design flaw where file descriptors created using stdio
4830      *   fopen must be less than 256, _even_ when the first limit above
4831      *   has been raised.  This can cause calls to fopen (but not calls to
4832      *   open, for example) to fail mysteriously, perhaps in 3rd party
4833      *   native code (although the JDK itself uses fopen).  One can hardly
4834      *   criticize them for using this most standard of all functions.
4835      *
     * We attempt to make everything work anyway by:
4837      *
4838      * - raising the soft limit on per-process file descriptors beyond
4839      *   256
4840      *
4841      * - As of Solaris 10u4, we can request that Solaris raise the 256
4842      *   stdio fopen limit by calling function enable_extended_FILE_stdio.
4843      *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
4844      *
4845      * - If we are stuck on an old (pre 10u4) Solaris system, we can
4846      *   workaround the bug by remapping non-stdio file descriptors below
4847      *   256 to ones beyond 256, which is done below.
4848      *
4849      * See:
4850      * 1085341: 32-bit stdio routines should support file descriptors >255
4851      * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
4852      * 6431278: Netbeans crash on 32 bit Solaris: need to call
4853      *          enable_extended_FILE_stdio() in VM initialisation
4854      * Giri Mandalika's blog
4855      * http://technopark02.blogspot.com/2005_05_01_archive.html
4856      */
4857 #ifndef  _LP64
4858      if ((!enabled_extended_FILE_stdio) && fd < 256) {
4859          int newfd = ::fcntl(fd, F_DUPFD, 256);
4860          if (newfd != -1) {
4861              ::close(fd);
4862              fd = newfd;
4863          }
4864      }
4865 #endif // 32-bit Solaris
4866     /*
4867      * All file descriptors that are opened in the JVM and not
4868      * specifically destined for a subprocess should have the
4869      * close-on-exec flag set.  If we don't set it, then careless 3rd
4870      * party native code might fork and exec without closing all
4871      * appropriate file descriptors (e.g. as we do in closeDescriptors in
4872      * UNIXProcess.c), and this in turn might:
4873      *
4874      * - cause end-of-file to fail to be detected on some file
4875      *   descriptors, resulting in mysterious hangs, or
4876      *
4877      * - might cause an fopen in the subprocess to fail on a system
4878      *   suffering from bug 1085341.
4879      *
4880      * (Yes, the default setting of the close-on-exec flag is a Unix
4881      * design flaw)
4882      *
4883      * See:
4884      * 1085341: 32-bit stdio routines should support file descriptors >255
4885      * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4886      * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4887      */
4888 #ifdef FD_CLOEXEC
4889     {
4890         int flags = ::fcntl(fd, F_GETFD);
4891         if (flags != -1)
4892             ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4893     }
4894 #endif
4895 
4896   if (o_delete != 0) {
4897     ::unlink(path);
4898   }
4899   return fd;
4900 }
4901 
4902 // create binary file, rewriting existing file if required
4903 int os::create_binary_file(const char* path, bool rewrite_existing) {
4904   int oflags = O_WRONLY | O_CREAT;
4905   if (!rewrite_existing) {
4906     oflags |= O_EXCL;
4907   }
4908   return ::open64(path, oflags, S_IREAD | S_IWRITE);
4909 }
4910 
4911 // return current position of file pointer
4912 jlong os::current_file_offset(int fd) {
4913   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4914 }
4915 
4916 // move file pointer to the specified offset
4917 jlong os::seek_to_file_offset(int fd, jlong offset) {
4918   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4919 }
4920 
4921 jlong os::lseek(int fd, jlong offset, int whence) {
4922   return (jlong) ::lseek64(fd, offset, whence);
4923 }
4924 
4925 char * os::native_path(char *path) {
4926   return path;
4927 }
4928 
4929 int os::ftruncate(int fd, jlong length) {
4930   return ::ftruncate64(fd, length);
4931 }
4932 
4933 int os::fsync(int fd)  {
4934   RESTARTABLE_RETURN_INT(::fsync(fd));
4935 }
4936 
4937 int os::available(int fd, jlong *bytes) {
4938   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
4939           "Assumed _thread_in_native");
4940   jlong cur, end;
4941   int mode;
4942   struct stat64 buf64;
4943 
4944   if (::fstat64(fd, &buf64) >= 0) {
4945     mode = buf64.st_mode;
4946     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      int n, ioctl_return;

      RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
      if (ioctl_return >= 0) {
        *bytes = n;
4952         return 1;
4953       }
4954     }
4955   }
4956   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4957     return 0;
4958   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4959     return 0;
4960   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4961     return 0;
4962   }
4963   *bytes = end - cur;
4964   return 1;
4965 }
4966 
4967 // Map a block of memory.
4968 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4969                      char *addr, size_t bytes, bool read_only,
4970                      bool allow_exec) {
4971   int prot;
4972   int flags;
4973 
4974   if (read_only) {
4975     prot = PROT_READ;
4976     flags = MAP_SHARED;
4977   } else {
4978     prot = PROT_READ | PROT_WRITE;
4979     flags = MAP_PRIVATE;
4980   }
4981 
4982   if (allow_exec) {
4983     prot |= PROT_EXEC;
4984   }
4985 
4986   if (addr != NULL) {
4987     flags |= MAP_FIXED;
4988   }
4989 
4990   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
4991                                      fd, file_offset);
4992   if (mapped_address == MAP_FAILED) {
4993     return NULL;
4994   }
4995   return mapped_address;
4996 }
4997 
4998 
4999 // Remap a block of memory.
5000 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5001                        char *addr, size_t bytes, bool read_only,
5002                        bool allow_exec) {
5003   // same as map_memory() on this OS
5004   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5005                         allow_exec);
5006 }
5007 
5008 
5009 // Unmap a block of memory.
5010 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5011   return munmap(addr, bytes) == 0;
5012 }
5013 
5014 void os::pause() {
5015   char filename[MAX_PATH];
5016   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5018   } else {
5019     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5020   }
5021 
5022   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5023   if (fd != -1) {
5024     struct stat buf;
5025     ::close(fd);
5026     while (::stat(filename, &buf) == 0) {
5027       (void)::poll(NULL, 0, 100);
5028     }
5029   } else {
5030     jio_fprintf(stderr,
5031       "Could not open pause file '%s', continuing immediately.\n", filename);
5032   }
5033 }
5034 
5035 #ifndef PRODUCT
5036 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5037 // Turn this on if you need to trace synch operations.
5038 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5039 // and call record_synch_enable and record_synch_disable
5040 // around the computation of interest.
5041 
5042 void record_synch(char* name, bool returning);  // defined below
5043 
5044 class RecordSynch {
5045   char* _name;
5046  public:
5047   RecordSynch(char* name) :_name(name)
5048                  { record_synch(_name, false); }
5049   ~RecordSynch() { record_synch(_name,   true);  }
5050 };
5051 
5052 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
5053 extern "C" ret name params {                                    \
5054   typedef ret name##_t params;                                  \
5055   static name##_t* implem = NULL;                               \
5056   static int callcount = 0;                                     \
5057   if (implem == NULL) {                                         \
5058     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
5059     if (implem == NULL)  fatal(dlerror());                      \
5060   }                                                             \
5061   ++callcount;                                                  \
5062   RecordSynch _rs(#name);                                       \
5063   inner;                                                        \
5064   return implem args;                                           \
5065 }
5066 // in dbx, examine callcounts this way:
5067 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
5068 
5069 #define CHECK_POINTER_OK(p) \
5070   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
5071 #define CHECK_MU \
5072   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5073 #define CHECK_CV \
5074   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5075 #define CHECK_P(p) \
5076   if (!CHECK_POINTER_OK(p))  fatal(false,  "Pointer must be in C heap only.");
5077 
5078 #define CHECK_MUTEX(mutex_op) \
5079 CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5080 
5081 CHECK_MUTEX(   mutex_lock)
5082 CHECK_MUTEX(  _mutex_lock)
5083 CHECK_MUTEX( mutex_unlock)
5084 CHECK_MUTEX(_mutex_unlock)
5085 CHECK_MUTEX( mutex_trylock)
5086 CHECK_MUTEX(_mutex_trylock)
5087 
5088 #define CHECK_COND(cond_op) \
5089 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);
5090 
5091 CHECK_COND( cond_wait);
5092 CHECK_COND(_cond_wait);
5093 CHECK_COND(_cond_wait_cancel);
5094 
5095 #define CHECK_COND2(cond_op) \
5096 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);
5097 
5098 CHECK_COND2( cond_timedwait);
5099 CHECK_COND2(_cond_timedwait);
5100 CHECK_COND2(_cond_timedwait_cancel);
5101 
5102 // do the _lwp_* versions too
5103 #define mutex_t lwp_mutex_t
5104 #define cond_t  lwp_cond_t
5105 CHECK_MUTEX(  _lwp_mutex_lock)
5106 CHECK_MUTEX(  _lwp_mutex_unlock)
5107 CHECK_MUTEX(  _lwp_mutex_trylock)
5108 CHECK_MUTEX( __lwp_mutex_lock)
5109 CHECK_MUTEX( __lwp_mutex_unlock)
5110 CHECK_MUTEX( __lwp_mutex_trylock)
5111 CHECK_MUTEX(___lwp_mutex_lock)
5112 CHECK_MUTEX(___lwp_mutex_unlock)
5113 
5114 CHECK_COND(  _lwp_cond_wait);
5115 CHECK_COND( __lwp_cond_wait);
5116 CHECK_COND(___lwp_cond_wait);
5117 
5118 CHECK_COND2(  _lwp_cond_timedwait);
5119 CHECK_COND2( __lwp_cond_timedwait);
5120 #undef mutex_t
5121 #undef cond_t
5122 
5123 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5124 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5125 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5126 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5127 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5128 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5129 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5130 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5131 
5132 
5133 // recording machinery:
5134 
5135 enum { RECORD_SYNCH_LIMIT = 200 };
5136 char* record_synch_name[RECORD_SYNCH_LIMIT];
5137 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5138 bool record_synch_returning[RECORD_SYNCH_LIMIT];
5139 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5140 int record_synch_count = 0;
5141 bool record_synch_enabled = false;
5142 
5143 // in dbx, examine recorded data this way:
5144 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5145 
5146 void record_synch(char* name, bool returning) {
5147   if (record_synch_enabled) {
5148     if (record_synch_count < RECORD_SYNCH_LIMIT) {
5149       record_synch_name[record_synch_count] = name;
5150       record_synch_returning[record_synch_count] = returning;
5151       record_synch_thread[record_synch_count] = thr_self();
5152       record_synch_arg0ptr[record_synch_count] = &name;
5153       record_synch_count++;
5154     }
5155     // put more checking code here:
5156     // ...
5157   }
5158 }
5159 
5160 void record_synch_enable() {
5161   // start collecting trace data, if not already doing so
5162   if (!record_synch_enabled)  record_synch_count = 0;
5163   record_synch_enabled = true;
5164 }
5165 
5166 void record_synch_disable() {
5167   // stop collecting trace data
5168   record_synch_enabled = false;
5169 }
5170 
5171 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5172 #endif // PRODUCT
5173 
5174 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5175 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5176                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5177 
5178 
5179 // JVMTI & JVM monitoring and management support
5180 // The thread_cpu_time() and current_thread_cpu_time() are only
5181 // supported if is_thread_cpu_time_supported() returns true.
5182 // They are not supported on Solaris T1.
5183 
5184 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5185 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5186 // of a thread.
5187 //
5188 // current_thread_cpu_time() and thread_cpu_time(Thread *)
5189 // returns the fast estimate available on the platform.
5190 
5191 // hrtime_t gethrvtime() return value includes
5192 // user time but does not include system time
5193 jlong os::current_thread_cpu_time() {
5194   return (jlong) gethrvtime();
5195 }
5196 
5197 jlong os::thread_cpu_time(Thread *thread) {
5198   // return user level CPU time only to be consistent with
5199   // what current_thread_cpu_time returns.
5200   // thread_cpu_time_info() must be changed if this changes
5201   return os::thread_cpu_time(thread, false /* user time only */);
5202 }
5203 
5204 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5205   if (user_sys_cpu_time) {
5206     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5207   } else {
5208     return os::current_thread_cpu_time();
5209   }
5210 }
5211 
5212 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5213   char proc_name[64];
5214   int count;
5215   prusage_t prusage;
5216   jlong lwp_time;
5217   int fd;
5218 
5219   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5220                      getpid(),
5221                      thread->osthread()->lwp_id());
5222   fd = ::open(proc_name, O_RDONLY);
5223   if ( fd == -1 ) return -1;
5224 
5225   do {
5226     count = ::pread(fd,
5227                   (void *)&prusage.pr_utime,
5228                   thr_time_size,
5229                   thr_time_off);
5230   } while (count < 0 && errno == EINTR);
5231   ::close(fd);
5232   if ( count < 0 ) return -1;
5233 
5234   if (user_sys_cpu_time) {
5235     // user + system CPU time
5236     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5237                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5238                  (jlong)prusage.pr_stime.tv_nsec +
5239                  (jlong)prusage.pr_utime.tv_nsec;
5240   } else {
5241     // user level CPU time only
5242     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5243                 (jlong)prusage.pr_utime.tv_nsec;
5244   }
5245 
5246   return(lwp_time);
5247 }
5248 
5249 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5250   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5251   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5252   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5253   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5254 }
5255 
5256 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5257   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5258   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5259   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5260   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5261 }
5262 
5263 bool os::is_thread_cpu_time_supported() {
5264   return true;
5265 }
5266 
5267 // System loadavg support.  Returns -1 if load average cannot be obtained.
5268 // Return the load average for our processor set if the primitive exists
5269 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
5270 int os::loadavg(double loadavg[], int nelem) {
5271   if (pset_getloadavg_ptr != NULL) {
5272     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5273   } else {
5274     return ::getloadavg(loadavg, nelem);
5275   }
5276 }
5277 
5278 //---------------------------------------------------------------------------------
5279 
5280 bool os::find(address addr, outputStream* st) {
5281   Dl_info dlinfo;
5282   memset(&dlinfo, 0, sizeof(dlinfo));
5283   if (dladdr(addr, &dlinfo) != 0) {
5284     st->print(PTR_FORMAT ": ", addr);
5285     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5286       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5287     } else if (dlinfo.dli_fbase != NULL)
5288       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5289     else
5290       st->print("<absolute address>");
5291     if (dlinfo.dli_fname != NULL) {
5292       st->print(" in %s", dlinfo.dli_fname);
5293     }
5294     if (dlinfo.dli_fbase != NULL) {
5295       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5296     }
5297     st->cr();
5298 
5299     if (Verbose) {
5300       // decode some bytes around the PC
5301       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5302       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5303       address       lowest = (address) dlinfo.dli_sname;
5304       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
5305       if (begin < lowest)  begin = lowest;
5306       Dl_info dlinfo2;
5307       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5308           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
5309         end = (address) dlinfo2.dli_saddr;
5310       Disassembler::decode(begin, end, st);
5311     }
5312     return true;
5313   }
5314   return false;
5315 }
5316 
// The following function has been added to support HotSparc's libjvm.so running
5318 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
5319 // src/solaris/hpi/native_threads in the EVM codebase.
5320 //
5321 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5322 // libraries and should thus be removed. We will leave it behind for a while
// until we no longer want to be able to run on top of 1.3.0 Solaris production
5324 // JDK. See 4341971.
5325 
5326 #define STACK_SLACK 0x800
5327 
5328 extern "C" {
5329   intptr_t sysThreadAvailableStackWithSlack() {
5330     stack_t st;
5331     intptr_t retval, stack_top;
5332     retval = thr_stksegment(&st);
5333     assert(retval == 0, "incorrect return value from thr_stksegment");
5334     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5335     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
    stack_top = (intptr_t)st.ss_sp - st.ss_size;
5337     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5338   }
5339 }
5340 
5341 // ObjectMonitor park-unpark infrastructure ...
5342 //
5343 // We implement Solaris and Linux PlatformEvents with the
5344 // obvious condvar-mutex-flag triple.
5345 // Another alternative that works quite well is pipes:
5346 // Each PlatformEvent consists of a pipe-pair.
5347 // The thread associated with the PlatformEvent
5348 // calls park(), which reads from the input end of the pipe.
5349 // Unpark() writes into the other end of the pipe.
5350 // The write-side of the pipe must be set NDELAY.
5351 // Unfortunately pipes consume a large # of handles.
5352 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5353 // Using pipes for the 1st few threads might be workable, however.
5354 //
5355 // park() is permitted to return spuriously.
5356 // Callers of park() should wrap the call to park() in
5357 // an appropriate loop.  A litmus test for the correct
5358 // usage of park is the following: if park() were modified
5359 // to immediately return 0 your code should still work,
5360 // albeit degenerating to a spin loop.
5361 //
5362 // An interesting optimization for park() is to use a trylock()
5363 // to attempt to acquire the mutex.  If the trylock() fails
5364 // then we know that a concurrent unpark() operation is in-progress.
5365 // in that case the park() code could simply set _count to 0
5366 // and return immediately.  The subsequent park() operation *might*
5367 // return immediately.  That's harmless as the caller of park() is
5368 // expected to loop.  By using trylock() we will have avoided a
// context switch caused by contention on the per-thread mutex.
5370 //
5371 // TODO-FIXME:
5372 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
5373 //     objectmonitor implementation.
5374 // 2.  Collapse the JSR166 parker event, and the
5375 //     objectmonitor ParkEvent into a single "Event" construct.
5376 // 3.  In park() and unpark() add:
5377 //     assert (Thread::current() == AssociatedWith).
5378 // 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
5379 //     1-out-of-N park() operations will return immediately.
5380 //
5381 // _Event transitions in park()
5382 //   -1 => -1 : illegal
5383 //    1 =>  0 : pass - return immediately
5384 //    0 => -1 : block
5385 //
5386 // _Event serves as a restricted-range semaphore.
5387 //
5388 // Another possible encoding of _Event would be with
5389 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5390 //
5391 // TODO-FIXME: add DTRACE probes for:
5392 // 1.   Tx parks
5393 // 2.   Ty unparks Tx
5394 // 3.   Tx resumes from park
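//
// An illustrative sketch (not VM code) of the caller-side loop described
// above; ev stands for a hypothetical os::PlatformEvent*.  Because park()
// may return spuriously, the caller re-tests its condition on every pass:
//
//   while (!condition_of_interest()) {   // hypothetical predicate
//     ev->park();                        // may return early; that's fine
//   }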
5395 
5396 
5397 // value determined through experimentation
5398 #define ROUNDINGFIX 11
5399 
5400 // utility to compute the abstime argument to timedwait.
5401 // TODO-FIXME: switch from compute_abstime() to unpackTime().
5402 
5403 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5404   // millis is the relative timeout time
5405   // abstime will be the absolute timeout time
5406   if (millis < 0)  millis = 0;
5407   struct timeval now;
5408   int status = gettimeofday(&now, NULL);
5409   assert(status == 0, "gettimeofday");
5410   jlong seconds = millis / 1000;
5411   jlong max_wait_period;
5412 
5413   if (UseLWPSynchronization) {
5414     // forward port of fix for 4275818 (not sleeping long enough)
5415     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5416     // _lwp_cond_timedwait() used a round_down algorithm rather
5417     // than a round_up. For millis less than our roundfactor
5418     // it rounded down to 0 which doesn't meet the spec.
5419     // For millis > roundfactor we may return a bit sooner, but
5420     // since we can not accurately identify the patch level and
5421     // this has already been fixed in Solaris 9 and 8 we will
5422     // leave it alone rather than always rounding down.
5423 
    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
5428   } else {
5429     max_wait_period = 50000000;
5430   }
5431   millis %= 1000;
5432   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5433      seconds = max_wait_period;
5434   }
5435   abstime->tv_sec = now.tv_sec  + seconds;
5436   long       usec = now.tv_usec + millis * 1000;
5437   if (usec >= 1000000) {
5438     abstime->tv_sec += 1;
5439     usec -= 1000000;
5440   }
5441   abstime->tv_nsec = usec * 1000;
5442   return abstime;
5443 }
5444 
5445 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
5446 // Conceptually TryPark() should be equivalent to park(0).
5447 
5448 int os::PlatformEvent::TryPark() {
5449   for (;;) {
5450     const int v = _Event ;
5451     guarantee ((v == 0) || (v == 1), "invariant") ;
5452     if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
5453   }
5454 }
5455 
5456 void os::PlatformEvent::park() {           // AKA: down()
5457   // Invariant: Only the thread associated with the Event/PlatformEvent
5458   // may call park().
5459   int v ;
5460   for (;;) {
5461       v = _Event ;
5462       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5463   }
5464   guarantee (v >= 0, "invariant") ;
5465   if (v == 0) {
5466      // Do this the hard way by blocking ...
5467      // See http://monaco.sfbay/detail.jsf?cr=5094058.
5468      // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5469      // Only for SPARC >= V8PlusA
5470 #if defined(__sparc) && defined(COMPILER2)
5471      if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5472 #endif
5473      int status = os::Solaris::mutex_lock(_mutex);
5474      assert_status(status == 0, status,  "mutex_lock");
5475      guarantee (_nParked == 0, "invariant") ;
5476      ++ _nParked ;
5477      while (_Event < 0) {
5478         // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5479         // Treat this the same as if the wait was interrupted
5480         // With usr/lib/lwp going to kernel, always handle ETIME
5481         status = os::Solaris::cond_wait(_cond, _mutex);
5482         if (status == ETIME) status = EINTR ;
5483         assert_status(status == 0 || status == EINTR, status, "cond_wait");
5484      }
5485      -- _nParked ;
5486      _Event = 0 ;
5487      status = os::Solaris::mutex_unlock(_mutex);
5488      assert_status(status == 0, status, "mutex_unlock");
5489     // Paranoia to ensure our locked and lock-free paths interact
5490     // correctly with each other.
5491     OrderAccess::fence();
5492   }
5493 }
5494 
5495 int os::PlatformEvent::park(jlong millis) {
5496   guarantee (_nParked == 0, "invariant") ;
5497   int v ;
5498   for (;;) {
5499       v = _Event ;
5500       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5501   }
5502   guarantee (v >= 0, "invariant") ;
5503   if (v != 0) return OS_OK ;
5504 
5505   int ret = OS_TIMEOUT;
5506   timestruc_t abst;
5507   compute_abstime (&abst, millis);
5508 
5509   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5510   // For Solaris SPARC set fprs.FEF=0 prior to parking.
5511   // Only for SPARC >= V8PlusA
5512 #if defined(__sparc) && defined(COMPILER2)
5513  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5514 #endif
5515   int status = os::Solaris::mutex_lock(_mutex);
5516   assert_status(status == 0, status, "mutex_lock");
5517   guarantee (_nParked == 0, "invariant") ;
5518   ++ _nParked ;
5519   while (_Event < 0) {
5520      int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5521      assert_status(status == 0 || status == EINTR ||
5522                    status == ETIME || status == ETIMEDOUT,
5523                    status, "cond_timedwait");
5524      if (!FilterSpuriousWakeups) break ;                // previous semantics
5525      if (status == ETIME || status == ETIMEDOUT) break ;
5526      // We consume and ignore EINTR and spurious wakeups.
5527   }
5528   -- _nParked ;
5529   if (_Event >= 0) ret = OS_OK ;
5530   _Event = 0 ;
5531   status = os::Solaris::mutex_unlock(_mutex);
5532   assert_status(status == 0, status, "mutex_unlock");
5533   // Paranoia to ensure our locked and lock-free paths interact
5534   // correctly with each other.
5535   OrderAccess::fence();
5536   return ret;
5537 }
5538 
5539 void os::PlatformEvent::unpark() {
5540   // Transitions for _Event:
5541   //    0 :=> 1
5542   //    1 :=> 1
5543   //   -1 :=> either 0 or 1; must signal target thread
5544   //          That is, we can safely transition _Event from -1 to either
5545   //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
5546   //          unpark() calls.
5547   // See also: "Semaphores in Plan 9" by Mullender & Cox
5548   //
5549   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5550   // that it will take two back-to-back park() calls for the owning
5551   // thread to block. This has the benefit of forcing a spurious return
5552   // from the first park() call after an unpark() call which will help
5553   // shake out uses of park() and unpark() without condition variables.
5554 
5555   if (Atomic::xchg(1, &_Event) >= 0) return;
5556 
5557   // If the thread associated with the event was parked, wake it.
5558   // Wait for the thread assoc with the PlatformEvent to vacate.
5559   int status = os::Solaris::mutex_lock(_mutex);
5560   assert_status(status == 0, status, "mutex_lock");
5561   int AnyWaiters = _nParked;
5562   status = os::Solaris::mutex_unlock(_mutex);
5563   assert_status(status == 0, status, "mutex_unlock");
5564   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5565   if (AnyWaiters != 0) {
    // We intentionally signal *after* dropping the lock
5567     // to avoid a common class of futile wakeups.
5568     status = os::Solaris::cond_signal(_cond);
5569     assert_status(status == 0, status, "cond_signal");
5570   }
5571 }
5572 
5573 // JSR166
5574 // -------------------------------------------------------
5575 
5576 /*
5577  * The solaris and linux implementations of park/unpark are fairly
5578  * conservative for now, but can be improved. They currently use a
5579  * mutex/condvar pair, plus _counter.
5580  * Park decrements _counter if > 0, else does a condvar wait.  Unpark
5581  * sets count to 1 and signals condvar.  Only one thread ever waits
5582  * on the condvar. Contention seen when trying to park implies that someone
5583  * is unparking you, so don't wait. And spurious returns are fine, so there
5584  * is no need to track notifications.
5585  */
5586 
5587 #define MAX_SECS 100000000
5588 /*
5589  * This code is common to linux and solaris and will be moved to a
5590  * common place in dolphin.
5591  *
5592  * The passed in time value is either a relative time in nanoseconds
5593  * or an absolute time in milliseconds. Either way it has to be unpacked
5594  * into suitable seconds and nanoseconds components and stored in the
5595  * given timespec structure.
 * Given that time is a 64-bit value and the time_t used in the timespec is
 * only a signed-32-bit value (except on 64-bit Linux), we have to watch for
 * overflow if times far in the future are given. Further, on Solaris versions
 * prior to 10 there is a restriction (see cond_timedwait) that the specified
 * number of seconds, in abstime, is less than current_time + 100,000,000.
5601  * As it will be 28 years before "now + 100000000" will overflow we can
5602  * ignore overflow and just impose a hard-limit on seconds using the value
5603  * of "now + 100,000,000". This places a limit on the timeout of about 3.17
5604  * years from "now".
5605  */
5606 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5607   assert (time > 0, "convertTime");
5608 
5609   struct timeval now;
5610   int status = gettimeofday(&now, NULL);
5611   assert(status == 0, "gettimeofday");
5612 
5613   time_t max_secs = now.tv_sec + MAX_SECS;
5614 
5615   if (isAbsolute) {
5616     jlong secs = time / 1000;
5617     if (secs > max_secs) {
5618       absTime->tv_sec = max_secs;
5619     }
5620     else {
5621       absTime->tv_sec = secs;
5622     }
5623     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5624   }
5625   else {
5626     jlong secs = time / NANOSECS_PER_SEC;
5627     if (secs >= MAX_SECS) {
5628       absTime->tv_sec = max_secs;
5629       absTime->tv_nsec = 0;
5630     }
5631     else {
5632       absTime->tv_sec = now.tv_sec + secs;
5633       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5634       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5635         absTime->tv_nsec -= NANOSECS_PER_SEC;
5636         ++absTime->tv_sec; // note: this must be <= max_secs
5637       }
5638     }
5639   }
5640   assert(absTime->tv_sec >= 0, "tv_sec < 0");
5641   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5642   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5643   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5644 }
5645 
5646 void Parker::park(bool isAbsolute, jlong time) {
5647   // Ideally we'd do something useful while spinning, such
5648   // as calling unpackTime().
5649 
5650   // Optional fast-path check:
5651   // Return immediately if a permit is available.
5652   // We depend on Atomic::xchg() having full barrier semantics
5653   // since we are doing a lock-free update to _counter.
5654   if (Atomic::xchg(0, &_counter) > 0) return;
5655 
5656   // Optional fast-exit: Check interrupt before trying to wait
5657   Thread* thread = Thread::current();
5658   assert(thread->is_Java_thread(), "Must be JavaThread");
5659   JavaThread *jt = (JavaThread *)thread;
5660   if (Thread::is_interrupted(thread, false)) {
5661     return;
5662   }
5663 
5664   // First, demultiplex/decode time arguments
5665   timespec absTime;
5666   if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
5667     return;
5668   }
5669   if (time > 0) {
5670     // Warning: this code might be exposed to the old Solaris time
5671     // round-down bugs.  Grep "roundingFix" for details.
5672     unpackTime(&absTime, isAbsolute, time);
5673   }
5674 
5675   // Enter safepoint region
5676   // Beware of deadlocks such as 6317397.
5677   // The per-thread Parker:: _mutex is a classic leaf-lock.
5678   // In particular a thread must never block on the Threads_lock while
5679   // holding the Parker:: mutex.  If safepoints are pending both the
5680   // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
5681   ThreadBlockInVM tbivm(jt);
5682 
  // Don't wait if we cannot get the lock, since interference arises from
  // unblocking.  Also, check for interrupt before trying to wait.
5685   if (Thread::is_interrupted(thread, false) ||
5686       os::Solaris::mutex_trylock(_mutex) != 0) {
5687     return;
5688   }
5689 
5690   int status ;
5691 
5692   if (_counter > 0)  { // no wait needed
5693     _counter = 0;
5694     status = os::Solaris::mutex_unlock(_mutex);
5695     assert (status == 0, "invariant") ;
5696     // Paranoia to ensure our locked and lock-free paths interact
5697     // correctly with each other and Java-level accesses.
5698     OrderAccess::fence();
5699     return;
5700   }
5701 
5702 #ifdef ASSERT
5703   // Don't catch signals while blocked; let the running threads have the signals.
5704   // (This allows a debugger to break into the running thread.)
5705   sigset_t oldsigs;
5706   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
5707   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
5708 #endif
5709 
5710   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5711   jt->set_suspend_equivalent();
5712   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5713 
5714   // Do this the hard way by blocking ...
5715   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5716   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5717   // Only for SPARC >= V8PlusA
5718 #if defined(__sparc) && defined(COMPILER2)
5719   if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5720 #endif
5721 
5722   if (time == 0) {
5723     status = os::Solaris::cond_wait (_cond, _mutex) ;
5724   } else {
5725     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
5726   }
5727   // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of Solaris.
5729   assert_status(status == 0 || status == EINTR ||
5730                 status == ETIME || status == ETIMEDOUT,
5731                 status, "cond_timedwait");
5732 
5733 #ifdef ASSERT
5734   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
5735 #endif
5736   _counter = 0 ;
5737   status = os::Solaris::mutex_unlock(_mutex);
5738   assert_status(status == 0, status, "mutex_unlock") ;
5739   // Paranoia to ensure our locked and lock-free paths interact
5740   // correctly with each other and Java-level accesses.
5741   OrderAccess::fence();
5742 
5743   // If externally suspended while waiting, re-suspend
5744   if (jt->handle_special_suspend_equivalent_condition()) {
5745     jt->java_suspend_self();
5746   }
5747 }
5748 
5749 void Parker::unpark() {
  int s, status;
  status = os::Solaris::mutex_lock(_mutex);
  assert(status == 0, "invariant");
  s = _counter;
  _counter = 1;
  status = os::Solaris::mutex_unlock(_mutex);
  assert(status == 0, "invariant");

  if (s < 1) {
    status = os::Solaris::cond_signal(_cond);
    assert(status == 0, "invariant");
  }
5762 }
5763 
5764 extern char** environ;
5765 
5766 // Run the specified command in a separate process. Return its exit value,
5767 // or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from a signal handler. It
5769 // doesn't block SIGINT et al.
5770 int os::fork_and_exec(char* cmd) {
5771   char * argv[4];
5772   argv[0] = (char *)"sh";
5773   argv[1] = (char *)"-c";
5774   argv[2] = cmd;
5775   argv[3] = NULL;
5776 
  // fork() is async-signal-safe; fork1() is not, so it can't be used in a
  // signal handler.
5778   pid_t pid;
5779   Thread* t = ThreadLocalStorage::get_thread_slow();
5780   if (t != NULL && t->is_inside_signal_handler()) {
5781     pid = fork();
5782   } else {
5783     pid = fork1();
5784   }
5785 
5786   if (pid < 0) {
5787     // fork failed
5788     warning("fork failed: %s", strerror(errno));
5789     return -1;
5790 
5791   } else if (pid == 0) {
5792     // child process
5793 
5794     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
5795     execve("/usr/bin/sh", argv, environ);
5796 
5797     // execve failed
5798     _exit(-1);
5799 
5800   } else  {
5801     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
5802     // care about the actual exit code, for now.
5803 
5804     int status;
5805 
    // Wait for the child process to exit.  This returns immediately if
    // the child has already exited.
    while (waitpid(pid, &status, 0) < 0) {
      switch (errno) {
      case ECHILD: return 0;
      case EINTR: break;
      default: return -1;
      }
    }
5815 
    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal.
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through.
      return status;
    }
5830   }
5831 }
5832 
// is_headless_jre()
//
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
// in order to report whether we are running in a headless JRE.
//
// Since JDK 8, xawt/libmawt.so has been moved into the same directory
// as libawt.so and renamed to libawt_xawt.so.
//
bool os::is_headless_jre() {
  struct stat statbuf;
  char buf[MAXPATHLEN];
  char libmawtpath[MAXPATHLEN];
  const char *xawtstr  = "/xawt/libmawt.so";
  const char *new_xawtstr = "/libawt_xawt.so";
  char *p;

  // Get path to libjvm.so
  os::jvm_path(buf, sizeof(buf));

  // Get rid of libjvm.so
  p = strrchr(buf, '/');
  if (p == NULL) return false;
  else *p = '\0';

  // Get rid of client or server
  p = strrchr(buf, '/');
  if (p == NULL) return false;
  else *p = '\0';

  // check xawt/libmawt.so
  strcpy(libmawtpath, buf);
  strcat(libmawtpath, xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  // check libawt_xawt.so
  strcpy(libmawtpath, buf);
  strcat(libmawtpath, new_xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  return true;
}
5874 
5875 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
5876   size_t res;
5877   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5878           "Assumed _thread_in_native");
5879   RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
5880   return res;
5881 }
5882 
5883 int os::close(int fd) {
5884   return ::close(fd);
5885 }
5886 
5887 int os::socket_close(int fd) {
5888   return ::close(fd);
5889 }
5890 
5891 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5892   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5893           "Assumed _thread_in_native");
5894   RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
5895 }
5896 
5897 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5898   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5899           "Assumed _thread_in_native");
5900   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5901 }
5902 
5903 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5904   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5905 }
5906 
// As both poll() and select() can be interrupted by signals, we have to be
// prepared to restart the system call with an updated timeout, unless the
// poll() was issued with timeout == -1, in which case we just repeat with
// the same "wait forever" value.
5911 
5912 int os::timeout(int fd, long timeout) {
5913   int res;
5914   struct timeval t;
5915   julong prevtime, newtime;
5916   static const char* aNull = 0;
5917   struct pollfd pfd;
5918   pfd.fd = fd;
5919   pfd.events = POLLIN;
5920 
5921   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5922           "Assumed _thread_in_native");
5923 
5924   gettimeofday(&t, &aNull);
5925   prevtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;
5926 
  for (;;) {
    res = ::poll(&pfd, 1, timeout);
    if (res == OS_ERR && errno == EINTR) {
      if (timeout != -1) {
        gettimeofday(&t, &aNull);
        newtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
        timeout -= newtime - prevtime;
        if (timeout <= 0) {
          return OS_OK;
        }
        prevtime = newtime;
      }
    } else {
      return res;
    }
  }
5940 }
5941 
5942 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
5943   int _result;
5944   _result = ::connect(fd, him, len);
5945 
5946   // On Solaris, when a connect() call is interrupted, the connection
5947   // can be established asynchronously (see 6343810). Subsequent calls
5948   // to connect() must check the errno value which has the semantic
5949   // described below (copied from the connect() man page). Handling
5950   // of asynchronously established connections is required for both
5951   // blocking and non-blocking sockets.
5952   //     EINTR            The  connection  attempt  was   interrupted
5953   //                      before  any data arrived by the delivery of
5954   //                      a signal. The connection, however, will  be
5955   //                      established asynchronously.
5956   //
5957   //     EINPROGRESS      The socket is non-blocking, and the connec-
5958   //                      tion  cannot  be completed immediately.
5959   //
5960   //     EALREADY         The socket is non-blocking,  and a previous
5961   //                      connection  attempt  has  not yet been com-
5962   //                      pleted.
5963   //
5964   //     EISCONN          The socket is already connected.
  if (_result == OS_ERR && errno == EINTR) {
    // Restarting a connect() changes its errno semantics
    RESTARTABLE(::connect(fd, him, len), _result);
    // Undo these changes
    if (_result == OS_ERR) {
      if (errno == EALREADY) {
        errno = EINPROGRESS; // fall through
      } else if (errno == EISCONN) {
        errno = 0;
        return OS_OK;
      }
    }
  }
  return _result;
}
5980 
5981 int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
5982   if (fd < 0) {
5983     return OS_ERR;
5984   }
5985   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5986           "Assumed _thread_in_native");
5987   RESTARTABLE_RETURN_INT((int)::accept(fd, him, len));
5988 }
5989 
5990 int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
5991                  sockaddr* from, socklen_t* fromlen) {
5992   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5993           "Assumed _thread_in_native");
5994   RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
5995 }
5996 
5997 int os::sendto(int fd, char* buf, size_t len, uint flags,
5998                struct sockaddr* to, socklen_t tolen) {
5999   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
6000           "Assumed _thread_in_native");
6001   RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
6002 }
6003 
6004 int os::socket_available(int fd, jint *pbytes) {
6005   if (fd < 0) {
6006     return OS_OK;
6007   }
6008   int ret;
6009   RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
  // Note: ioctl() can return 0 when successful; JVM_SocketAvailable
  // is expected to return 0 on failure and 1 on success to the JDK.
6012   return (ret == OS_ERR) ? 0 : 1;
6013 }
6014 
6015 int os::bind(int fd, struct sockaddr* him, socklen_t len) {
6016   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
6017           "Assumed _thread_in_native");
  return ::bind(fd, him, len);
6019 }
6020 
6021 // Get the default path to the core file
6022 // Returns the length of the string
6023 int os::get_core_path(char* buffer, size_t bufferSize) {
6024   const char* p = get_current_directory(buffer, bufferSize);
6025 
6026   if (p == NULL) {
6027     assert(p != NULL, "failed to get current directory");
6028     return 0;
6029   }
6030 
6031   return strlen(buffer);
6032 }
6033 
6034 #ifndef PRODUCT
6035 void TestReserveMemorySpecial_test() {
6036   // No tests available for this platform
6037 }
6038 #endif