1 /*
   2  * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "jvm_solaris.h"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/filemap.hpp"
  37 #include "mutex_solaris.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "os_share_solaris.hpp"
  40 #include "prims/jniFastGetField.hpp"
  41 #include "prims/jvm.h"
  42 #include "prims/jvm_misc.hpp"
  43 #include "runtime/arguments.hpp"
  44 #include "runtime/extendedPC.hpp"
  45 #include "runtime/globals.hpp"
  46 #include "runtime/interfaceSupport.hpp"
  47 #include "runtime/java.hpp"
  48 #include "runtime/javaCalls.hpp"
  49 #include "runtime/mutexLocker.hpp"
  50 #include "runtime/objectMonitor.hpp"
  51 #include "runtime/orderAccess.inline.hpp"
  52 #include "runtime/osThread.hpp"
  53 #include "runtime/perfMemory.hpp"
  54 #include "runtime/sharedRuntime.hpp"
  55 #include "runtime/statSampler.hpp"
  56 #include "runtime/stubRoutines.hpp"
  57 #include "runtime/thread.inline.hpp"
  58 #include "runtime/threadCritical.hpp"
  59 #include "runtime/timer.hpp"
  60 #include "services/attachListener.hpp"
  61 #include "services/memTracker.hpp"
  62 #include "services/runtimeService.hpp"
  63 #include "utilities/decoder.hpp"
  64 #include "utilities/defaultStream.hpp"
  65 #include "utilities/events.hpp"
  66 #include "utilities/growableArray.hpp"
  67 #include "utilities/vmError.hpp"
  68 
  69 // put OS-includes here
  70 # include <dlfcn.h>
  71 # include <errno.h>
  72 # include <exception>
  73 # include <link.h>
  74 # include <poll.h>
  75 # include <pthread.h>
  76 # include <pwd.h>
  77 # include <schedctl.h>
  78 # include <setjmp.h>
  79 # include <signal.h>
  80 # include <stdio.h>
  81 # include <alloca.h>
  82 # include <sys/filio.h>
  83 # include <sys/ipc.h>
  84 # include <sys/lwp.h>
  85 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
  86 # include <sys/mman.h>
  87 # include <sys/processor.h>
  88 # include <sys/procset.h>
  89 # include <sys/pset.h>
  90 # include <sys/resource.h>
  91 # include <sys/shm.h>
  92 # include <sys/socket.h>
  93 # include <sys/stat.h>
  94 # include <sys/systeminfo.h>
  95 # include <sys/time.h>
  96 # include <sys/times.h>
  97 # include <sys/types.h>
  98 # include <sys/wait.h>
  99 # include <sys/utsname.h>
 100 # include <thread.h>
 101 # include <unistd.h>
 102 # include <sys/priocntl.h>
 103 # include <sys/rtpriocntl.h>
 104 # include <sys/tspriocntl.h>
 105 # include <sys/iapriocntl.h>
 106 # include <sys/fxpriocntl.h>
 107 # include <sys/loadavg.h>
 108 # include <string.h>
 109 # include <stdio.h>
 110 
 111 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
 112 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
 113 
 114 #define MAX_PATH (2 * K)
 115 
 116 // for timer info max values which include all bits
 117 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 118 
 119 
// Here are some liblgrp definitions from sys/lgrp_user.h so that we can
// compile on older systems that lack this header file.
 122 
 123 #ifndef MADV_ACCESS_LWP
 124 # define  MADV_ACCESS_LWP         7       /* next LWP to access heavily */
 125 #endif
 126 #ifndef MADV_ACCESS_MANY
 127 # define  MADV_ACCESS_MANY        8       /* many processes to access heavily */
 128 #endif
 129 
 130 #ifndef LGRP_RSRC_CPU
 131 # define LGRP_RSRC_CPU           0       /* CPU resources */
 132 #endif
 133 #ifndef LGRP_RSRC_MEM
 134 # define LGRP_RSRC_MEM           1       /* memory resources */
 135 #endif
 136 
 137 // see thr_setprio(3T) for the basis of these numbers
 138 #define MinimumPriority 0
 139 #define NormalPriority  64
 140 #define MaximumPriority 127
 141 
 142 // Values for ThreadPriorityPolicy == 1
 143 int prio_policy1[CriticalPriority+1] = {
 144   -99999,  0, 16,  32,  48,  64,
 145           80, 96, 112, 124, 127, 127 };
 146 
 147 // System parameters used internally
 148 static clock_t clock_tics_per_sec = 100;
 149 
 150 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
 151 static bool enabled_extended_FILE_stdio = false;
 152 
 153 // For diagnostics to print a message once. see run_periodic_checks
 154 static bool check_addr0_done = false;
 155 static sigset_t check_signal_done;
 156 static bool check_signals = true;
 157 
 158 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
 159 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
 160 
 161 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
 162 
 163 
 164 // "default" initializers for missing libc APIs
 165 extern "C" {
 166   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 167   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
 168 
 169   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 170   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
 171 }
 172 
 173 // "default" initializers for pthread-based synchronization
 174 extern "C" {
 175   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 176   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 177 }
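// Note on the stubs above: they only zero the synchronization object (a zeroed
// mutex_t/cond_t is equivalent to a default, process-private initialization)
// and treat destruction as a no-op.  They are presumably installed as fallbacks
// when the corresponding libc/libthread entry points cannot be resolved at
// runtime.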
 178 
 179 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 180 
 181 static inline size_t adjust_stack_size(address base, size_t size) {
 182   if ((ssize_t)size < 0) {
 183     // 4759953: Compensate for ridiculous stack size.
 184     size = max_intx;
 185   }
 186   if (size > (size_t)base) {
 187     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
 188     size = (size_t)base;
 189   }
 190   return size;
 191 }
 192 
 193 static inline stack_t get_stack_info() {
 194   stack_t st;
 195   int retval = thr_stksegment(&st);
 196   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
 197   assert(retval == 0, "incorrect return value from thr_stksegment");
 198   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
 199   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
 200   return st;
 201 }
 202 
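// Returns the base (highest address) of the current thread's stack.
// thr_main() distinguishes the primordial thread from others; for the
// primordial thread the result of the first thr_stksegment() query is cached
// in _main_stack_base and reused afterwards (see the 4352906 workaround below).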
 203 address os::current_stack_base() {
 204   int r = thr_main() ;
 205   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
 206   bool is_primordial_thread = r;
 207 
 208   // Workaround 4352906, avoid calls to thr_stksegment by
 209   // thr_main after the first one (it looks like we trash
 210   // some data, causing the value for ss_sp to be incorrect).
 211   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
 212     stack_t st = get_stack_info();
 213     if (is_primordial_thread) {
 214       // cache initial value of stack base
 215       os::Solaris::_main_stack_base = (address)st.ss_sp;
 216     }
 217     return (address)st.ss_sp;
 218   } else {
 219     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
 220     return os::Solaris::_main_stack_base;
 221   }
 222 }
 223 
 224 size_t os::current_stack_size() {
 225   size_t size;
 226 
 227   int r = thr_main() ;
 228   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  if (!r) {
 230     size = get_stack_info().ss_size;
 231   } else {
 232     struct rlimit limits;
 233     getrlimit(RLIMIT_STACK, &limits);
 234     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
 235   }
 236   // base may not be page aligned
 237   address base = current_stack_base();
  address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
 239   return (size_t)(base - bottom);
 240 }
 241 
 242 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
 243   return localtime_r(clock, res);
 244 }
 245 
 246 // interruptible infrastructure
 247 
 248 // setup_interruptible saves the thread state before going into an
 249 // interruptible system call.
 250 // The saved state is used to restore the thread to
 251 // its former state whether or not an interrupt is received.
 252 // Used by classloader os::read
 253 // os::restartable_read calls skip this layer and stay in _thread_in_native
 254 
 255 void os::Solaris::setup_interruptible(JavaThread* thread) {
 256 
 257   JavaThreadState thread_state = thread->thread_state();
 258 
 259   assert(thread_state != _thread_blocked, "Coming from the wrong thread");
 260   assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
 261   OSThread* osthread = thread->osthread();
 262   osthread->set_saved_interrupt_thread_state(thread_state);
 263   thread->frame_anchor()->make_walkable(thread);
 264   ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
 265 }
 266 
 267 // Version of setup_interruptible() for threads that are already in
 268 // _thread_blocked. Used by os_sleep().
 269 void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) {
 270   thread->frame_anchor()->make_walkable(thread);
 271 }
 272 
 273 JavaThread* os::Solaris::setup_interruptible() {
 274   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
 275   setup_interruptible(thread);
 276   return thread;
 277 }
 278 
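// Solaris 10u4 and later provide enable_extended_FILE_stdio(3C), which relaxes
// the historically low limit on the number of file descriptors usable with
// 32-bit stdio streams.  The symbol is looked up dynamically below so that
// older releases simply skip the call.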
 279 void os::Solaris::try_enable_extended_io() {
 280   typedef int (*enable_extended_FILE_stdio_t)(int, int);
 281 
 282   if (!UseExtendedFileIO) {
 283     return;
 284   }
 285 
 286   enable_extended_FILE_stdio_t enabler =
 287     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
 288                                          "enable_extended_FILE_stdio");
 289   if (enabler) {
 290     enabler(-1, -1);
 291   }
 292 }
 293 
 294 
 295 #ifdef ASSERT
 296 
 297 JavaThread* os::Solaris::setup_interruptible_native() {
 298   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
 299   JavaThreadState thread_state = thread->thread_state();
 300   assert(thread_state == _thread_in_native, "Assumed thread_in_native");
 301   return thread;
 302 }
 303 
 304 void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
 305   JavaThreadState thread_state = thread->thread_state();
 306   assert(thread_state == _thread_in_native, "Assumed thread_in_native");
 307 }
 308 #endif
 309 
 310 // cleanup_interruptible reverses the effects of setup_interruptible
 311 // setup_interruptible_already_blocked() does not need any cleanup.
 312 
 313 void os::Solaris::cleanup_interruptible(JavaThread* thread) {
 314   OSThread* osthread = thread->osthread();
 315 
 316   ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
 317 }
 318 
 319 // I/O interruption related counters called in _INTERRUPTIBLE
 320 
 321 void os::Solaris::bump_interrupted_before_count() {
 322   RuntimeService::record_interrupted_before_count();
 323 }
 324 
 325 void os::Solaris::bump_interrupted_during_count() {
 326   RuntimeService::record_interrupted_during_count();
 327 }
 328 
 329 static int _processors_online = 0;
 330 
 331          jint os::Solaris::_os_thread_limit = 0;
 332 volatile jint os::Solaris::_os_thread_count = 0;
 333 
 334 julong os::available_memory() {
 335   return Solaris::available_memory();
 336 }
 337 
 338 julong os::Solaris::available_memory() {
 339   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
 340 }
 341 
 342 julong os::Solaris::_physical_memory = 0;
 343 
 344 julong os::physical_memory() {
 345    return Solaris::physical_memory();
 346 }
 347 
 348 static hrtime_t first_hrtime = 0;
 349 static const hrtime_t hrtime_hz = 1000*1000*1000;
 350 static volatile hrtime_t max_hrtime = 0;
 351 
 352 
 353 void os::Solaris::initialize_system_info() {
 354   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
 355   _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
 356   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
 357 }
 358 
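// Returns the number of CPUs this process may currently use: the number of
// CPUs in the processor set the process is bound to, if any, otherwise the
// number of online CPUs.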
 359 int os::active_processor_count() {
 360   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 361   pid_t pid = getpid();
 362   psetid_t pset = PS_NONE;
 363   // Are we running in a processor set or is there any processor set around?
 364   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
 365     uint_t pset_cpus;
 366     // Query the number of cpus available to us.
 367     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
 368       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
 369       _processors_online = pset_cpus;
 370       return pset_cpus;
 371     }
 372   }
 373   // Otherwise return number of online cpus
 374   return online_cpus;
 375 }
 376 
 377 static bool find_processors_in_pset(psetid_t        pset,
 378                                     processorid_t** id_array,
 379                                     uint_t*         id_length) {
 380   bool result = false;
 381   // Find the number of processors in the processor set.
 382   if (pset_info(pset, NULL, id_length, NULL) == 0) {
 383     // Make up an array to hold their ids.
 384     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 385     // Fill in the array with their processor ids.
 386     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
 387       result = true;
 388     }
 389   }
 390   return result;
 391 }
 392 
 393 // Callers of find_processors_online() must tolerate imprecise results --
 394 // the system configuration can change asynchronously because of DR
 395 // or explicit psradm operations.
 396 //
 397 // We also need to take care that the loop (below) terminates as the
 398 // number of processors online can change between the _SC_NPROCESSORS_ONLN
 399 // request and the loop that builds the list of processor ids.   Unfortunately
 400 // there's no reliable way to determine the maximum valid processor id,
 401 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
 402 // man pages, which claim the processor id set is "sparse, but
 403 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
 404 // exit the loop.
 405 //
 406 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
 407 // not available on S8.0.
 408 
 409 static bool find_processors_online(processorid_t** id_array,
 410                                    uint*           id_length) {
 411   const processorid_t MAX_PROCESSOR_ID = 100000 ;
 412   // Find the number of processors online.
 413   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
 414   // Make up an array to hold their ids.
 415   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 416   // Processors need not be numbered consecutively.
 417   long found = 0;
 418   processorid_t next = 0;
 419   while (found < *id_length && next < MAX_PROCESSOR_ID) {
 420     processor_info_t info;
 421     if (processor_info(next, &info) == 0) {
 422       // NB, PI_NOINTR processors are effectively online ...
 423       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
 424         (*id_array)[found] = next;
 425         found += 1;
 426       }
 427     }
 428     next += 1;
 429   }
 430   if (found < *id_length) {
 431       // The loop above didn't identify the expected number of processors.
 432       // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
 433       // and re-running the loop, above, but there's no guarantee of progress
 434       // if the system configuration is in flux.  Instead, we just return what
 435       // we've got.  Note that in the worst case find_processors_online() could
 436       // return an empty set.  (As a fall-back in the case of the empty set we
 437       // could just return the ID of the current processor).
 438       *id_length = found ;
 439   }
 440 
 441   return true;
 442 }
 443 
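// Distributes 'distribution_length' distinct processor ids from 'id_array'
// into 'distribution', stepping round-robin across "boards" of
// ProcessDistributionStride processors so that successive assignments land on
// different boards.  The starting board is static so that repeated calls do
// not all begin at board 0.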
 444 static bool assign_distribution(processorid_t* id_array,
 445                                 uint           id_length,
 446                                 uint*          distribution,
 447                                 uint           distribution_length) {
 448   // We assume we can assign processorid_t's to uint's.
 449   assert(sizeof(processorid_t) == sizeof(uint),
 450          "can't convert processorid_t to uint");
 451   // Quick check to see if we won't succeed.
 452   if (id_length < distribution_length) {
 453     return false;
 454   }
 455   // Assign processor ids to the distribution.
 456   // Try to shuffle processors to distribute work across boards,
 457   // assuming 4 processors per board.
 458   const uint processors_per_board = ProcessDistributionStride;
 459   // Find the maximum processor id.
 460   processorid_t max_id = 0;
 461   for (uint m = 0; m < id_length; m += 1) {
 462     max_id = MAX2(max_id, id_array[m]);
 463   }
 464   // The next id, to limit loops.
 465   const processorid_t limit_id = max_id + 1;
 466   // Make up markers for available processors.
 467   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
 468   for (uint c = 0; c < limit_id; c += 1) {
 469     available_id[c] = false;
 470   }
 471   for (uint a = 0; a < id_length; a += 1) {
 472     available_id[id_array[a]] = true;
 473   }
 474   // Step by "boards", then by "slot", copying to "assigned".
 475   // NEEDS_CLEANUP: The assignment of processors should be stateful,
 476   //                remembering which processors have been assigned by
 477   //                previous calls, etc., so as to distribute several
  //                independent calls of this method.  What we'd like is
  //                an API that lets us ask how many processes are bound
  //                to a processor, but we don't have that, either.
 482   //                In the short term, "board" is static so that
 483   //                subsequent distributions don't all start at board 0.
 484   static uint board = 0;
 485   uint assigned = 0;
 486   // Until we've found enough processors ....
 487   while (assigned < distribution_length) {
 488     // ... find the next available processor in the board.
 489     for (uint slot = 0; slot < processors_per_board; slot += 1) {
 490       uint try_id = board * processors_per_board + slot;
 491       if ((try_id < limit_id) && (available_id[try_id] == true)) {
 492         distribution[assigned] = try_id;
 493         available_id[try_id] = false;
 494         assigned += 1;
 495         break;
 496       }
 497     }
 498     board += 1;
 499     if (board * processors_per_board + 0 >= limit_id) {
 500       board = 0;
 501     }
 502   }
 503   if (available_id != NULL) {
 504     FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
 505   }
 506   return true;
 507 }
 508 
 509 void os::set_native_thread_name(const char *name) {
 510   // Not yet implemented.
 511   return;
 512 }
 513 
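// Fills 'distribution' with 'length' processor ids on which work can be
// distributed: ids are taken from the processor set this process is bound to,
// if any, otherwise from the set of online processors.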
 514 bool os::distribute_processes(uint length, uint* distribution) {
 515   bool result = false;
 516   // Find the processor id's of all the available CPUs.
 517   processorid_t* id_array  = NULL;
 518   uint           id_length = 0;
 519   // There are some races between querying information and using it,
 520   // since processor sets can change dynamically.
 521   psetid_t pset = PS_NONE;
 522   // Are we running in a processor set?
 523   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
 524     result = find_processors_in_pset(pset, &id_array, &id_length);
 525   } else {
 526     result = find_processors_online(&id_array, &id_length);
 527   }
 528   if (result == true) {
 529     if (id_length >= length) {
 530       result = assign_distribution(id_array, id_length, distribution, length);
 531     } else {
 532       result = false;
 533     }
 534   }
 535   if (id_array != NULL) {
 536     FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
 537   }
 538   return result;
 539 }
 540 
 541 bool os::bind_to_processor(uint processor_id) {
 542   // We assume that a processorid_t can be stored in a uint.
 543   assert(sizeof(uint) == sizeof(processorid_t),
 544          "can't convert uint to processorid_t");
 545   int bind_result =
 546     processor_bind(P_LWPID,                       // bind LWP.
 547                    P_MYID,                        // bind current LWP.
 548                    (processorid_t) processor_id,  // id.
 549                    NULL);                         // don't return old binding.
 550   return (bind_result == 0);
 551 }
 552 
 553 bool os::getenv(const char* name, char* buffer, int len) {
 554   char* val = ::getenv( name );
 555   if ( val == NULL
 556   ||   strlen(val) + 1  >  len ) {
 557     if (len > 0)  buffer[0] = 0; // return a null string
 558     return false;
 559   }
 560   strcpy( buffer, val );
 561   return true;
 562 }
 563 
 564 
// Return true if the process is running with special privileges,
// i.e. the real and effective uid or gid differ (setuid/setgid).
 566 
 567 bool os::have_special_privileges() {
 568   static bool init = false;
 569   static bool privileges = false;
 570   if (!init) {
 571     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 572     init = true;
 573   }
 574   return privileges;
 575 }
 576 
 577 
 578 void os::init_system_properties_values() {
 579   // The next steps are taken in the product version:
 580   //
 581   // Obtain the JAVA_HOME value from the location of libjvm.so.
 582   // This library should be located at:
 583   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
 584   //
 585   // If "/jre/lib/" appears at the right place in the path, then we
 586   // assume libjvm.so is installed in a JDK and we use this path.
 587   //
 588   // Otherwise exit with message: "Could not create the Java virtual machine."
 589   //
 590   // The following extra steps are taken in the debugging version:
 591   //
  // If "/jre/lib/" does NOT appear at the right place in the path,
  // then instead of exiting we check the $JAVA_HOME environment variable.
 594   //
 595   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
 596   // then we append a fake suffix "hotspot/libjvm.so" to this path so
 597   // it looks like libjvm.so is installed there
 598   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
 599   //
 600   // Otherwise exit.
 601   //
 602   // Important note: if the location of libjvm.so changes this
 603   // code needs to be changed accordingly.
 604 
 605 // Base path of extensions installed on the system.
 606 #define SYS_EXT_DIR     "/usr/jdk/packages"
 607 #define EXTENSIONS_DIR  "/lib/ext"
 608 #define ENDORSED_DIR    "/lib/endorsed"
 609 
 610   char cpu_arch[12];
 611   // Buffer that fits several sprintfs.
 612   // Note that the space for the colon and the trailing null are provided
 613   // by the nulls included by the sizeof operator.
 614   const size_t bufsize =
 615     MAX4((size_t)MAXPATHLEN,  // For dll_dir & friends.
         sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(cpu_arch), // invariant ld_library_path (cpu_arch is filled in later, so use its full size as an upper bound)
 617          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
 618          (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
 619   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 620 
 621   // sysclasspath, java_home, dll_dir
 622   {
 623     char *pslash;
 624     os::jvm_path(buf, bufsize);
 625 
 626     // Found the full path to libjvm.so.
 627     // Now cut the path to <java_home>/jre if we can.
 628     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 629     pslash = strrchr(buf, '/');
 630     if (pslash != NULL) {
 631       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 632     }
 633     Arguments::set_dll_dir(buf);
 634 
 635     if (pslash != NULL) {
 636       pslash = strrchr(buf, '/');
 637       if (pslash != NULL) {
 638         *pslash = '\0';          // Get rid of /<arch>.
 639         pslash = strrchr(buf, '/');
 640         if (pslash != NULL) {
 641           *pslash = '\0';        // Get rid of /lib.
 642         }
 643       }
 644     }
 645     Arguments::set_java_home(buf);
 646     set_boot_path('/', ':');
 647   }
 648 
 649   // Where to look for native libraries.
 650   {
 651     // Use dlinfo() to determine the correct java.library.path.
 652     //
 653     // If we're launched by the Java launcher, and the user
 654     // does not set java.library.path explicitly on the commandline,
 655     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
 656     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
 657     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
 658     // /usr/lib), which is exactly what we want.
 659     //
 660     // If the user does set java.library.path, it completely
 661     // overwrites this setting, and always has.
 662     //
 663     // If we're not launched by the Java launcher, we may
 664     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
 665     // settings.  Again, dlinfo does exactly what we want.
 666 
 667     Dl_serinfo     info_sz, *info = &info_sz;
 668     Dl_serpath     *path;
 669     char           *library_path;
 670     char           *common_path = buf;
 671 
 672     // Determine search path count and required buffer size.
 673     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
 674       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
 675       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
 676     }
 677 
 678     // Allocate new buffer and initialize.
 679     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
 680     info->dls_size = info_sz.dls_size;
 681     info->dls_cnt = info_sz.dls_cnt;
 682 
 683     // Obtain search path information.
 684     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
 685       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
 686       FREE_C_HEAP_ARRAY(char, info, mtInternal);
 687       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
 688     }
 689 
 690     path = &info->dls_serpath[0];
 691 
 692     // Note: Due to a legacy implementation, most of the library path
    // is set in the launcher. This was to accommodate linking restrictions
 694     // on legacy Solaris implementations (which are no longer supported).
 695     // Eventually, all the library path setting will be done here.
 696     //
 697     // However, to prevent the proliferation of improperly built native
 698     // libraries, the new path component /usr/jdk/packages is added here.
 699 
 700     // Determine the actual CPU architecture.
 701     sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
 702 #ifdef _LP64
 703     // If we are a 64-bit vm, perform the following translations:
 704     //   sparc   -> sparcv9
 705     //   i386    -> amd64
 706     if (strcmp(cpu_arch, "sparc") == 0) {
 707       strcat(cpu_arch, "v9");
 708     } else if (strcmp(cpu_arch, "i386") == 0) {
 709       strcpy(cpu_arch, "amd64");
 710     }
 711 #endif
 712 
 713     // Construct the invariant part of ld_library_path.
 714     sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
 715 
 716     // Struct size is more than sufficient for the path components obtained
 717     // through the dlinfo() call, so only add additional space for the path
 718     // components explicitly added here.
 719     size_t library_path_size = info->dls_size + strlen(common_path);
 720     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
 721     library_path[0] = '\0';
 722 
 723     // Construct the desired Java library path from the linker's library
 724     // search path.
 725     //
 726     // For compatibility, it is optimal that we insert the additional path
 727     // components specific to the Java VM after those components specified
 728     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
 729     // infrastructure.
 730     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
 731       strcpy(library_path, common_path);
 732     } else {
 733       int inserted = 0;
 734       int i;
 735       for (i = 0; i < info->dls_cnt; i++, path++) {
 736         uint_t flags = path->dls_flags & LA_SER_MASK;
 737         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
 738           strcat(library_path, common_path);
 739           strcat(library_path, os::path_separator());
 740           inserted = 1;
 741         }
 742         strcat(library_path, path->dls_name);
 743         strcat(library_path, os::path_separator());
 744       }
 745       // Eliminate trailing path separator.
 746       library_path[strlen(library_path)-1] = '\0';
 747     }
 748 
 749     // happens before argument parsing - can't use a trace flag
 750     // tty->print_raw("init_system_properties_values: native lib path: ");
 751     // tty->print_raw_cr(library_path);
 752 
 753     // Callee copies into its own buffer.
 754     Arguments::set_library_path(library_path);
 755 
 756     FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
 757     FREE_C_HEAP_ARRAY(char, info, mtInternal);
 758   }
 759 
 760   // Extensions directories.
 761   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
 762   Arguments::set_ext_dirs(buf);
 763 
 764   // Endorsed standards default directory.
 765   sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
 766   Arguments::set_endorsed_dirs(buf);
 767 
 768   FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 769 
 770 #undef SYS_EXT_DIR
 771 #undef EXTENSIONS_DIR
 772 #undef ENDORSED_DIR
 773 }
 774 
 775 void os::breakpoint() {
 776   BREAKPOINT;
 777 }
 778 
 779 bool os::obsolete_option(const JavaVMOption *option)
 780 {
 781   if (!strncmp(option->optionString, "-Xt", 3)) {
 782     return true;
 783   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
 784     return true;
 785   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
 786     return true;
 787   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
 788     return true;
 789   }
 790   return false;
 791 }
 792 
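// Returns true if 'sp' lies within the thread's stack, i.e. in the half-open
// range [stack_base - stack_size, stack_base).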
 793 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
 794   address  stackStart  = (address)thread->stack_base();
 795   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
 796   if (sp < stackStart && sp >= stackEnd ) return true;
 797   return false;
 798 }
 799 
 800 extern "C" void breakpoint() {
 801   // use debugger to set breakpoint here
 802 }
 803 
 804 static thread_t main_thread;
 805 
 806 // Thread start routine for all new Java threads
 807 extern "C" void* java_start(void* thread_addr) {
 808   // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
 810   // cache lines. The threads can be either from the same JVM instance, or
 811   // from different JVM instances. The benefit is especially true for
 812   // processors with hyperthreading technology.
 813   static int counter = 0;
 814   int pid = os::current_process_id();
 815   alloca(((pid ^ counter++) & 7) * 128);
 816 
 818   Thread* thread = (Thread*)thread_addr;
 819   OSThread* osthr = thread->osthread();
 820 
 821   osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
 822   thread->_schedctl = (void *) schedctl_init () ;
 823 
 824   if (UseNUMA) {
 825     int lgrp_id = os::numa_get_group_id();
 826     if (lgrp_id != -1) {
 827       thread->set_lgrp_id(lgrp_id);
 828     }
 829   }
 830 
 831   // If the creator called set priority before we started,
 832   // we need to call set_native_priority now that we have an lwp.
 833   // We used to get the priority from thr_getprio (we called
 834   // thr_setprio way back in create_thread) and pass it to
 835   // set_native_priority, but Solaris scales the priority
 836   // in java_to_os_priority, so when we read it back here,
 837   // we pass trash to set_native_priority instead of what's
 838   // in java_to_os_priority. So we save the native priority
 839   // in the osThread and recall it here.
 840 
 841   if ( osthr->thread_id() != -1 ) {
 842     if ( UseThreadPriorities ) {
 843       int prio = osthr->native_priority();
 844       if (ThreadPriorityVerbose) {
 845         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
 846                       INTPTR_FORMAT ", setting priority: %d\n",
 847                       osthr->thread_id(), osthr->lwp_id(), prio);
 848       }
 849       os::set_native_priority(thread, prio);
 850     }
 851   } else if (ThreadPriorityVerbose) {
 852     warning("Can't set priority in _start routine, thread id hasn't been set\n");
 853   }
 854 
 855   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 856 
 857   // initialize signal mask for this thread
 858   os::Solaris::hotspot_sigmask(thread);
 859 
 860   thread->run();
 861 
 862   // One less thread is executing
 863   // When the VMThread gets here, the main thread may have already exited
 864   // which frees the CodeHeap containing the Atomic::dec code
 865   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 866     Atomic::dec(&os::Solaris::_os_thread_count);
 867   }
 868 
 869   if (UseDetachedThreads) {
 870     thr_exit(NULL);
 871     ShouldNotReachHere();
 872   }
 873   return NULL;
 874 }
 875 
 876 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
 877   // Allocate the OSThread object
 878   OSThread* osthread = new OSThread(NULL, NULL);
 879   if (osthread == NULL) return NULL;
 880 
 881   // Store info on the Solaris thread into the OSThread
 882   osthread->set_thread_id(thread_id);
 883   osthread->set_lwp_id(_lwp_self());
 884   thread->_schedctl = (void *) schedctl_init () ;
 885 
 886   if (UseNUMA) {
 887     int lgrp_id = os::numa_get_group_id();
 888     if (lgrp_id != -1) {
 889       thread->set_lgrp_id(lgrp_id);
 890     }
 891   }
 892 
 893   if ( ThreadPriorityVerbose ) {
 894     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
 895                   osthread->thread_id(), osthread->lwp_id() );
 896   }
 897 
 898   // Initial thread state is INITIALIZED, not SUSPENDED
 899   osthread->set_state(INITIALIZED);
 900 
 901   return osthread;
 902 }
 903 
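// Applies the HotSpot signal mask to 'thread': the caller's mask is saved in
// the OSThread, the signals the VM must receive are unblocked, and (unless
// -Xrs/ReduceSignalUsage is set) BREAK_SIGNAL is left unblocked only in the
// VM thread and blocked in all other threads.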
 904 void os::Solaris::hotspot_sigmask(Thread* thread) {
 905 
  // Save the caller's signal mask
 907   sigset_t sigmask;
 908   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
 909   OSThread *osthread = thread->osthread();
 910   osthread->set_caller_sigmask(sigmask);
 911 
 912   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
 913   if (!ReduceSignalUsage) {
 914     if (thread->is_VM_thread()) {
 915       // Only the VM thread handles BREAK_SIGNAL ...
 916       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
 917     } else {
 918       // ... all other threads block BREAK_SIGNAL
 919       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
 920       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
 921     }
 922   }
 923 }
 924 
 925 bool os::create_attached_thread(JavaThread* thread) {
 926 #ifdef ASSERT
 927   thread->verify_not_published();
 928 #endif
 929   OSThread* osthread = create_os_thread(thread, thr_self());
 930   if (osthread == NULL) {
 931      return false;
 932   }
 933 
 934   // Initial thread state is RUNNABLE
 935   osthread->set_state(RUNNABLE);
 936   thread->set_osthread(osthread);
 937 
 938   // initialize signal mask for this thread
 939   // and save the caller's signal mask
 940   os::Solaris::hotspot_sigmask(thread);
 941 
 942   return true;
 943 }
 944 
 945 bool os::create_main_thread(JavaThread* thread) {
 946 #ifdef ASSERT
 947   thread->verify_not_published();
 948 #endif
 949   if (_starting_thread == NULL) {
 950     _starting_thread = create_os_thread(thread, main_thread);
 951      if (_starting_thread == NULL) {
 952         return false;
 953      }
 954   }
 955 
  // The primordial thread is runnable from the start
 957   _starting_thread->set_state(RUNNABLE);
 958 
 959   thread->set_osthread(_starting_thread);
 960 
 961   // initialize signal mask for this thread
 962   // and save the caller's signal mask
 963   os::Solaris::hotspot_sigmask(thread);
 964 
 965   return true;
 966 }
 967 
 968 // _T2_libthread is true if we believe we are running with the newer
 969 // SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
 970 bool os::Solaris::_T2_libthread = false;
 971 
 972 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 973   // Allocate the OSThread object
 974   OSThread* osthread = new OSThread(NULL, NULL);
 975   if (osthread == NULL) {
 976     return false;
 977   }
 978 
 979   if ( ThreadPriorityVerbose ) {
 980     char *thrtyp;
 981     switch ( thr_type ) {
 982       case vm_thread:
 983         thrtyp = (char *)"vm";
 984         break;
 985       case cgc_thread:
 986         thrtyp = (char *)"cgc";
 987         break;
 988       case pgc_thread:
 989         thrtyp = (char *)"pgc";
 990         break;
 991       case java_thread:
 992         thrtyp = (char *)"java";
 993         break;
 994       case compiler_thread:
 995         thrtyp = (char *)"compiler";
 996         break;
 997       case watcher_thread:
 998         thrtyp = (char *)"watcher";
 999         break;
1000       default:
1001         thrtyp = (char *)"unknown";
1002         break;
1003     }
1004     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
1005   }
1006 
1007   // Calculate stack size if it's not specified by caller.
1008   if (stack_size == 0) {
    // The default stack size is 1M (2M for LP64).
1010     stack_size = (BytesPerWord >> 2) * K * K;
1011 
1012     switch (thr_type) {
1013     case os::java_thread:
      // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
1015       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
1016       break;
1017     case os::compiler_thread:
1018       if (CompilerThreadStackSize > 0) {
1019         stack_size = (size_t)(CompilerThreadStackSize * K);
1020         break;
1021       } // else fall through:
1022         // use VMThreadStackSize if CompilerThreadStackSize is not defined
1023     case os::vm_thread:
1024     case os::pgc_thread:
1025     case os::cgc_thread:
1026     case os::watcher_thread:
1027       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1028       break;
1029     }
1030   }
1031   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1032 
1033   // Initial state is ALLOCATED but not INITIALIZED
1034   osthread->set_state(ALLOCATED);
1035 
1036   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We got lots of threads. Check if we still have some address space left.
    // We need some headroom of unreserved address space (VirtualMemoryBangSize,
    // 20 MB below); we check by trying to reserve that much.
1040     const size_t VirtualMemoryBangSize = 20*K*K;
1041     char* mem = os::reserve_memory(VirtualMemoryBangSize);
1042     if (mem == NULL) {
1043       delete osthread;
1044       return false;
1045     } else {
1046       // Release the memory again
1047       os::release_memory(mem, VirtualMemoryBangSize);
1048     }
1049   }
1050 
1051   // Setup osthread because the child thread may need it.
1052   thread->set_osthread(osthread);
1053 
  // Create the Solaris thread.
  // Request THR_BOUND explicitly for the T2_libthread case in case that
  // assumption is not accurate, since our alternate signal stack handling
  // depends on the threads being bound.
1058   thread_t tid = 0;
1059   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
1060                    | ((UseBoundThreads || os::Solaris::T2_libthread() ||
1061                        (thr_type == vm_thread) ||
1062                        (thr_type == cgc_thread) ||
1063                        (thr_type == pgc_thread) ||
1064                        (thr_type == compiler_thread && BackgroundCompilation)) ?
1065                       THR_BOUND : 0);
1066   int      status;
1067 
1068   // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
1069   //
  // On multiprocessor systems, libthread sometimes under-provisions our
  // process with LWPs.  On a 30-way system, for instance, we could have
  // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
  // to our process.  This can result in underutilization of PEs.
1074   // I suspect the problem is related to libthread's LWP
1075   // pool management and to the kernel's SIGBLOCKING "last LWP parked"
1076   // upcall policy.
1077   //
1078   // The following code is palliative -- it attempts to ensure that our
1079   // process has sufficient LWPs to take advantage of multiple PEs.
1080   // Proper long-term cures include using user-level threads bound to LWPs
1081   // (THR_BOUND) or using LWP-based synchronization.  Note that there is a
1082   // slight timing window with respect to sampling _os_thread_count, but
1083   // the race is benign.  Also, we should periodically recompute
  // _processors_online as the min of SC_NPROCESSORS_ONLN and the
  // number of PEs in our partition.  You might be tempted to use
1086   // THR_NEW_LWP here, but I'd recommend against it as that could
1087   // result in undesirable growth of the libthread's LWP pool.
  // The fix below isn't sufficient; for instance, it doesn't take into account
1089   // LWPs parked on IO.  It does, however, help certain CPU-bound benchmarks.
1090   //
1091   // Some pathologies this scheme doesn't handle:
1092   // *  Threads can block, releasing the LWPs.  The LWPs can age out.
1093   //    When a large number of threads become ready again there aren't
1094   //    enough LWPs available to service them.  This can occur when the
1095   //    number of ready threads oscillates.
1096   // *  LWPs/Threads park on IO, thus taking the LWP out of circulation.
1097   //
1098   // Finally, we should call thr_setconcurrency() periodically to refresh
1099   // the LWP pool and thwart the LWP age-out mechanism.
1100   // The "+3" term provides a little slop -- we want to slightly overprovision.
1101 
1102   if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
1103     if (!(flags & THR_BOUND)) {
1104       thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
1105     }
1106   }
1107   // Although this doesn't hurt, we should warn of undefined behavior
1108   // when using unbound T1 threads with schedctl().  This should never
1109   // happen, as the compiler and VM threads are always created bound
1110   DEBUG_ONLY(
1111       if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
1112           (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
1113           ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
1114            (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
1115          warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
1116       }
1117   );
1118 
1119 
1120   // Mark that we don't have an lwp or thread id yet.
1121   // In case we attempt to set the priority before the thread starts.
1122   osthread->set_lwp_id(-1);
1123   osthread->set_thread_id(-1);
1124 
1125   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1126   if (status != 0) {
1127     if (PrintMiscellaneous && (Verbose || WizardMode)) {
1128       perror("os::create_thread");
1129     }
1130     thread->set_osthread(NULL);
1131     // Need to clean up stuff we've allocated so far
1132     delete osthread;
1133     return false;
1134   }
1135 
1136   Atomic::inc(&os::Solaris::_os_thread_count);
1137 
1138   // Store info on the Solaris thread into the OSThread
1139   osthread->set_thread_id(tid);
1140 
1141   // Remember that we created this thread so we can set priority on it
1142   osthread->set_vm_created();
1143 
1144   // Set the default thread priority.  If using bound threads, setting
1145   // lwp priority will be delayed until thread start.
1146   set_native_priority(thread,
1147                       DefaultThreadPriority == -1 ?
1148                         java_to_os_priority[NormPriority] :
1149                         DefaultThreadPriority);
1150 
1151   // Initial thread state is INITIALIZED, not SUSPENDED
1152   osthread->set_state(INITIALIZED);
1153 
1154   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1155   return true;
1156 }
1157 
/* SIGJVM1 and SIGJVM2 are defined by Solaris 10 and later.  Defining them
 *  here allows builds on earlier versions of Solaris to take advantage of
 *  the newly reserved Solaris JVM signals.  When SIGJVM1/SIGJVM2 are usable,
 *  INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2, and -XX:+UseAltSigs
 *  does nothing since these signals have no conflicts.
 */
1163 #if !defined(SIGJVM1)
1164 #define SIGJVM1 39
1165 #define SIGJVM2 40
1166 #endif
1167 
1168 debug_only(static bool signal_sets_initialized = false);
1169 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1170 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1171 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1172 
1173 bool os::Solaris::is_sig_ignored(int sig) {
1174       struct sigaction oact;
1175       sigaction(sig, (struct sigaction*)NULL, &oact);
1176       void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
1177                                      : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
1178       if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
1179            return true;
1180       else
1181            return false;
1182 }
1183 
1184 // Note: SIGRTMIN is a macro that calls sysconf() so it will
1185 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime
1186 static bool isJVM1available() {
1187   return SIGJVM1 < SIGRTMIN;
1188 }
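// signal_sets_init() below uses this check: if the reserved JVM signals lie
// below SIGRTMIN they are safe to use, otherwise we fall back to the
// ALT_*_SIGNAL pair (with -XX:+UseAltSigs) or the default INTERRUPT/ASYNC
// signals.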
1189 
1190 void os::Solaris::signal_sets_init() {
1191   // Should also have an assertion stating we are still single-threaded.
1192   assert(!signal_sets_initialized, "Already initialized");
1193   // Fill in signals that are necessarily unblocked for all threads in
1194   // the VM. Currently, we unblock the following signals:
1195   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1196   //                         by -Xrs (=ReduceSignalUsage));
1197   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1198   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1199   // the dispositions or masks wrt these signals.
1200   // Programs embedding the VM that want to use the above signals for their
1201   // own purposes must, at this time, use the "-Xrs" option to prevent
1202   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1203   // (See bug 4345157, and other related bugs).
1204   // In reality, though, unblocking these signals is really a nop, since
1205   // these signals are not blocked by default.
1206   sigemptyset(&unblocked_sigs);
1207   sigemptyset(&allowdebug_blocked_sigs);
1208   sigaddset(&unblocked_sigs, SIGILL);
1209   sigaddset(&unblocked_sigs, SIGSEGV);
1210   sigaddset(&unblocked_sigs, SIGBUS);
1211   sigaddset(&unblocked_sigs, SIGFPE);
1212 
  if (isJVM1available()) {
1214     os::Solaris::set_SIGinterrupt(SIGJVM1);
1215     os::Solaris::set_SIGasync(SIGJVM2);
1216   } else if (UseAltSigs) {
1217     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1218     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1219   } else {
1220     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1221     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1222   }
1223 
1224   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1225   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1226 
1227   if (!ReduceSignalUsage) {
1228    if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1229       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1230       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1231    }
1232    if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1233       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1234       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1235    }
1236    if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1237       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1238       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1239    }
1240   }
1241   // Fill in signals that are blocked by all but the VM thread.
1242   sigemptyset(&vm_sigs);
1243   if (!ReduceSignalUsage)
1244     sigaddset(&vm_sigs, BREAK_SIGNAL);
1245   debug_only(signal_sets_initialized = true);
1246 
1247   // For diagnostics only used in run_periodic_checks
1248   sigemptyset(&check_signal_done);
1249 }
1250 
1251 // These are signals that are unblocked while a thread is running Java.
1252 // (For some reason, they get blocked by default.)
1253 sigset_t* os::Solaris::unblocked_signals() {
1254   assert(signal_sets_initialized, "Not initialized");
1255   return &unblocked_sigs;
1256 }
1257 
1258 // These are the signals that are blocked while a (non-VM) thread is
1259 // running Java. Only the VM thread handles these signals.
1260 sigset_t* os::Solaris::vm_signals() {
1261   assert(signal_sets_initialized, "Not initialized");
1262   return &vm_sigs;
1263 }
1264 
1265 // These are signals that are blocked during cond_wait to allow debugger in
1266 sigset_t* os::Solaris::allowdebug_blocked_signals() {
1267   assert(signal_sets_initialized, "Not initialized");
1268   return &allowdebug_blocked_sigs;
1269 }
1270 
1271 
1272 void _handle_uncaught_cxx_exception() {
1273   VMError err("An uncaught C++ exception");
1274   err.report_and_die();
1275 }
1276 
1277 
1278 // First crack at OS-specific initialization, from inside the new thread.
1279 void os::initialize_thread(Thread* thr) {
1280   int r = thr_main() ;
1281   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
1282   if (r) {
1283     JavaThread* jt = (JavaThread *)thr;
1284     assert(jt != NULL,"Sanity check");
1285     size_t stack_size;
1286     address base = jt->stack_base();
1287     if (Arguments::created_by_java_launcher()) {
1288       // Use 2MB to allow for Solaris 7 64 bit mode.
1289       stack_size = JavaThread::stack_size_at_create() == 0
1290         ? 2048*K : JavaThread::stack_size_at_create();
1291 
1292       // There are rare cases when we may have already used more than
1293       // the basic stack size allotment before this method is invoked.
1294       // Attempt to allow for a normally sized java_stack.
1295       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1296       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1297     } else {
1298       // 6269555: If we were not created by a Java launcher, i.e. if we are
1299       // running embedded in a native application, treat the primordial thread
1300       // as much like a native attached thread as possible.  This means using
1301       // the current stack size from thr_stksegment(), unless it is too large
1302       // to reliably setup guard pages.  A reasonable max size is 8MB.
1303       size_t current_size = current_stack_size();
1304       // This should never happen, but just in case....
1305       if (current_size == 0) current_size = 2 * K * K;
1306       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1307     }
    address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
1309     stack_size = (size_t)(base - bottom);
1310 
1311     assert(stack_size > 0, "Stack size calculation problem");
1312 
1313     if (stack_size > jt->stack_size()) {
1314       NOT_PRODUCT(
1315         struct rlimit limits;
1316         getrlimit(RLIMIT_STACK, &limits);
1317         size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1318         assert(size >= jt->stack_size(), "Stack size problem in main thread");
1319       )
      tty->print_cr(
        "Stack size of " SIZE_FORMAT " Kb exceeds current limit of " SIZE_FORMAT " Kb.\n"
        "(Stack sizes are rounded up to a multiple of the system page size.)\n"
        "See limit(1) to increase the stack size limit.",
        stack_size / K, jt->stack_size() / K);
1325       vm_exit(1);
1326     }
1327     assert(jt->stack_size() >= stack_size,
1328           "Attempt to map more stack than was allocated");
1329     jt->set_stack_size(stack_size);
1330   }
1331 
1332    // 5/22/01: Right now alternate signal stacks do not handle
1333    // throwing stack overflow exceptions, see bug 4463178
1334    // Until a fix is found for this, T2 will NOT imply alternate signal
1335    // stacks.
1336    // If using T2 libthread threads, install an alternate signal stack.
1337    // Because alternate stacks associate with LWPs on Solaris,
1338    // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
1339    // we prefer to explicitly stack bang.
1340    // If not using T2 libthread, but using UseBoundThreads any threads
1341    // (primordial thread, jni_attachCurrentThread) we do not create,
1342    // probably are not bound, therefore they can not have an alternate
1343    // signal stack. Since our stack banging code is generated and
1344    // is shared across threads, all threads must be bound to allow
1345    // using alternate signal stacks.  The alternative is to interpose
1346    // on _lwp_create to associate an alt sig stack with each LWP,
1347    // and this could be a problem when the JVM is embedded.
1348    // We would prefer to use alternate signal stacks with T2
1349    // Since there is currently no accurate way to detect T2
1350    // we do not. Assuming T2 when running T1 causes sig 11s or assertions
1351    // on installing alternate signal stacks
1352 
1353 
1354    // 05/09/03: removed alternate signal stack support for Solaris
1355    // The alternate signal stack mechanism is no longer needed to
1356    // handle stack overflow. This is now handled by allocating
1357    // guard pages (red zone) and stackbanging.
1358    // Initially the alternate signal stack mechanism was removed because
   // it did not work with T1 libthread. Alternate
1360    // signal stacks MUST have all threads bound to lwps. Applications
1361    // can create their own threads and attach them without their being
1362    // bound under T1. This is frequently the case for the primordial thread.
1363    // If we were ever to reenable this mechanism we would need to
1364    // use the dynamic check for T2 libthread.
1365 
1366   os::Solaris::init_thread_fpu_state();
1367   std::set_terminate(_handle_uncaught_cxx_exception);
1368 }
1369 
1370 
1371 
1372 // Free Solaris resources related to the OSThread
1373 void os::free_thread(OSThread* osthread) {
1374   assert(osthread != NULL, "os::free_thread but osthread not set");
1375 
1376 
1377   // We are told to free resources of the argument thread,
1378   // but we can only really operate on the current thread.
1379   // The main thread must take the VMThread down synchronously
1380   // before the main thread exits and frees up CodeHeap
1381   guarantee((Thread::current()->osthread() == osthread
1382      || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1383   if (Thread::current()->osthread() == osthread) {
1384     // Restore caller's signal mask
1385     sigset_t sigmask = osthread->caller_sigmask();
1386     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1387   }
1388   delete osthread;
1389 }
1390 
1391 void os::pd_start_thread(Thread* thread) {
1392   int status = thr_continue(thread->osthread()->thread_id());
1393   assert_status(status == 0, status, "thr_continue failed");
1394 }
1395 
1396 
1397 intx os::current_thread_id() {
1398   return (intx)thr_self();
1399 }
1400 
1401 static pid_t _initial_pid = 0;
1402 
1403 int os::current_process_id() {
1404   return (int)(_initial_pid ? _initial_pid : getpid());
1405 }
1406 
1407 // gethrtime() should be monotonic according to the documentation,
1408 // but some virtualized platforms are known to break this guarantee.
1409 // getTimeNanos() must be guaranteed not to move backwards, so we
1410 // are forced to add a check here.
1411 inline hrtime_t getTimeNanos() {
1412   const hrtime_t now = gethrtime();
1413   const hrtime_t prev = max_hrtime;
1414   if (now <= prev) {
1415     return prev;   // same or retrograde time;
1416   }
1417   const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1418   assert(obsv >= prev, "invariant");   // Monotonicity
1419   // If the CAS succeeded then we're done and return "now".
1420   // If the CAS failed and the observed value "obsv" is >= now then
1421   // we should return "obsv".  If the CAS failed and now > obsv > prev then
1422   // some other thread raced this thread and installed a new value, in which case
1423   // we could either (a) retry the entire operation, (b) retry trying to install now
1424   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1425   // we might discard a higher "now" value in deference to a slightly lower but freshly
1426   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1427   // to (a) or (b) -- and greatly reduces coherence traffic.
1428   // We might also condition (c) on the magnitude of the delta between obsv and now.
1429   // Avoiding excessive CAS operations to hot RW locations is critical.
1430   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1431   return (prev == obsv) ? now : obsv;
1432 }
1433 
1434 // Time since start-up in seconds to a fine granularity.
1435 // Used by VMSelfDestructTimer and the MemProfiler.
1436 double os::elapsedTime() {
1437   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1438 }
1439 
1440 jlong os::elapsed_counter() {
1441   return (jlong)(getTimeNanos() - first_hrtime);
1442 }
1443 
1444 jlong os::elapsed_frequency() {
1445    return hrtime_hz;
1446 }
1447 
1448 // Return the real, user, and system times in seconds from an
1449 // arbitrary fixed point in the past.
1450 bool os::getTimesSecs(double* process_real_time,
1451                   double* process_user_time,
1452                   double* process_system_time) {
1453   struct tms ticks;
1454   clock_t real_ticks = times(&ticks);
1455 
1456   if (real_ticks == (clock_t) (-1)) {
1457     return false;
1458   } else {
1459     double ticks_per_second = (double) clock_tics_per_sec;
1460     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1461     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1462     // For consistency return the real time from getTimeNanos()
1463     // converted to seconds.
1464     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1465 
1466     return true;
1467   }
1468 }
1469 
1470 bool os::supports_vtime() { return true; }
1471 
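     // Enable micro-state accounting for the current process by writing a
     // PCSET PR_MSACCT control message to /proc/self/ctl.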
1472 bool os::enable_vtime() {
1473   int fd = ::open("/proc/self/ctl", O_WRONLY);
1474   if (fd == -1)
1475     return false;
1476 
1477   long cmd[] = { PCSET, PR_MSACCT };
1478   int res = ::write(fd, cmd, sizeof(long) * 2);
1479   ::close(fd);
1480   if (res != sizeof(long) * 2)
1481     return false;
1482 
1483   return true;
1484 }
1485 
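     // Report whether micro-state accounting is currently enabled by checking
     // the PR_MSACCT flag in /proc/self/status.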
1486 bool os::vtime_enabled() {
1487   int fd = ::open("/proc/self/status", O_RDONLY);
1488   if (fd == -1)
1489     return false;
1490 
1491   pstatus_t status;
1492   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1493   ::close(fd);
1494   if (res != sizeof(pstatus_t))
1495     return false;
1496 
1497   return status.pr_flags & PR_MSACCT;
1498 }
1499 
1500 double os::elapsedVTime() {
1501   return (double)gethrvtime() / (double)hrtime_hz;
1502 }
1503 
1504 // Used internally for comparisons only
1505 // getTimeMillis guaranteed to not move backwards on Solaris
1506 jlong getTimeMillis() {
1507   jlong nanotime = getTimeNanos();
1508   return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1509 }
1510 
1511 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1512 jlong os::javaTimeMillis() {
1513   timeval t;
1514   if (gettimeofday( &t, NULL) == -1)
1515     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
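       // Combine seconds and microseconds into milliseconds since the epoch.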
1516   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1517 }
1518 
1519 jlong os::javaTimeNanos() {
1520   return (jlong)getTimeNanos();
1521 }
1522 
1523 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1524   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1525   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1526   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1527   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1528 }
1529 
1530 char * os::local_time_string(char *buf, size_t buflen) {
1531   struct tm t;
1532   time_t long_time;
1533   time(&long_time);
1534   localtime_r(&long_time, &t);
1535   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1536                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1537                t.tm_hour, t.tm_min, t.tm_sec);
1538   return buf;
1539 }
1540 
1541 // Note: os::shutdown() might be called very early during initialization, or
1542 // called from signal handler. Before adding something to os::shutdown(), make
1543 // sure it is async-safe and can handle partially initialized VM.
1544 void os::shutdown() {
1545 
1546   // allow PerfMemory to attempt cleanup of any persistent resources
1547   perfMemory_exit();
1548 
1549   // needs to remove object in file system
1550   AttachListener::abort();
1551 
1552   // flush buffered output, finish log files
1553   ostream_abort();
1554 
1555   // Check for abort hook
1556   abort_hook_t abort_hook = Arguments::abort_hook();
1557   if (abort_hook != NULL) {
1558     abort_hook();
1559   }
1560 }
1561 
1562 // Note: os::abort() might be called very early during initialization, or
1563 // called from signal handler. Before adding something to os::abort(), make
1564 // sure it is async-safe and can handle partially initialized VM.
1565 void os::abort(bool dump_core) {
1566   os::shutdown();
1567   if (dump_core) {
1568 #ifndef PRODUCT
1569     fdStream out(defaultStream::output_fd());
1570     out.print_raw("Current thread is ");
1571     char buf[16];
1572     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1573     out.print_raw_cr(buf);
1574     out.print_raw_cr("Dumping core ...");
1575 #endif
1576     ::abort(); // dump core (for debugging)
1577   }
1578 
1579   ::exit(1);
1580 }
1581 
1582 // Die immediately, no exit hook, no abort hook, no cleanup.
1583 void os::die() {
1584   ::abort(); // dump core (for debugging)
1585 }
1586 
1587 // DLL functions
1588 
1589 const char* os::dll_file_extension() { return ".so"; }
1590 
1591 // This must be hard coded because it's the system's temporary
1592 // directory, not the java application's temp directory, a la java.io.tmpdir.
1593 const char* os::get_temp_directory() { return "/tmp"; }
1594 
1595 static bool file_exists(const char* filename) {
1596   struct stat statbuf;
1597   if (filename == NULL || strlen(filename) == 0) {
1598     return false;
1599   }
1600   return os::stat(filename, &statbuf) == 0;
1601 }
1602 
1603 bool os::dll_build_name(char* buffer, size_t buflen,
1604                         const char* pname, const char* fname) {
1605   bool retval = false;
1606   const size_t pnamelen = pname ? strlen(pname) : 0;
1607 
1608   // Return error on buffer overflow.
1609   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1610     return retval;
1611   }
1612 
1613   if (pnamelen == 0) {
1614     snprintf(buffer, buflen, "lib%s.so", fname);
1615     retval = true;
1616   } else if (strchr(pname, *os::path_separator()) != NULL) {
1617     int n;
1618     char** pelements = split_path(pname, &n);
1619     if (pelements == NULL) {
1620       return false;
1621     }
1622     for (int i = 0 ; i < n ; i++) {
1623       // really shouldn't be NULL but what the heck, check can't hurt
1624       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1625         continue; // skip the empty path values
1626       }
1627       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1628       if (file_exists(buffer)) {
1629         retval = true;
1630         break;
1631       }
1632     }
1633     // release the storage
1634     for (int i = 0 ; i < n ; i++) {
1635       if (pelements[i] != NULL) {
1636         FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1637       }
1638     }
1639     if (pelements != NULL) {
1640       FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1641     }
1642   } else {
1643     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1644     retval = true;
1645   }
1646   return retval;
1647 }
1648 
1649 // check if addr is inside libjvm.so
1650 bool os::address_is_in_vm(address addr) {
1651   static address libjvm_base_addr;
1652   Dl_info dlinfo;
1653 
1654   if (libjvm_base_addr == NULL) {
1655     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1656       libjvm_base_addr = (address)dlinfo.dli_fbase;
1657     }
1658     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1659   }
1660 
1661   if (dladdr((void *)addr, &dlinfo) != 0) {
1662     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1663   }
1664 
1665   return false;
1666 }
1667 
1668 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
1669 static dladdr1_func_type dladdr1_func = NULL;
1670 
1671 bool os::dll_address_to_function_name(address addr, char *buf,
1672                                       int buflen, int * offset) {
1673   // buf is not optional, but offset is optional
1674   assert(buf != NULL, "sanity check");
1675 
1676   Dl_info dlinfo;
1677 
1678   // dladdr1_func was initialized in os::init()
1679   if (dladdr1_func != NULL) {
1680     // yes, we have dladdr1
1681 
1682     // Support for dladdr1 is checked at runtime; it may be
1683     // available even if the vm is built on a machine that does
1684     // not have dladdr1 support.  Make sure there is a value for
1685     // RTLD_DL_SYMENT.
1686     #ifndef RTLD_DL_SYMENT
1687     #define RTLD_DL_SYMENT 1
1688     #endif
1689 #ifdef _LP64
1690     Elf64_Sym * info;
1691 #else
1692     Elf32_Sym * info;
1693 #endif
1694     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1695                      RTLD_DL_SYMENT) != 0) {
1696       // see if we have a matching symbol that covers our address
1697       if (dlinfo.dli_saddr != NULL &&
1698           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1699         if (dlinfo.dli_sname != NULL) {
1700           if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1701             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1702           }
1703           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1704           return true;
1705         }
1706       }
1707       // no matching symbol so try for just file info
1708       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1709         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1710                             buf, buflen, offset, dlinfo.dli_fname)) {
1711           return true;
1712         }
1713       }
1714     }
1715     buf[0] = '\0';
1716     if (offset != NULL) *offset  = -1;
1717     return false;
1718   }
1719 
1720   // no, only dladdr is available
1721   if (dladdr((void *)addr, &dlinfo) != 0) {
1722     // see if we have a matching symbol
1723     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1724       if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1725         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1726       }
1727       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1728       return true;
1729     }
1730     // no matching symbol so try for just file info
1731     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1732       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1733                           buf, buflen, offset, dlinfo.dli_fname)) {
1734         return true;
1735       }
1736     }
1737   }
1738   buf[0] = '\0';
1739   if (offset != NULL) *offset  = -1;
1740   return false;
1741 }
1742 
1743 bool os::dll_address_to_library_name(address addr, char* buf,
1744                                      int buflen, int* offset) {
1745   // buf is not optional, but offset is optional
1746   assert(buf != NULL, "sanity check");
1747 
1748   Dl_info dlinfo;
1749 
1750   if (dladdr((void*)addr, &dlinfo) != 0) {
1751     if (dlinfo.dli_fname != NULL) {
1752       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1753     }
1754     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1755       *offset = addr - (address)dlinfo.dli_fbase;
1756     }
1757     return true;
1758   }
1759 
1760   buf[0] = '\0';
1761   if (offset) *offset = -1;
1762   return false;
1763 }
1764 
1765 // Prints the names and full paths of all opened dynamic libraries
1766 // for current process
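     // The implementation dlopen()s the module containing this function, asks
     // dlinfo(RTLD_DI_LINKMAP) for its Link_map, rewinds to the head of the
     // link-map list and walks it, printing each entry's base address and name.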
1767 void os::print_dll_info(outputStream * st) {
1768   Dl_info dli;
1769   void *handle;
1770   Link_map *map;
1771   Link_map *p;
1772 
1773   st->print_cr("Dynamic libraries:"); st->flush();
1774 
1775   if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
1776       dli.dli_fname == NULL) {
1777     st->print_cr("Error: Cannot print dynamic libraries.");
1778     return;
1779   }
1780   handle = dlopen(dli.dli_fname, RTLD_LAZY);
1781   if (handle == NULL) {
1782     st->print_cr("Error: Cannot print dynamic libraries.");
1783     return;
1784   }
1785   dlinfo(handle, RTLD_DI_LINKMAP, &map);
1786   if (map == NULL) {
1787     st->print_cr("Error: Cannot print dynamic libraries.");
1788     return;
1789   }
1790 
1791   while (map->l_prev != NULL)
1792     map = map->l_prev;
1793 
1794   while (map != NULL) {
1795     st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
1796     map = map->l_next;
1797   }
1798 
1799   dlclose(handle);
1800 }
1801 
1802 // Loads .dll/.so and, in case of error,
1803 // checks whether the .dll/.so was built for the
1804 // same architecture as Hotspot is running on.
1805 
1806 void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
1807 {
1808   void * result= ::dlopen(filename, RTLD_LAZY);
1809   if (result != NULL) {
1810     // Successful loading
1811     return result;
1812   }
1813 
1814   Elf32_Ehdr elf_head;
1815 
1816   // Read system error message into ebuf
1817   // It may or may not be overwritten below
1818   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1819   ebuf[ebuflen-1]='\0';
1820   int diag_msg_max_length=ebuflen-strlen(ebuf);
1821   char* diag_msg_buf=ebuf+strlen(ebuf);
1822 
1823   if (diag_msg_max_length==0) {
1824     // No more space in ebuf for additional diagnostics message
1825     return NULL;
1826   }
1827 
1828 
1829   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1830 
1831   if (file_descriptor < 0) {
1832     // Can't open library, report dlerror() message
1833     return NULL;
1834   }
1835 
1836   bool failed_to_read_elf_head=
1837     (sizeof(elf_head)!=
1838         (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;
1839 
1840   ::close(file_descriptor);
1841   if (failed_to_read_elf_head) {
1842     // file i/o error - report dlerror() msg
1843     return NULL;
1844   }
1845 
1846   typedef struct {
1847     Elf32_Half  code;         // Actual value as defined in elf.h
1848     Elf32_Half  compat_class; // Compatibility class of the arch from the VM's point of view
1849     char        elf_class;    // 32 or 64 bit
1850     char        endianess;    // MSB or LSB
1851     char*       name;         // String representation
1852   } arch_t;
1853 
1854   static const arch_t arch_array[]={
1855     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1856     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1857     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1858     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1859     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1860     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1861     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1862     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1863     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1864     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1865   };
1866 
1867   #if  (defined IA32)
1868     static  Elf32_Half running_arch_code=EM_386;
1869   #elif   (defined AMD64)
1870     static  Elf32_Half running_arch_code=EM_X86_64;
1871   #elif  (defined IA64)
1872     static  Elf32_Half running_arch_code=EM_IA_64;
1873   #elif  (defined __sparc) && (defined _LP64)
1874     static  Elf32_Half running_arch_code=EM_SPARCV9;
1875   #elif  (defined __sparc) && (!defined _LP64)
1876     static  Elf32_Half running_arch_code=EM_SPARC;
1877   #elif  (defined __powerpc64__)
1878     static  Elf32_Half running_arch_code=EM_PPC64;
1879   #elif  (defined __powerpc__)
1880     static  Elf32_Half running_arch_code=EM_PPC;
1881   #elif (defined ARM)
1882     static  Elf32_Half running_arch_code=EM_ARM;
1883   #else
1884     #error Method os::dll_load requires that one of the following is defined:\
1885          IA32, AMD64, IA64, __sparc, __powerpc__, __powerpc64__, ARM
1886   #endif
1887 
1888   // Identify compatibility class for VM's architecture and library's architecture
1889   // Obtain string descriptions for architectures
1890 
1891   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1892   int running_arch_index=-1;
1893 
1894   for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
1895     if (running_arch_code == arch_array[i].code) {
1896       running_arch_index    = i;
1897     }
1898     if (lib_arch.code == arch_array[i].code) {
1899       lib_arch.compat_class = arch_array[i].compat_class;
1900       lib_arch.name         = arch_array[i].name;
1901     }
1902   }
1903 
1904   assert(running_arch_index != -1,
1905     "Didn't find running architecture code (running_arch_code) in arch_array");
1906   if (running_arch_index == -1) {
1907     // Even though running architecture detection failed
1908     // we may still continue with reporting dlerror() message
1909     return NULL;
1910   }
1911 
1912   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1913     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1914     return NULL;
1915   }
1916 
1917   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1918     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1919     return NULL;
1920   }
1921 
1922   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1923     if ( lib_arch.name!=NULL ) {
1924       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1925         " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1926         lib_arch.name, arch_array[running_arch_index].name);
1927     } else {
1928       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1929       " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1930         lib_arch.code,
1931         arch_array[running_arch_index].name);
1932     }
1933   }
1934 
1935   return NULL;
1936 }
1937 
1938 void* os::dll_lookup(void* handle, const char* name) {
1939   return dlsym(handle, name);
1940 }
1941 
1942 void* os::get_default_process_handle() {
1943   return (void*)::dlopen(NULL, RTLD_LAZY);
1944 }
1945 
1946 int os::stat(const char *path, struct stat *sbuf) {
1947   char pathbuf[MAX_PATH];
1948   if (strlen(path) > MAX_PATH - 1) {
1949     errno = ENAMETOOLONG;
1950     return -1;
1951   }
1952   os::native_path(strcpy(pathbuf, path));
1953   return ::stat(pathbuf, sbuf);
1954 }
1955 
1956 static bool _print_ascii_file(const char* filename, outputStream* st) {
1957   int fd = ::open(filename, O_RDONLY);
1958   if (fd == -1) {
1959      return false;
1960   }
1961 
1962   char buf[32];
1963   int bytes;
1964   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
1965     st->print_raw(buf, bytes);
1966   }
1967 
1968   ::close(fd);
1969 
1970   return true;
1971 }
1972 
1973 void os::print_os_info_brief(outputStream* st) {
1974   os::Solaris::print_distro_info(st);
1975 
1976   os::Posix::print_uname_info(st);
1977 
1978   os::Solaris::print_libversion_info(st);
1979 }
1980 
1981 void os::print_os_info(outputStream* st) {
1982   st->print("OS:");
1983 
1984   os::Solaris::print_distro_info(st);
1985 
1986   os::Posix::print_uname_info(st);
1987 
1988   os::Solaris::print_libversion_info(st);
1989 
1990   os::Posix::print_rlimit_info(st);
1991 
1992   os::Posix::print_load_average(st);
1993 }
1994 
1995 void os::Solaris::print_distro_info(outputStream* st) {
1996   if (!_print_ascii_file("/etc/release", st)) {
1997     st->print("Solaris");
1998   }
1999   st->cr();
2000 }
2001 
2002 void os::Solaris::print_libversion_info(outputStream* st) {
2003   if (os::Solaris::T2_libthread()) {
2004     st->print("  (T2 libthread)");
2005   }
2006   else {
2007     st->print("  (T1 libthread)");
2008   }
2009   st->cr();
2010 }
2011 
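     // Scan /proc/self/map and report any mapping that starts at virtual
     // address 0; called below from os::print_memory_info() during fatal
     // error reporting.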
2012 static bool check_addr0(outputStream* st) {
2013   jboolean status = false;
2014   int fd = ::open("/proc/self/map",O_RDONLY);
2015   if (fd >= 0) {
2016     prmap_t p;
2017     while(::read(fd, &p, sizeof(p)) > 0) {
2018       if (p.pr_vaddr == 0x0) {
2019         st->print("Warning: Address: " PTR_FORMAT ", Size: " SIZE_FORMAT "K, ", p.pr_vaddr, p.pr_size/1024);
2020         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
2021         st->print("Access:");
2022         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
2023         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
2024         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
2025         st->cr();
2026         status = true;
2027       }
2028     }
2029     ::close(fd);
2030   }
2031   return status;
2032 }
2033 
2034 void os::pd_print_cpu_info(outputStream* st) {
2035   // Nothing to do for now.
2036 }
2037 
2038 void os::print_memory_info(outputStream* st) {
2039   st->print("Memory:");
2040   st->print(" %dk page", os::vm_page_size()>>10);
2041   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
2042   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
2043   st->cr();
2044   if (VMError::fatal_error_in_progress()){
2045      (void) check_addr0(st);
2046   }
2047 }
2048 
2049 void os::print_siginfo(outputStream* st, void* siginfo) {
2050   const siginfo_t* si = (const siginfo_t*)siginfo;
2051 
2052   os::Posix::print_siginfo_brief(st, si);
2053 
2054   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2055       UseSharedSpaces) {
2056     FileMapInfo* mapinfo = FileMapInfo::current_info();
2057     if (mapinfo->is_in_shared_space(si->si_addr)) {
2058       st->print("\n\nError accessing class data sharing archive."   \
2059                 " Mapped file inaccessible during execution, "      \
2060                 " possible disk/network problem.");
2061     }
2062   }
2063   st->cr();
2064 }
2065 
2066 // Moved up from the signal-handling code below, because we need them
2067 // here for diagnostic prints.
2068 #define OLDMAXSIGNUM 32
2069 static int Maxsignum = 0;
2070 static int *ourSigFlags = NULL;
2071 
2072 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2073 
2074 int os::Solaris::get_our_sigflags(int sig) {
2075   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2076   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2077   return ourSigFlags[sig];
2078 }
2079 
2080 void os::Solaris::set_our_sigflags(int sig, int flags) {
2081   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2082   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2083   ourSigFlags[sig] = flags;
2084 }
2085 
2086 
2087 static const char* get_signal_handler_name(address handler,
2088                                            char* buf, int buflen) {
2089   int offset;
2090   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2091   if (found) {
2092     // skip directory names
2093     const char *p1, *p2;
2094     p1 = buf;
2095     size_t len = strlen(os::file_separator());
2096     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2097     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2098   } else {
2099     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2100   }
2101   return buf;
2102 }
2103 
2104 static void print_signal_handler(outputStream* st, int sig,
2105                                   char* buf, size_t buflen) {
2106   struct sigaction sa;
2107 
2108   sigaction(sig, NULL, &sa);
2109 
2110   st->print("%s: ", os::exception_name(sig, buf, buflen));
2111 
2112   address handler = (sa.sa_flags & SA_SIGINFO)
2113                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2114                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
2115 
2116   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2117     st->print("SIG_DFL");
2118   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2119     st->print("SIG_IGN");
2120   } else {
2121     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2122   }
2123 
2124   st->print(", sa_mask[0]=");
2125   os::Posix::print_signal_set_short(st, &sa.sa_mask);
2126 
2127   address rh = VMError::get_resetted_sighandler(sig);
2128   // The handler may have been reset by VMError.
2129   if(rh != NULL) {
2130     handler = rh;
2131     sa.sa_flags = VMError::get_resetted_sigflags(sig);
2132   }
2133 
2134   st->print(", sa_flags=");
2135   os::Posix::print_sa_flags(st, sa.sa_flags);
2136 
2137   // Check: is it our handler?
2138   if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
2139      handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
2140     // It is our signal handler
2141     // check for flags
2142     if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
2143       st->print(
2144         ", flags were changed from " PTR32_FORMAT ", consider using jsig library",
2145         os::Solaris::get_our_sigflags(sig));
2146     }
2147   }
2148   st->cr();
2149 }
2150 
2151 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2152   st->print_cr("Signal Handlers:");
2153   print_signal_handler(st, SIGSEGV, buf, buflen);
2154   print_signal_handler(st, SIGBUS , buf, buflen);
2155   print_signal_handler(st, SIGFPE , buf, buflen);
2156   print_signal_handler(st, SIGPIPE, buf, buflen);
2157   print_signal_handler(st, SIGXFSZ, buf, buflen);
2158   print_signal_handler(st, SIGILL , buf, buflen);
2159   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2160   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2161   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2162   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2163   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2164   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2165   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2166   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2167 }
2168 
2169 static char saved_jvm_path[MAXPATHLEN] = { 0 };
2170 
2171 // Find the full path to the current module, libjvm.so
2172 void os::jvm_path(char *buf, jint buflen) {
2173   // Error checking.
2174   if (buflen < MAXPATHLEN) {
2175     assert(false, "must use a large-enough buffer");
2176     buf[0] = '\0';
2177     return;
2178   }
2179   // Lazy resolve the path to current module.
2180   if (saved_jvm_path[0] != 0) {
2181     strcpy(buf, saved_jvm_path);
2182     return;
2183   }
2184 
2185   Dl_info dlinfo;
2186   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2187   assert(ret != 0, "cannot locate libjvm");
2188   if (ret != 0 && dlinfo.dli_fname != NULL) {
2189     realpath((char *)dlinfo.dli_fname, buf);
2190   } else {
2191     buf[0] = '\0';
2192     return;
2193   }
2194 
2195   if (Arguments::created_by_gamma_launcher()) {
2196     // Support for the gamma launcher.  Typical value for buf is
2197     // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
2198     // the right place in the string, then assume we are installed in a JDK and
2199     // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
2200     // up the path so it looks like libjvm.so is installed there (append a
2201     // fake suffix hotspot/libjvm.so).
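         // Walk backwards over up to five path components so that p ends up
         // pointing at the candidate "/jre/lib/..." suffix checked below.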
2202     const char *p = buf + strlen(buf) - 1;
2203     for (int count = 0; p > buf && count < 5; ++count) {
2204       for (--p; p > buf && *p != '/'; --p)
2205         /* empty */ ;
2206     }
2207 
2208     if (strncmp(p, "/jre/lib/", 9) != 0) {
2209       // Look for JAVA_HOME in the environment.
2210       char* java_home_var = ::getenv("JAVA_HOME");
2211       if (java_home_var != NULL && java_home_var[0] != 0) {
2212         char cpu_arch[12];
2213         char* jrelib_p;
2214         int   len;
2215         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2216 #ifdef _LP64
2217         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2218         if (strcmp(cpu_arch, "sparc") == 0) {
2219           strcat(cpu_arch, "v9");
2220         } else if (strcmp(cpu_arch, "i386") == 0) {
2221           strcpy(cpu_arch, "amd64");
2222         }
2223 #endif
2224         // Check the current module name "libjvm.so".
2225         p = strrchr(buf, '/');
2226         assert(strstr(p, "/libjvm") == p, "invalid library name");
2227 
2228         realpath(java_home_var, buf);
2229         // determine if this is a legacy image or modules image
2230         // modules image doesn't have "jre" subdirectory
2231         len = strlen(buf);
2232         assert(len < buflen, "Ran out of buffer space");
2233         jrelib_p = buf + len;
2234         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2235         if (0 != access(buf, F_OK)) {
2236           snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2237         }
2238 
2239         if (0 == access(buf, F_OK)) {
2240           // Use current module name "libjvm.so"
2241           len = strlen(buf);
2242           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2243         } else {
2244           // Go back to path of .so
2245           realpath((char *)dlinfo.dli_fname, buf);
2246         }
2247       }
2248     }
2249   }
2250 
2251   strncpy(saved_jvm_path, buf, MAXPATHLEN);
2252 }
2253 
2254 
2255 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2256   // no prefix required, not even "_"
2257 }
2258 
2259 
2260 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2261   // no suffix required
2262 }
2263 
2264 // This method is a copy of JDK's sysGetLastErrorString
2265 // from src/solaris/hpi/src/system_md.c
2266 
2267 size_t os::lasterror(char *buf, size_t len) {
2268 
2269   if (errno == 0)  return 0;
2270 
2271   const char *s = ::strerror(errno);
2272   size_t n = ::strlen(s);
2273   if (n >= len) {
2274     n = len - 1;
2275   }
2276   ::strncpy(buf, s, n);
2277   buf[n] = '\0';
2278   return n;
2279 }
2280 
2281 
2282 // sun.misc.Signal
2283 
2284 extern "C" {
2285   static void UserHandler(int sig, void *siginfo, void *context) {
2286     // Ctrl-C is pressed during error reporting, likely because the error
2287     // handler fails to abort. Let VM die immediately.
2288     if (sig == SIGINT && is_error_reported()) {
2289        os::die();
2290     }
2291 
2292     os::signal_notify(sig);
2293     // We do not need to reinstate the signal handler each time...
2294   }
2295 }
2296 
2297 void* os::user_handler() {
2298   return CAST_FROM_FN_PTR(void*, UserHandler);
2299 }
2300 
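     // Simple counting semaphore built directly on the Solaris sema_t primitives.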
2301 class Semaphore : public StackObj {
2302   public:
2303     Semaphore();
2304     ~Semaphore();
2305     void signal();
2306     void wait();
2307     bool trywait();
2308     bool timedwait(unsigned int sec, int nsec);
2309   private:
2310     sema_t _semaphore;
2311 };
2312 
2313 
2314 Semaphore::Semaphore() {
2315   sema_init(&_semaphore, 0, NULL, NULL);
2316 }
2317 
2318 Semaphore::~Semaphore() {
2319   sema_destroy(&_semaphore);
2320 }
2321 
2322 void Semaphore::signal() {
2323   sema_post(&_semaphore);
2324 }
2325 
2326 void Semaphore::wait() {
2327   sema_wait(&_semaphore);
2328 }
2329 
2330 bool Semaphore::trywait() {
2331   return sema_trywait(&_semaphore) == 0;
2332 }
2333 
2334 bool Semaphore::timedwait(unsigned int sec, int nsec) {
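       // Build the timespec for the requested timeout and wait; retry if the
       // wait was interrupted (EINTR), otherwise give up (timeout or error).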
2335   struct timespec ts;
2336   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2337 
2338   while (1) {
2339     int result = sema_timedwait(&_semaphore, &ts);
2340     if (result == 0) {
2341       return true;
2342     } else if (errno == EINTR) {
2343       continue;
2344     } else if (errno == ETIME) {
2345       return false;
2346     } else {
2347       return false;
2348     }
2349   }
2350 }
2351 
2352 extern "C" {
2353   typedef void (*sa_handler_t)(int);
2354   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2355 }
2356 
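     // Install the given handler for signal_number with SA_RESTART semantics
     // and return the previously installed handler, or (void*)-1 on failure.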
2357 void* os::signal(int signal_number, void* handler) {
2358   struct sigaction sigAct, oldSigAct;
2359   sigfillset(&(sigAct.sa_mask));
2360   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2361   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2362 
2363   if (sigaction(signal_number, &sigAct, &oldSigAct))
2364     // -1 means registration failed
2365     return (void *)-1;
2366 
2367   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2368 }
2369 
2370 void os::signal_raise(int signal_number) {
2371   raise(signal_number);
2372 }
2373 
2374 /*
2375  * The following code was moved from os.cpp to make it
2376  * platform specific, which it is by its very nature.
2377  */
2378 
2379 // a counter for each possible signal value
2380 static int Sigexit = 0;
2381 static int Maxlibjsigsigs;
2382 static jint *pending_signals = NULL;
2383 static int *preinstalled_sigs = NULL;
2384 static struct sigaction *chainedsigactions = NULL;
2385 static sema_t sig_sem;
2386 typedef int (*version_getting_t)();
2387 version_getting_t os::Solaris::get_libjsig_version = NULL;
2388 static int libjsigversion = 0;
2389 
2390 int os::sigexitnum_pd() {
2391   assert(Sigexit > 0, "signal memory not yet initialized");
2392   return Sigexit;
2393 }
2394 
2395 void os::Solaris::init_signal_mem() {
2396   // Initialize signal structures
2397   Maxsignum = SIGRTMAX;
2398   Sigexit = Maxsignum+1;
2399   assert(Maxsignum >0, "Unable to obtain max signal number");
2400 
2401   Maxlibjsigsigs = Maxsignum;
2402 
2403   // pending_signals has one int per signal
2404   // The additional signal is for SIGEXIT - exit signal to signal_thread
2405   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2406   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2407 
2408   if (UseSignalChaining) {
2409      chainedsigactions = (struct sigaction *)os::malloc(sizeof(struct sigaction)
2410        * (Maxsignum + 1), mtInternal);
2411      memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2412      preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2413      memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2414   }
2415   ourSigFlags = (int*)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2416   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2417 }
2418 
2419 void os::signal_init_pd() {
2420   int ret;
2421 
2422   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2423   assert(ret == 0, "sema_init() failed");
2424 }
2425 
2426 void os::signal_notify(int signal_number) {
2427   int ret;
2428 
2429   Atomic::inc(&pending_signals[signal_number]);
2430   ret = ::sema_post(&sig_sem);
2431   assert(ret == 0, "sema_post() failed");
2432 }
2433 
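     // Scan the per-signal pending counters and atomically claim one occurrence
     // via cmpxchg. If wait_for_signal is true, block on sig_sem until a signal
     // is posted, re-posting the semaphore if the thread was externally
     // suspended while it was waiting.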
2434 static int check_pending_signals(bool wait_for_signal) {
2435   int ret;
2436   while (true) {
2437     for (int i = 0; i < Sigexit + 1; i++) {
2438       jint n = pending_signals[i];
2439       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2440         return i;
2441       }
2442     }
2443     if (!wait_for_signal) {
2444       return -1;
2445     }
2446     JavaThread *thread = JavaThread::current();
2447     ThreadBlockInVM tbivm(thread);
2448 
2449     bool threadIsSuspended;
2450     do {
2451       thread->set_suspend_equivalent();
2452       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2453       while((ret = ::sema_wait(&sig_sem)) == EINTR)
2454           ;
2455       assert(ret == 0, "sema_wait() failed");
2456 
2457       // were we externally suspended while we were waiting?
2458       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2459       if (threadIsSuspended) {
2460         //
2461         // The semaphore has been incremented, but while we were waiting
2462         // another thread suspended us. We don't want to continue running
2463         // while suspended because that would surprise the thread that
2464         // suspended us.
2465         //
2466         ret = ::sema_post(&sig_sem);
2467         assert(ret == 0, "sema_post() failed");
2468 
2469         thread->java_suspend_self();
2470       }
2471     } while (threadIsSuspended);
2472   }
2473 }
2474 
2475 int os::signal_lookup() {
2476   return check_pending_signals(false);
2477 }
2478 
2479 int os::signal_wait() {
2480   return check_pending_signals(true);
2481 }
2482 
2483 ////////////////////////////////////////////////////////////////////////////////
2484 // Virtual Memory
2485 
2486 static int page_size = -1;
2487 
2488 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2489 // clear this var if support is not available.
2490 static bool has_map_align = true;
2491 
2492 int os::vm_page_size() {
2493   assert(page_size != -1, "must call os::init");
2494   return page_size;
2495 }
2496 
2497 // Solaris allocates memory by pages.
2498 int os::vm_allocation_granularity() {
2499   assert(page_size != -1, "must call os::init");
2500   return page_size;
2501 }
2502 
2503 static bool recoverable_mmap_error(int err) {
2504   // See if the error is one we can let the caller handle. This
2505   // list of errno values comes from the Solaris mmap(2) man page.
2506   switch (err) {
2507   case EBADF:
2508   case EINVAL:
2509   case ENOTSUP:
2510     // let the caller deal with these errors
2511     return true;
2512 
2513   default:
2514     // Any remaining errors on this OS can cause our reserved mapping
2515     // to be lost. That can cause confusion where different data
2516     // structures think they have the same memory mapped. The worst
2517     // scenario is if both the VM and a library think they have the
2518     // same memory mapped.
2519     return false;
2520   }
2521 }
2522 
2523 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2524                                     int err) {
2525   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2526           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2527           strerror(err), err);
2528 }
2529 
2530 static void warn_fail_commit_memory(char* addr, size_t bytes,
2531                                     size_t alignment_hint, bool exec,
2532                                     int err) {
2533   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2534           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2535           alignment_hint, exec, strerror(err), err);
2536 }
2537 
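     // Commit previously reserved pages by mmap'ing them again with MAP_FIXED
     // and read/write (plus execute if requested) protection; reserved-but-
     // uncommitted pages were mapped PROT_NONE by anon_mmap() below.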
2538 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2539   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2540   size_t size = bytes;
2541   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2542   if (res != NULL) {
2543     if (UseNUMAInterleaving) {
2544       numa_make_global(addr, bytes);
2545     }
2546     return 0;
2547   }
2548 
2549   int err = errno;  // save errno from mmap() call in mmap_chunk()
2550 
2551   if (!recoverable_mmap_error(err)) {
2552     warn_fail_commit_memory(addr, bytes, exec, err);
2553     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2554   }
2555 
2556   return err;
2557 }
2558 
2559 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2560   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2561 }
2562 
2563 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2564                                   const char* mesg) {
2565   assert(mesg != NULL, "mesg must be specified");
2566   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2567   if (err != 0) {
2568     // the caller wants all commit errors to exit with the specified mesg:
2569     warn_fail_commit_memory(addr, bytes, exec, err);
2570     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2571   }
2572 }
2573 
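     // Return the first entry in _page_sizes that evenly divides the requested
     // alignment, falling back to the base page size if none does.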
2574 size_t os::Solaris::page_size_for_alignment(size_t alignment) {
2575   assert(is_size_aligned(alignment, (size_t) vm_page_size()),
2576          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
2577                  alignment, (size_t) vm_page_size()));
2578 
2579   for (int i = 0; _page_sizes[i] != 0; i++) {
2580     if (is_size_aligned(alignment, _page_sizes[i])) {
2581       return _page_sizes[i];
2582     }
2583   }
2584 
2585   return (size_t) vm_page_size();
2586 }
2587 
2588 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2589                                     size_t alignment_hint, bool exec) {
2590   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2591   if (err == 0 && UseLargePages && alignment_hint > 0) {
2592     assert(is_size_aligned(bytes, alignment_hint),
2593            err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint));
2594 
2595     // The syscall memcntl requires an exact page size (see man memcntl for details).
2596     size_t page_size = page_size_for_alignment(alignment_hint);
2597     if (page_size > (size_t) vm_page_size()) {
2598       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2599     }
2600   }
2601   return err;
2602 }
2603 
2604 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2605                           bool exec) {
2606   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2607 }
2608 
2609 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2610                                   size_t alignment_hint, bool exec,
2611                                   const char* mesg) {
2612   assert(mesg != NULL, "mesg must be specified");
2613   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2614   if (err != 0) {
2615     // the caller wants all commit errors to exit with the specified mesg:
2616     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2617     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2618   }
2619 }
2620 
2621 // Uncommit the pages in a specified region.
2622 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2623   if (madvise(addr, bytes, MADV_FREE) < 0) {
2624     debug_only(warning("MADV_FREE failed."));
2625     return;
2626   }
2627 }
2628 
2629 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2630   return os::commit_memory(addr, size, !ExecMem);
2631 }
2632 
2633 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2634   return os::uncommit_memory(addr, size);
2635 }
2636 
2637 // Change the page size in a given range.
2638 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2639   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2640   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2641   if (UseLargePages) {
2642     Solaris::setup_large_pages(addr, bytes, alignment_hint);
2643   }
2644 }
2645 
2646 // Tell the OS to make the range local to the first-touching LWP
2647 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2648   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2649   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2650     debug_only(warning("MADV_ACCESS_LWP failed."));
2651   }
2652 }
2653 
2654 // Tell the OS that this range would be accessed from different LWPs.
2655 void os::numa_make_global(char *addr, size_t bytes) {
2656   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2657   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2658     debug_only(warning("MADV_ACCESS_MANY failed."));
2659   }
2660 }
2661 
2662 // Get the number of the locality groups.
2663 size_t os::numa_get_groups_num() {
2664   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2665   return n != -1 ? n : 1;
2666 }
2667 
2668 // Get a list of leaf locality groups. A leaf lgroup is a group that
2669 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
2670 // board. An LWP is assigned to one of these groups upon creation.
2671 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2672    if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2673      ids[0] = 0;
2674      return 1;
2675    }
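        // Walk the lgroup hierarchy using ids[] as a work queue: entries in
        // [cur, top) are still to be examined, and leaves that have memory
        // are compacted into [0, bottom).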
2676    int result_size = 0, top = 1, bottom = 0, cur = 0;
2677    for (int k = 0; k < size; k++) {
2678      int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2679                                     (Solaris::lgrp_id_t*)&ids[top], size - top);
2680      if (r == -1) {
2681        ids[0] = 0;
2682        return 1;
2683      }
2684      if (!r) {
2685        // That's a leaf node.
2686        assert (bottom <= cur, "Sanity check");
2687        // Check if the node has memory
2688        if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2689                                    NULL, 0, LGRP_RSRC_MEM) > 0) {
2690          ids[bottom++] = ids[cur];
2691        }
2692      }
2693      top += r;
2694      cur++;
2695    }
2696    if (bottom == 0) {
2697      // Handle the situation when the OS reports no memory available.
2698      // Assume UMA architecture.
2699      ids[0] = 0;
2700      return 1;
2701    }
2702    return bottom;
2703 }
2704 
2705 // Detect the topology change. Typically happens during CPU plugging-unplugging.
2706 bool os::numa_topology_changed() {
2707   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2708   if (is_stale != -1 && is_stale) {
2709     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2710     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2711     assert(c != 0, "Failure to initialize LGRP API");
2712     Solaris::set_lgrp_cookie(c);
2713     return true;
2714   }
2715   return false;
2716 }
2717 
2718 // Get the group id of the current LWP.
2719 int os::numa_get_group_id() {
2720   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2721   if (lgrp_id == -1) {
2722     return 0;
2723   }
2724   const int size = os::numa_get_groups_num();
2725   int *ids = (int*)alloca(size * sizeof(int));
2726 
2727   // Get the ids of all lgroups with memory; r is the count.
2728   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2729                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2730   if (r <= 0) {
2731     return 0;
2732   }
2733   return ids[os::random() % r];
2734 }
2735 
2736 // Request information about the page.
2737 bool os::get_page_info(char *start, page_info* info) {
2738   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2739   uint64_t addr = (uintptr_t)start;
2740   uint64_t outdata[2];
2741   uint_t validity = 0;
2742 
2743   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2744     return false;
2745   }
2746 
2747   info->size = 0;
2748   info->lgrp_id = -1;
2749 
2750   if ((validity & 1) != 0) {
2751     if ((validity & 2) != 0) {
2752       info->lgrp_id = outdata[0];
2753     }
2754     if ((validity & 4) != 0) {
2755       info->size = outdata[1];
2756     }
2757     return true;
2758   }
2759   return false;
2760 }
2761 
2762 // Scan the pages from start to end until a page different than
2763 // the one described in the info parameter is encountered.
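     // Addresses are queried in batches of up to MAX_MEMINFO_CNT per
     // meminfo() call, stepping by the larger of the system page size and
     // the expected page size.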
2764 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2765   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2766   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2767   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2768   uint_t validity[MAX_MEMINFO_CNT];
2769 
2770   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2771   uint64_t p = (uint64_t)start;
2772   while (p < (uint64_t)end) {
2773     addrs[0] = p;
2774     size_t addrs_count = 1;
2775     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2776       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2777       addrs_count++;
2778     }
2779 
2780     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2781       return NULL;
2782     }
2783 
2784     size_t i = 0;
2785     for (; i < addrs_count; i++) {
2786       if ((validity[i] & 1) != 0) {
2787         if ((validity[i] & 4) != 0) {
2788           if (outdata[types * i + 1] != page_expected->size) {
2789             break;
2790           }
2791         } else
2792           if (page_expected->size != 0) {
2793             break;
2794           }
2795 
2796         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2797           if (outdata[types * i] != page_expected->lgrp_id) {
2798             break;
2799           }
2800         }
2801       } else {
2802         return NULL;
2803       }
2804     }
2805 
2806     if (i < addrs_count) {
2807       if ((validity[i] & 2) != 0) {
2808         page_found->lgrp_id = outdata[types * i];
2809       } else {
2810         page_found->lgrp_id = -1;
2811       }
2812       if ((validity[i] & 4) != 0) {
2813         page_found->size = outdata[types * i + 1];
2814       } else {
2815         page_found->size = 0;
2816       }
2817       return (char*)addrs[i];
2818     }
2819 
2820     p = addrs[addrs_count - 1] + page_size;
2821   }
2822   return end;
2823 }
2824 
2825 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2826   size_t size = bytes;
2827   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2828   // uncommitted page. Otherwise, the read/write might succeed if we
2829   // have enough swap space to back the physical page.
2830   return
2831     NULL != Solaris::mmap_chunk(addr, size,
2832                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2833                                 PROT_NONE);
2834 }
2835 
2836 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2837   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2838 
2839   if (b == MAP_FAILED) {
2840     return NULL;
2841   }
2842   return b;
2843 }
2844 
2845 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
2846   char* addr = requested_addr;
2847   int flags = MAP_PRIVATE | MAP_NORESERVE;
2848 
2849   assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
2850 
2851   if (fixed) {
2852     flags |= MAP_FIXED;
2853   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
2854     flags |= MAP_ALIGN;
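         // With MAP_ALIGN, mmap(2) treats the address argument as the required
         // alignment rather than as a placement hint.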
2855     addr = (char*) alignment_hint;
2856   }
2857 
2858   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2859   // uncommitted page. Otherwise, the read/write might succeed if we
2860   // have enough swap space to back the physical page.
2861   return mmap_chunk(addr, bytes, flags, PROT_NONE);
2862 }
2863 
2864 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2865   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
2866 
2867   guarantee(requested_addr == NULL || requested_addr == addr,
2868             "OS failed to return requested mmap address.");
2869   return addr;
2870 }
2871 
2872 // Reserve memory at an arbitrary address, only if that area is
2873 // available (and not reserved for something else).
2874 
2875 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2876   const int max_tries = 10;
2877   char* base[max_tries];
2878   size_t size[max_tries];
2879 
2880   // Solaris adds a gap between mmap'ed regions.  The size of the gap
2881   // is dependent on the requested size and the MMU.  Our initial gap
2882   // value here is just a guess and will be corrected later.
2883   bool had_top_overlap = false;
2884   bool have_adjusted_gap = false;
2885   size_t gap = 0x400000;
2886 
2887   // Assert only that the size is a multiple of the page size, since
2888   // that's all that mmap requires, and since that's all we really know
2889   // about at this low abstraction level.  If we need higher alignment,
2890   // we can either pass an alignment to this method or verify alignment
2891   // in one of the methods further up the call chain.  See bug 5044738.
2892   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2893 
2894   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2895   // Give it a try, if the kernel honors the hint we can return immediately.
2896   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2897 
2898   volatile int err = errno;
2899   if (addr == requested_addr) {
2900     return addr;
2901   } else if (addr != NULL) {
2902     pd_unmap_memory(addr, bytes);
2903   }
2904 
2905   if (PrintMiscellaneous && Verbose) {
2906     char buf[256];
2907     buf[0] = '\0';
2908     if (addr == NULL) {
2909       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
2910     }
2911     warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2912             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2913             "%s", bytes, requested_addr, addr, buf);
2914   }
2915 
2916   // Address hint method didn't work.  Fall back to the old method.
2917   // In theory, once SNV becomes our oldest supported platform, this
2918   // code will no longer be needed.
2919   //
2920   // Repeatedly allocate blocks until the block is allocated at the
2921   // right spot. Give up after max_tries.
2922   int i;
2923   for (i = 0; i < max_tries; ++i) {
2924     base[i] = reserve_memory(bytes);
2925 
2926     if (base[i] != NULL) {
2927       // Is this the block we wanted?
2928       if (base[i] == requested_addr) {
2929         size[i] = bytes;
2930         break;
2931       }
2932 
2933       // check that the gap value is right
2934       if (had_top_overlap && !have_adjusted_gap) {
2935         size_t actual_gap = base[i-1] - base[i] - bytes;
2936         if (gap != actual_gap) {
2937           // adjust the gap value and retry the last 2 allocations
2938           assert(i > 0, "gap adjustment code problem");
2939           have_adjusted_gap = true;  // adjust the gap only once, just in case
2940           gap = actual_gap;
2941           if (PrintMiscellaneous && Verbose) {
2942             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2943           }
2944           unmap_memory(base[i], bytes);
2945           unmap_memory(base[i-1], size[i-1]);
2946           i-=2;
2947           continue;
2948         }
2949       }
2950 
2951       // Does this overlap the block we wanted? Give back the overlapped
2952       // parts and try again.
2953       //
2954       // There is still a bug in this code: if top_overlap == bytes,
2955       // the overlap is offset from the requested region by the value of gap.
2956       // In this case giving back the overlapped part will not work,
2957       // because we'll give back the entire block at base[i] and
2958       // therefore the subsequent allocation will not generate a new gap.
2959       // This could be fixed with a new algorithm that used larger
2960       // or variable size chunks to find the requested region -
2961       // but such a change would introduce additional complications.
2962       // It's rare enough that the planets align for this bug,
2963       // so we'll just wait for a fix for 6204603/5003415 which
2964       // will provide a mmap flag to allow us to avoid this business.
2965 
2966       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2967       if (top_overlap >= 0 && top_overlap < bytes) {
2968         had_top_overlap = true;
2969         unmap_memory(base[i], top_overlap);
2970         base[i] += top_overlap;
2971         size[i] = bytes - top_overlap;
2972       } else {
2973         size_t bottom_overlap = base[i] + bytes - requested_addr;
2974         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2975           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
2976             warning("attempt_reserve_memory_at: possible alignment bug");
2977           }
2978           unmap_memory(requested_addr, bottom_overlap);
2979           size[i] = bytes - bottom_overlap;
2980         } else {
2981           size[i] = bytes;
2982         }
2983       }
2984     }
2985   }
2986 
2987   // Give back the unused reserved pieces.
2988 
2989   for (int j = 0; j < i; ++j) {
2990     if (base[j] != NULL) {
2991       unmap_memory(base[j], size[j]);
2992     }
2993   }
2994 
2995   return (i < max_tries) ? requested_addr : NULL;
2996 }
2997 
2998 bool os::pd_release_memory(char* addr, size_t bytes) {
2999   size_t size = bytes;
3000   return munmap(addr, size) == 0;
3001 }
3002 
3003 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
3004   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
3005          "addr must be page aligned");
3006   int retVal = mprotect(addr, bytes, prot);
3007   return retVal == 0;
3008 }
3009 
3010 // Protect memory (Used to pass readonly pages through
3011 // JNI GetArray<type>Elements with empty arrays.)
3012 // Also, used for serialization page and for compressed oops null pointer
3013 // checking.
3014 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3015                         bool is_committed) {
3016   unsigned int p = 0;
3017   switch (prot) {
3018   case MEM_PROT_NONE: p = PROT_NONE; break;
3019   case MEM_PROT_READ: p = PROT_READ; break;
3020   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
3021   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3022   default:
3023     ShouldNotReachHere();
3024   }
3025   // is_committed is unused.
3026   return solaris_mprotect(addr, bytes, p);
3027 }
3028 
3029 // guard_memory and unguard_memory only happen within stack guard pages.
3030 // Since ISM pertains only to the heap, guard and unguard memory should not
3031 // happen with an ISM region.
3032 bool os::guard_memory(char* addr, size_t bytes) {
3033   return solaris_mprotect(addr, bytes, PROT_NONE);
3034 }
3035 
3036 bool os::unguard_memory(char* addr, size_t bytes) {
3037   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
3038 }
3039 
3040 // Large page support
3041 static size_t _large_page_size = 0;
3042 
3043 // Insertion sort for small arrays (descending order).
3044 static void insertion_sort_descending(size_t* array, int len) {
3045   for (int i = 0; i < len; i++) {
3046     size_t val = array[i];
3047     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
3048       size_t tmp = array[key];
3049       array[key] = array[key - 1];
3050       array[key - 1] = tmp;
3051     }
3052   }
3053 }
3054 
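     // Probe the kernel for the set of supported page sizes and decide whether
     // MPSS (multiple page size support) can be used.  On success the largest
     // usable size is returned through *page_size.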
3055 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
3056   const unsigned int usable_count = VM_Version::page_size_count();
3057   if (usable_count == 1) {
3058     return false;
3059   }
3060 
3061   // Find the right getpagesizes interface.  When Solaris 11 is the minimum
3062   // build platform, getpagesizes() (without the '2') can be called directly.
3063   typedef int (*gps_t)(size_t[], int);
3064   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3065   if (gps_func == NULL) {
3066     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3067     if (gps_func == NULL) {
3068       if (warn) {
3069         warning("MPSS is not supported by the operating system.");
3070       }
3071       return false;
3072     }
3073   }
3074 
3075   // Fill the array of page sizes.
3076   int n = (*gps_func)(_page_sizes, page_sizes_max);
3077   assert(n > 0, "Solaris bug?");
3078 
3079   if (n == page_sizes_max) {
3080     // Add a sentinel value (necessary only if the array was completely filled
3081     // since it is static (zeroed at initialization)).
3082     _page_sizes[--n] = 0;
3083     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3084   }
3085   assert(_page_sizes[n] == 0, "missing sentinel");
3086   trace_page_sizes("available page sizes", _page_sizes, n);
3087 
3088   if (n == 1) return false;     // Only one page size available.
3089 
3090   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3091   // select up to usable_count elements.  First sort the array, find the first
3092   // acceptable value, then copy the usable sizes to the top of the array and
3093   // trim the rest.  Make sure to include the default page size :-).
3094   //
3095   // A better policy could get rid of the 4M limit by taking the sizes of the
3096   // important VM memory regions (java heap and possibly the code cache) into
3097   // account.
3098   insertion_sort_descending(_page_sizes, n);
3099   const size_t size_limit =
3100     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3101   int beg;
3102   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
3103   const int end = MIN2((int)usable_count, n) - 1;
3104   for (int cur = 0; cur < end; ++cur, ++beg) {
3105     _page_sizes[cur] = _page_sizes[beg];
3106   }
3107   _page_sizes[end] = vm_page_size();
3108   _page_sizes[end + 1] = 0;
3109 
3110   if (_page_sizes[end] > _page_sizes[end - 1]) {
3111     // Default page size is not the smallest; sort again.
3112     insertion_sort_descending(_page_sizes, end + 1);
3113   }
3114   *page_size = _page_sizes[0];
3115 
3116   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3117   return true;
3118 }
3119 
3120 void os::large_page_init() {
3121   if (UseLargePages) {
3122     // print a warning if any large page related flag is specified on command line
3123     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
3124                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3125 
3126     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3127   }
3128 }
3129 
3130 bool os::Solaris::is_valid_page_size(size_t bytes) {
3131   for (int i = 0; _page_sizes[i] != 0; i++) {
3132     if (_page_sizes[i] == bytes) {
3133       return true;
3134     }
3135   }
3136   return false;
3137 }
3138 
3139 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3140   assert(is_valid_page_size(align), err_msg(SIZE_FORMAT " is not a valid page size", align));
3141   assert(is_ptr_aligned((void*) start, align),
3142          err_msg(PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align));
3143   assert(is_size_aligned(bytes, align),
3144          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align));
3145 
3146   // Signal to the OS that we want large pages for addresses
3147   // from addr to addr + bytes.
3148   struct memcntl_mha mpss_struct;
3149   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3150   mpss_struct.mha_pagesize = align;
3151   mpss_struct.mha_flags = 0;
3152   // Upon successful completion, memcntl() returns 0
3153   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3154     debug_only(warning("Attempt to use MPSS failed."));
3155     return false;
3156   }
3157   return true;
3158 }
3159 
3160 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3161   fatal("os::reserve_memory_special should not be called on Solaris.");
3162   return NULL;
3163 }
3164 
3165 bool os::release_memory_special(char* base, size_t bytes) {
3166   fatal("os::release_memory_special should not be called on Solaris.");
3167   return false;
3168 }
3169 
3170 size_t os::large_page_size() {
3171   return _large_page_size;
3172 }
3173 
3174 // MPSS allows application to commit large page memory on demand; with ISM
3175 // the entire memory region must be allocated as shared memory.
3176 bool os::can_commit_large_page_memory() {
3177   return true;
3178 }
3179 
3180 bool os::can_execute_large_page_memory() {
3181   return true;
3182 }
3183 
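     // Sleep for "millis" milliseconds using poll().  When "interruptible" is
     // true the call participates in the VM's thread-interrupt protocol and may
     // return OS_INTRPT early; otherwise it returns OS_OK once the full delay
     // has elapsed, or OS_ERR on failure.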
3184 static int os_sleep(jlong millis, bool interruptible) {
3185   const jlong limit = INT_MAX;
3186   jlong prevtime;
3187   int res;
3188 
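       // poll(2) takes an int timeout in milliseconds, so sleeps longer than
       // INT_MAX ms are broken into chunks.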
3189   while (millis > limit) {
3190     if ((res = os_sleep(limit, interruptible)) != OS_OK)
3191       return res;
3192     millis -= limit;
3193   }
3194 
3195   // Restart interrupted polls with new parameters until the proper delay
3196   // has been completed.
3197 
3198   prevtime = getTimeMillis();
3199 
3200   while (millis > 0) {
3201     jlong newtime;
3202 
3203     if (!interruptible) {
3204       // The following assert fails for os::yield_all:
3205       // assert(!thread->is_Java_thread(), "must not be java thread");
3206       res = poll(NULL, 0, millis);
3207     } else {
3208       JavaThread *jt = JavaThread::current();
3209 
3210       INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
3211         os::Solaris::clear_interrupted);
3212     }
3213 
3214     // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
3215     // Thread.interrupt().
3216 
3217     // See c/r 6751923. Poll can return 0 before the time
3218     // has elapsed if the time is set via clock_settime (as NTP does).
3219     // res == 0 if poll timed out (see man poll RETURN VALUES);
3220     // the logic below checks that we really did sleep for at least
3221     // "millis"; if not, we'll sleep again.
3222     if ((res == 0) || ((res == OS_ERR) && (errno == EINTR))) {
3223       newtime = getTimeMillis();
3224       assert(newtime >= prevtime, "time moving backwards");
3225     /* Doing prevtime and newtime in microseconds doesn't help precision,
3226        and trying to round up to avoid lost milliseconds can result in a
3227        too-short delay. */
3228       millis -= newtime - prevtime;
3229       if (millis <= 0)
3230         return OS_OK;
3231       prevtime = newtime;
3232     } else
3233       return res;
3234   }
3235 
3236   return OS_OK;
3237 }
3238 
3239 // Read calls from inside the vm need to perform state transitions
3240 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3241   INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
3242 }
3243 
3244 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3245   INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
3246 }
3247 
3248 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
3249   assert(thread == Thread::current(),  "thread consistency check");
3250 
3251   // TODO-FIXME: this should be removed.
3252   // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a livelock
3253   // situation with a JavaThread being starved out of an lwp. The kernel doesn't seem to generate
3254   // a SIGWAITING signal, which would enable the threads library to create a new lwp for the starving
3255   // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
3256   // is fooled into believing that the system is making progress. In the code below we block the
3257   // watcher thread while a safepoint is in progress so that it does not appear as though the
3258   // system is making progress.
3259   if (!Solaris::T2_libthread() &&
3260       thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
3261     // We now try to acquire the threads lock. Since this lock is held by the VM thread during
3262     // the entire safepoint, the watcher thread will line up here during the safepoint.
3263     Threads_lock->lock_without_safepoint_check();
3264     Threads_lock->unlock();
3265   }
3266 
3267   if (thread->is_Java_thread()) {
3268     // This is a JavaThread so we honor the _thread_blocked protocol
3269     // even for sleeps of 0 milliseconds. This was originally done
3270     // as a workaround for bug 4338139. However, now we also do it
3271     // to honor the suspend-equivalent protocol.
3272 
3273     JavaThread *jt = (JavaThread *) thread;
3274     ThreadBlockInVM tbivm(jt);
3275 
3276     jt->set_suspend_equivalent();
3277     // cleared by handle_special_suspend_equivalent_condition() or
3278     // java_suspend_self() via check_and_wait_while_suspended()
3279 
3280     int ret_code;
3281     if (millis <= 0) {
3282       thr_yield();
3283       ret_code = 0;
3284     } else {
3285       // The original sleep() implementation did not create an
3286       // OSThreadWaitState helper for sleeps of 0 milliseconds.
3287       // I'm preserving that decision for now.
3288       OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
3289 
3290       ret_code = os_sleep(millis, interruptible);
3291     }
3292 
3293     // were we externally suspended while we were waiting?
3294     jt->check_and_wait_while_suspended();
3295 
3296     return ret_code;
3297   }
3298 
3299   // non-JavaThread from this point on:
3300 
3301   if (millis <= 0) {
3302     thr_yield();
3303     return 0;
3304   }
3305 
3306   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
3307 
3308   return os_sleep(millis, interruptible);
3309 }
3310 
3311 void os::naked_short_sleep(jlong ms) {
3312   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3313 
3314   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3315   // Solaris requires -lrt for this.
3316   usleep((ms * 1000));
3317 
3318   return;
3319 }
3320 
3321 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3322 void os::infinite_sleep() {
3323   while (true) {    // sleep forever ...
3324     ::sleep(100);   // ... 100 seconds at a time
3325   }
3326 }
3327 
3328 // Used to convert frequent JVM_Yield() to nops
3329 bool os::dont_yield() {
3330   if (DontYieldALot) {
3331     static hrtime_t last_time = 0;
3332     hrtime_t diff = getTimeNanos() - last_time;
3333 
3334     if (diff < DontYieldALotInterval * 1000000)
3335       return true;
3336 
3337     last_time += diff;
3338 
3339     return false;
3340   }
3341   else {
3342     return false;
3343   }
3344 }
3345 
3346 // Caveat: Solaris os::yield() causes a thread-state transition whereas
3347 // the linux and win32 implementations do not.  This should be checked.
3348 
3349 void os::yield() {
3350   // Yields to all threads with same or greater priority
3351   os::sleep(Thread::current(), 0, false);
3352 }
3353 
3354 // Note that yield semantics are defined by the scheduling class to which
3355 // the thread currently belongs.  Typically, yield will not yield to
3356 // other equal or higher priority threads that reside on the dispatch queues
3357 // of other CPUs.
3358 
3359 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
3360 
3361 
3362 // On Solaris we found that yield_all doesn't always yield to all other threads.
3363 // There have been cases where there is a thread ready to execute but it doesn't
3364 // get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
3365 // The 1 millisecond wait doesn't seem long enough for the kernel to issue a
3366 // SIGWAITING signal which will cause a new lwp to be created. So we count the
3367 // number of times yield_all is called in the one loop and increase the sleep
3368 // time after 8 attempts. If this fails too, we increase the concurrency level
3369 // so that the starving thread gets an lwp.
3370 
3371 void os::yield_all(int attempts) {
3372   // Yields to all threads, including threads with lower priorities
3373   if (attempts == 0) {
3374     os::sleep(Thread::current(), 1, false);
3375   } else {
3376     int iterations = attempts % 30;
3377     if (iterations == 0 && !os::Solaris::T2_libthread()) {
3378       // thr_setconcurrency and _getconcurrency make sense only under T1.
3379       int noofLWPS = thr_getconcurrency();
3380       if (noofLWPS < (Threads::number_of_threads() + 2)) {
3381         thr_setconcurrency(thr_getconcurrency() + 1);
3382       }
3383     } else if (iterations < 25) {
3384       os::sleep(Thread::current(), 1, false);
3385     } else {
3386       os::sleep(Thread::current(), 10, false);
3387     }
3388   }
3389 }
3390 
3391 // Called from the tight loops to possibly influence time-sharing heuristics
3392 void os::loop_breaker(int attempts) {
3393   os::yield_all(attempts);
3394 }
3395 
3396 
3397 // Interface for setting lwp priorities.  If we are using T2 libthread,
3398 // which forces the use of bound threads, or we manually set UseBoundThreads,
3399 // all of our threads will be assigned to real lwp's.  Using the thr_setprio
3400 // function is meaningless in this mode, so we must adjust the real lwp's priority.
3401 // The routines below implement the getting and setting of lwp priorities.
3402 //
3403 // Note: There are three priority scales used on Solaris.  Java priorities,
3404 //       which range from 1 to 10, the libthread "thr_setprio" scale, which ranges
3405 //       from 0 to 127, and the current scheduling class of the process we
3406 //       are running in.  This is typically from -60 to +60.
3407 //       The setting of the lwp priorities is done after a call to thr_setprio,
3408 //       so Java priorities are mapped to libthread priorities and we map from
3409 //       the latter to lwp priorities.  We don't keep priorities stored in
3410 //       Java priorities since some of our worker threads want to set priorities
3411 //       higher than all Java threads.
3412 //
3413 // For related information:
3414 // (1)  man -s 2 priocntl
3415 // (2)  man -s 4 priocntl
3416 // (3)  man dispadmin
3417 // =    librt.so
3418 // =    libthread/common/rtsched.c - thrp_setlwpprio().
3419 // =    ps -cL <pid> ... to validate priority.
3420 // =    sched_get_priority_min and _max
3421 //              pthread_create
3422 //              sched_setparam
3423 //              pthread_setschedparam
3424 //
3425 // Assumptions:
3426 // +    We assume that all threads in the process belong to the same
3427 //              scheduling class.   I.e., a homogeneous process.
3428 // +    Must be root or in the IA group to change the "interactive" attribute.
3429 //              Priocntl() will fail silently.  The only indication of failure is when
3430 //              we read back the value and notice that it hasn't changed.
3431 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3432 // +    For RT, change timeslice as well.  Invariant:
3433 //              constant "priority integral"
3434 //              Konst == TimeSlice * (60-Priority)
3435 //              Given a priority, compute appropriate timeslice.
3436 // +    Higher numerical values have higher priority.
3437 
3438 // sched class attributes
3439 typedef struct {
3440         int   schedPolicy;              // classID
3441         int   maxPrio;
3442         int   minPrio;
3443 } SchedInfo;
3444 
3445 
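     // Priority limits for the TS, IA, RT and FX scheduling classes,
     // filled in by lwp_priocntl_init() below.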
3446 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3447 
3448 #ifdef ASSERT
3449 static int  ReadBackValidate = 1;
3450 #endif
3451 static int  myClass     = 0;
3452 static int  myMin       = 0;
3453 static int  myMax       = 0;
3454 static int  myCur       = 0;
3455 static bool priocntl_enable = false;
3456 
3457 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
3458 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3459 
3460 
3461 // lwp_priocntl_init
3462 //
3463 // Try to determine the priority scale for our process.
3464 //
3465 // Return errno or 0 if OK.
3466 //
3467 static int lwp_priocntl_init () {
3468   int rslt;
3469   pcinfo_t ClassInfo;
3470   pcparms_t ParmInfo;
3471   int i;
3472 
3473   if (!UseThreadPriorities) return 0;
3474 
3475   // We are using Bound threads, we need to determine our priority ranges
3476   if (os::Solaris::T2_libthread() || UseBoundThreads) {
3477     // If ThreadPriorityPolicy is 1, switch tables
3478     if (ThreadPriorityPolicy == 1) {
3479       for (i = 0 ; i < CriticalPriority+1; i++)
3480         os::java_to_os_priority[i] = prio_policy1[i];
3481     }
3482     if (UseCriticalJavaThreadPriority) {
3483       // MaxPriority always maps to the FX scheduling class and criticalPrio.
3484       // See set_native_priority() and set_lwp_class_and_priority().
3485       // Save original MaxPriority mapping in case attempt to
3486       // use critical priority fails.
3487       java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3488       // Set negative to distinguish from other priorities
3489       os::java_to_os_priority[MaxPriority] = -criticalPrio;
3490     }
3491   }
3492   // Not using bound threads; use the ThreadPriorityPolicy==1 mappings.
3493   else {
3494     for ( i = 0 ; i < CriticalPriority+1; i++ ) {
3495       os::java_to_os_priority[i] = prio_policy1[i];
3496     }
3497     return 0;
3498   }
3499 
3500   // Get IDs for a set of well-known scheduling classes.
3501   // TODO-FIXME: GETCLINFO returns the current # of classes in the
3502   // system.  We should have a loop that iterates over the
3503   // classID values, which are known to be "small" integers.
3504 
3505   strcpy(ClassInfo.pc_clname, "TS");
3506   ClassInfo.pc_cid = -1;
3507   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3508   if (rslt < 0) return errno;
3509   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3510   tsLimits.schedPolicy = ClassInfo.pc_cid;
3511   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3512   tsLimits.minPrio = -tsLimits.maxPrio;
3513 
3514   strcpy(ClassInfo.pc_clname, "IA");
3515   ClassInfo.pc_cid = -1;
3516   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3517   if (rslt < 0) return errno;
3518   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3519   iaLimits.schedPolicy = ClassInfo.pc_cid;
3520   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3521   iaLimits.minPrio = -iaLimits.maxPrio;
3522 
3523   strcpy(ClassInfo.pc_clname, "RT");
3524   ClassInfo.pc_cid = -1;
3525   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3526   if (rslt < 0) return errno;
3527   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3528   rtLimits.schedPolicy = ClassInfo.pc_cid;
3529   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3530   rtLimits.minPrio = 0;
3531 
3532   strcpy(ClassInfo.pc_clname, "FX");
3533   ClassInfo.pc_cid = -1;
3534   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3535   if (rslt < 0) return errno;
3536   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3537   fxLimits.schedPolicy = ClassInfo.pc_cid;
3538   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3539   fxLimits.minPrio = 0;
3540 
3541   // Query our "current" scheduling class.
3542   // This will normally be IA, TS or, rarely, FX or RT.
3543   memset(&ParmInfo, 0, sizeof(ParmInfo));
3544   ParmInfo.pc_cid = PC_CLNULL;
3545   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3546   if (rslt < 0) return errno;
3547   myClass = ParmInfo.pc_cid;
3548 
3549   // We now know our scheduling classId, get specific information
3550   // about the class.
3551   ClassInfo.pc_cid = myClass;
3552   ClassInfo.pc_clname[0] = 0;
3553   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3554   if (rslt < 0) return errno;
3555 
3556   if (ThreadPriorityVerbose) {
3557     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3558   }
3559 
3560   memset(&ParmInfo, 0, sizeof(pcparms_t));
3561   ParmInfo.pc_cid = PC_CLNULL;
3562   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3563   if (rslt < 0) return errno;
3564 
3565   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3566     myMin = rtLimits.minPrio;
3567     myMax = rtLimits.maxPrio;
3568   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3569     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3570     myMin = iaLimits.minPrio;
3571     myMax = iaLimits.maxPrio;
3572     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3573   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3574     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3575     myMin = tsLimits.minPrio;
3576     myMax = tsLimits.maxPrio;
3577     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3578   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3579     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3580     myMin = fxLimits.minPrio;
3581     myMax = fxLimits.maxPrio;
3582     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3583   } else {
3584     // No clue - punt
3585     if (ThreadPriorityVerbose)
3586       tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
3587     return EINVAL;      // no clue, punt
3588   }
3589 
3590   if (ThreadPriorityVerbose) {
3591     tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
3592   }
3593 
3594   priocntl_enable = true;  // Enable changing priorities
3595   return 0;
3596 }
3597 
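     // Convenience accessors for the class-specific parameter block embedded
     // in a pcparms_t.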
3598 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3599 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3600 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3601 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3602 
3603 
3604 // scale_to_lwp_priority
3605 //
3606 // Convert from the libthread "thr_setprio" scale to our current
3607 // lwp scheduling class scale.
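     // For example, with a typical TS range of [-60..60], x == 64 maps to
     // (64 * 120) / 128 - 60 == 0, and x == 127 maps directly to rMax.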
3608 //
3609 static
3610 int     scale_to_lwp_priority (int rMin, int rMax, int x)
3611 {
3612   int v;
3613 
3614   if (x == 127) return rMax;            // avoid round-down
3615   v = ((x * (rMax - rMin)) / 128) + rMin;
3616   return v;
3617 }
3618 
3619 
3620 // set_lwp_class_and_priority
3621 //
3622 // Set the class and priority of the lwp.  This call should only
3623 // be made when using bound threads (T2 threads are bound by default).
3624 //
3625 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3626                                int newPrio, int new_class, bool scale) {
3627   int rslt;
3628   int Actual, Expected, prv;
3629   pcparms_t ParmInfo;                   // for GET-SET
3630 #ifdef ASSERT
3631   pcparms_t ReadBack;                   // for readback
3632 #endif
3633 
3634   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3635   // Query current values.
3636   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3637   // Cache "pcparms_t" in global ParmCache.
3638   // TODO: elide set-to-same-value
3639 
3640   // If something went wrong on init, don't change priorities.
3641   if ( !priocntl_enable ) {
3642     if (ThreadPriorityVerbose)
3643       tty->print_cr("Trying to set priority but init failed, ignoring");
3644     return EINVAL;
3645   }
3646 
3647   // If the lwp hasn't started yet, just return;
3648   // the _start routine will call us again.
3649   if ( lwpid <= 0 ) {
3650     if (ThreadPriorityVerbose) {
3651       tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
3652                      INTPTR_FORMAT " to %d, lwpid not set",
3653                      ThreadID, newPrio);
3654     }
3655     return 0;
3656   }
3657 
3658   if (ThreadPriorityVerbose) {
3659     tty->print_cr ("set_lwp_class_and_priority("
3660                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3661                    ThreadID, lwpid, newPrio);
3662   }
3663 
3664   memset(&ParmInfo, 0, sizeof(pcparms_t));
3665   ParmInfo.pc_cid = PC_CLNULL;
3666   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3667   if (rslt < 0) return errno;
3668 
3669   int cur_class = ParmInfo.pc_cid;
3670   ParmInfo.pc_cid = (id_t)new_class;
3671 
3672   if (new_class == rtLimits.schedPolicy) {
3673     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3674     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3675                                                        rtLimits.maxPrio, newPrio)
3676                                : newPrio;
3677     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3678     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3679     if (ThreadPriorityVerbose) {
3680       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3681     }
3682   } else if (new_class == iaLimits.schedPolicy) {
3683     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3684     int maxClamped     = MIN2(iaLimits.maxPrio,
3685                               cur_class == new_class
3686                                 ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3687     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3688                                                        maxClamped, newPrio)
3689                                : newPrio;
3690     iaInfo->ia_uprilim = cur_class == new_class
3691                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3692     iaInfo->ia_mode    = IA_NOCHANGE;
3693     if (ThreadPriorityVerbose) {
3694       tty->print_cr("IA: [%d...%d] %d->%d\n",
3695                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3696     }
3697   } else if (new_class == tsLimits.schedPolicy) {
3698     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3699     int maxClamped     = MIN2(tsLimits.maxPrio,
3700                               cur_class == new_class
3701                                 ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3702     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3703                                                        maxClamped, newPrio)
3704                                : newPrio;
3705     tsInfo->ts_uprilim = cur_class == new_class
3706                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3707     if (ThreadPriorityVerbose) {
3708       tty->print_cr("TS: [%d...%d] %d->%d\n",
3709                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3710     }
3711   } else if (new_class == fxLimits.schedPolicy) {
3712     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3713     int maxClamped     = MIN2(fxLimits.maxPrio,
3714                               cur_class == new_class
3715                                 ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3716     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3717                                                        maxClamped, newPrio)
3718                                : newPrio;
3719     fxInfo->fx_uprilim = cur_class == new_class
3720                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3721     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3722     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3723     if (ThreadPriorityVerbose) {
3724       tty->print_cr("FX: [%d...%d] %d->%d\n",
3725                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3726     }
3727   } else {
3728     if (ThreadPriorityVerbose) {
3729       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3730     }
3731     return EINVAL;    // no clue, punt
3732   }
3733 
3734   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3735   if (ThreadPriorityVerbose && rslt) {
3736     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3737   }
3738   if (rslt < 0) return errno;
3739 
3740 #ifdef ASSERT
3741   // Sanity check: read back what we just attempted to set.
3742   // In theory it could have changed in the interim ...
3743   //
3744   // The priocntl system call is tricky.
3745   // Sometimes it'll validate the priority value argument and
3746   // return EINVAL if unhappy.  At other times it fails silently.
3747   // Readbacks are prudent.
3748 
3749   if (!ReadBackValidate) return 0;
3750 
3751   memset(&ReadBack, 0, sizeof(pcparms_t));
3752   ReadBack.pc_cid = PC_CLNULL;
3753   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3754   assert(rslt >= 0, "priocntl failed");
3755   Actual = Expected = 0xBAD;
3756   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3757   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3758     Actual   = RTPRI(ReadBack)->rt_pri;
3759     Expected = RTPRI(ParmInfo)->rt_pri;
3760   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3761     Actual   = IAPRI(ReadBack)->ia_upri;
3762     Expected = IAPRI(ParmInfo)->ia_upri;
3763   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3764     Actual   = TSPRI(ReadBack)->ts_upri;
3765     Expected = TSPRI(ParmInfo)->ts_upri;
3766   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3767     Actual   = FXPRI(ReadBack)->fx_upri;
3768     Expected = FXPRI(ParmInfo)->fx_upri;
3769   } else {
3770     if (ThreadPriorityVerbose) {
3771       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3772                     ParmInfo.pc_cid);
3773     }
3774   }
3775 
3776   if (Actual != Expected) {
3777     if (ThreadPriorityVerbose) {
3778       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3779                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3780     }
3781   }
3782 #endif
3783 
3784   return 0;
3785 }
3786 
3787 // Solaris only gives access to 128 real priorities at a time,
3788 // so we expand Java's ten to fill this range.  This would be better
3789 // if we dynamically adjusted relative priorities.
3790 //
3791 // The ThreadPriorityPolicy option allows us to select 2 different
3792 // priority scales.
3793 //
3794 // ThreadPriorityPolicy=0
3795 // Since Solaris' default priority is MaximumPriority, we do not
3796 // set a priority lower than Max unless a priority lower than
3797 // NormPriority is requested.
3798 //
3799 // ThreadPriorityPolicy=1
3800 // This mode causes the priority table to get filled with
3801 // linear values.  NormPriority gets mapped to 50% of the
3802 // Maximum priority and so on.  This will cause VM threads
3803 // to get unfair treatment against other Solaris processes
3804 // which do not explicitly alter their thread priorities.
3805 //
3806 
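     // The CriticalPriority entry is negative so that set_native_priority()
     // can recognize the request and switch the lwp to the FX class.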
3807 int os::java_to_os_priority[CriticalPriority + 1] = {
3808   -99999,         // 0 Entry should never be used
3809 
3810   0,              // 1 MinPriority
3811   32,             // 2
3812   64,             // 3
3813 
3814   96,             // 4
3815   127,            // 5 NormPriority
3816   127,            // 6
3817 
3818   127,            // 7
3819   127,            // 8
3820   127,            // 9 NearMaxPriority
3821 
3822   127,            // 10 MaxPriority
3823 
3824   -criticalPrio   // 11 CriticalPriority
3825 };
3826 
3827 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3828   OSThread* osthread = thread->osthread();
3829 
3830   // Save requested priority in case the thread hasn't been started
3831   osthread->set_native_priority(newpri);
3832 
3833   // Check for critical priority request
3834   bool fxcritical = false;
3835   if (newpri == -criticalPrio) {
3836     fxcritical = true;
3837     newpri = criticalPrio;
3838   }
3839 
3840   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3841   if (!UseThreadPriorities) return OS_OK;
3842 
3843   int status = 0;
3844 
3845   if (!fxcritical) {
3846     // Use thr_setprio only if we have a priority that thr_setprio understands
3847     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3848   }
3849 
3850   if (os::Solaris::T2_libthread() ||
3851       (UseBoundThreads && osthread->is_vm_created())) {
3852     int lwp_status =
3853       set_lwp_class_and_priority(osthread->thread_id(),
3854                                  osthread->lwp_id(),
3855                                  newpri,
3856                                  fxcritical ? fxLimits.schedPolicy : myClass,
3857                                  !fxcritical);
3858     if (lwp_status != 0 && fxcritical) {
3859       // Try again, this time without changing the scheduling class
3860       newpri = java_MaxPriority_to_os_priority;
3861       lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3862                                               osthread->lwp_id(),
3863                                               newpri, myClass, false);
3864     }
3865     status |= lwp_status;
3866   }
3867   return (status == 0) ? OS_OK : OS_ERR;
3868 }
3869 
3870 
3871 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
3872   int p;
3873   if ( !UseThreadPriorities ) {
3874     *priority_ptr = NormalPriority;
3875     return OS_OK;
3876   }
3877   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3878   if (status != 0) {
3879     return OS_ERR;
3880   }
3881   *priority_ptr = p;
3882   return OS_OK;
3883 }
3884 
3885 
3886 // Hint to the underlying OS that a task switch would not be good.
3887 // Void return because it's a hint and can fail.
3888 void os::hint_no_preempt() {
3889   schedctl_start(schedctl_init());
3890 }
3891 
3892 static void resume_clear_context(OSThread *osthread) {
3893   osthread->set_ucontext(NULL);
3894 }
3895 
3896 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3897   osthread->set_ucontext(context);
3898 }
3899 
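     // Used by SR_handler and the do_suspend()/do_resume() helpers below to
     // hand-shake the suspend/resume state transitions with the target thread.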
3900 static Semaphore sr_semaphore;
3901 
3902 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
3903   // Save and restore errno to avoid confusing native code with EINTR
3904   // after sigsuspend.
3905   int old_errno = errno;
3906 
3907   OSThread* osthread = thread->osthread();
3908   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3909 
3910   os::SuspendResume::State current = osthread->sr.state();
3911   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3912     suspend_save_context(osthread, uc);
3913 
3914     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3915     os::SuspendResume::State state = osthread->sr.suspended();
3916     if (state == os::SuspendResume::SR_SUSPENDED) {
3917       sigset_t suspend_set;  // signals for sigsuspend()
3918 
3919       // get current set of blocked signals and unblock resume signal
3920       thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
3921       sigdelset(&suspend_set, os::Solaris::SIGasync());
3922 
3923       sr_semaphore.signal();
3924       // wait here until we are resumed
3925       while (1) {
3926         sigsuspend(&suspend_set);
3927 
3928         os::SuspendResume::State result = osthread->sr.running();
3929         if (result == os::SuspendResume::SR_RUNNING) {
3930           sr_semaphore.signal();
3931           break;
3932         }
3933       }
3934 
3935     } else if (state == os::SuspendResume::SR_RUNNING) {
3936       // request was cancelled, continue
3937     } else {
3938       ShouldNotReachHere();
3939     }
3940 
3941     resume_clear_context(osthread);
3942   } else if (current == os::SuspendResume::SR_RUNNING) {
3943     // request was cancelled, continue
3944   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3945     // ignore
3946   } else {
3947     // ignore
3948   }
3949 
3950   errno = old_errno;
3951 }
3952 
3953 
3954 void os::interrupt(Thread* thread) {
3955   assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
3956 
3957   OSThread* osthread = thread->osthread();
3958 
3959   int isInterrupted = osthread->interrupted();
3960   if (!isInterrupted) {
3961       osthread->set_interrupted(true);
3962       OrderAccess::fence();
3963       // os::sleep() is implemented with either poll (NULL,0,timeout) or
3964       // by parking on _SleepEvent.  If the former, thr_kill will unwedge
3965       // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper.
3966       ParkEvent * const slp = thread->_SleepEvent ;
3967       if (slp != NULL) slp->unpark() ;
3968   }
3969 
3970   // For JSR166:  unpark after setting status but before thr_kill -dl
3971   if (thread->is_Java_thread()) {
3972     ((JavaThread*)thread)->parker()->unpark();
3973   }
3974 
3975   // Handle interruptible wait() ...
3976   ParkEvent * const ev = thread->_ParkEvent ;
3977   if (ev != NULL) ev->unpark() ;
3978 
3979   // Once events are used everywhere for os::sleep, this thr_kill
3980   // will only be needed if UseVMInterruptibleIO is true.
3981 
3982   if (!isInterrupted) {
3983     int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
3984     assert_status(status == 0, status, "thr_kill");
3985 
3986     // Bump thread interruption counter
3987     RuntimeService::record_thread_interrupt_signaled_count();
3988   }
3989 }
3990 
3991 
3992 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3993   assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
3994 
3995   OSThread* osthread = thread->osthread();
3996 
3997   bool res = osthread->interrupted();
3998 
3999   // NOTE that since there is no "lock" around these two operations,
4000   // there is the possibility that the interrupted flag will be
4001   // "false" but that the interrupt event will be set. This is
4002   // intentional. The effect of this is that Object.wait() will appear
4003   // to have a spurious wakeup, which is not harmful, and the
4004   // possibility is so rare that it is not worth the added complexity
4005   // to add yet another lock. It has also been recommended not to put
4006   // the interrupted flag into the os::Solaris::Event structure,
4007   // because it hides the issue.
4008   if (res && clear_interrupted) {
4009     osthread->set_interrupted(false);
4010   }
4011   return res;
4012 }
4013 
4014 
4015 void os::print_statistics() {
4016 }
4017 
4018 int os::message_box(const char* title, const char* message) {
4019   int i;
4020   fdStream err(defaultStream::error_fd());
4021   for (i = 0; i < 78; i++) err.print_raw("=");
4022   err.cr();
4023   err.print_raw_cr(title);
4024   for (i = 0; i < 78; i++) err.print_raw("-");
4025   err.cr();
4026   err.print_raw_cr(message);
4027   for (i = 0; i < 78; i++) err.print_raw("=");
4028   err.cr();
4029 
4030   char buf[16];
4031   // Prevent process from exiting upon "read error" without consuming all CPU
4032   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
4033 
4034   return buf[0] == 'y' || buf[0] == 'Y';
4035 }
4036 
4037 static int sr_notify(OSThread* osthread) {
4038   int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
4039   assert_status(status == 0, status, "thr_kill");
4040   return status;
4041 }
4042 
4043 // "Randomly" selected value for how long we want to spin
4044 // before bailing out on suspending a thread; also how often
4045 // we send a signal to a thread we want to resume.
4046 static const int RANDOMLY_LARGE_INTEGER = 1000000;
4047 static const int RANDOMLY_LARGE_INTEGER2 = 100;
4048 
4049 static bool do_suspend(OSThread* osthread) {
4050   assert(osthread->sr.is_running(), "thread should be running");
4051   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
4052 
4053   // mark as suspended and send signal
4054   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
4055     // failed to switch, state wasn't running?
4056     ShouldNotReachHere();
4057     return false;
4058   }
4059 
4060   if (sr_notify(osthread) != 0) {
4061     ShouldNotReachHere();
4062   }
4063 
4064   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
4065   while (true) {
4066     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
4067       break;
4068     } else {
4069       // timeout
4070       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
4071       if (cancelled == os::SuspendResume::SR_RUNNING) {
4072         return false;
4073       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
4074         // make sure that we consume the signal on the semaphore as well
4075         sr_semaphore.wait();
4076         break;
4077       } else {
4078         ShouldNotReachHere();
4079         return false;
4080       }
4081     }
4082   }
4083 
4084   guarantee(osthread->sr.is_suspended(), "Must be suspended");
4085   return true;
4086 }
4087 
4088 static void do_resume(OSThread* osthread) {
4089   assert(osthread->sr.is_suspended(), "thread should be suspended");
4090   assert(!sr_semaphore.trywait(), "invalid semaphore state");
4091 
4092   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
4093     // failed to switch to WAKEUP_REQUEST
4094     ShouldNotReachHere();
4095     return;
4096   }
4097 
4098   while (true) {
4099     if (sr_notify(osthread) == 0) {
4100       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
4101         if (osthread->sr.is_running()) {
4102           return;
4103         }
4104       }
4105     } else {
4106       ShouldNotReachHere();
4107     }
4108   }
4109 
4110   guarantee(osthread->sr.is_running(), "Must be running!");
4111 }
4112 
4113 void os::SuspendedThreadTask::internal_do_task() {
4114   if (do_suspend(_thread->osthread())) {
4115     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
4116     do_task(context);
4117     do_resume(_thread->osthread());
4118   }
4119 }
4120 
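     // A SuspendedThreadTask that simply records the pc of the suspended thread.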
4121 class PcFetcher : public os::SuspendedThreadTask {
4122 public:
4123   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
4124   ExtendedPC result();
4125 protected:
4126   void do_task(const os::SuspendedThreadTaskContext& context);
4127 private:
4128   ExtendedPC _epc;
4129 };
4130 
4131 ExtendedPC PcFetcher::result() {
4132   guarantee(is_done(), "task is not done yet.");
4133   return _epc;
4134 }
4135 
4136 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
4137   Thread* thread = context.thread();
4138   OSThread* osthread = thread->osthread();
4139   if (osthread->ucontext() != NULL) {
4140     _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
4141   } else {
4142     // NULL context is unexpected, double-check this is the VMThread
4143     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
4144   }
4145 }
4146 
4147 // A lightweight implementation that does not suspend the target thread and
4148 // thus returns only a hint. Used for profiling only!
4149 ExtendedPC os::get_thread_pc(Thread* thread) {
4150   // Make sure that it is called by the watcher and the Threads lock is owned.
4151   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
4152   // For now, is only used to profile the VM Thread
4153   assert(thread->is_VM_thread(), "Can only be called for VMThread");
4154   PcFetcher fetcher(thread);
4155   fetcher.run();
4156   return fetcher.result();
4157 }
4158 
4159 
4160 // This does not do anything on Solaris. This is basically a hook for being
4161 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
4162 void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
4163   f(value, method, args, thread);
4164 }
4165 
4166 // This routine may be used by user applications as a "hook" to catch signals.
4167 // The user-defined signal handler must pass unrecognized signals to this
4168 // routine, and if it returns true (non-zero), then the signal handler must
4169 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
4170 // routine will never return false (zero), but instead will execute a VM panic
4171 // routine to kill the process.
4172 //
4173 // If this routine returns false, it is OK to call it again.  This allows
4174 // the user-defined signal handler to perform checks either before or after
4175 // the VM performs its own checks.  Naturally, the user code would be making
4176 // a serious error if it tried to handle an exception (such as a null check
4177 // or breakpoint) that the VM was generating for its own correct operation.
4178 //
4179 // This routine may recognize any of the following kinds of signals:
4180 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
4181 // os::Solaris::SIGasync
4182 // It should be consulted by handlers for any of those signals.
4183 // It explicitly does not recognize os::Solaris::SIGinterrupt
4184 //
4185 // The caller of this routine must pass in the three arguments supplied
4186 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
4187 // field of the structure passed to sigaction().  This routine assumes that
4188 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
4189 //
4190 // Note that the VM will print warnings if it detects conflicting signal
4191 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
4192 //
4193 extern "C" JNIEXPORT int
4194 JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
4195                           int abort_if_unrecognized);
4196 
4197 
4198 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
4199   int orig_errno = errno;  // Preserve errno value over signal handler.
4200   JVM_handle_solaris_signal(sig, info, ucVoid, true);
4201   errno = orig_errno;
4202 }
4203 
4204 /* Do not delete - if the guarantee is ever removed, a signal handler (even empty)
4205    is needed to provoke threads blocked on IO to return with EINTR.
4206    Note: this explicitly does NOT call JVM_handle_solaris_signal and
4207    does NOT participate in signal chaining due to the requirement for
4208    NOT setting SA_RESTART to make EINTR work. */
4209 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
4210    if (UseSignalChaining) {
4211       struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
4212       if (actp && actp->sa_handler) {
4213         vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
4214       }
4215    }
4216 }
4217 
4218 // This boolean allows users to forward their own non-matching signals
4219 // to JVM_handle_solaris_signal, harmlessly.
4220 bool os::Solaris::signal_handlers_are_installed = false;
4221 
4222 // For signal-chaining
4223 bool os::Solaris::libjsig_is_loaded = false;
4224 typedef struct sigaction *(*get_signal_t)(int);
4225 get_signal_t os::Solaris::get_signal_action = NULL;
4226 
4227 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
4228   struct sigaction *actp = NULL;
4229 
4230   if (libjsig_is_loaded && (sig <= Maxlibjsigsigs)) {
4231     // Retrieve the old signal handler from libjsig
4232     actp = (*get_signal_action)(sig);
4233   }
4234   if (actp == NULL) {
4235     // Retrieve the preinstalled signal handler from jvm
4236     actp = get_preinstalled_handler(sig);
4237   }
4238 
4239   return actp;
4240 }
4241 
4242 static bool call_chained_handler(struct sigaction *actp, int sig,
4243                                  siginfo_t *siginfo, void *context) {
4244   // Call the old signal handler
4245   if (actp->sa_handler == SIG_DFL) {
4246     // It's more reasonable to let jvm treat it as an unexpected exception
4247     // instead of taking the default action.
4248     return false;
4249   } else if (actp->sa_handler != SIG_IGN) {
4250     if ((actp->sa_flags & SA_NODEFER) == 0) {
4251       // automatically block the signal
4252       sigaddset(&(actp->sa_mask), sig);
4253     }
4254 
4255     sa_handler_t hand;
4256     sa_sigaction_t sa;
4257     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
4258     // retrieve the chained handler
4259     if (siginfo_flag_set) {
4260       sa = actp->sa_sigaction;
4261     } else {
4262       hand = actp->sa_handler;
4263     }
4264 
4265     if ((actp->sa_flags & SA_RESETHAND) != 0) {
4266       actp->sa_handler = SIG_DFL;
4267     }
4268 
4269     // try to honor the signal mask
4270     sigset_t oset;
4271     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
4272 
4273     // call into the chained handler
4274     if (siginfo_flag_set) {
4275       (*sa)(sig, siginfo, context);
4276     } else {
4277       (*hand)(sig);
4278     }
4279 
4280     // restore the signal mask
4281     thr_sigsetmask(SIG_SETMASK, &oset, 0);
4282   }
4283   // Tell jvm's signal handler the signal is taken care of.
4284   return true;
4285 }
4286 
4287 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4288   bool chained = false;
4289   // signal-chaining
4290   if (UseSignalChaining) {
4291     struct sigaction *actp = get_chained_signal_action(sig);
4292     if (actp != NULL) {
4293       chained = call_chained_handler(actp, sig, siginfo, context);
4294     }
4295   }
4296   return chained;
4297 }
4298 
4299 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4300   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4301   if (preinstalled_sigs[sig] != 0) {
4302     return &chainedsigactions[sig];
4303   }
4304   return NULL;
4305 }
4306 
4307 void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
4308 
4309   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4310   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4311   chainedsigactions[sig] = oldAct;
4312   preinstalled_sigs[sig] = 1;
4313 }
4314 
4315 void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
4316   // Check for overwrite.
4317   struct sigaction oldAct;
4318   sigaction(sig, (struct sigaction*)NULL, &oldAct);
4319   void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
4320                                       : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
4321   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4322       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4323       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4324     if (AllowUserSignalHandlers || !set_installed) {
4325       // Do not overwrite; user takes responsibility to forward to us.
4326       return;
4327     } else if (UseSignalChaining) {
4328       if (oktochain) {
4329         // save the old handler in jvm
4330         save_preinstalled_handler(sig, oldAct);
4331       } else {
4332         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4333       }
4334       // libjsig also interposes the sigaction() call below and saves the
4335       // old sigaction on its own.
4336     } else {
4337       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4338                     "%#lx for signal %d.", (long)oldhand, sig));
4339     }
4340   }
4341 
4342   struct sigaction sigAct;
4343   sigfillset(&(sigAct.sa_mask));
4344   sigAct.sa_handler = SIG_DFL;
4345 
4346   sigAct.sa_sigaction = signalHandler;
4347   // Handle SIGSEGV on alternate signal stack if
4348   // not using stack banging
4349   if (!UseStackBanging && sig == SIGSEGV) {
4350     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4351   // Interruptible i/o requires SA_RESTART cleared so EINTR
4352   // is returned instead of restarting system calls
4353   } else if (sig == os::Solaris::SIGinterrupt()) {
4354     sigemptyset(&sigAct.sa_mask);
4355     sigAct.sa_handler = NULL;
4356     sigAct.sa_flags = SA_SIGINFO;
4357     sigAct.sa_sigaction = sigINTRHandler;
4358   } else {
4359     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4360   }
4361   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4362 
4363   sigaction(sig, &sigAct, &oldAct);
4364 
4365   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4366                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4367   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4368 }
4369 
4370 
4371 #define DO_SIGNAL_CHECK(sig) \
4372   if (!sigismember(&check_signal_done, sig)) \
4373     os::Solaris::check_signal_handler(sig)
4374 
// This method is a periodic task used to check for misbehaving JNI applications
// under CheckJNI; other periodic checks can be added here as well.
4377 
4378 void os::run_periodic_checks() {
  // A big source of grief is hijacking virtual address 0x0 on Solaris,
  // thereby defeating NULL pointer checks.
4381   if(!check_addr0_done) check_addr0_done = check_addr0(tty);
4382 
4383   if (check_signals == false) return;
4384 
  // If the SEGV or BUS handlers are overridden, generation of the hs*.log
  // file in the event of a crash could be prevented. Debugging such a case
  // can be very challenging, so we check the following for good measure:
4389   DO_SIGNAL_CHECK(SIGSEGV);
4390   DO_SIGNAL_CHECK(SIGILL);
4391   DO_SIGNAL_CHECK(SIGFPE);
4392   DO_SIGNAL_CHECK(SIGBUS);
4393   DO_SIGNAL_CHECK(SIGPIPE);
4394   DO_SIGNAL_CHECK(SIGXFSZ);
4395 
4396   // ReduceSignalUsage allows the user to override these handlers
4397   // see comments at the very top and jvm_solaris.h
4398   if (!ReduceSignalUsage) {
4399     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4400     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4401     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4402     DO_SIGNAL_CHECK(BREAK_SIGNAL);
4403   }
4404 
4405   // See comments above for using JVM1/JVM2 and UseAltSigs
4406   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4407   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4408 
4409 }
4410 
4411 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4412 
4413 static os_sigaction_t os_sigaction = NULL;
4414 
4415 void os::Solaris::check_signal_handler(int sig) {
4416   char buf[O_BUFLEN];
4417   address jvmHandler = NULL;
4418 
4419   struct sigaction act;
4420   if (os_sigaction == NULL) {
4421     // only trust the default sigaction, in case it has been interposed
4422     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4423     if (os_sigaction == NULL) return;
4424   }
4425 
4426   os_sigaction(sig, (struct sigaction*)NULL, &act);
4427 
4428   address thisHandler = (act.sa_flags & SA_SIGINFO)
4429     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4430     : CAST_FROM_FN_PTR(address, act.sa_handler) ;
4431 
4432 
4433   switch(sig) {
4434     case SIGSEGV:
4435     case SIGBUS:
4436     case SIGFPE:
4437     case SIGPIPE:
4438     case SIGXFSZ:
4439     case SIGILL:
4440       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4441       break;
4442 
4443     case SHUTDOWN1_SIGNAL:
4444     case SHUTDOWN2_SIGNAL:
4445     case SHUTDOWN3_SIGNAL:
4446     case BREAK_SIGNAL:
4447       jvmHandler = (address)user_handler();
4448       break;
4449 
4450     default:
4451       int intrsig = os::Solaris::SIGinterrupt();
4452       int asynsig = os::Solaris::SIGasync();
4453 
4454       if (sig == intrsig) {
4455         jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4456       } else if (sig == asynsig) {
4457         jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4458       } else {
4459         return;
4460       }
4461       break;
4462   }
4463 
4464 
4465   if (thisHandler != jvmHandler) {
4466     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4467     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4468     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4469     // No need to check this sig any longer
4470     sigaddset(&check_signal_done, sig);
    // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
4472     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
4473       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
4474                     exception_name(sig, buf, O_BUFLEN));
4475     }
4476   } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4477     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4478     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4479     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
4480     // No need to check this sig any longer
4481     sigaddset(&check_signal_done, sig);
4482   }
4483 
4484   // Print all the signal handler state
4485   if (sigismember(&check_signal_done, sig)) {
4486     print_signal_handlers(tty, buf, O_BUFLEN);
4487   }
4488 
4489 }
4490 
4491 void os::Solaris::install_signal_handlers() {
4492   bool libjsigdone = false;
4493   signal_handlers_are_installed = true;
4494 
4495   // signal-chaining
4496   typedef void (*signal_setting_t)();
4497   signal_setting_t begin_signal_setting = NULL;
4498   signal_setting_t end_signal_setting = NULL;
4499   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4500                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4501   if (begin_signal_setting != NULL) {
4502     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4503                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4504     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4505                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4506     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4507                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4508     libjsig_is_loaded = true;
4509     if (os::Solaris::get_libjsig_version != NULL) {
4510       libjsigversion =  (*os::Solaris::get_libjsig_version)();
4511     }
4512     assert(UseSignalChaining, "should enable signal-chaining");
4513   }
4514   if (libjsig_is_loaded) {
    // Tell libjsig that the JVM is setting signal handlers
4516     (*begin_signal_setting)();
4517   }
4518 
4519   set_signal_handler(SIGSEGV, true, true);
4520   set_signal_handler(SIGPIPE, true, true);
4521   set_signal_handler(SIGXFSZ, true, true);
4522   set_signal_handler(SIGBUS, true, true);
4523   set_signal_handler(SIGILL, true, true);
4524   set_signal_handler(SIGFPE, true, true);
4525 
4526 
4527   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4528 
    // Pre-1.4.1 libjsig limited signal chaining to signals <= 32, so it
    // cannot register the overridable signals, which might be > 32.
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
      // Tell libjsig that the JVM has finished setting signal handlers
      (*end_signal_setting)();
4534       libjsigdone = true;
4535     }
4536   }
4537 
4538   // Never ok to chain our SIGinterrupt
4539   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4540   set_signal_handler(os::Solaris::SIGasync(), true, true);
4541 
4542   if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig that the JVM has finished setting signal handlers
4544     (*end_signal_setting)();
4545   }
4546 
  // We don't activate the signal checker if libjsig is in place; we trust ourselves,
  // and if a user signal handler is installed all bets are off.
  // Log that signal checking is off only if -verbose:jni is specified.
4550   if (CheckJNICalls) {
4551     if (libjsig_is_loaded) {
4552       if (PrintJNIResolving) {
4553         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4554       }
4555       check_signals = false;
4556     }
4557     if (AllowUserSignalHandlers) {
4558       if (PrintJNIResolving) {
4559         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4560       }
4561       check_signals = false;
4562     }
4563   }
4564 }
4565 
4566 
4567 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
4568 
4569 const char * signames[] = {
4570   "SIG0",
4571   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4572   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4573   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4574   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4575   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4576   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4577   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4578   "SIGCANCEL", "SIGLOST"
4579 };
4580 
4581 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4582   if (0 < exception_code && exception_code <= SIGRTMAX) {
4583     // signal
4584     if (exception_code < sizeof(signames)/sizeof(const char*)) {
4585        jio_snprintf(buf, size, "%s", signames[exception_code]);
4586     } else {
4587        jio_snprintf(buf, size, "SIG%d", exception_code);
4588     }
4589     return buf;
4590   } else {
4591     return NULL;
4592   }
4593 }
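
// A minimal usage sketch (illustrative only, not a call site from this file):
// translate a signal number into a printable name for diagnostic output.
//
//   char buf[O_BUFLEN];
//   const char* name = os::exception_name(SIGSEGV, buf, sizeof(buf));  // "SIGSEGV"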
4594 
4595 // (Static) wrappers for the new libthread API
4596 int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
4597 int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
4598 int_fnP_thread_t_i os::Solaris::_thr_setmutator;
4599 int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
4600 int_fnP_thread_t os::Solaris::_thr_continue_mutator;
4601 
4602 // (Static) wrapper for getisax(2) call.
4603 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4604 
4605 // (Static) wrappers for the liblgrp API
4606 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4607 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4608 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4609 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4610 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4611 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4612 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4613 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4614 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4615 
4616 // (Static) wrapper for meminfo() call.
4617 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4618 
4619 static address resolve_symbol_lazy(const char* name) {
4620   address addr = (address) dlsym(RTLD_DEFAULT, name);
4621   if(addr == NULL) {
4622     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4623     addr = (address) dlsym(RTLD_NEXT, name);
4624   }
4625   return addr;
4626 }
4627 
4628 static address resolve_symbol(const char* name) {
4629   address addr = resolve_symbol_lazy(name);
4630   if(addr == NULL) {
4631     fatal(dlerror());
4632   }
4633   return addr;
4634 }
4635 
4636 
4637 
4638 // isT2_libthread()
4639 //
4640 // Routine to determine if we are currently using the new T2 libthread.
4641 //
4642 // We determine if we are using T2 by reading /proc/self/lstatus and
4643 // looking for a thread with the ASLWP bit set.  If we find this status
4644 // bit set, we must assume that we are NOT using T2.  The T2 team
4645 // has approved this algorithm.
4646 //
4647 // We need to determine if we are running with the new T2 libthread
4648 // since setting native thread priorities is handled differently
4649 // when using this library.  All threads created using T2 are bound
4650 // threads. Calling thr_setprio is meaningless in this case.
4651 //
4652 bool isT2_libthread() {
4653   static prheader_t * lwpArray = NULL;
4654   static int lwpSize = 0;
4655   static int lwpFile = -1;
4656   lwpstatus_t * that;
4657   char lwpName [128];
4658   bool isT2 = false;
4659 
4660 #define ADR(x)  ((uintptr_t)(x))
4661 #define LWPINDEX(ary,ix)   ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))
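// LWPINDEX(ary, ix) yields a pointer to the ix-th lwpstatus_t entry: the
// entries follow the prheader_t header, spaced pr_entsize bytes apart.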
4662 
4663   lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
4664   if (lwpFile < 0) {
4665       if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
4666       return false;
4667   }
4668   lwpSize = 16*1024;
4669   for (;;) {
4670     ::lseek64 (lwpFile, 0, SEEK_SET);
4671     lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
4672     if (::read(lwpFile, lwpArray, lwpSize) < 0) {
4673       if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
4674       break;
4675     }
4676     if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
4677        // We got a good snapshot - now iterate over the list.
4678       int aslwpcount = 0;
4679       for (int i = 0; i < lwpArray->pr_nent; i++ ) {
4680         that = LWPINDEX(lwpArray,i);
4681         if (that->pr_flags & PR_ASLWP) {
4682           aslwpcount++;
4683         }
4684       }
4685       if (aslwpcount == 0) isT2 = true;
4686       break;
4687     }
4688     lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
4689     FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);  // retry.
4690   }
4691 
4692   FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
4693   ::close (lwpFile);
4694   if (ThreadPriorityVerbose) {
4695     if (isT2) tty->print_cr("We are running with a T2 libthread\n");
4696     else tty->print_cr("We are not running with a T2 libthread\n");
4697   }
4698   return isT2;
4699 }
4700 
4701 
4702 void os::Solaris::libthread_init() {
4703   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4704 
4705   // Determine if we are running with the new T2 libthread
4706   os::Solaris::set_T2_libthread(isT2_libthread());
4707 
4708   lwp_priocntl_init();
4709 
4710   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4711   if(func == NULL) {
4712     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on a new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
4715     guarantee(func != NULL, "libthread.so is too old.");
4716   }
4717 
4718   // Initialize the new libthread getstate API wrappers
4719   func = resolve_symbol("thr_getstate");
4720   os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));
4721 
4722   func = resolve_symbol("thr_setstate");
4723   os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));
4724 
4725   func = resolve_symbol("thr_setmutator");
4726   os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));
4727 
4728   func = resolve_symbol("thr_suspend_mutator");
4729   os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
4730 
4731   func = resolve_symbol("thr_continue_mutator");
4732   os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
4733 
4734   int size;
4735   void (*handler_info_func)(address *, int *);
4736   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4737   handler_info_func(&handler_start, &size);
4738   handler_end = handler_start + size;
4739 }
4740 
4741 
4742 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4743 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4744 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4745 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4746 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4747 int os::Solaris::_mutex_scope = USYNC_THREAD;
4748 
4749 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4750 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4751 int_fnP_cond_tP os::Solaris::_cond_signal;
4752 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4753 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4754 int_fnP_cond_tP os::Solaris::_cond_destroy;
4755 int os::Solaris::_cond_scope = USYNC_THREAD;
4756 
4757 void os::Solaris::synchronization_init() {
4758   if(UseLWPSynchronization) {
4759     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4760     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4761     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4762     os::Solaris::set_mutex_init(lwp_mutex_init);
4763     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4764     os::Solaris::set_mutex_scope(USYNC_THREAD);
4765 
4766     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4767     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4768     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4769     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4770     os::Solaris::set_cond_init(lwp_cond_init);
4771     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4772     os::Solaris::set_cond_scope(USYNC_THREAD);
4773   }
4774   else {
4775     os::Solaris::set_mutex_scope(USYNC_THREAD);
4776     os::Solaris::set_cond_scope(USYNC_THREAD);
4777 
4778     if(UsePthreads) {
4779       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4780       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4781       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4782       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4783       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4784 
4785       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4786       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4787       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4788       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4789       os::Solaris::set_cond_init(pthread_cond_default_init);
4790       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4791     }
4792     else {
4793       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4794       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4795       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4796       os::Solaris::set_mutex_init(::mutex_init);
4797       os::Solaris::set_mutex_destroy(::mutex_destroy);
4798 
4799       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4800       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4801       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4802       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4803       os::Solaris::set_cond_init(::cond_init);
4804       os::Solaris::set_cond_destroy(::cond_destroy);
4805     }
4806   }
4807 }
4808 
4809 bool os::Solaris::liblgrp_init() {
4810   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4811   if (handle != NULL) {
4812     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4813     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4814     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4815     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4816     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4817     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4818     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4819     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4820                                        dlsym(handle, "lgrp_cookie_stale")));
4821 
4822     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4823     set_lgrp_cookie(c);
4824     return true;
4825   }
4826   return false;
4827 }
4828 
4829 void os::Solaris::misc_sym_init() {
4830   address func;
4831 
4832   // getisax
4833   func = resolve_symbol_lazy("getisax");
4834   if (func != NULL) {
4835     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4836   }
4837 
4838   // meminfo
4839   func = resolve_symbol_lazy("meminfo");
4840   if (func != NULL) {
4841     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4842   }
4843 }
4844 
4845 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4846   assert(_getisax != NULL, "_getisax not set");
4847   return _getisax(array, n);
4848 }
4849 
4850 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4851 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4852 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4853 
4854 void init_pset_getloadavg_ptr(void) {
4855   pset_getloadavg_ptr =
4856     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4857   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4858     warning("pset_getloadavg function not found");
4859   }
4860 }
4861 
4862 int os::Solaris::_dev_zero_fd = -1;
4863 
4864 // this is called _before_ the global arguments have been parsed
4865 void os::init(void) {
4866   _initial_pid = getpid();
4867 
4868   max_hrtime = first_hrtime = gethrtime();
4869 
4870   init_random(1234567);
4871 
4872   page_size = sysconf(_SC_PAGESIZE);
4873   if (page_size == -1)
4874     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4875                   strerror(errno)));
4876   init_page_sizes((size_t) page_size);
4877 
4878   Solaris::initialize_system_info();
4879 
4880   // Initialize misc. symbols as soon as possible, so we can use them
4881   // if we need them.
4882   Solaris::misc_sym_init();
4883 
4884   int fd = ::open("/dev/zero", O_RDWR);
4885   if (fd < 0) {
4886     fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4887   } else {
4888     Solaris::set_dev_zero_fd(fd);
4889 
4890     // Close on exec, child won't inherit.
4891     fcntl(fd, F_SETFD, FD_CLOEXEC);
4892   }
4893 
4894   clock_tics_per_sec = CLK_TCK;
4895 
  // Check if dladdr1() exists; dladdr1 can provide more information than
  // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  // and is available in linker patches for 5.7 and 5.8.
  // libdl.so must already have been loaded; this call is just an entry lookup.
4900   void * hdl = dlopen("libdl.so", RTLD_NOW);
4901   if (hdl)
4902     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4903 
4904   // (Solaris only) this switches to calls that actually do locking.
4905   ThreadCritical::initialize();
4906 
4907   main_thread = thr_self();
4908 
4909   // Constant minimum stack size allowed. It must be at least
4910   // the minimum of what the OS supports (thr_min_stack()), and
4911   // enough to allow the thread to get to user bytecode execution.
4912   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4913   // If the pagesize of the VM is greater than 8K determine the appropriate
4914   // number of initial guard pages.  The user can change this with the
4915   // command line arguments, if needed.
4916   if (vm_page_size() > 8*K) {
4917     StackYellowPages = 1;
4918     StackRedPages = 1;
4919     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4920   }
4921 }
4922 
4923 // To install functions for atexit system call
4924 extern "C" {
4925   static void perfMemory_exit_helper() {
4926     perfMemory_exit();
4927   }
4928 }
4929 
4930 // this is called _after_ the global arguments have been parsed
4931 jint os::init_2(void) {
4932   // try to enable extended file IO ASAP, see 6431278
4933   os::Solaris::try_enable_extended_io();
4934 
4935   // Allocate a single page and mark it as readable for safepoint polling.  Also
4936   // use this first mmap call to check support for MAP_ALIGN.
4937   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4938                                                       page_size,
4939                                                       MAP_PRIVATE | MAP_ALIGN,
4940                                                       PROT_READ);
4941   if (polling_page == NULL) {
4942     has_map_align = false;
4943     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
4944                                                 PROT_READ);
4945   }
4946 
4947   os::set_polling_page(polling_page);
4948 
4949 #ifndef PRODUCT
4950   if( Verbose && PrintMiscellaneous )
4951     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
4952 #endif
4953 
4954   if (!UseMembar) {
4955     address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
4956     guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
4957     os::set_memory_serialize_page( mem_serialize_page );
4958 
4959 #ifndef PRODUCT
4960     if(Verbose && PrintMiscellaneous)
4961       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
4962 #endif
4963   }
4964 
  // Check the minimum allowable stack size for thread creation and for
  // initializing the Java system classes, including StackOverflowError -
  // this depends on the page size.  Add a page for compiler2 recursion in
  // the main thread.  Add in 2*BytesPerWord times the page size to account
  // for VM stack usage during class initialization (BytesPerWord differs
  // between 32- and 64-bit VMs).
4970   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
4971             (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
4972                     2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
4973 
4974   size_t threadStackSizeInBytes = ThreadStackSize * K;
4975   if (threadStackSizeInBytes != 0 &&
4976     threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small. Specify at least %dk",
                  os::Solaris::min_stack_allowed/K);
4979     return JNI_ERR;
4980   }
4981 
  // With a 64kb page size the usable default stack size is quite a bit
  // less.  Increase the stack for 64kb (or any larger than 8kb) pages;
  // this increases virtual memory fragmentation (since we're not creating
  // the stack on a power of 2 boundary).  The real fix for this
  // should be to fix the guard page mechanism.
4988 
4989   if (vm_page_size() > 8*K) {
4990       threadStackSizeInBytes = (threadStackSizeInBytes != 0)
4991          ? threadStackSizeInBytes +
4992            ((StackYellowPages + StackRedPages) * vm_page_size())
4993          : 0;
4994       ThreadStackSize = threadStackSizeInBytes/K;
4995   }
4996 
4997   // Make the stack size a multiple of the page size so that
4998   // the yellow/red zones can be guarded.
4999   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
5000         vm_page_size()));
5001 
5002   Solaris::libthread_init();
5003 
5004   if (UseNUMA) {
5005     if (!Solaris::liblgrp_init()) {
5006       UseNUMA = false;
5007     } else {
5008       size_t lgrp_limit = os::numa_get_groups_num();
5009       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
5010       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
5011       FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
5012       if (lgrp_num < 2) {
5013         // There's only one locality group, disable NUMA.
5014         UseNUMA = false;
5015       }
5016     }
5017     if (!UseNUMA && ForceNUMA) {
5018       UseNUMA = true;
5019     }
5020   }
5021 
5022   Solaris::signal_sets_init();
5023   Solaris::init_signal_mem();
5024   Solaris::install_signal_handlers();
5025 
5026   if (libjsigversion < JSIG_VERSION_1_4_1) {
5027     Maxlibjsigsigs = OLDMAXSIGNUM;
5028   }
5029 
5030   // initialize synchronization primitives to use either thread or
5031   // lwp synchronization (controlled by UseLWPSynchronization)
5032   Solaris::synchronization_init();
5033 
5034   if (MaxFDLimit) {
    // Set the number of file descriptors to the maximum. Print an error
    // if getrlimit/setrlimit fails, but continue regardless.
5037     struct rlimit nbr_files;
5038     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
5039     if (status != 0) {
5040       if (PrintMiscellaneous && (Verbose || WizardMode))
5041         perror("os::init_2 getrlimit failed");
5042     } else {
5043       nbr_files.rlim_cur = nbr_files.rlim_max;
5044       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
5045       if (status != 0) {
5046         if (PrintMiscellaneous && (Verbose || WizardMode))
5047           perror("os::init_2 setrlimit failed");
5048       }
5049     }
5050   }
5051 
  // Calculate the theoretical max. number of threads to guard against
  // artificial out-of-memory situations, where all available address
  // space has been reserved by thread stacks. Default stack size is 1Mb.
5055   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
5056     JavaThread::stack_size_at_create() : (1*K*K);
5057   assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris allows a maximum of 4Gb of address space for user programs.
  // Calculate the thread limit at which we should start doing virtual
  // memory banging - currently when the threads have used all but 200Mb of space.
5061   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
5062   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
5063 
5064   // at-exit methods are called in the reverse order of their registration.
5065   // In Solaris 7 and earlier, atexit functions are called on return from
5066   // main or as a result of a call to exit(3C). There can be only 32 of
5067   // these functions registered and atexit() does not set errno. In Solaris
5068   // 8 and later, there is no limit to the number of functions registered
5069   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
5070   // functions are called upon dlclose(3DL) in addition to return from main
5071   // and exit(3C).
5072 
5073   if (PerfAllowAtExitRegistration) {
5074     // only register atexit functions if PerfAllowAtExitRegistration is set.
5075     // atexit functions can be delayed until process exit time, which
5076     // can be problematic for embedded VM situations. Embedded VMs should
5077     // call DestroyJavaVM() to assure that VM resources are released.
5078 
5079     // note: perfMemory_exit_helper atexit function may be removed in
5080     // the future if the appropriate cleanup code can be added to the
5081     // VM_Exit VMOperation's doit method.
5082     if (atexit(perfMemory_exit_helper) != 0) {
5083       warning("os::init2 atexit(perfMemory_exit_helper) failed");
5084     }
5085   }
5086 
5087   // Init pset_loadavg function pointer
5088   init_pset_getloadavg_ptr();
5089 
5090   return JNI_OK;
5091 }
5092 
5093 // Mark the polling page as unreadable
5094 void os::make_polling_page_unreadable(void) {
5095   if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
5096     fatal("Could not disable polling page");
5097 };
5098 
5099 // Mark the polling page as readable
5100 void os::make_polling_page_readable(void) {
5101   if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
5102     fatal("Could not enable polling page");
5103 };
5104 
5105 // OS interface.
5106 
5107 bool os::check_heap(bool force) { return true; }
5108 
5109 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
5110 static vsnprintf_t sol_vsnprintf = NULL;
5111 
5112 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
5113   if (!sol_vsnprintf) {
    // search for the named symbol in the objects that were loaded after libjvm
5115     void* where = RTLD_NEXT;
5116     if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
5117         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
5118     if (!sol_vsnprintf){
      // search for the named symbol in the objects that were loaded before libjvm
5120       where = RTLD_DEFAULT;
5121       if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
5122         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
5123       assert(sol_vsnprintf != NULL, "vsnprintf not found");
5124     }
5125   }
5126   return (*sol_vsnprintf)(buf, count, fmt, argptr);
5127 }
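
// Hypothetical usage sketch (the variadic wrapper below is illustrative and
// not part of this file): forward a caller's format string through whichever
// vsnprintf variant was resolved above.
//
//   static int local_snprintf(char* buf, size_t count, const char* fmt, ...) {
//     va_list ap;
//     va_start(ap, fmt);
//     int n = local_vsnprintf(buf, count, fmt, ap);
//     va_end(ap);
//     return n;
//   }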
5128 
5129 
5130 // Is a (classpath) directory empty?
5131 bool os::dir_is_empty(const char* path) {
5132   DIR *dir = NULL;
5133   struct dirent *ptr;
5134 
5135   dir = opendir(path);
5136   if (dir == NULL) return true;
5137 
5138   /* Scan the directory */
5139   bool result = true;
5140   char buf[sizeof(struct dirent) + MAX_PATH];
5141   struct dirent *dbuf = (struct dirent *) buf;
5142   while (result && (ptr = readdir(dir, dbuf)) != NULL) {
5143     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
5144       result = false;
5145     }
5146   }
5147   closedir(dir);
5148   return result;
5149 }
5150 
5151 // This code originates from JDK's sysOpen and open64_w
5152 // from src/solaris/hpi/src/system_md.c
5153 
5154 #ifndef O_DELETE
5155 #define O_DELETE 0x10000
5156 #endif
5157 
5158 // Open a file. Unlink the file immediately after open returns
5159 // if the specified oflag has the O_DELETE flag set.
5160 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
5161 
5162 int os::open(const char *path, int oflag, int mode) {
5163   if (strlen(path) > MAX_PATH - 1) {
5164     errno = ENAMETOOLONG;
5165     return -1;
5166   }
5167   int fd;
5168   int o_delete = (oflag & O_DELETE);
5169   oflag = oflag & ~O_DELETE;
5170 
5171   fd = ::open64(path, oflag, mode);
5172   if (fd == -1) return -1;
5173 
  // If the open succeeded, the file might still be a directory
5175   {
5176     struct stat64 buf64;
5177     int ret = ::fstat64(fd, &buf64);
5178     int st_mode = buf64.st_mode;
5179 
5180     if (ret != -1) {
5181       if ((st_mode & S_IFMT) == S_IFDIR) {
5182         errno = EISDIR;
5183         ::close(fd);
5184         return -1;
5185       }
5186     } else {
5187       ::close(fd);
5188       return -1;
5189     }
5190   }
5191     /*
5192      * 32-bit Solaris systems suffer from:
5193      *
5194      * - an historical default soft limit of 256 per-process file
5195      *   descriptors that is too low for many Java programs.
5196      *
5197      * - a design flaw where file descriptors created using stdio
5198      *   fopen must be less than 256, _even_ when the first limit above
5199      *   has been raised.  This can cause calls to fopen (but not calls to
5200      *   open, for example) to fail mysteriously, perhaps in 3rd party
5201      *   native code (although the JDK itself uses fopen).  One can hardly
5202      *   criticize them for using this most standard of all functions.
5203      *
5204      * We attempt to make everything work anyways by:
5205      *
5206      * - raising the soft limit on per-process file descriptors beyond
5207      *   256
5208      *
5209      * - As of Solaris 10u4, we can request that Solaris raise the 256
5210      *   stdio fopen limit by calling function enable_extended_FILE_stdio.
5211      *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
5212      *
5213      * - If we are stuck on an old (pre 10u4) Solaris system, we can
5214      *   workaround the bug by remapping non-stdio file descriptors below
5215      *   256 to ones beyond 256, which is done below.
5216      *
5217      * See:
5218      * 1085341: 32-bit stdio routines should support file descriptors >255
5219      * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
5220      * 6431278: Netbeans crash on 32 bit Solaris: need to call
5221      *          enable_extended_FILE_stdio() in VM initialisation
5222      * Giri Mandalika's blog
5223      * http://technopark02.blogspot.com/2005_05_01_archive.html
5224      */
5225 #ifndef  _LP64
5226      if ((!enabled_extended_FILE_stdio) && fd < 256) {
5227          int newfd = ::fcntl(fd, F_DUPFD, 256);
5228          if (newfd != -1) {
5229              ::close(fd);
5230              fd = newfd;
5231          }
5232      }
5233 #endif // 32-bit Solaris
5234     /*
5235      * All file descriptors that are opened in the JVM and not
5236      * specifically destined for a subprocess should have the
5237      * close-on-exec flag set.  If we don't set it, then careless 3rd
5238      * party native code might fork and exec without closing all
5239      * appropriate file descriptors (e.g. as we do in closeDescriptors in
5240      * UNIXProcess.c), and this in turn might:
5241      *
5242      * - cause end-of-file to fail to be detected on some file
5243      *   descriptors, resulting in mysterious hangs, or
5244      *
5245      * - might cause an fopen in the subprocess to fail on a system
5246      *   suffering from bug 1085341.
5247      *
5248      * (Yes, the default setting of the close-on-exec flag is a Unix
5249      * design flaw)
5250      *
5251      * See:
5252      * 1085341: 32-bit stdio routines should support file descriptors >255
5253      * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
5254      * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
5255      */
5256 #ifdef FD_CLOEXEC
5257     {
5258         int flags = ::fcntl(fd, F_GETFD);
5259         if (flags != -1)
5260             ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
5261     }
5262 #endif
5263 
5264   if (o_delete != 0) {
5265     ::unlink(path);
5266   }
5267   return fd;
5268 }
5269 
5270 // create binary file, rewriting existing file if required
5271 int os::create_binary_file(const char* path, bool rewrite_existing) {
5272   int oflags = O_WRONLY | O_CREAT;
5273   if (!rewrite_existing) {
5274     oflags |= O_EXCL;
5275   }
5276   return ::open64(path, oflags, S_IREAD | S_IWRITE);
5277 }
5278 
5279 // return current position of file pointer
5280 jlong os::current_file_offset(int fd) {
5281   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
5282 }
5283 
5284 // move file pointer to the specified offset
5285 jlong os::seek_to_file_offset(int fd, jlong offset) {
5286   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
5287 }
5288 
5289 jlong os::lseek(int fd, jlong offset, int whence) {
5290   return (jlong) ::lseek64(fd, offset, whence);
5291 }
5292 
5293 char * os::native_path(char *path) {
5294   return path;
5295 }
5296 
5297 int os::ftruncate(int fd, jlong length) {
5298   return ::ftruncate64(fd, length);
5299 }
5300 
5301 int os::fsync(int fd)  {
5302   RESTARTABLE_RETURN_INT(::fsync(fd));
5303 }
5304 
5305 int os::available(int fd, jlong *bytes) {
5306   jlong cur, end;
5307   int mode;
5308   struct stat64 buf64;
5309 
5310   if (::fstat64(fd, &buf64) >= 0) {
5311     mode = buf64.st_mode;
5312     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
5313       /*
5314       * XXX: is the following call interruptible? If so, this might
5315       * need to go through the INTERRUPT_IO() wrapper as for other
5316       * blocking, interruptible calls in this file.
5317       */
5318       int n,ioctl_return;
5319 
      INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n), ioctl_return, os::Solaris::clear_interrupted);
      if (ioctl_return >= 0) {
        *bytes = n;
5323         return 1;
5324       }
5325     }
5326   }
5327   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
5328     return 0;
5329   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
5330     return 0;
5331   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
5332     return 0;
5333   }
5334   *bytes = end - cur;
5335   return 1;
5336 }
5337 
5338 // Map a block of memory.
5339 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
5340                      char *addr, size_t bytes, bool read_only,
5341                      bool allow_exec) {
5342   int prot;
5343   int flags;
5344 
5345   if (read_only) {
5346     prot = PROT_READ;
5347     flags = MAP_SHARED;
5348   } else {
5349     prot = PROT_READ | PROT_WRITE;
5350     flags = MAP_PRIVATE;
5351   }
5352 
5353   if (allow_exec) {
5354     prot |= PROT_EXEC;
5355   }
5356 
5357   if (addr != NULL) {
5358     flags |= MAP_FIXED;
5359   }
5360 
5361   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5362                                      fd, file_offset);
5363   if (mapped_address == MAP_FAILED) {
5364     return NULL;
5365   }
5366   return mapped_address;
5367 }
5368 
5369 
5370 // Remap a block of memory.
5371 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5372                        char *addr, size_t bytes, bool read_only,
5373                        bool allow_exec) {
5374   // same as map_memory() on this OS
5375   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5376                         allow_exec);
5377 }
5378 
5379 
5380 // Unmap a block of memory.
5381 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5382   return munmap(addr, bytes) == 0;
5383 }
5384 
5385 void os::pause() {
5386   char filename[MAX_PATH];
5387   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5388     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
5389   } else {
5390     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5391   }
5392 
5393   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5394   if (fd != -1) {
5395     struct stat buf;
5396     ::close(fd);
5397     while (::stat(filename, &buf) == 0) {
5398       (void)::poll(NULL, 0, 100);
5399     }
5400   } else {
5401     jio_fprintf(stderr,
5402       "Could not open pause file '%s', continuing immediately.\n", filename);
5403   }
5404 }
5405 
5406 #ifndef PRODUCT
5407 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5408 // Turn this on if you need to trace synch operations.
5409 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5410 // and call record_synch_enable and record_synch_disable
5411 // around the computation of interest.
5412 
5413 void record_synch(char* name, bool returning);  // defined below
5414 
5415 class RecordSynch {
5416   char* _name;
5417  public:
5418   RecordSynch(char* name) :_name(name)
5419                  { record_synch(_name, false); }
5420   ~RecordSynch() { record_synch(_name,   true);  }
5421 };
5422 
5423 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
5424 extern "C" ret name params {                                    \
5425   typedef ret name##_t params;                                  \
5426   static name##_t* implem = NULL;                               \
5427   static int callcount = 0;                                     \
5428   if (implem == NULL) {                                         \
5429     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
5430     if (implem == NULL)  fatal(dlerror());                      \
5431   }                                                             \
5432   ++callcount;                                                  \
5433   RecordSynch _rs(#name);                                       \
5434   inner;                                                        \
5435   return implem args;                                           \
5436 }
5437 // in dbx, examine callcounts this way:
5438 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
5439 
5440 #define CHECK_POINTER_OK(p) \
5441   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
5442 #define CHECK_MU \
5443   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5444 #define CHECK_CV \
5445   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5446 #define CHECK_P(p) \
5447   if (!CHECK_POINTER_OK(p))  fatal(false,  "Pointer must be in C heap only.");
5448 
5449 #define CHECK_MUTEX(mutex_op) \
5450 CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5451 
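// For reference, a sketch of what CHECK_MUTEX(mutex_lock) expands to
// (approximate, with the token pasting spelled out):
//
//   extern "C" int mutex_lock(mutex_t* mu) {
//     static mutex_lock_t* implem = NULL;   // real routine, found via dlsym(RTLD_NEXT, "mutex_lock")
//     static int callcount = 0;
//     ...
//     RecordSynch _rs("mutex_lock");        // trace entry and exit
//     CHECK_MU;                             // assert the mutex lives in the C heap
//     return implem(mu);                    // forward to the interposed implementation
//   }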
5452 CHECK_MUTEX(   mutex_lock)
5453 CHECK_MUTEX(  _mutex_lock)
5454 CHECK_MUTEX( mutex_unlock)
5455 CHECK_MUTEX(_mutex_unlock)
5456 CHECK_MUTEX( mutex_trylock)
5457 CHECK_MUTEX(_mutex_trylock)
5458 
5459 #define CHECK_COND(cond_op) \
5460 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);
5461 
5462 CHECK_COND( cond_wait);
5463 CHECK_COND(_cond_wait);
5464 CHECK_COND(_cond_wait_cancel);
5465 
5466 #define CHECK_COND2(cond_op) \
5467 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);
5468 
5469 CHECK_COND2( cond_timedwait);
5470 CHECK_COND2(_cond_timedwait);
5471 CHECK_COND2(_cond_timedwait_cancel);
5472 
5473 // do the _lwp_* versions too
5474 #define mutex_t lwp_mutex_t
5475 #define cond_t  lwp_cond_t
5476 CHECK_MUTEX(  _lwp_mutex_lock)
5477 CHECK_MUTEX(  _lwp_mutex_unlock)
5478 CHECK_MUTEX(  _lwp_mutex_trylock)
5479 CHECK_MUTEX( __lwp_mutex_lock)
5480 CHECK_MUTEX( __lwp_mutex_unlock)
5481 CHECK_MUTEX( __lwp_mutex_trylock)
5482 CHECK_MUTEX(___lwp_mutex_lock)
5483 CHECK_MUTEX(___lwp_mutex_unlock)
5484 
5485 CHECK_COND(  _lwp_cond_wait);
5486 CHECK_COND( __lwp_cond_wait);
5487 CHECK_COND(___lwp_cond_wait);
5488 
5489 CHECK_COND2(  _lwp_cond_timedwait);
5490 CHECK_COND2( __lwp_cond_timedwait);
5491 #undef mutex_t
5492 #undef cond_t
5493 
5494 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5495 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5496 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5497 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5498 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5499 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5500 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5501 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5502 
5503 
5504 // recording machinery:
5505 
5506 enum { RECORD_SYNCH_LIMIT = 200 };
5507 char* record_synch_name[RECORD_SYNCH_LIMIT];
5508 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5509 bool record_synch_returning[RECORD_SYNCH_LIMIT];
5510 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5511 int record_synch_count = 0;
5512 bool record_synch_enabled = false;
5513 
5514 // in dbx, examine recorded data this way:
5515 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5516 
5517 void record_synch(char* name, bool returning) {
5518   if (record_synch_enabled) {
5519     if (record_synch_count < RECORD_SYNCH_LIMIT) {
5520       record_synch_name[record_synch_count] = name;
5521       record_synch_returning[record_synch_count] = returning;
5522       record_synch_thread[record_synch_count] = thr_self();
5523       record_synch_arg0ptr[record_synch_count] = &name;
5524       record_synch_count++;
5525     }
5526     // put more checking code here:
5527     // ...
5528   }
5529 }
5530 
5531 void record_synch_enable() {
5532   // start collecting trace data, if not already doing so
5533   if (!record_synch_enabled)  record_synch_count = 0;
5534   record_synch_enabled = true;
5535 }
5536 
5537 void record_synch_disable() {
5538   // stop collecting trace data
5539   record_synch_enabled = false;
5540 }
5541 
5542 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5543 #endif // PRODUCT
5544 
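// Byte offset of pr_utime within prusage_t, and the length of the span from
// pr_utime up to pr_ttime (covering the user and system CPU timestamps),
// computed with the null-pointer offset idiom.  thread_cpu_time() below uses
// these to pread() just that window from /proc/<pid>/lwp/<lwpid>/lwpusage.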
5545 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5546 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5547                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5548 
5549 
5550 // JVMTI & JVM monitoring and management support
5551 // The thread_cpu_time() and current_thread_cpu_time() are only
5552 // supported if is_thread_cpu_time_supported() returns true.
5553 // They are not supported on Solaris T1.
5554 
5555 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5556 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5557 // of a thread.
5558 //
// current_thread_cpu_time() and thread_cpu_time(Thread *)
// return the fast estimate available on the platform.
5561 
// The hrtime_t value returned by gethrvtime() includes
// user time but does not include system time.
5564 jlong os::current_thread_cpu_time() {
5565   return (jlong) gethrvtime();
5566 }
5567 
5568 jlong os::thread_cpu_time(Thread *thread) {
5569   // return user level CPU time only to be consistent with
5570   // what current_thread_cpu_time returns.
5571   // thread_cpu_time_info() must be changed if this changes
5572   return os::thread_cpu_time(thread, false /* user time only */);
5573 }
5574 
5575 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5576   if (user_sys_cpu_time) {
5577     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5578   } else {
5579     return os::current_thread_cpu_time();
5580   }
5581 }
5582 
5583 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5584   char proc_name[64];
5585   int count;
5586   prusage_t prusage;
5587   jlong lwp_time;
5588   int fd;
5589 
5590   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5591                      getpid(),
5592                      thread->osthread()->lwp_id());
5593   fd = ::open(proc_name, O_RDONLY);
5594   if ( fd == -1 ) return -1;
5595 
5596   do {
5597     count = ::pread(fd,
5598                   (void *)&prusage.pr_utime,
5599                   thr_time_size,
5600                   thr_time_off);
5601   } while (count < 0 && errno == EINTR);
5602   ::close(fd);
5603   if ( count < 0 ) return -1;
5604 
5605   if (user_sys_cpu_time) {
5606     // user + system CPU time
5607     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5608                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5609                  (jlong)prusage.pr_stime.tv_nsec +
5610                  (jlong)prusage.pr_utime.tv_nsec;
5611   } else {
5612     // user level CPU time only
5613     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5614                 (jlong)prusage.pr_utime.tv_nsec;
5615   }
5616 
5617   return(lwp_time);
5618 }
5619 
5620 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5621   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5622   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5623   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5624   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5625 }
5626 
5627 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5628   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5629   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5630   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5631   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5632 }
5633 
5634 bool os::is_thread_cpu_time_supported() {
5635   if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
5636     return true;
5637   } else {
5638     return false;
5639   }
5640 }
5641 
5642 // System loadavg support.  Returns -1 if load average cannot be obtained.
5643 // Return the load average for our processor set if the primitive exists
5644 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
5645 int os::loadavg(double loadavg[], int nelem) {
5646   if (pset_getloadavg_ptr != NULL) {
5647     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5648   } else {
5649     return ::getloadavg(loadavg, nelem);
5650   }
5651 }
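
// Illustrative caller sketch (not a call site from this file): sample the
// 1, 5, and 15 minute averages and test for the -1 failure return.
//
//   double avgs[3];
//   if (os::loadavg(avgs, 3) != -1) {
//     // avgs[0] holds the 1-minute load average (when at least one sample is returned)
//   }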
5652 
5653 //---------------------------------------------------------------------------------
5654 
5655 bool os::find(address addr, outputStream* st) {
5656   Dl_info dlinfo;
5657   memset(&dlinfo, 0, sizeof(dlinfo));
5658   if (dladdr(addr, &dlinfo) != 0) {
5659     st->print(PTR_FORMAT ": ", addr);
5660     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5661       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5662     } else if (dlinfo.dli_fbase != NULL)
5663       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5664     else
5665       st->print("<absolute address>");
5666     if (dlinfo.dli_fname != NULL) {
5667       st->print(" in %s", dlinfo.dli_fname);
5668     }
5669     if (dlinfo.dli_fbase != NULL) {
5670       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5671     }
5672     st->cr();
5673 
5674     if (Verbose) {
5675       // decode some bytes around the PC
5676       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5677       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5678       address       lowest = (address) dlinfo.dli_sname;
5679       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
5680       if (begin < lowest)  begin = lowest;
5681       Dl_info dlinfo2;
5682       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5683           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
5684         end = (address) dlinfo2.dli_saddr;
5685       Disassembler::decode(begin, end, st);
5686     }
5687     return true;
5688   }
5689   return false;
5690 }
5691 
// The following function was added to support HotSparc's libjvm.so running
// under the Solaris production JDK 1.2.2 / 1.3.0.  It came from
// src/solaris/hpi/native_threads in the EVM codebase.
//
// NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
// libraries and should thus be removed. We will leave it behind for a while
// until we no longer want to be able to run on top of the 1.3.0 Solaris
// production JDK. See 4341971.
5700 
5701 #define STACK_SLACK 0x800
5702 
5703 extern "C" {
5704   intptr_t sysThreadAvailableStackWithSlack() {
5705     stack_t st;
5706     intptr_t retval, stack_top;
5707     retval = thr_stksegment(&st);
5708     assert(retval == 0, "incorrect return value from thr_stksegment");
5709     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5710     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
5711     stack_top=(intptr_t)st.ss_sp-st.ss_size;
5712     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5713   }
5714 }
5715 
5716 // ObjectMonitor park-unpark infrastructure ...
5717 //
5718 // We implement Solaris and Linux PlatformEvents with the
5719 // obvious condvar-mutex-flag triple.
5720 // Another alternative that works quite well is pipes:
5721 // Each PlatformEvent consists of a pipe-pair.
5722 // The thread associated with the PlatformEvent
5723 // calls park(), which reads from the input end of the pipe.
5724 // Unpark() writes into the other end of the pipe.
5725 // The write-side of the pipe must be set NDELAY.
5726 // Unfortunately pipes consume a large # of handles.
5727 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5728 // Using pipes for the 1st few threads might be workable, however.
5729 //
5730 // park() is permitted to return spuriously.
5731 // Callers of park() should wrap the call to park() in
5732 // an appropriate loop.  A litmus test for the correct
5733 // usage of park() is the following: if park() were modified
5734 // to immediately return 0, your code should still work, albeit
5735 // degenerating to a spin loop (see the caller-side sketch below).
5736 //
5737 // An interesting optimization for park() is to use a trylock()
5738 // to attempt to acquire the mutex.  If the trylock() fails
5739 // then we know that a concurrent unpark() operation is in-progress.
5740 // In that case the park() code could simply set _count to 0
5741 // and return immediately.  The subsequent park() operation *might*
5742 // return immediately.  That's harmless as the caller of park() is
5743 // expected to loop.  By using trylock() we will have avoided a
5744 // context switch caused by contention on the per-thread mutex.
5745 //
5746 // TODO-FIXME:
5747 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
5748 //     objectmonitor implementation.
5749 // 2.  Collapse the JSR166 parker event, and the
5750 //     objectmonitor ParkEvent into a single "Event" construct.
5751 // 3.  In park() and unpark() add:
5752 //     assert (Thread::current() == AssociatedWith).
5753 // 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
5754 //     1-out-of-N park() operations will return immediately.
5755 //
5756 // _Event transitions in park()
5757 //   -1 => -1 : illegal
5758 //    1 =>  0 : pass - return immediately
5759 //    0 => -1 : block
5760 //
5761 // _Event serves as a restricted-range semaphore.
5762 //
5763 // Another possible encoding of _Event would be with
5764 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5765 //
5766 // TODO-FIXME: add DTRACE probes for:
5767 // 1.   Tx parks
5768 // 2.   Ty unparks Tx
5769 // 3.   Tx resumes from park
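     //
     // Caller-side sketch (illustrative only, not part of this file): the loop
     // re-checks an application-level condition, so a spurious return from
     // park() is harmless.  Here "_ready" and "_event" are hypothetical names
     // for a condition flag and the waiting thread's PlatformEvent; the
     // signaling thread sets _ready before it calls unpark().
     //
     //   while (!_ready) {
     //     _event->park();   // may return spuriously; the loop re-checks _ready
     //   }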
5770 
5771 
5772 // value determined through experimentation
5773 #define ROUNDINGFIX 11
5774 
5775 // utility to compute the abstime argument to timedwait.
5776 // TODO-FIXME: switch from compute_abstime() to unpackTime().
5777 
5778 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5779   // millis is the relative timeout time
5780   // abstime will be the absolute timeout time
5781   if (millis < 0)  millis = 0;
5782   struct timeval now;
5783   int status = gettimeofday(&now, NULL);
5784   assert(status == 0, "gettimeofday");
5785   jlong seconds = millis / 1000;
5786   jlong max_wait_period;
5787 
5788   if (UseLWPSynchronization) {
5789     // forward port of fix for 4275818 (not sleeping long enough)
5790     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5791     // _lwp_cond_timedwait() used a round_down algorithm rather
5792     // than a round_up. For millis less than our roundfactor
5793 // it rounded down to 0, which doesn't meet the spec.
5794 // For millis > roundfactor we may return a bit sooner, but
5795 // since we cannot accurately identify the patch level and
5796 // this has already been fixed in Solaris 8 and 9, we will
5797 // leave it alone rather than always rounding down.
5798 
5799     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
5800     // It appears that when we go directly through Solaris _lwp_cond_timedwait()
5801     // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6.
5802     max_wait_period = 21000000;
5803   } else {
5804     max_wait_period = 50000000;
5805   }
5806   millis %= 1000;
5807   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5808      seconds = max_wait_period;
5809   }
5810   abstime->tv_sec = now.tv_sec  + seconds;
5811   long       usec = now.tv_usec + millis * 1000;
5812   if (usec >= 1000000) {
5813     abstime->tv_sec += 1;
5814     usec -= 1000000;
5815   }
5816   abstime->tv_nsec = usec * 1000;
5817   return abstime;
5818 }
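
     // Worked example for compute_abstime() (illustrative): with millis == 2500
     // and now == {tv_sec = 100, tv_usec = 999900}, seconds is 2 and the residual
     // millis is 500, so usec == 999900 + 500000 == 1499900; after the carry,
     // abstime ends up as {tv_sec = 103, tv_nsec = 499900000}.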
5819 
5820 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
5821 // Conceptually TryPark() should be equivalent to park(0).
5822 
5823 int os::PlatformEvent::TryPark() {
5824   for (;;) {
5825     const int v = _Event ;
5826     guarantee ((v == 0) || (v == 1), "invariant") ;
5827     if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
5828   }
5829 }
5830 
5831 void os::PlatformEvent::park() {           // AKA: down()
5832   // Invariant: Only the thread associated with the Event/PlatformEvent
5833   // may call park().
5834   int v ;
5835   for (;;) {
5836       v = _Event ;
5837       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5838   }
5839   guarantee (v >= 0, "invariant") ;
5840   if (v == 0) {
5841      // Do this the hard way by blocking ...
5842      // See http://monaco.sfbay/detail.jsf?cr=5094058.
5843      // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5844      // Only for SPARC >= V8PlusA
5845 #if defined(__sparc) && defined(COMPILER2)
5846      if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5847 #endif
5848      int status = os::Solaris::mutex_lock(_mutex);
5849      assert_status(status == 0, status,  "mutex_lock");
5850      guarantee (_nParked == 0, "invariant") ;
5851      ++ _nParked ;
5852      while (_Event < 0) {
5853         // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5854         // Treat this the same as if the wait was interrupted
5855         // With usr/lib/lwp going to kernel, always handle ETIME
5856         status = os::Solaris::cond_wait(_cond, _mutex);
5857         if (status == ETIME) status = EINTR ;
5858         assert_status(status == 0 || status == EINTR, status, "cond_wait");
5859      }
5860      -- _nParked ;
5861      _Event = 0 ;
5862      status = os::Solaris::mutex_unlock(_mutex);
5863      assert_status(status == 0, status, "mutex_unlock");
5864      // Paranoia to ensure our locked and lock-free paths interact
5865      // correctly with each other.
5866      OrderAccess::fence();
5867   }
5868 }
5869 
5870 int os::PlatformEvent::park(jlong millis) {
5871   guarantee (_nParked == 0, "invariant") ;
5872   int v ;
5873   for (;;) {
5874       v = _Event ;
5875       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5876   }
5877   guarantee (v >= 0, "invariant") ;
5878   if (v != 0) return OS_OK ;
5879 
5880   int ret = OS_TIMEOUT;
5881   timestruc_t abst;
5882   compute_abstime (&abst, millis);
5883 
5884   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5885   // For Solaris SPARC set fprs.FEF=0 prior to parking.
5886   // Only for SPARC >= V8PlusA
5887 #if defined(__sparc) && defined(COMPILER2)
5888  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5889 #endif
5890   int status = os::Solaris::mutex_lock(_mutex);
5891   assert_status(status == 0, status, "mutex_lock");
5892   guarantee (_nParked == 0, "invariant") ;
5893   ++ _nParked ;
5894   while (_Event < 0) {
5895      int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5896      assert_status(status == 0 || status == EINTR ||
5897                    status == ETIME || status == ETIMEDOUT,
5898                    status, "cond_timedwait");
5899      if (!FilterSpuriousWakeups) break ;                // previous semantics
5900      if (status == ETIME || status == ETIMEDOUT) break ;
5901      // We consume and ignore EINTR and spurious wakeups.
5902   }
5903   -- _nParked ;
5904   if (_Event >= 0) ret = OS_OK ;
5905   _Event = 0 ;
5906   status = os::Solaris::mutex_unlock(_mutex);
5907   assert_status(status == 0, status, "mutex_unlock");
5908   // Paranoia to ensure our locked and lock-free paths interact
5909   // correctly with each other.
5910   OrderAccess::fence();
5911   return ret;
5912 }
5913 
5914 void os::PlatformEvent::unpark() {
5915   // Transitions for _Event:
5916   //    0 :=> 1
5917   //    1 :=> 1
5918   //   -1 :=> either 0 or 1; must signal target thread
5919   //          That is, we can safely transition _Event from -1 to either
5920   //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
5921   //          unpark() calls.
5922   // See also: "Semaphores in Plan 9" by Mullender & Cox
5923   //
5924   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5925   // that it will take two back-to-back park() calls for the owning
5926   // thread to block. This has the benefit of forcing a spurious return
5927   // from the first park() call after an unpark() call which will help
5928   // shake out uses of park() and unpark() without condition variables.
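       //
       // For example: after unpark() stores 1, the next park() observes v == 1,
       // CAS-es _Event to 0, and returns without blocking; only a second park(),
       // which then observes 0, actually blocks.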
5929 
5930   if (Atomic::xchg(1, &_Event) >= 0) return;
5931 
5932   // If the thread associated with the event was parked, wake it.
5933   // Wait for the thread assoc with the PlatformEvent to vacate.
5934   int status = os::Solaris::mutex_lock(_mutex);
5935   assert_status(status == 0, status, "mutex_lock");
5936   int AnyWaiters = _nParked;
5937   status = os::Solaris::mutex_unlock(_mutex);
5938   assert_status(status == 0, status, "mutex_unlock");
5939   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5940   if (AnyWaiters != 0) {
5941     // We intentionally signal *after* dropping the lock
5942     // to avoid a common class of futile wakeups.
5943     status = os::Solaris::cond_signal(_cond);
5944     assert_status(status == 0, status, "cond_signal");
5945   }
5946 }
5947 
5948 // JSR166
5949 // -------------------------------------------------------
5950 
5951 /*
5952  * The Solaris and Linux implementations of park/unpark are fairly
5953  * conservative for now, but can be improved. They currently use a
5954  * mutex/condvar pair, plus _counter.
5955  * Park decrements _counter if > 0, else does a condvar wait.  Unpark
5956  * sets _counter to 1 and signals the condvar.  Only one thread ever waits
5957  * on the condvar. Contention seen when trying to park implies that someone
5958  * is unparking you, so don't wait. And spurious returns are fine, so there
5959  * is no need to track notifications.
5960  */
5961 
5962 #define MAX_SECS 100000000
5963 /*
5964  * This code is common to Linux and Solaris and will be moved to a
5965  * common place in Dolphin.
5966  *
5967  * The passed-in time value is either a relative time in nanoseconds
5968  * or an absolute time in milliseconds. Either way it has to be unpacked
5969  * into suitable seconds and nanoseconds components and stored in the
5970  * given timespec structure.
5971  * Given that time is a 64-bit value and the time_t used in the timespec is
5972  * only a signed 32-bit value (except on 64-bit Linux), we have to watch for
5973  * overflow if times far in the future are given. Further, on Solaris versions
5974  * prior to 10 there is a restriction (see cond_timedwait) that the specified
5975  * number of seconds, in abstime, is less than current_time + 100,000,000.
5976  * As it will be 28 years before "now + 100000000" overflows, we can
5977  * ignore overflow and just impose a hard limit on seconds using the value
5978  * of "now + 100,000,000". This places a limit on the timeout of about 3.17
5979  * years from "now".
5980  */
5981 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5982   assert (time > 0, "convertTime");
5983 
5984   struct timeval now;
5985   int status = gettimeofday(&now, NULL);
5986   assert(status == 0, "gettimeofday");
5987 
5988   time_t max_secs = now.tv_sec + MAX_SECS;
5989 
5990   if (isAbsolute) {
5991     jlong secs = time / 1000;
5992     if (secs > max_secs) {
5993       absTime->tv_sec = max_secs;
5994     }
5995     else {
5996       absTime->tv_sec = secs;
5997     }
5998     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5999   }
6000   else {
6001     jlong secs = time / NANOSECS_PER_SEC;
6002     if (secs >= MAX_SECS) {
6003       absTime->tv_sec = max_secs;
6004       absTime->tv_nsec = 0;
6005     }
6006     else {
6007       absTime->tv_sec = now.tv_sec + secs;
6008       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
6009       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
6010         absTime->tv_nsec -= NANOSECS_PER_SEC;
6011         ++absTime->tv_sec; // note: this must be <= max_secs
6012       }
6013     }
6014   }
6015   assert(absTime->tv_sec >= 0, "tv_sec < 0");
6016   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
6017   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
6018   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
6019 }
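
     // Worked example for unpackTime() (illustrative): a relative request of
     // time == 1500000000 ns with now == {tv_sec = 100, tv_usec = 0} gives
     // secs == 1, so absTime becomes {tv_sec = 101, tv_nsec = 500000000}; if
     // now.tv_usec pushed tv_nsec past one second, the carry would be folded
     // into tv_sec as the code above shows.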
6020 
6021 void Parker::park(bool isAbsolute, jlong time) {
6022   // Ideally we'd do something useful while spinning, such
6023   // as calling unpackTime().
6024 
6025   // Optional fast-path check:
6026   // Return immediately if a permit is available.
6027   // We depend on Atomic::xchg() having full barrier semantics
6028   // since we are doing a lock-free update to _counter.
6029   if (Atomic::xchg(0, &_counter) > 0) return;
6030 
6031   // Optional fast-exit: Check interrupt before trying to wait
6032   Thread* thread = Thread::current();
6033   assert(thread->is_Java_thread(), "Must be JavaThread");
6034   JavaThread *jt = (JavaThread *)thread;
6035   if (Thread::is_interrupted(thread, false)) {
6036     return;
6037   }
6038 
6039   // First, demultiplex/decode time arguments
6040   timespec absTime;
6041   if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
6042     return;
6043   }
6044   if (time > 0) {
6045     // Warning: this code might be exposed to the old Solaris time
6046     // round-down bugs.  Grep "roundingFix" for details.
6047     unpackTime(&absTime, isAbsolute, time);
6048   }
6049 
6050   // Enter safepoint region
6051   // Beware of deadlocks such as 6317397.
6052   // The per-thread Parker:: _mutex is a classic leaf-lock.
6053   // In particular a thread must never block on the Threads_lock while
6054   // holding the Parker:: mutex.  If safepoints are pending, both the
6055   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
6056   ThreadBlockInVM tbivm(jt);
6057 
6058   // Don't wait if we cannot get the lock, since interference arises from
6059   // unblocking.  Also, check for interrupt before trying to wait.
6060   if (Thread::is_interrupted(thread, false) ||
6061       os::Solaris::mutex_trylock(_mutex) != 0) {
6062     return;
6063   }
6064 
6065   int status ;
6066 
6067   if (_counter > 0)  { // no wait needed
6068     _counter = 0;
6069     status = os::Solaris::mutex_unlock(_mutex);
6070     assert (status == 0, "invariant") ;
6071     // Paranoia to ensure our locked and lock-free paths interact
6072     // correctly with each other and Java-level accesses.
6073     OrderAccess::fence();
6074     return;
6075   }
6076 
6077 #ifdef ASSERT
6078   // Don't catch signals while blocked; let the running threads have the signals.
6079   // (This allows a debugger to break into the running thread.)
6080   sigset_t oldsigs;
6081   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
6082   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
6083 #endif
6084 
6085   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
6086   jt->set_suspend_equivalent();
6087   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
6088 
6089   // Do this the hard way by blocking ...
6090   // See http://monaco.sfbay/detail.jsf?cr=5094058.
6091   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
6092   // Only for SPARC >= V8PlusA
6093 #if defined(__sparc) && defined(COMPILER2)
6094   if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
6095 #endif
6096 
6097   if (time == 0) {
6098     status = os::Solaris::cond_wait (_cond, _mutex) ;
6099   } else {
6100     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
6101   }
6102   // Note that an untimed cond_wait() can sometimes return ETIME on older
6103   // versions of Solaris.
6104   assert_status(status == 0 || status == EINTR ||
6105                 status == ETIME || status == ETIMEDOUT,
6106                 status, "cond_timedwait");
6107 
6108 #ifdef ASSERT
6109   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
6110 #endif
6111   _counter = 0 ;
6112   status = os::Solaris::mutex_unlock(_mutex);
6113   assert_status(status == 0, status, "mutex_unlock") ;
6114   // Paranoia to ensure our locked and lock-free paths interact
6115   // correctly with each other and Java-level accesses.
6116   OrderAccess::fence();
6117 
6118   // If externally suspended while waiting, re-suspend
6119   if (jt->handle_special_suspend_equivalent_condition()) {
6120     jt->java_suspend_self();
6121   }
6122 }
6123 
6124 void Parker::unpark() {
6125   int s, status ;
6126   status = os::Solaris::mutex_lock (_mutex) ;
6127   assert (status == 0, "invariant") ;
6128   s = _counter;
6129   _counter = 1;
6130   status = os::Solaris::mutex_unlock (_mutex) ;
6131   assert (status == 0, "invariant") ;
6132 
6133   if (s < 1) {
6134     status = os::Solaris::cond_signal (_cond) ;
6135     assert (status == 0, "invariant") ;
6136   }
6137 }
6138 
6139 extern char** environ;
6140 
6141 // Run the specified command in a separate process. Return its exit value,
6142 // or -1 on failure (e.g. can't fork a new process).
6143 // Unlike system(), this function can be called from signal handler. It
6144 // doesn't block SIGINT et al.
6145 int os::fork_and_exec(char* cmd) {
6146   char * argv[4];
6147   argv[0] = (char *)"sh";
6148   argv[1] = (char *)"-c";
6149   argv[2] = cmd;
6150   argv[3] = NULL;
6151 
6152   // fork() is async-safe; fork1() is not, so it can't be used in a signal handler
6153   pid_t pid;
6154   Thread* t = ThreadLocalStorage::get_thread_slow();
6155   if (t != NULL && t->is_inside_signal_handler()) {
6156     pid = fork();
6157   } else {
6158     pid = fork1();
6159   }
6160 
6161   if (pid < 0) {
6162     // fork failed
6163     warning("fork failed: %s", strerror(errno));
6164     return -1;
6165 
6166   } else if (pid == 0) {
6167     // child process
6168 
6169     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
6170     execve("/usr/bin/sh", argv, environ);
6171 
6172     // execve failed
6173     _exit(-1);
6174 
6175   } else  {
6176     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
6177     // care about the actual exit code, for now.
6178 
6179     int status;
6180 
6181     // Wait for the child process to exit.  This returns immediately if
6182     // the child has already exited.
6183     while (waitpid(pid, &status, 0) < 0) {
6184         switch (errno) {
6185         case ECHILD: return 0;
6186         case EINTR: break;
6187         default: return -1;
6188         }
6189     }
6190 
6191     if (WIFEXITED(status)) {
6192        // The child exited normally; get its exit code.
6193        return WEXITSTATUS(status);
6194     } else if (WIFSIGNALED(status)) {
6195        // The child exited because of a signal
6196        // The best value to return is 0x80 + signal number,
6197        // because that is what all Unix shells do, and because
6198        // it allows callers to distinguish between process exit and
6199        // process death by signal.
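            // For example, a child terminated by SIGKILL (signal 9) yields
            // 0x80 + 9 == 137, matching the value a shell would report.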
6200        return 0x80 + WTERMSIG(status);
6201     } else {
6202        // Unknown exit code; pass it through
6203        return status;
6204     }
6205   }
6206 }
6207 
6208 // is_headless_jre()
6209 //
6210 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
6211 // in order to report whether we are running in a headless JRE.
6212 //
6213 // Since JDK 8, xawt/libmawt.so has been moved into the same directory
6214 // as libawt.so and renamed libawt_xawt.so.
6215 //
6216 bool os::is_headless_jre() {
6217     struct stat statbuf;
6218     char buf[MAXPATHLEN];
6219     char libmawtpath[MAXPATHLEN];
6220     const char *xawtstr  = "/xawt/libmawt.so";
6221     const char *new_xawtstr = "/libawt_xawt.so";
6222     char *p;
6223 
6224     // Get path to libjvm.so
6225     os::jvm_path(buf, sizeof(buf));
6226 
6227     // Get rid of libjvm.so
6228     p = strrchr(buf, '/');
6229     if (p == NULL) return false;
6230     else *p = '\0';
6231 
6232     // Get rid of client or server
6233     p = strrchr(buf, '/');
6234     if (p == NULL) return false;
6235     else *p = '\0';
6236 
6237     // check xawt/libmawt.so
6238     strcpy(libmawtpath, buf);
6239     strcat(libmawtpath, xawtstr);
6240     if (::stat(libmawtpath, &statbuf) == 0) return false;
6241 
6242     // check libawt_xawt.so
6243     strcpy(libmawtpath, buf);
6244     strcat(libmawtpath, new_xawtstr);
6245     if (::stat(libmawtpath, &statbuf) == 0) return false;
6246 
6247     return true;
6248 }
6249 
6250 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
6251   INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted);
6252 }
6253 
6254 int os::close(int fd) {
6255   return ::close(fd);
6256 }
6257 
6258 int os::socket_close(int fd) {
6259   return ::close(fd);
6260 }
6261 
6262 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
6263   INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
6264 }
6265 
6266 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
6267   INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
6268 }
6269 
6270 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
6271   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
6272 }
6273 
6274 // As both poll and select can be interrupted by signals, we have to be
6275 // prepared to restart the system call after updating the timeout, unless
6276 // a poll() is done with timeout == -1, in which case we repeat with this
6277 // "wait forever" value.
6278 
6279 int os::timeout(int fd, long timeout) {
6280   int res;
6281   struct timeval t;
6282   julong prevtime, newtime;
6283   static const char* aNull = 0;
6284   struct pollfd pfd;
6285   pfd.fd = fd;
6286   pfd.events = POLLIN;
6287 
6288   gettimeofday(&t, &aNull);
6289   prevtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;
6290 
6291   for(;;) {
6292     INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
6293     if(res == OS_ERR && errno == EINTR) {
6294         if(timeout != -1) {
6295           gettimeofday(&t, &aNull);
6296           newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec /1000;
6297           timeout -= newtime - prevtime;
6298           if(timeout <= 0)
6299             return OS_OK;
6300           prevtime = newtime;
6301         }
6302     } else return res;
6303   }
6304 }
6305 
6306 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
6307   int _result;
6308   INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\
6309                           os::Solaris::clear_interrupted);
6310 
6311   // Depending on when thread interruption is reset, _result could be
6312   // one of two values when errno == EINTR
6313 
6314   if (((_result == OS_INTRPT) || (_result == OS_ERR))
6315       && (errno == EINTR)) {
6316      /* restarting a connect() changes its errno semantics */
6317      INTERRUPTIBLE(::connect(fd, him, len), _result,\
6318                    os::Solaris::clear_interrupted);
6319      /* undo these changes */
6320      if (_result == OS_ERR) {
6321        if (errno == EALREADY) {
6322          errno = EINPROGRESS; /* fall through */
6323        } else if (errno == EISCONN) {
6324          errno = 0;
6325          return OS_OK;
6326        }
6327      }
6328   }
6329   return _result;
6330 }
6331 
6332 int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
6333   if (fd < 0) {
6334     return OS_ERR;
6335   }
6336   INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\
6337                            os::Solaris::clear_interrupted);
6338 }
6339 
6340 int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
6341                  sockaddr* from, socklen_t* fromlen) {
6342   INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\
6343                            os::Solaris::clear_interrupted);
6344 }
6345 
6346 int os::sendto(int fd, char* buf, size_t len, uint flags,
6347                struct sockaddr* to, socklen_t tolen) {
6348   INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\
6349                            os::Solaris::clear_interrupted);
6350 }
6351 
6352 int os::socket_available(int fd, jint *pbytes) {
6353   if (fd < 0) {
6354     return OS_OK;
6355   }
6356   int ret;
6357   RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
6358   // note: ioctl can return 0 when successful, but JVM_SocketAvailable
6359   // is expected to return 0 on failure and 1 on success to the JDK.
6360   return (ret == OS_ERR) ? 0 : 1;
6361 }
6362 
6363 int os::bind(int fd, struct sockaddr* him, socklen_t len) {
6364    INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
6365                                       os::Solaris::clear_interrupted);
6366 }
6367 
6368 // Get the default path to the core file
6369 // Returns the length of the string
6370 int os::get_core_path(char* buffer, size_t bufferSize) {
6371   const char* p = get_current_directory(buffer, bufferSize);
6372 
6373   if (p == NULL) {
6374     assert(p != NULL, "failed to get current directory");
6375     return 0;
6376   }
6377 
6378   return strlen(buffer);
6379 }
6380 
6381 #ifndef PRODUCT
6382 void TestReserveMemorySpecial_test() {
6383   // No tests available for this platform
6384 }
6385 #endif