1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "jvm_solaris.h"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/filemap.hpp"
  37 #include "mutex_solaris.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "os_share_solaris.hpp"
  40 #include "prims/jniFastGetField.hpp"
  41 #include "prims/jvm.h"
  42 #include "prims/jvm_misc.hpp"
  43 #include "runtime/arguments.hpp"
  44 #include "runtime/extendedPC.hpp"
  45 #include "runtime/globals.hpp"
  46 #include "runtime/interfaceSupport.hpp"
  47 #include "runtime/java.hpp"
  48 #include "runtime/javaCalls.hpp"
  49 #include "runtime/mutexLocker.hpp"
  50 #include "runtime/objectMonitor.hpp"
  51 #include "runtime/orderAccess.inline.hpp"
  52 #include "runtime/osThread.hpp"
  53 #include "runtime/perfMemory.hpp"
  54 #include "runtime/sharedRuntime.hpp"
  55 #include "runtime/statSampler.hpp"
  56 #include "runtime/stubRoutines.hpp"
  57 #include "runtime/thread.inline.hpp"
  58 #include "runtime/threadCritical.hpp"
  59 #include "runtime/timer.hpp"
  60 #include "services/attachListener.hpp"
  61 #include "services/memTracker.hpp"
  62 #include "services/runtimeService.hpp"
  63 #include "utilities/decoder.hpp"
  64 #include "utilities/defaultStream.hpp"
  65 #include "utilities/events.hpp"
  66 #include "utilities/growableArray.hpp"
  67 #include "utilities/vmError.hpp"
  68 
  69 // put OS-includes here
  70 # include <dlfcn.h>
  71 # include <errno.h>
  72 # include <exception>
  73 # include <link.h>
  74 # include <poll.h>
  75 # include <pthread.h>
  76 # include <pwd.h>
  77 # include <schedctl.h>
  78 # include <setjmp.h>
  79 # include <signal.h>
  80 # include <stdio.h>
  81 # include <alloca.h>
  82 # include <sys/filio.h>
  83 # include <sys/ipc.h>
  84 # include <sys/lwp.h>
  85 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
  86 # include <sys/mman.h>
  87 # include <sys/processor.h>
  88 # include <sys/procset.h>
  89 # include <sys/pset.h>
  90 # include <sys/resource.h>
  91 # include <sys/shm.h>
  92 # include <sys/socket.h>
  93 # include <sys/stat.h>
  94 # include <sys/systeminfo.h>
  95 # include <sys/time.h>
  96 # include <sys/times.h>
  97 # include <sys/types.h>
  98 # include <sys/wait.h>
  99 # include <sys/utsname.h>
 100 # include <thread.h>
 101 # include <unistd.h>
 102 # include <sys/priocntl.h>
 103 # include <sys/rtpriocntl.h>
 104 # include <sys/tspriocntl.h>
 105 # include <sys/iapriocntl.h>
 106 # include <sys/fxpriocntl.h>
 107 # include <sys/loadavg.h>
 108 # include <string.h>
 109 # include <stdio.h>
 110 
 111 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
 112 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
 113 
 114 #define MAX_PATH (2 * K)
 115 
 116 // for timer info max values which include all bits
 117 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
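
// Illustrative sketch (an assumption about usage elsewhere in this file, not
// authoritative): the JVMTI timer info queries typically report that the
// timer cannot wrap by filling max_value with all bits set, e.g.:
//
//   void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
//     info_ptr->max_value = ALL_64_BITS;  // will not wrap in less than 64 bits
//     ...
//   }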
 118 
 119 
 120 // Here are some liblgrp constants from sys/lgrp_user.h, defined here so we
 121 // can compile on older systems that do not have this header file.
 122 
 123 #ifndef MADV_ACCESS_LWP
 124 # define  MADV_ACCESS_LWP         7       /* next LWP to access heavily */
 125 #endif
 126 #ifndef MADV_ACCESS_MANY
 127 # define  MADV_ACCESS_MANY        8       /* many processes to access heavily */
 128 #endif
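
// Illustrative sketch (hedged; the NUMA support code elsewhere in this file
// is assumed to use these advice values roughly like this via madvise(3C)):
//
//   // Hint that many LWPs will access the range heavily (shared data).
//   madvise((caddr_t)addr, bytes, MADV_ACCESS_MANY);
//
//   // Hint that the next LWP to touch the range will access it heavily.
//   madvise((caddr_t)addr, bytes, MADV_ACCESS_LWP);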
 129 
 130 #ifndef LGRP_RSRC_CPU
 131 # define LGRP_RSRC_CPU           0       /* CPU resources */
 132 #endif
 133 #ifndef LGRP_RSRC_MEM
 134 # define LGRP_RSRC_MEM           1       /* memory resources */
 135 #endif
 136 
 137 // see thr_setprio(3T) for the basis of these numbers
 138 #define MinimumPriority 0
 139 #define NormalPriority  64
 140 #define MaximumPriority 127
 141 
 142 // Values for ThreadPriorityPolicy == 1
 143 int prio_policy1[CriticalPriority+1] = {
 144   -99999,  0, 16,  32,  48,  64,
 145           80, 96, 112, 124, 127, 127 };
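
// Illustrative note (a sketch of how this table is consumed, assuming the
// usual java_to_os_priority translation done elsewhere in the VM): with
// -XX:ThreadPriorityPolicy=1 the Java priority indexes prio_policy1 directly,
// so NormPriority (5) maps to Solaris priority 64 and MaxPriority (10) to 127:
//
//   int os_prio = prio_policy1[java_priority];  // java_priority in [1..CriticalPriority]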
 146 
 147 // System parameters used internally
 148 static clock_t clock_tics_per_sec = 100;
 149 
 150 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
 151 static bool enabled_extended_FILE_stdio = false;
 152 
 153 // For diagnostics that should print a message only once; see run_periodic_checks.
 154 static bool check_addr0_done = false;
 155 static sigset_t check_signal_done;
 156 static bool check_signals = true;
 157 
 158 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
 159 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
 160 
 161 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
 162 
 163 
 164 // "default" initializers for missing libc APIs
 165 extern "C" {
 166   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 167   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
 168 
 169   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 170   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
 171 }
 172 
 173 // "default" initializers for pthread-based synchronization
 174 extern "C" {
 175   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 176   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 177 }
 178 
 179 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 180 
 181 static inline size_t adjust_stack_size(address base, size_t size) {
 182   if ((ssize_t)size < 0) {
 183     // 4759953: Compensate for ridiculous stack size.
 184     size = max_intx;
 185   }
 186   if (size > (size_t)base) {
 187     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
 188     size = (size_t)base;
 189   }
 190   return size;
 191 }
 192 
 193 static inline stack_t get_stack_info() {
 194   stack_t st;
 195   int retval = thr_stksegment(&st);
 196   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
 197   assert(retval == 0, "incorrect return value from thr_stksegment");
 198   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
 199   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
 200   return st;
 201 }
 202 
 203 bool os::is_primordial_thread(void) {
 204   int r = thr_main() ;
 205   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
 206   return r == 1;
 207 }
 208 
 209 address os::current_stack_base() {
 210   bool _is_primordial_thread = is_primordial_thread();
 211 
 212   // Workaround for 4352906: avoid calls to thr_stksegment from the
 213   // primordial thread after the first one (it looks like we trash
 214   // some data, causing the value for ss_sp to be incorrect).
 215   if (!_is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
 216     stack_t st = get_stack_info();
 217     if (_is_primordial_thread) {
 218       // cache initial value of stack base
 219       os::Solaris::_main_stack_base = (address)st.ss_sp;
 220     }
 221     return (address)st.ss_sp;
 222   } else {
 223     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
 224     return os::Solaris::_main_stack_base;
 225   }
 226 }
 227 
 228 size_t os::current_stack_size() {
 229   size_t size;
 230 
 231   if (!is_primordial_thread()) {
 232     size = get_stack_info().ss_size;
 233   } else {
 234     struct rlimit limits;
 235     getrlimit(RLIMIT_STACK, &limits);
 236     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
 237   }
 238   // base may not be page aligned
 239   address base = current_stack_base();
 240   address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
 241   return (size_t)(base - bottom);
 242 }
 243 
 244 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
 245   return localtime_r(clock, res);
 246 }
 247 
 248 // interruptible infrastructure
 249 
 250 // setup_interruptible saves the thread state before going into an
 251 // interruptible system call.
 252 // The saved state is used to restore the thread to
 253 // its former state whether or not an interrupt is received.
 254 // Used by classloader os::read
 255 // os::restartable_read calls skip this layer and stay in _thread_in_native
 256 
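// Illustrative sketch of how an interruptible call is wrapped (hedged: the
// actual wrapping is done by the _INTERRUPTIBLE machinery in the platform
// headers and carries more bookkeeping than shown here):
//
//   JavaThread* thread = os::Solaris::setup_interruptible(); // -> _thread_blocked
//   ssize_t n = ::read(fd, buf, nbytes);                     // may be interrupted
//   os::Solaris::cleanup_interruptible(thread);              // restore saved state
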
 257 void os::Solaris::setup_interruptible(JavaThread* thread) {
 258 
 259   JavaThreadState thread_state = thread->thread_state();
 260 
 261   assert(thread_state != _thread_blocked, "Coming from the wrong thread");
 262   assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
 263   OSThread* osthread = thread->osthread();
 264   osthread->set_saved_interrupt_thread_state(thread_state);
 265   thread->frame_anchor()->make_walkable(thread);
 266   ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
 267 }
 268 
 269 // Version of setup_interruptible() for threads that are already in
 270 // _thread_blocked. Used by os_sleep().
 271 void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) {
 272   thread->frame_anchor()->make_walkable(thread);
 273 }
 274 
 275 JavaThread* os::Solaris::setup_interruptible() {
 276   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
 277   setup_interruptible(thread);
 278   return thread;
 279 }
 280 
 281 void os::Solaris::try_enable_extended_io() {
 282   typedef int (*enable_extended_FILE_stdio_t)(int, int);
 283 
 284   if (!UseExtendedFileIO) {
 285     return;
 286   }
 287 
 288   enable_extended_FILE_stdio_t enabler =
 289     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
 290                                          "enable_extended_FILE_stdio");
 291   if (enabler) {
 292     enabler(-1, -1);
 293   }
 294 }
 295 
 296 
 297 #ifdef ASSERT
 298 
 299 JavaThread* os::Solaris::setup_interruptible_native() {
 300   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
 301   JavaThreadState thread_state = thread->thread_state();
 302   assert(thread_state == _thread_in_native, "Assumed thread_in_native");
 303   return thread;
 304 }
 305 
 306 void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
 307   JavaThreadState thread_state = thread->thread_state();
 308   assert(thread_state == _thread_in_native, "Assumed thread_in_native");
 309 }
 310 #endif
 311 
 312 // cleanup_interruptible reverses the effects of setup_interruptible
 313 // setup_interruptible_already_blocked() does not need any cleanup.
 314 
 315 void os::Solaris::cleanup_interruptible(JavaThread* thread) {
 316   OSThread* osthread = thread->osthread();
 317 
 318   ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
 319 }
 320 
 321 // I/O interruption related counters called in _INTERRUPTIBLE
 322 
 323 void os::Solaris::bump_interrupted_before_count() {
 324   RuntimeService::record_interrupted_before_count();
 325 }
 326 
 327 void os::Solaris::bump_interrupted_during_count() {
 328   RuntimeService::record_interrupted_during_count();
 329 }
 330 
 331 static int _processors_online = 0;
 332 
 333          jint os::Solaris::_os_thread_limit = 0;
 334 volatile jint os::Solaris::_os_thread_count = 0;
 335 
 336 julong os::available_memory() {
 337   return Solaris::available_memory();
 338 }
 339 
 340 julong os::Solaris::available_memory() {
 341   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
 342 }
 343 
 344 julong os::Solaris::_physical_memory = 0;
 345 
 346 julong os::physical_memory() {
 347    return Solaris::physical_memory();
 348 }
 349 
 350 static hrtime_t first_hrtime = 0;
 351 static const hrtime_t hrtime_hz = 1000*1000*1000;
 352 static volatile hrtime_t max_hrtime = 0;
 353 
 354 
 355 void os::Solaris::initialize_system_info() {
 356   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
 357   _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
 358   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
 359 }
 360 
 361 int os::active_processor_count() {
 362   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 363   pid_t pid = getpid();
 364   psetid_t pset = PS_NONE;
 365   // Are we running in a processor set or is there any processor set around?
 366   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
 367     uint_t pset_cpus;
 368     // Query the number of cpus available to us.
 369     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
 370       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
 371       _processors_online = pset_cpus;
 372       return pset_cpus;
 373     }
 374   }
 375   // Otherwise return number of online cpus
 376   return online_cpus;
 377 }
 378 
 379 static bool find_processors_in_pset(psetid_t        pset,
 380                                     processorid_t** id_array,
 381                                     uint_t*         id_length) {
 382   bool result = false;
 383   // Find the number of processors in the processor set.
 384   if (pset_info(pset, NULL, id_length, NULL) == 0) {
 385     // Make up an array to hold their ids.
 386     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 387     // Fill in the array with their processor ids.
 388     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
 389       result = true;
 390     }
 391   }
 392   return result;
 393 }
 394 
 395 // Callers of find_processors_online() must tolerate imprecise results --
 396 // the system configuration can change asynchronously because of dynamic
 397 // reconfiguration (DR) or explicit psradm operations.
 398 //
 399 // We also need to take care that the loop (below) terminates as the
 400 // number of processors online can change between the _SC_NPROCESSORS_ONLN
 401 // request and the loop that builds the list of processor ids.   Unfortunately
 402 // there's no reliable way to determine the maximum valid processor id,
 403 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
 404 // man pages, which claim the processor id set is "sparse, but
 405 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
 406 // exit the loop.
 407 //
 408 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
 409 // not available on S8.0.
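
// Illustrative sketch (hedged, for newer Solaris releases only): the loop
// bound could be derived at runtime instead of using MAX_PROCESSOR_ID, e.g.:
//
//   long max_id = sysconf(_SC_CPUID_MAX);       // highest possible processor id
//   if (max_id < 0) max_id = MAX_PROCESSOR_ID;  // fall back to the constant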
 410 
 411 static bool find_processors_online(processorid_t** id_array,
 412                                    uint*           id_length) {
 413   const processorid_t MAX_PROCESSOR_ID = 100000 ;
 414   // Find the number of processors online.
 415   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
 416   // Make up an array to hold their ids.
 417   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 418   // Processors need not be numbered consecutively.
 419   long found = 0;
 420   processorid_t next = 0;
 421   while (found < *id_length && next < MAX_PROCESSOR_ID) {
 422     processor_info_t info;
 423     if (processor_info(next, &info) == 0) {
 424       // NB, P_NOINTR processors are effectively online ...
 425       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
 426         (*id_array)[found] = next;
 427         found += 1;
 428       }
 429     }
 430     next += 1;
 431   }
 432   if (found < *id_length) {
 433       // The loop above didn't identify the expected number of processors.
 434       // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
 435       // and re-running the loop, above, but there's no guarantee of progress
 436       // if the system configuration is in flux.  Instead, we just return what
 437       // we've got.  Note that in the worst case find_processors_online() could
 438       // return an empty set.  (As a fall-back in the case of the empty set we
 439       // could just return the ID of the current processor).
 440       *id_length = found ;
 441   }
 442 
 443   return true;
 444 }
 445 
 446 static bool assign_distribution(processorid_t* id_array,
 447                                 uint           id_length,
 448                                 uint*          distribution,
 449                                 uint           distribution_length) {
 450   // We assume we can assign processorid_t's to uint's.
 451   assert(sizeof(processorid_t) == sizeof(uint),
 452          "can't convert processorid_t to uint");
 453   // Quick check to see if we won't succeed.
 454   if (id_length < distribution_length) {
 455     return false;
 456   }
 457   // Assign processor ids to the distribution.
 458   // Try to shuffle processors to distribute work across boards,
 459   // assuming ProcessDistributionStride processors per board (4 by default).
 460   const uint processors_per_board = ProcessDistributionStride;
 461   // Find the maximum processor id.
 462   processorid_t max_id = 0;
 463   for (uint m = 0; m < id_length; m += 1) {
 464     max_id = MAX2(max_id, id_array[m]);
 465   }
 466   // The next id, to limit loops.
 467   const processorid_t limit_id = max_id + 1;
 468   // Make up markers for available processors.
 469   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
 470   for (uint c = 0; c < limit_id; c += 1) {
 471     available_id[c] = false;
 472   }
 473   for (uint a = 0; a < id_length; a += 1) {
 474     available_id[id_array[a]] = true;
 475   }
 476   // Step by "boards", then by "slot", copying to "assigned".
 477   // NEEDS_CLEANUP: The assignment of processors should be stateful,
 478   //                remembering which processors have been assigned by
 479   //                previous calls, etc., so as to distribute several
 480   //                independent calls of this method.  It would be nice
 481   //                to have an API that let us ask how many processes
 482   //                are bound to a processor, but we don't have that,
 483   //                either.
 484   //                In the short term, "board" is static so that
 485   //                subsequent distributions don't all start at board 0.
 486   static uint board = 0;
 487   uint assigned = 0;
 488   // Until we've found enough processors ....
 489   while (assigned < distribution_length) {
 490     // ... find the next available processor in the board.
 491     for (uint slot = 0; slot < processors_per_board; slot += 1) {
 492       uint try_id = board * processors_per_board + slot;
 493       if ((try_id < limit_id) && (available_id[try_id] == true)) {
 494         distribution[assigned] = try_id;
 495         available_id[try_id] = false;
 496         assigned += 1;
 497         break;
 498       }
 499     }
 500     board += 1;
 501     if (board * processors_per_board + 0 >= limit_id) {
 502       board = 0;
 503     }
 504   }
 505   if (available_id != NULL) {
 506     FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
 507   }
 508   return true;
 509 }
 510 
 511 void os::set_native_thread_name(const char *name) {
 512   // Not yet implemented.
 513   return;
 514 }
 515 
 516 bool os::distribute_processes(uint length, uint* distribution) {
 517   bool result = false;
 518   // Find the processor id's of all the available CPUs.
 519   processorid_t* id_array  = NULL;
 520   uint           id_length = 0;
 521   // There are some races between querying information and using it,
 522   // since processor sets can change dynamically.
 523   psetid_t pset = PS_NONE;
 524   // Are we running in a processor set?
 525   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
 526     result = find_processors_in_pset(pset, &id_array, &id_length);
 527   } else {
 528     result = find_processors_online(&id_array, &id_length);
 529   }
 530   if (result == true) {
 531     if (id_length >= length) {
 532       result = assign_distribution(id_array, id_length, distribution, length);
 533     } else {
 534       result = false;
 535     }
 536   }
 537   if (id_array != NULL) {
 538     FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
 539   }
 540   return result;
 541 }
 542 
 543 bool os::bind_to_processor(uint processor_id) {
 544   // We assume that a processorid_t can be stored in a uint.
 545   assert(sizeof(uint) == sizeof(processorid_t),
 546          "can't convert uint to processorid_t");
 547   int bind_result =
 548     processor_bind(P_LWPID,                       // bind LWP.
 549                    P_MYID,                        // bind current LWP.
 550                    (processorid_t) processor_id,  // id.
 551                    NULL);                         // don't return old binding.
 552   return (bind_result == 0);
 553 }
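
// Illustrative usage sketch (hedged; an assumed caller, e.g. GC worker setup
// code, might combine distribute_processes() and bind_to_processor() like
// this -- this is not code from this file):
//
//   uint cpus[4];
//   if (os::distribute_processes(4, cpus)) {
//     // ... later, worker i binds itself:
//     os::bind_to_processor(cpus[i]);
//   }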
 554 
 555 bool os::getenv(const char* name, char* buffer, int len) {
 556   char* val = ::getenv( name );
 557   if ( val == NULL
 558   ||   strlen(val) + 1  >  len ) {
 559     if (len > 0)  buffer[0] = 0; // return a null string
 560     return false;
 561   }
 562   strcpy( buffer, val );
 563   return true;
 564 }
 565 
 566 
 567 // Return true if the process has special privileges (its real and effective uid or gid differ).
 568 
 569 bool os::have_special_privileges() {
 570   static bool init = false;
 571   static bool privileges = false;
 572   if (!init) {
 573     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 574     init = true;
 575   }
 576   return privileges;
 577 }
 578 
 579 
 580 void os::init_system_properties_values() {
 581   // The next steps are taken in the product version:
 582   //
 583   // Obtain the JAVA_HOME value from the location of libjvm.so.
 584   // This library should be located at:
 585   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
 586   //
 587   // If "/jre/lib/" appears at the right place in the path, then we
 588   // assume libjvm.so is installed in a JDK and we use this path.
 589   //
 590   // Otherwise exit with message: "Could not create the Java virtual machine."
 591   //
 592   // The following extra steps are taken in the debugging version:
 593   //
 594   // If "/jre/lib/" does NOT appear at the right place in the path
 595   // instead of exit check for $JAVA_HOME environment variable.
 596   //
 597   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
 598   // then we append a fake suffix "hotspot/libjvm.so" to this path so
 599   // it looks like libjvm.so is installed there
 600   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
 601   //
 602   // Otherwise exit.
 603   //
 604   // Important note: if the location of libjvm.so changes this
 605   // code needs to be changed accordingly.
 606 
 607 // Base path of extensions installed on the system.
 608 #define SYS_EXT_DIR     "/usr/jdk/packages"
 609 #define EXTENSIONS_DIR  "/lib/ext"
 610 #define ENDORSED_DIR    "/lib/endorsed"
 611 
 612   char cpu_arch[12];
 613   // Buffer that fits several sprintfs.
 614   // Note that the space for the colon and the trailing null are provided
 615   // by the nulls included by the sizeof operator.
 616   const size_t bufsize =
 617     MAX4((size_t)MAXPATHLEN,  // For dll_dir & friends.
 618          sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(cpu_arch), // invariant ld_library_path (cpu_arch is not filled in yet)
 619          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
 620          (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
 621   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 622 
 623   // sysclasspath, java_home, dll_dir
 624   {
 625     char *pslash;
 626     os::jvm_path(buf, bufsize);
 627 
 628     // Found the full path to libjvm.so.
 629     // Now cut the path to <java_home>/jre if we can.
 630     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 631     pslash = strrchr(buf, '/');
 632     if (pslash != NULL) {
 633       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 634     }
 635     Arguments::set_dll_dir(buf);
 636 
 637     if (pslash != NULL) {
 638       pslash = strrchr(buf, '/');
 639       if (pslash != NULL) {
 640         *pslash = '\0';          // Get rid of /<arch>.
 641         pslash = strrchr(buf, '/');
 642         if (pslash != NULL) {
 643           *pslash = '\0';        // Get rid of /lib.
 644         }
 645       }
 646     }
 647     Arguments::set_java_home(buf);
 648     set_boot_path('/', ':');
 649   }
 650 
 651   // Where to look for native libraries.
 652   {
 653     // Use dlinfo() to determine the correct java.library.path.
 654     //
 655     // If we're launched by the Java launcher, and the user
 656     // does not set java.library.path explicitly on the commandline,
 657     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
 658     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
 659     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
 660     // /usr/lib), which is exactly what we want.
 661     //
 662     // If the user does set java.library.path, it completely
 663     // overwrites this setting, and always has.
 664     //
 665     // If we're not launched by the Java launcher, we may
 666     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
 667     // settings.  Again, dlinfo does exactly what we want.
 668 
 669     Dl_serinfo     info_sz, *info = &info_sz;
 670     Dl_serpath     *path;
 671     char           *library_path;
 672     char           *common_path = buf;
 673 
 674     // Determine search path count and required buffer size.
 675     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
 676       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
 677       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
 678     }
 679 
 680     // Allocate new buffer and initialize.
 681     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
 682     info->dls_size = info_sz.dls_size;
 683     info->dls_cnt = info_sz.dls_cnt;
 684 
 685     // Obtain search path information.
 686     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
 687       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
 688       FREE_C_HEAP_ARRAY(char, info, mtInternal);
 689       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
 690     }
 691 
 692     path = &info->dls_serpath[0];
 693 
 694     // Note: Due to a legacy implementation, most of the library path
 695     // is set in the launcher. This was to accommodate linking restrictions
 696     // on legacy Solaris implementations (which are no longer supported).
 697     // Eventually, all the library path setting will be done here.
 698     //
 699     // However, to prevent the proliferation of improperly built native
 700     // libraries, the new path component /usr/jdk/packages is added here.
 701 
 702     // Determine the actual CPU architecture.
 703     sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
 704 #ifdef _LP64
 705     // If we are a 64-bit vm, perform the following translations:
 706     //   sparc   -> sparcv9
 707     //   i386    -> amd64
 708     if (strcmp(cpu_arch, "sparc") == 0) {
 709       strcat(cpu_arch, "v9");
 710     } else if (strcmp(cpu_arch, "i386") == 0) {
 711       strcpy(cpu_arch, "amd64");
 712     }
 713 #endif
 714 
 715     // Construct the invariant part of ld_library_path.
 716     sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
 717 
 718     // Struct size is more than sufficient for the path components obtained
 719     // through the dlinfo() call, so only add additional space for the path
 720     // components explicitly added here.
 721     size_t library_path_size = info->dls_size + strlen(common_path);
 722     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
 723     library_path[0] = '\0';
 724 
 725     // Construct the desired Java library path from the linker's library
 726     // search path.
 727     //
 728     // For compatibility, it is optimal that we insert the additional path
 729     // components specific to the Java VM after those components specified
 730     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
 731     // infrastructure.
 732     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
 733       strcpy(library_path, common_path);
 734     } else {
 735       int inserted = 0;
 736       int i;
 737       for (i = 0; i < info->dls_cnt; i++, path++) {
 738         uint_t flags = path->dls_flags & LA_SER_MASK;
 739         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
 740           strcat(library_path, common_path);
 741           strcat(library_path, os::path_separator());
 742           inserted = 1;
 743         }
 744         strcat(library_path, path->dls_name);
 745         strcat(library_path, os::path_separator());
 746       }
 747       // Eliminate trailing path separator.
 748       library_path[strlen(library_path)-1] = '\0';
 749     }
 750 
 751     // happens before argument parsing - can't use a trace flag
 752     // tty->print_raw("init_system_properties_values: native lib path: ");
 753     // tty->print_raw_cr(library_path);
 754 
 755     // Callee copies into its own buffer.
 756     Arguments::set_library_path(library_path);
 757 
 758     FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
 759     FREE_C_HEAP_ARRAY(char, info, mtInternal);
 760   }
 761 
 762   // Extensions directories.
 763   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
 764   Arguments::set_ext_dirs(buf);
 765 
 766   // Endorsed standards default directory.
 767   sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
 768   Arguments::set_endorsed_dirs(buf);
 769 
 770   FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 771 
 772 #undef SYS_EXT_DIR
 773 #undef EXTENSIONS_DIR
 774 #undef ENDORSED_DIR
 775 }
 776 
 777 void os::breakpoint() {
 778   BREAKPOINT;
 779 }
 780 
 781 bool os::obsolete_option(const JavaVMOption *option)
 782 {
 783   if (!strncmp(option->optionString, "-Xt", 3)) {
 784     return true;
 785   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
 786     return true;
 787   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
 788     return true;
 789   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
 790     return true;
 791   }
 792   return false;
 793 }
 794 
 795 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
 796   address  stackStart  = (address)thread->stack_base();
 797   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
 798   if (sp < stackStart && sp >= stackEnd ) return true;
 799   return false;
 800 }
 801 
 802 extern "C" void breakpoint() {
 803   // use debugger to set breakpoint here
 804 }
 805 
 806 static thread_t main_thread;
 807 
 808 // Thread start routine for all new Java threads
 809 extern "C" void* java_start(void* thread_addr) {
 810   // Try to randomize the cache line index of hot stack frames.
 811   // This helps when threads of the same stack traces evict each other's
 812   // cache lines. The threads can be either from the same JVM instance, or
 813   // from different JVM instances. The benefit is especially true for
 814   // processors with hyperthreading technology.
 815   static int counter = 0;
 816   int pid = os::current_process_id();
 817   alloca(((pid ^ counter++) & 7) * 128);
 818 
 819   int prio;
 820   Thread* thread = (Thread*)thread_addr;
 821   OSThread* osthr = thread->osthread();
 822 
 823   osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
 824   thread->_schedctl = (void *) schedctl_init () ;
 825 
 826   if (UseNUMA) {
 827     int lgrp_id = os::numa_get_group_id();
 828     if (lgrp_id != -1) {
 829       thread->set_lgrp_id(lgrp_id);
 830     }
 831   }
 832 
 833   // If the creator called set priority before we started,
 834   // we need to call set_native_priority now that we have an lwp.
 835   // We used to get the priority from thr_getprio (we called
 836   // thr_setprio way back in create_thread) and pass it to
 837   // set_native_priority, but Solaris scales the priority
 838   // in java_to_os_priority, so when we read it back here,
 839   // we pass trash to set_native_priority instead of what's
 840   // in java_to_os_priority. So we save the native priority
 841   // in the osThread and recall it here.
 842 
 843   if ( osthr->thread_id() != -1 ) {
 844     if ( UseThreadPriorities ) {
 845       int prio = osthr->native_priority();
 846       if (ThreadPriorityVerbose) {
 847         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
 848                       INTPTR_FORMAT ", setting priority: %d\n",
 849                       osthr->thread_id(), osthr->lwp_id(), prio);
 850       }
 851       os::set_native_priority(thread, prio);
 852     }
 853   } else if (ThreadPriorityVerbose) {
 854     warning("Can't set priority in _start routine, thread id hasn't been set\n");
 855   }
 856 
 857   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 858 
 859   // initialize signal mask for this thread
 860   os::Solaris::hotspot_sigmask(thread);
 861 
 862   thread->run();
 863 
 864   // One less thread is executing
 865   // When the VMThread gets here, the main thread may have already exited
 866   // which frees the CodeHeap containing the Atomic::dec code
 867   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 868     Atomic::dec(&os::Solaris::_os_thread_count);
 869   }
 870 
 871   if (UseDetachedThreads) {
 872     thr_exit(NULL);
 873     ShouldNotReachHere();
 874   }
 875   return NULL;
 876 }
 877 
 878 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
 879   // Allocate the OSThread object
 880   OSThread* osthread = new OSThread(NULL, NULL);
 881   if (osthread == NULL) return NULL;
 882 
 883   // Store info on the Solaris thread into the OSThread
 884   osthread->set_thread_id(thread_id);
 885   osthread->set_lwp_id(_lwp_self());
 886   thread->_schedctl = (void *) schedctl_init () ;
 887 
 888   if (UseNUMA) {
 889     int lgrp_id = os::numa_get_group_id();
 890     if (lgrp_id != -1) {
 891       thread->set_lgrp_id(lgrp_id);
 892     }
 893   }
 894 
 895   if ( ThreadPriorityVerbose ) {
 896     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
 897                   osthread->thread_id(), osthread->lwp_id() );
 898   }
 899 
 900   // Initial thread state is INITIALIZED, not SUSPENDED
 901   osthread->set_state(INITIALIZED);
 902 
 903   return osthread;
 904 }
 905 
 906 void os::Solaris::hotspot_sigmask(Thread* thread) {
 907 
 908   // Save caller's signal mask
 909   sigset_t sigmask;
 910   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
 911   OSThread *osthread = thread->osthread();
 912   osthread->set_caller_sigmask(sigmask);
 913 
 914   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
 915   if (!ReduceSignalUsage) {
 916     if (thread->is_VM_thread()) {
 917       // Only the VM thread handles BREAK_SIGNAL ...
 918       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
 919     } else {
 920       // ... all other threads block BREAK_SIGNAL
 921       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
 922       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
 923     }
 924   }
 925 }
 926 
 927 bool os::create_attached_thread(JavaThread* thread) {
 928 #ifdef ASSERT
 929   thread->verify_not_published();
 930 #endif
 931   OSThread* osthread = create_os_thread(thread, thr_self());
 932   if (osthread == NULL) {
 933      return false;
 934   }
 935 
 936   // Initial thread state is RUNNABLE
 937   osthread->set_state(RUNNABLE);
 938   thread->set_osthread(osthread);
 939 
 940   // initialize signal mask for this thread
 941   // and save the caller's signal mask
 942   os::Solaris::hotspot_sigmask(thread);
 943 
 944   return true;
 945 }
 946 
 947 bool os::create_main_thread(JavaThread* thread) {
 948 #ifdef ASSERT
 949   thread->verify_not_published();
 950 #endif
 951   if (_starting_thread == NULL) {
 952     _starting_thread = create_os_thread(thread, main_thread);
 953      if (_starting_thread == NULL) {
 954         return false;
 955      }
 956   }
 957 
 958   // The primordial thread is runnable from the start
 959   _starting_thread->set_state(RUNNABLE);
 960 
 961   thread->set_osthread(_starting_thread);
 962 
 963   // initialize signal mask for this thread
 964   // and save the caller's signal mask
 965   os::Solaris::hotspot_sigmask(thread);
 966 
 967   return true;
 968 }
 969 
 970 // _T2_libthread is true if we believe we are running with the newer
 971 // SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
 972 bool os::Solaris::_T2_libthread = false;
 973 
 974 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 975   // Allocate the OSThread object
 976   OSThread* osthread = new OSThread(NULL, NULL);
 977   if (osthread == NULL) {
 978     return false;
 979   }
 980 
 981   if ( ThreadPriorityVerbose ) {
 982     char *thrtyp;
 983     switch ( thr_type ) {
 984       case vm_thread:
 985         thrtyp = (char *)"vm";
 986         break;
 987       case cgc_thread:
 988         thrtyp = (char *)"cgc";
 989         break;
 990       case pgc_thread:
 991         thrtyp = (char *)"pgc";
 992         break;
 993       case java_thread:
 994         thrtyp = (char *)"java";
 995         break;
 996       case compiler_thread:
 997         thrtyp = (char *)"compiler";
 998         break;
 999       case watcher_thread:
1000         thrtyp = (char *)"watcher";
1001         break;
1002       default:
1003         thrtyp = (char *)"unknown";
1004         break;
1005     }
1006     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
1007   }
1008 
1009   // Calculate stack size if it's not specified by caller.
1010   if (stack_size == 0) {
1011     // The default stack size is 1M (2M for LP64).
1012     stack_size = (BytesPerWord >> 2) * K * K;
1013 
1014     switch (thr_type) {
1015     case os::java_thread:
1016       // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag.
1017       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
1018       break;
1019     case os::compiler_thread:
1020       if (CompilerThreadStackSize > 0) {
1021         stack_size = (size_t)(CompilerThreadStackSize * K);
1022         break;
1023       } // else fall through:
1024         // use VMThreadStackSize if CompilerThreadStackSize is not defined
1025     case os::vm_thread:
1026     case os::pgc_thread:
1027     case os::cgc_thread:
1028     case os::watcher_thread:
1029       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1030       break;
1031     }
1032   }
1033   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1034 
1035   // Initial state is ALLOCATED but not INITIALIZED
1036   osthread->set_state(ALLOCATED);
1037 
1038   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
1039     // We have a lot of threads. Check whether we still have some address space
1040     // left: we need at least 20MB (VirtualMemoryBangSize) of unreserved address
1041     // space, which we check for by trying to reserve that much.
1042     const size_t VirtualMemoryBangSize = 20*K*K;
1043     char* mem = os::reserve_memory(VirtualMemoryBangSize);
1044     if (mem == NULL) {
1045       delete osthread;
1046       return false;
1047     } else {
1048       // Release the memory again
1049       os::release_memory(mem, VirtualMemoryBangSize);
1050     }
1051   }
1052 
1053   // Setup osthread because the child thread may need it.
1054   thread->set_osthread(osthread);
1055 
1056   // Create the Solaris thread.
1057   // We request THR_BOUND explicitly for the T2_libthread case, in case that
1058   // assumption is not accurate, because our alternate signal stack handling
1059   // relies on threads being bound.
1060   thread_t tid = 0;
1061   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
1062                    | ((UseBoundThreads || os::Solaris::T2_libthread() ||
1063                        (thr_type == vm_thread) ||
1064                        (thr_type == cgc_thread) ||
1065                        (thr_type == pgc_thread) ||
1066                        (thr_type == compiler_thread && BackgroundCompilation)) ?
1067                       THR_BOUND : 0);
1068   int      status;
1069 
1070   // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
1071   //
1072   // On multiprocessor systems, libthread sometimes under-provisions our
1073   // process with LWPs.  On a 30-way system, for instance, we could have
1074   // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
1075   // to our process.  This can result in under-utilization of PEs.
1076   // I suspect the problem is related to libthread's LWP
1077   // pool management and to the kernel's SIGBLOCKING "last LWP parked"
1078   // upcall policy.
1079   //
1080   // The following code is palliative -- it attempts to ensure that our
1081   // process has sufficient LWPs to take advantage of multiple PEs.
1082   // Proper long-term cures include using user-level threads bound to LWPs
1083   // (THR_BOUND) or using LWP-based synchronization.  Note that there is a
1084   // slight timing window with respect to sampling _os_thread_count, but
1085   // the race is benign.  Also, we should periodically recompute
1086   // _processors_online as the min of _SC_NPROCESSORS_ONLN and the
1087   // number of PEs in our partition.  You might be tempted to use
1088   // THR_NEW_LWP here, but I'd recommend against it as that could
1089   // result in undesirable growth of the libthread's LWP pool.
1090   // The fix below isn't sufficient; for instance, it doesn't take into account
1091   // LWPs parked on IO.  It does, however, help certain CPU-bound benchmarks.
1092   //
1093   // Some pathologies this scheme doesn't handle:
1094   // *  Threads can block, releasing the LWPs.  The LWPs can age out.
1095   //    When a large number of threads become ready again there aren't
1096   //    enough LWPs available to service them.  This can occur when the
1097   //    number of ready threads oscillates.
1098   // *  LWPs/Threads park on IO, thus taking the LWP out of circulation.
1099   //
1100   // Finally, we should call thr_setconcurrency() periodically to refresh
1101   // the LWP pool and thwart the LWP age-out mechanism.
1102   // The "+3" term provides a little slop -- we want to slightly overprovision.
1103 
1104   if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
1105     if (!(flags & THR_BOUND)) {
1106       thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
1107     }
1108   }
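
// Illustrative sketch (an assumption, not code from this file): the periodic
// thr_setconcurrency() refresh suggested in the comment above could look
// roughly like this, e.g. driven from a periodic task:
//
//   if (AdjustConcurrency && !UseBoundThreads &&
//       os::Solaris::_os_thread_count < (_processors_online + 3)) {
//     thr_setconcurrency(os::Solaris::_os_thread_count);  // refresh the LWP pool
//   }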
1109   // Although this doesn't hurt, we should warn of undefined behavior
1110   // when using unbound T1 threads with schedctl().  This should never
1111   // happen, as the compiler and VM threads are always created bound
1112   DEBUG_ONLY(
1113       if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
1114           (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
1115           ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
1116            (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
1117          warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
1118       }
1119   );
1120 
1121 
1122   // Mark that we don't have an lwp or thread id yet.
1123   // In case we attempt to set the priority before the thread starts.
1124   osthread->set_lwp_id(-1);
1125   osthread->set_thread_id(-1);
1126 
1127   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1128   if (status != 0) {
1129     if (PrintMiscellaneous && (Verbose || WizardMode)) {
1130       perror("os::create_thread");
1131     }
1132     thread->set_osthread(NULL);
1133     // Need to clean up stuff we've allocated so far
1134     delete osthread;
1135     return false;
1136   }
1137 
1138   Atomic::inc(&os::Solaris::_os_thread_count);
1139 
1140   // Store info on the Solaris thread into the OSThread
1141   osthread->set_thread_id(tid);
1142 
1143   // Remember that we created this thread so we can set priority on it
1144   osthread->set_vm_created();
1145 
1146   // Set the default thread priority.  If using bound threads, setting
1147   // lwp priority will be delayed until thread start.
1148   set_native_priority(thread,
1149                       DefaultThreadPriority == -1 ?
1150                         java_to_os_priority[NormPriority] :
1151                         DefaultThreadPriority);
1152 
1153   // Initial thread state is INITIALIZED, not SUSPENDED
1154   osthread->set_state(INITIALIZED);
1155 
1156   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1157   return true;
1158 }
1159 
1160 /* SIGJVM1/SIGJVM2 are defined on Solaris 10 and later. Defining them here
1161  *  allows builds on earlier Solaris versions to use the newly reserved JVM
1162  *  signals. With SIGJVM1/SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is
1163  *  SIGJVM2, and -XX:+UseAltSigs does nothing since these have no conflicts.
1164  */
1165 #if !defined(SIGJVM1)
1166 #define SIGJVM1 39
1167 #define SIGJVM2 40
1168 #endif
1169 
1170 debug_only(static bool signal_sets_initialized = false);
1171 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1172 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1173 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1174 
1175 bool os::Solaris::is_sig_ignored(int sig) {
1176       struct sigaction oact;
1177       sigaction(sig, (struct sigaction*)NULL, &oact);
1178       void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
1179                                      : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
1180       if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
1181            return true;
1182       else
1183            return false;
1184 }
1185 
1186 // Note: SIGRTMIN is a macro that calls sysconf(), so it will dynamically
1187 // detect the SIGRTMIN value for the system at runtime, not at build time.
1188 static bool isJVM1available() {
1189   return SIGJVM1 < SIGRTMIN;
1190 }
1191 
1192 void os::Solaris::signal_sets_init() {
1193   // Should also have an assertion stating we are still single-threaded.
1194   assert(!signal_sets_initialized, "Already initialized");
1195   // Fill in signals that are necessarily unblocked for all threads in
1196   // the VM. Currently, we unblock the following signals:
1197   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
1198   //                         by -Xrs (=ReduceSignalUsage));
1199   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1200   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1201   // the dispositions or masks wrt these signals.
1202   // Programs embedding the VM that want to use the above signals for their
1203   // own purposes must, at this time, use the "-Xrs" option to prevent
1204   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1205   // (See bug 4345157, and other related bugs).
1206   // In reality, though, unblocking these signals is really a nop, since
1207   // these signals are not blocked by default.
1208   sigemptyset(&unblocked_sigs);
1209   sigemptyset(&allowdebug_blocked_sigs);
1210   sigaddset(&unblocked_sigs, SIGILL);
1211   sigaddset(&unblocked_sigs, SIGSEGV);
1212   sigaddset(&unblocked_sigs, SIGBUS);
1213   sigaddset(&unblocked_sigs, SIGFPE);
1214 
1215   if (isJVM1available()) {
1216     os::Solaris::set_SIGinterrupt(SIGJVM1);
1217     os::Solaris::set_SIGasync(SIGJVM2);
1218   } else if (UseAltSigs) {
1219     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1220     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1221   } else {
1222     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1223     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1224   }
1225 
1226   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1227   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1228 
1229   if (!ReduceSignalUsage) {
1230    if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1231       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1232       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1233    }
1234    if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1235       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1236       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1237    }
1238    if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1239       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1240       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1241    }
1242   }
1243   // Fill in signals that are blocked by all but the VM thread.
1244   sigemptyset(&vm_sigs);
1245   if (!ReduceSignalUsage)
1246     sigaddset(&vm_sigs, BREAK_SIGNAL);
1247   debug_only(signal_sets_initialized = true);
1248 
1249   // For diagnostics only used in run_periodic_checks
1250   sigemptyset(&check_signal_done);
1251 }
1252 
1253 // These are signals that are unblocked while a thread is running Java.
1254 // (For some reason, they get blocked by default.)
1255 sigset_t* os::Solaris::unblocked_signals() {
1256   assert(signal_sets_initialized, "Not initialized");
1257   return &unblocked_sigs;
1258 }
1259 
1260 // These are the signals that are blocked while a (non-VM) thread is
1261 // running Java. Only the VM thread handles these signals.
1262 sigset_t* os::Solaris::vm_signals() {
1263   assert(signal_sets_initialized, "Not initialized");
1264   return &vm_sigs;
1265 }
1266 
1267 // These are signals that are blocked during cond_wait to allow debugger in
1268 sigset_t* os::Solaris::allowdebug_blocked_signals() {
1269   assert(signal_sets_initialized, "Not initialized");
1270   return &allowdebug_blocked_sigs;
1271 }
1272 
1273 
1274 void _handle_uncaught_cxx_exception() {
1275   VMError err("An uncaught C++ exception");
1276   err.report_and_die();
1277 }
1278 
1279 
1280 // First crack at OS-specific initialization, from inside the new thread.
1281 void os::initialize_thread(Thread* thr) {
1282   if (is_primordial_thread()) {
1283     JavaThread* jt = (JavaThread *)thr;
1284     assert(jt != NULL,"Sanity check");
1285     size_t stack_size;
1286     address base = jt->stack_base();
1287     if (Arguments::created_by_java_launcher()) {
1288       // Use 2MB to allow for Solaris 7 64 bit mode.
1289       stack_size = JavaThread::stack_size_at_create() == 0
1290         ? 2048*K : JavaThread::stack_size_at_create();
1291 
1292       // There are rare cases when we may have already used more than
1293       // the basic stack size allotment before this method is invoked.
1294       // Attempt to allow for a normally sized java_stack.
1295       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1296       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1297     } else {
1298       // 6269555: If we were not created by a Java launcher, i.e. if we are
1299       // running embedded in a native application, treat the primordial thread
1300       // as much like a native attached thread as possible.  This means using
1301       // the current stack size from thr_stksegment(), unless it is too large
1302       // to reliably set up guard pages.  A reasonable max size is 8MB.
1303       size_t current_size = current_stack_size();
1304       // This should never happen, but just in case....
1305       if (current_size == 0) current_size = 2 * K * K;
1306       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1307     }
1308     address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
1309     stack_size = (size_t)(base - bottom);
1310 
1311     assert(stack_size > 0, "Stack size calculation problem");
1312 
1313     if (stack_size > jt->stack_size()) {
1314       NOT_PRODUCT(
1315         struct rlimit limits;
1316         getrlimit(RLIMIT_STACK, &limits);
1317         size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1318         assert(size >= jt->stack_size(), "Stack size problem in main thread");
1319       )
1320       tty->print_cr(
1321         "Stack size of " SIZE_FORMAT " Kb exceeds current limit of " SIZE_FORMAT " Kb.\n"
1322         "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1323         "See limit(1) to increase the stack size limit.",
1324         stack_size / K, jt->stack_size() / K);
1325       vm_exit(1);
1326     }
1327     assert(jt->stack_size() >= stack_size,
1328           "Attempt to map more stack than was allocated");
1329     jt->set_stack_size(stack_size);
1330   }
1331 
1332    // 5/22/01: Right now alternate signal stacks do not handle
1333    // throwing stack overflow exceptions, see bug 4463178
1334    // Until a fix is found for this, T2 will NOT imply alternate signal
1335    // stacks.
1336    // If using T2 libthread threads, install an alternate signal stack.
1337    // Because alternate stacks associate with LWPs on Solaris,
1338    // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
1339    // we prefer to explicitly stack bang.
1340    // If not using T2 libthread, but using UseBoundThreads any threads
1341    // (primordial thread, jni_attachCurrentThread) we do not create,
1342    // probably are not bound, therefore they can not have an alternate
1343    // signal stack. Since our stack banging code is generated and
1344    // is shared across threads, all threads must be bound to allow
1345    // using alternate signal stacks.  The alternative is to interpose
1346    // on _lwp_create to associate an alt sig stack with each LWP,
1347    // and this could be a problem when the JVM is embedded.
1348    // We would prefer to use alternate signal stacks with T2
1349    // Since there is currently no accurate way to detect T2
1350    // we do not. Assuming T2 when running T1 causes sig 11s or assertions
1351    // on installing alternate signal stacks
1352 
1353 
1354    // 05/09/03: removed alternate signal stack support for Solaris
1355    // The alternate signal stack mechanism is no longer needed to
1356    // handle stack overflow. This is now handled by allocating
1357    // guard pages (red zone) and stackbanging.
1358    // Initially the alternate signal stack mechanism was removed because
1359    // it did not work with T1 libthread. Alternate
1360    // signal stacks MUST have all threads bound to lwps. Applications
1361    // can create their own threads and attach them without their being
1362    // bound under T1. This is frequently the case for the primordial thread.
1363    // If we were ever to reenable this mechanism we would need to
1364    // use the dynamic check for T2 libthread.
1365 
1366   os::Solaris::init_thread_fpu_state();
1367   std::set_terminate(_handle_uncaught_cxx_exception);
1368 }
1369 
1370 
1371 
1372 // Free Solaris resources related to the OSThread
1373 void os::free_thread(OSThread* osthread) {
1374   assert(osthread != NULL, "os::free_thread but osthread not set");
1375 
1376 
1377   // We are told to free resources of the argument thread,
1378   // but we can only really operate on the current thread.
1379   // The main thread must take the VMThread down synchronously
1380   // before the main thread exits and frees up CodeHeap
1381   guarantee((Thread::current()->osthread() == osthread
1382      || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1383   if (Thread::current()->osthread() == osthread) {
1384     // Restore caller's signal mask
1385     sigset_t sigmask = osthread->caller_sigmask();
1386     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1387   }
1388   delete osthread;
1389 }
1390 
1391 void os::pd_start_thread(Thread* thread) {
1392   int status = thr_continue(thread->osthread()->thread_id());
1393   assert_status(status == 0, status, "thr_continue failed");
1394 }
1395 
1396 
1397 intx os::current_thread_id() {
1398   return (intx)thr_self();
1399 }
1400 
1401 static pid_t _initial_pid = 0;
1402 
1403 int os::current_process_id() {
1404   return (int)(_initial_pid ? _initial_pid : getpid());
1405 }
1406 
1407 // gethrtime() should be monotonic according to the documentation,
1408 // but some virtualized platforms are known to break this guarantee.
1409 // getTimeNanos() must be guaranteed not to move backwards, so we
1410 // are forced to add a check here.
1411 inline hrtime_t getTimeNanos() {
1412   const hrtime_t now = gethrtime();
1413   const hrtime_t prev = max_hrtime;
1414   if (now <= prev) {
1415     return prev;   // same or retrograde time;
1416   }
1417   const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1418   assert(obsv >= prev, "invariant");   // Monotonicity
1419   // If the CAS succeeded then we're done and return "now".
1420   // If the CAS failed and the observed value "obsv" is >= now then
1421   // we should return "obsv".  If the CAS failed and now > obsv > prev then
1422   // some other thread raced this thread and installed a new value, in which case
1423   // we could either (a) retry the entire operation, (b) retry trying to install now
1424   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1425   // we might discard a higher "now" value in deference to a slightly lower but freshly
1426   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1427   // to (a) or (b) -- and greatly reduces coherence traffic.
1428   // We might also condition (c) on the magnitude of the delta between obsv and now.
1429   // Avoiding excessive CAS operations to hot RW locations is critical.
1430   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1431   return (prev == obsv) ? now : obsv;
1432 }
1433 
1434 // Time since start-up in seconds to a fine granularity.
1435 // Used by VMSelfDestructTimer and the MemProfiler.
1436 double os::elapsedTime() {
1437   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1438 }
1439 
1440 jlong os::elapsed_counter() {
1441   return (jlong)(getTimeNanos() - first_hrtime);
1442 }
1443 
1444 jlong os::elapsed_frequency() {
1445    return hrtime_hz;
1446 }
1447 
1448 // Return the real, user, and system times in seconds from an
1449 // arbitrary fixed point in the past.
1450 bool os::getTimesSecs(double* process_real_time,
1451                   double* process_user_time,
1452                   double* process_system_time) {
1453   struct tms ticks;
1454   clock_t real_ticks = times(&ticks);
1455 
1456   if (real_ticks == (clock_t) (-1)) {
1457     return false;
1458   } else {
1459     double ticks_per_second = (double) clock_tics_per_sec;
1460     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1461     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1462     // For consistency return the real time from getTimeNanos()
1463     // converted to seconds.
1464     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1465 
1466     return true;
1467   }
1468 }
1469 
1470 bool os::supports_vtime() { return true; }
1471 
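// Micro-state accounting: enable_vtime() turns it on for this process by
// writing a PCSET control message with the PR_MSACCT flag to /proc/self/ctl;
// vtime_enabled() reads the pstatus_t from /proc/self/status and tests
// pr_flags for the same PR_MSACCT bit.  See proc(4).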
1472 bool os::enable_vtime() {
1473   int fd = ::open("/proc/self/ctl", O_WRONLY);
1474   if (fd == -1)
1475     return false;
1476 
1477   long cmd[] = { PCSET, PR_MSACCT };
1478   int res = ::write(fd, cmd, sizeof(long) * 2);
1479   ::close(fd);
1480   if (res != sizeof(long) * 2)
1481     return false;
1482 
1483   return true;
1484 }
1485 
1486 bool os::vtime_enabled() {
1487   int fd = ::open("/proc/self/status", O_RDONLY);
1488   if (fd == -1)
1489     return false;
1490 
1491   pstatus_t status;
1492   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1493   ::close(fd);
1494   if (res != sizeof(pstatus_t))
1495     return false;
1496 
1497   return status.pr_flags & PR_MSACCT;
1498 }
1499 
1500 double os::elapsedVTime() {
1501   return (double)gethrvtime() / (double)hrtime_hz;
1502 }
1503 
1504 // Used internally for comparisons only.
1505 // getTimeMillis is guaranteed not to move backwards on Solaris.
1506 jlong getTimeMillis() {
1507   jlong nanotime = getTimeNanos();
1508   return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1509 }
1510 
1511 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1512 jlong os::javaTimeMillis() {
1513   timeval t;
1514   if (gettimeofday( &t, NULL) == -1)
1515     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1516   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1517 }
1518 
1519 jlong os::javaTimeNanos() {
1520   return (jlong)getTimeNanos();
1521 }
1522 
1523 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1524   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1525   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1526   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1527   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1528 }
1529 
1530 char * os::local_time_string(char *buf, size_t buflen) {
1531   struct tm t;
1532   time_t long_time;
1533   time(&long_time);
1534   localtime_r(&long_time, &t);
1535   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1536                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1537                t.tm_hour, t.tm_min, t.tm_sec);
1538   return buf;
1539 }
1540 
1541 // Note: os::shutdown() might be called very early during initialization, or
1542 // called from signal handler. Before adding something to os::shutdown(), make
1543 // sure it is async-safe and can handle partially initialized VM.
1544 void os::shutdown() {
1545 
1546   // allow PerfMemory to attempt cleanup of any persistent resources
1547   perfMemory_exit();
1548 
1549   // needs to remove object in file system
1550   AttachListener::abort();
1551 
1552   // flush buffered output, finish log files
1553   ostream_abort();
1554 
1555   // Check for abort hook
1556   abort_hook_t abort_hook = Arguments::abort_hook();
1557   if (abort_hook != NULL) {
1558     abort_hook();
1559   }
1560 }
1561 
1562 // Note: os::abort() might be called very early during initialization, or
1563 // called from signal handler. Before adding something to os::abort(), make
1564 // sure it is async-safe and can handle partially initialized VM.
1565 void os::abort(bool dump_core) {
1566   os::shutdown();
1567   if (dump_core) {
1568 #ifndef PRODUCT
1569     fdStream out(defaultStream::output_fd());
1570     out.print_raw("Current thread is ");
1571     char buf[16];
1572     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1573     out.print_raw_cr(buf);
1574     out.print_raw_cr("Dumping core ...");
1575 #endif
1576     ::abort(); // dump core (for debugging)
1577   }
1578 
1579   ::exit(1);
1580 }
1581 
1582 // Die immediately, no exit hook, no abort hook, no cleanup.
1583 void os::die() {
1584   ::abort(); // dump core (for debugging)
1585 }
1586 
1587 // DLL functions
1588 
1589 const char* os::dll_file_extension() { return ".so"; }
1590 
1591 // This must be hard coded because it's the system's temporary
1592 // directory, not the java application's temp directory (java.io.tmpdir).
1593 const char* os::get_temp_directory() { return "/tmp"; }
1594 
1595 static bool file_exists(const char* filename) {
1596   struct stat statbuf;
1597   if (filename == NULL || strlen(filename) == 0) {
1598     return false;
1599   }
1600   return os::stat(filename, &statbuf) == 0;
1601 }
1602 
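// Builds a platform-dependent library name into 'buffer'; e.g. a pname of
// "/usr/lib" and an fname of "foo" yield "/usr/lib/libfoo.so".  If pname is
// a path list (contains the path separator), each element is tried in turn
// and the first existing file wins.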
1603 bool os::dll_build_name(char* buffer, size_t buflen,
1604                         const char* pname, const char* fname) {
1605   bool retval = false;
1606   const size_t pnamelen = pname ? strlen(pname) : 0;
1607 
1608   // Return error on buffer overflow.
1609   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1610     return retval;
1611   }
1612 
1613   if (pnamelen == 0) {
1614     snprintf(buffer, buflen, "lib%s.so", fname);
1615     retval = true;
1616   } else if (strchr(pname, *os::path_separator()) != NULL) {
1617     int n;
1618     char** pelements = split_path(pname, &n);
1619     if (pelements == NULL) {
1620       return false;
1621     }
1622     for (int i = 0 ; i < n ; i++) {
1623       // really shouldn't be NULL but what the heck, check can't hurt
1624       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1625         continue; // skip the empty path values
1626       }
1627       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1628       if (file_exists(buffer)) {
1629         retval = true;
1630         break;
1631       }
1632     }
1633     // release the storage
1634     for (int i = 0 ; i < n ; i++) {
1635       if (pelements[i] != NULL) {
1636         FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1637       }
1638     }
1639     if (pelements != NULL) {
1640       FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1641     }
1642   } else {
1643     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1644     retval = true;
1645   }
1646   return retval;
1647 }
1648 
1649 // check if addr is inside libjvm.so
1650 bool os::address_is_in_vm(address addr) {
1651   static address libjvm_base_addr;
1652   Dl_info dlinfo;
1653 
1654   if (libjvm_base_addr == NULL) {
1655     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1656       libjvm_base_addr = (address)dlinfo.dli_fbase;
1657     }
1658     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1659   }
1660 
1661   if (dladdr((void *)addr, &dlinfo) != 0) {
1662     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1663   }
1664 
1665   return false;
1666 }
1667 
1668 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
1669 static dladdr1_func_type dladdr1_func = NULL;
1670 
1671 bool os::dll_address_to_function_name(address addr, char *buf,
1672                                       int buflen, int * offset) {
1673   // buf is not optional, but offset is optional
1674   assert(buf != NULL, "sanity check");
1675 
1676   Dl_info dlinfo;
1677 
1678   // dladdr1_func was initialized in os::init()
1679   if (dladdr1_func != NULL) {
1680     // yes, we have dladdr1
1681 
1682     // Support for dladdr1 is checked at runtime; it may be
1683     // available even if the vm is built on a machine that does
1684     // not have dladdr1 support.  Make sure there is a value for
1685     // RTLD_DL_SYMENT.
1686     #ifndef RTLD_DL_SYMENT
1687     #define RTLD_DL_SYMENT 1
1688     #endif
1689 #ifdef _LP64
1690     Elf64_Sym * info;
1691 #else
1692     Elf32_Sym * info;
1693 #endif
1694     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1695                      RTLD_DL_SYMENT) != 0) {
1696       // see if we have a matching symbol that covers our address
1697       if (dlinfo.dli_saddr != NULL &&
1698           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1699         if (dlinfo.dli_sname != NULL) {
1700           if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1701             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1702           }
1703           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1704           return true;
1705         }
1706       }
1707       // no matching symbol so try for just file info
1708       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1709         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1710                             buf, buflen, offset, dlinfo.dli_fname)) {
1711           return true;
1712         }
1713       }
1714     }
1715     buf[0] = '\0';
1716     if (offset != NULL) *offset  = -1;
1717     return false;
1718   }
1719 
1720   // no, only dladdr is available
1721   if (dladdr((void *)addr, &dlinfo) != 0) {
1722     // see if we have a matching symbol
1723     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1724       if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1725         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1726       }
1727       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1728       return true;
1729     }
1730     // no matching symbol so try for just file info
1731     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1732       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1733                           buf, buflen, offset, dlinfo.dli_fname)) {
1734         return true;
1735       }
1736     }
1737   }
1738   buf[0] = '\0';
1739   if (offset != NULL) *offset  = -1;
1740   return false;
1741 }
1742 
1743 bool os::dll_address_to_library_name(address addr, char* buf,
1744                                      int buflen, int* offset) {
1745   // buf is not optional, but offset is optional
1746   assert(buf != NULL, "sanity check");
1747 
1748   Dl_info dlinfo;
1749 
1750   if (dladdr((void*)addr, &dlinfo) != 0) {
1751     if (dlinfo.dli_fname != NULL) {
1752       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1753     }
1754     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1755       *offset = addr - (address)dlinfo.dli_fbase;
1756     }
1757     return true;
1758   }
1759 
1760   buf[0] = '\0';
1761   if (offset) *offset = -1;
1762   return false;
1763 }
1764 
1765 // Prints the names and full paths of all opened dynamic libraries
1766 // for current process
1767 void os::print_dll_info(outputStream * st) {
1768   Dl_info dli;
1769   void *handle;
1770   Link_map *map;
1771   Link_map *p;
1772 
1773   st->print_cr("Dynamic libraries:"); st->flush();
1774 
1775   if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
1776       dli.dli_fname == NULL) {
1777     st->print_cr("Error: Cannot print dynamic libraries.");
1778     return;
1779   }
1780   handle = dlopen(dli.dli_fname, RTLD_LAZY);
1781   if (handle == NULL) {
1782     st->print_cr("Error: Cannot print dynamic libraries.");
1783     return;
1784   }
1785   dlinfo(handle, RTLD_DI_LINKMAP, &map);
1786   if (map == NULL) {
1787     st->print_cr("Error: Cannot print dynamic libraries.");
1788     return;
1789   }
1790 
1791   while (map->l_prev != NULL)
1792     map = map->l_prev;
1793 
1794   while (map != NULL) {
1795     st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
1796     map = map->l_next;
1797   }
1798 
1799   dlclose(handle);
1800 }
1801 
1802 // Loads a .dll/.so and,
1803 // in case of error, checks whether the .dll/.so was built for the
1804 // same architecture that HotSpot is running on.
1805 
1806 void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
1807 {
1808   void * result= ::dlopen(filename, RTLD_LAZY);
1809   if (result != NULL) {
1810     // Successful loading
1811     return result;
1812   }
1813 
1814   Elf32_Ehdr elf_head;
1815 
1816   // Read system error message into ebuf
1817   // It may or may not be overwritten below
1818   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1819   ebuf[ebuflen-1]='\0';
1820   int diag_msg_max_length=ebuflen-strlen(ebuf);
1821   char* diag_msg_buf=ebuf+strlen(ebuf);
1822 
1823   if (diag_msg_max_length==0) {
1824     // No more space in ebuf for additional diagnostics message
1825     return NULL;
1826   }
1827 
1828 
1829   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1830 
1831   if (file_descriptor < 0) {
1832     // Can't open library, report dlerror() message
1833     return NULL;
1834   }
1835 
1836   bool failed_to_read_elf_head=
1837     (sizeof(elf_head)!=
1838         (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;
1839 
1840   ::close(file_descriptor);
1841   if (failed_to_read_elf_head) {
1842     // file i/o error - report dlerror() msg
1843     return NULL;
1844   }
1845 
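  // The table below is used only to produce a readable diagnostic when
  // dlopen() fails: the library's ELF header is compared against the
  // running VM's architecture (machine code, word width, and byte order).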
1846   typedef struct {
1847     Elf32_Half  code;         // Actual value as defined in elf.h
1848     Elf32_Half  compat_class; // Compatibility of archs in the VM's sense
1849     char        elf_class;    // 32 or 64 bit
1850     char        endianess;    // MSB or LSB
1851     char*       name;         // String representation
1852   } arch_t;
1853 
1854   static const arch_t arch_array[]={
1855     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1856     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1857     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1858     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1859     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1860     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1861     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1862     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1863     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1864     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1865   };
1866 
1867   #if  (defined IA32)
1868     static  Elf32_Half running_arch_code=EM_386;
1869   #elif   (defined AMD64)
1870     static  Elf32_Half running_arch_code=EM_X86_64;
1871   #elif  (defined IA64)
1872     static  Elf32_Half running_arch_code=EM_IA_64;
1873   #elif  (defined __sparc) && (defined _LP64)
1874     static  Elf32_Half running_arch_code=EM_SPARCV9;
1875   #elif  (defined __sparc) && (!defined _LP64)
1876     static  Elf32_Half running_arch_code=EM_SPARC;
1877   #elif  (defined __powerpc64__)
1878     static  Elf32_Half running_arch_code=EM_PPC64;
1879   #elif  (defined __powerpc__)
1880     static  Elf32_Half running_arch_code=EM_PPC;
1881   #elif (defined ARM)
1882     static  Elf32_Half running_arch_code=EM_ARM;
1883   #else
1884     #error Method os::dll_load requires that one of following is defined:\
1885          IA32, AMD64, IA64, __sparc, __powerpc__, __powerpc64__, ARM
1886   #endif
1887 
1888   // Identify compatibility class for VM's architecture and library's architecture
1889   // Obtain string descriptions for architectures
1890 
1891   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1892   int running_arch_index=-1;
1893 
1894   for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
1895     if (running_arch_code == arch_array[i].code) {
1896       running_arch_index    = i;
1897     }
1898     if (lib_arch.code == arch_array[i].code) {
1899       lib_arch.compat_class = arch_array[i].compat_class;
1900       lib_arch.name         = arch_array[i].name;
1901     }
1902   }
1903 
1904   assert(running_arch_index != -1,
1905     "Didn't find running architecture code (running_arch_code) in arch_array");
1906   if (running_arch_index == -1) {
1907     // Even though running architecture detection failed
1908     // we may still continue with reporting dlerror() message
1909     return NULL;
1910   }
1911 
1912   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1913     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1914     return NULL;
1915   }
1916 
1917   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1918     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1919     return NULL;
1920   }
1921 
1922   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1923     if ( lib_arch.name!=NULL ) {
1924       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1925         " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1926         lib_arch.name, arch_array[running_arch_index].name);
1927     } else {
1928       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1929       " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1930         lib_arch.code,
1931         arch_array[running_arch_index].name);
1932     }
1933   }
1934 
1935   return NULL;
1936 }
1937 
1938 void* os::dll_lookup(void* handle, const char* name) {
1939   return dlsym(handle, name);
1940 }
1941 
1942 void* os::get_default_process_handle() {
1943   return (void*)::dlopen(NULL, RTLD_LAZY);
1944 }
1945 
1946 int os::stat(const char *path, struct stat *sbuf) {
1947   char pathbuf[MAX_PATH];
1948   if (strlen(path) > MAX_PATH - 1) {
1949     errno = ENAMETOOLONG;
1950     return -1;
1951   }
1952   os::native_path(strcpy(pathbuf, path));
1953   return ::stat(pathbuf, sbuf);
1954 }
1955 
1956 static bool _print_ascii_file(const char* filename, outputStream* st) {
1957   int fd = ::open(filename, O_RDONLY);
1958   if (fd == -1) {
1959      return false;
1960   }
1961 
1962   char buf[32];
1963   int bytes;
1964   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
1965     st->print_raw(buf, bytes);
1966   }
1967 
1968   ::close(fd);
1969 
1970   return true;
1971 }
1972 
1973 void os::print_os_info_brief(outputStream* st) {
1974   os::Solaris::print_distro_info(st);
1975 
1976   os::Posix::print_uname_info(st);
1977 
1978   os::Solaris::print_libversion_info(st);
1979 }
1980 
1981 void os::print_os_info(outputStream* st) {
1982   st->print("OS:");
1983 
1984   os::Solaris::print_distro_info(st);
1985 
1986   os::Posix::print_uname_info(st);
1987 
1988   os::Solaris::print_libversion_info(st);
1989 
1990   os::Posix::print_rlimit_info(st);
1991 
1992   os::Posix::print_load_average(st);
1993 }
1994 
1995 void os::Solaris::print_distro_info(outputStream* st) {
1996   if (!_print_ascii_file("/etc/release", st)) {
1997       st->print("Solaris");
1998     }
1999     st->cr();
2000 }
2001 
2002 void os::Solaris::print_libversion_info(outputStream* st) {
2003   if (os::Solaris::T2_libthread()) {
2004     st->print("  (T2 libthread)");
2005   }
2006   else {
2007     st->print("  (T1 libthread)");
2008   }
2009   st->cr();
2010 }
2011 
2012 static bool check_addr0(outputStream* st) {
2013   jboolean status = false;
2014   int fd = ::open("/proc/self/map",O_RDONLY);
2015   if (fd >= 0) {
2016     prmap_t p;
2017     while(::read(fd, &p, sizeof(p)) > 0) {
2018       if (p.pr_vaddr == 0x0) {
2019         st->print("Warning: Address: " PTR_FORMAT ", Size: " SIZE_FORMAT "K, ", p.pr_vaddr, p.pr_size/1024);
2020         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
2021         st->print("Access:");
2022         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
2023         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
2024         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
2025         st->cr();
2026         status = true;
2027       }
2028     }
2029     ::close(fd);
2030   }
2031   return status;
2032 }
2033 
2034 void os::pd_print_cpu_info(outputStream* st) {
2035   // Nothing to do for now.
2036 }
2037 
2038 void os::print_memory_info(outputStream* st) {
2039   st->print("Memory:");
2040   st->print(" %dk page", os::vm_page_size()>>10);
2041   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
2042   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
2043   st->cr();
2044   if (VMError::fatal_error_in_progress()) {
2045      (void) check_addr0(st);
2046   }
2047 }
2048 
2049 void os::print_siginfo(outputStream* st, void* siginfo) {
2050   const siginfo_t* si = (const siginfo_t*)siginfo;
2051 
2052   os::Posix::print_siginfo_brief(st, si);
2053 
2054   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2055       UseSharedSpaces) {
2056     FileMapInfo* mapinfo = FileMapInfo::current_info();
2057     if (mapinfo->is_in_shared_space(si->si_addr)) {
2058       st->print("\n\nError accessing class data sharing archive."   \
2059                 " Mapped file inaccessible during execution, "      \
2060                 " possible disk/network problem.");
2061     }
2062   }
2063   st->cr();
2064 }
2065 
2066 // Moved here from the signal handling code below, because we need them
2067 // for the diagnostic prints.
2068 #define OLDMAXSIGNUM 32
2069 static int Maxsignum = 0;
2070 static int *ourSigFlags = NULL;
2071 
2072 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2073 
2074 int os::Solaris::get_our_sigflags(int sig) {
2075   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2076   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2077   return ourSigFlags[sig];
2078 }
2079 
2080 void os::Solaris::set_our_sigflags(int sig, int flags) {
2081   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2082   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2083   ourSigFlags[sig] = flags;
2084 }
2085 
2086 
2087 static const char* get_signal_handler_name(address handler,
2088                                            char* buf, int buflen) {
2089   int offset;
2090   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2091   if (found) {
2092     // skip directory names
2093     const char *p1, *p2;
2094     p1 = buf;
2095     size_t len = strlen(os::file_separator());
2096     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2097     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2098   } else {
2099     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2100   }
2101   return buf;
2102 }
2103 
2104 static void print_signal_handler(outputStream* st, int sig,
2105                                   char* buf, size_t buflen) {
2106   struct sigaction sa;
2107 
2108   sigaction(sig, NULL, &sa);
2109 
2110   st->print("%s: ", os::exception_name(sig, buf, buflen));
2111 
2112   address handler = (sa.sa_flags & SA_SIGINFO)
2113                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2114                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
2115 
2116   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2117     st->print("SIG_DFL");
2118   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2119     st->print("SIG_IGN");
2120   } else {
2121     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2122   }
2123 
2124   st->print(", sa_mask[0]=");
2125   os::Posix::print_signal_set_short(st, &sa.sa_mask);
2126 
2127   address rh = VMError::get_resetted_sighandler(sig);
2128   // Maybe the handler was reset by VMError?
2129   if (rh != NULL) {
2130     handler = rh;
2131     sa.sa_flags = VMError::get_resetted_sigflags(sig);
2132   }
2133 
2134   st->print(", sa_flags=");
2135   os::Posix::print_sa_flags(st, sa.sa_flags);
2136 
2137   // Check: is it our handler?
2138   if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
2139      handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
2140     // It is our signal handler
2141     // check for flags
2142     if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
2143       st->print(
2144         ", flags were changed from " PTR32_FORMAT ", consider using jsig library",
2145         os::Solaris::get_our_sigflags(sig));
2146     }
2147   }
2148   st->cr();
2149 }
2150 
2151 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2152   st->print_cr("Signal Handlers:");
2153   print_signal_handler(st, SIGSEGV, buf, buflen);
2154   print_signal_handler(st, SIGBUS , buf, buflen);
2155   print_signal_handler(st, SIGFPE , buf, buflen);
2156   print_signal_handler(st, SIGPIPE, buf, buflen);
2157   print_signal_handler(st, SIGXFSZ, buf, buflen);
2158   print_signal_handler(st, SIGILL , buf, buflen);
2159   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2160   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2161   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2162   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2163   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2164   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2165   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2166   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2167 }
2168 
2169 static char saved_jvm_path[MAXPATHLEN] = { 0 };
2170 
2171 // Find the full path to the current module, libjvm.so
2172 void os::jvm_path(char *buf, jint buflen) {
2173   // Error checking.
2174   if (buflen < MAXPATHLEN) {
2175     assert(false, "must use a large-enough buffer");
2176     buf[0] = '\0';
2177     return;
2178   }
2179   // Lazy resolve the path to current module.
2180   if (saved_jvm_path[0] != 0) {
2181     strcpy(buf, saved_jvm_path);
2182     return;
2183   }
2184 
2185   Dl_info dlinfo;
2186   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2187   assert(ret != 0, "cannot locate libjvm");
2188   if (ret != 0 && dlinfo.dli_fname != NULL) {
2189     realpath((char *)dlinfo.dli_fname, buf);
2190   } else {
2191     buf[0] = '\0';
2192     return;
2193   }
2194 
2195   if (Arguments::created_by_gamma_launcher()) {
2196     // Support for the gamma launcher.  Typical value for buf is
2197     // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
2198     // the right place in the string, then assume we are installed in a JDK and
2199     // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
2200     // up the path so it looks like libjvm.so is installed there (append a
2201     // fake suffix hotspot/libjvm.so).
2202     const char *p = buf + strlen(buf) - 1;
2203     for (int count = 0; p > buf && count < 5; ++count) {
2204       for (--p; p > buf && *p != '/'; --p)
2205         /* empty */ ;
2206     }
2207 
2208     if (strncmp(p, "/jre/lib/", 9) != 0) {
2209       // Look for JAVA_HOME in the environment.
2210       char* java_home_var = ::getenv("JAVA_HOME");
2211       if (java_home_var != NULL && java_home_var[0] != 0) {
2212         char cpu_arch[12];
2213         char* jrelib_p;
2214         int   len;
2215         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2216 #ifdef _LP64
2217         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2218         if (strcmp(cpu_arch, "sparc") == 0) {
2219           strcat(cpu_arch, "v9");
2220         } else if (strcmp(cpu_arch, "i386") == 0) {
2221           strcpy(cpu_arch, "amd64");
2222         }
2223 #endif
2224         // Check the current module name "libjvm.so".
2225         p = strrchr(buf, '/');
2226         assert(strstr(p, "/libjvm") == p, "invalid library name");
2227 
2228         realpath(java_home_var, buf);
2229         // determine if this is a legacy image or modules image
2230         // modules image doesn't have "jre" subdirectory
2231         len = strlen(buf);
2232         assert(len < buflen, "Ran out of buffer space");
2233         jrelib_p = buf + len;
2234         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2235         if (0 != access(buf, F_OK)) {
2236           snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2237         }
2238 
2239         if (0 == access(buf, F_OK)) {
2240           // Use current module name "libjvm.so"
2241           len = strlen(buf);
2242           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2243         } else {
2244           // Go back to path of .so
2245           realpath((char *)dlinfo.dli_fname, buf);
2246         }
2247       }
2248     }
2249   }
2250 
2251   strncpy(saved_jvm_path, buf, MAXPATHLEN);
2252 }
2253 
2254 
2255 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2256   // no prefix required, not even "_"
2257 }
2258 
2259 
2260 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2261   // no suffix required
2262 }
2263 
2264 // This method is a copy of JDK's sysGetLastErrorString
2265 // from src/solaris/hpi/src/system_md.c
2266 
2267 size_t os::lasterror(char *buf, size_t len) {
2268 
2269   if (errno == 0)  return 0;
2270 
2271   const char *s = ::strerror(errno);
2272   size_t n = ::strlen(s);
2273   if (n >= len) {
2274     n = len - 1;
2275   }
2276   ::strncpy(buf, s, n);
2277   buf[n] = '\0';
2278   return n;
2279 }
2280 
2281 
2282 // sun.misc.Signal
2283 
2284 extern "C" {
2285   static void UserHandler(int sig, void *siginfo, void *context) {
2286     // Ctrl-C is pressed during error reporting, likely because the error
2287     // handler fails to abort. Let VM die immediately.
2288     if (sig == SIGINT && is_error_reported()) {
2289        os::die();
2290     }
2291 
2292     os::signal_notify(sig);
2293     // We do not need to reinstate the signal handler each time...
2294   }
2295 }
2296 
2297 void* os::user_handler() {
2298   return CAST_FROM_FN_PTR(void*, UserHandler);
2299 }
2300 
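// Thin wrapper around the Solaris sema_t API
// (sema_init / sema_post / sema_wait / sema_trywait / sema_timedwait).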
2301 class Semaphore : public StackObj {
2302   public:
2303     Semaphore();
2304     ~Semaphore();
2305     void signal();
2306     void wait();
2307     bool trywait();
2308     bool timedwait(unsigned int sec, int nsec);
2309   private:
2310     sema_t _semaphore;
2311 };
2312 
2313 
2314 Semaphore::Semaphore() {
2315   sema_init(&_semaphore, 0, NULL, NULL);
2316 }
2317 
2318 Semaphore::~Semaphore() {
2319   sema_destroy(&_semaphore);
2320 }
2321 
2322 void Semaphore::signal() {
2323   sema_post(&_semaphore);
2324 }
2325 
2326 void Semaphore::wait() {
2327   sema_wait(&_semaphore);
2328 }
2329 
2330 bool Semaphore::trywait() {
2331   return sema_trywait(&_semaphore) == 0;
2332 }
2333 
2334 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2335   struct timespec ts;
2336   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2337 
2338   while (1) {
2339     int result = sema_timedwait(&_semaphore, &ts);
2340     if (result == 0) {
2341       return true;
2342     } else if (errno == EINTR) {
2343       continue;
2344     } else if (errno == ETIME) {
2345       return false;
2346     } else {
2347       return false;
2348     }
2349   }
2350 }
2351 
2352 extern "C" {
2353   typedef void (*sa_handler_t)(int);
2354   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2355 }
2356 
2357 void* os::signal(int signal_number, void* handler) {
2358   struct sigaction sigAct, oldSigAct;
2359   sigfillset(&(sigAct.sa_mask));
2360   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2361   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2362 
2363   if (sigaction(signal_number, &sigAct, &oldSigAct))
2364     // -1 means registration failed
2365     return (void *)-1;
2366 
2367   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2368 }
2369 
2370 void os::signal_raise(int signal_number) {
2371   raise(signal_number);
2372 }
2373 
2374 /*
2375  * The following code was moved here from os.cpp because it is
2376  * platform-specific by its very nature.
2377  */
2378 
2379 // a counter for each possible signal value
2380 static int Sigexit = 0;
2381 static int Maxlibjsigsigs;
2382 static jint *pending_signals = NULL;
2383 static int *preinstalled_sigs = NULL;
2384 static struct sigaction *chainedsigactions = NULL;
2385 static sema_t sig_sem;
2386 typedef int (*version_getting_t)();
2387 version_getting_t os::Solaris::get_libjsig_version = NULL;
2388 static int libjsigversion = 0;
2389 
2390 int os::sigexitnum_pd() {
2391   assert(Sigexit > 0, "signal memory not yet initialized");
2392   return Sigexit;
2393 }
2394 
2395 void os::Solaris::init_signal_mem() {
2396   // Initialize signal structures
2397   Maxsignum = SIGRTMAX;
2398   Sigexit = Maxsignum+1;
2399   assert(Maxsignum >0, "Unable to obtain max signal number");
2400 
2401   Maxlibjsigsigs = Maxsignum;
2402 
2403   // pending_signals has one int per signal
2404   // The additional signal is for SIGEXIT - exit signal to signal_thread
2405   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2406   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2407 
2408   if (UseSignalChaining) {
2409      chainedsigactions = (struct sigaction *)os::malloc(sizeof(struct sigaction)
2410        * (Maxsignum + 1), mtInternal);
2411      memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2412      preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2413      memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2414   }
2415   ourSigFlags = (int*)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2416   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2417 }
2418 
2419 void os::signal_init_pd() {
2420   int ret;
2421 
2422   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2423   assert(ret == 0, "sema_init() failed");
2424 }
2425 
2426 void os::signal_notify(int signal_number) {
2427   int ret;
2428 
2429   Atomic::inc(&pending_signals[signal_number]);
2430   ret = ::sema_post(&sig_sem);
2431   assert(ret == 0, "sema_post() failed");
2432 }
2433 
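// Scan the per-signal counters and claim one pending signal by atomically
// decrementing its counter with cmpxchg.  If none is pending and
// wait_for_signal is true, block on sig_sem (posted by os::signal_notify());
// if we were externally suspended while waiting, re-post the semaphore and
// suspend ourselves before retrying.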
2434 static int check_pending_signals(bool wait_for_signal) {
2435   int ret;
2436   while (true) {
2437     for (int i = 0; i < Sigexit + 1; i++) {
2438       jint n = pending_signals[i];
2439       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2440         return i;
2441       }
2442     }
2443     if (!wait_for_signal) {
2444       return -1;
2445     }
2446     JavaThread *thread = JavaThread::current();
2447     ThreadBlockInVM tbivm(thread);
2448 
2449     bool threadIsSuspended;
2450     do {
2451       thread->set_suspend_equivalent();
2452       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2453       while((ret = ::sema_wait(&sig_sem)) == EINTR)
2454           ;
2455       assert(ret == 0, "sema_wait() failed");
2456 
2457       // were we externally suspended while we were waiting?
2458       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2459       if (threadIsSuspended) {
2460         //
2461         // The semaphore has been incremented, but while we were waiting
2462         // another thread suspended us. We don't want to continue running
2463         // while suspended because that would surprise the thread that
2464         // suspended us.
2465         //
2466         ret = ::sema_post(&sig_sem);
2467         assert(ret == 0, "sema_post() failed");
2468 
2469         thread->java_suspend_self();
2470       }
2471     } while (threadIsSuspended);
2472   }
2473 }
2474 
2475 int os::signal_lookup() {
2476   return check_pending_signals(false);
2477 }
2478 
2479 int os::signal_wait() {
2480   return check_pending_signals(true);
2481 }
2482 
2483 ////////////////////////////////////////////////////////////////////////////////
2484 // Virtual Memory
2485 
2486 static int page_size = -1;
2487 
2488 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2489 // clear this var if support is not available.
2490 static bool has_map_align = true;
2491 
2492 int os::vm_page_size() {
2493   assert(page_size != -1, "must call os::init");
2494   return page_size;
2495 }
2496 
2497 // Solaris allocates memory by pages.
2498 int os::vm_allocation_granularity() {
2499   assert(page_size != -1, "must call os::init");
2500   return page_size;
2501 }
2502 
2503 static bool recoverable_mmap_error(int err) {
2504   // See if the error is one we can let the caller handle. This
2505   // list of errno values comes from the Solaris mmap(2) man page.
2506   switch (err) {
2507   case EBADF:
2508   case EINVAL:
2509   case ENOTSUP:
2510     // let the caller deal with these errors
2511     return true;
2512 
2513   default:
2514     // Any remaining errors on this OS can cause our reserved mapping
2515     // to be lost. That can cause confusion where different data
2516     // structures think they have the same memory mapped. The worst
2517     // scenario is if both the VM and a library think they have the
2518     // same memory mapped.
2519     return false;
2520   }
2521 }
2522 
2523 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2524                                     int err) {
2525   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2526           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2527           strerror(err), err);
2528 }
2529 
2530 static void warn_fail_commit_memory(char* addr, size_t bytes,
2531                                     size_t alignment_hint, bool exec,
2532                                     int err) {
2533   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2534           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2535           alignment_hint, exec, strerror(err), err);
2536 }
2537 
2538 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2539   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2540   size_t size = bytes;
2541   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2542   if (res != NULL) {
2543     if (UseNUMAInterleaving) {
2544       numa_make_global(addr, bytes);
2545     }
2546     return 0;
2547   }
2548 
2549   int err = errno;  // save errno from mmap() call in mmap_chunk()
2550 
2551   if (!recoverable_mmap_error(err)) {
2552     warn_fail_commit_memory(addr, bytes, exec, err);
2553     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2554   }
2555 
2556   return err;
2557 }
2558 
2559 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2560   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2561 }
2562 
2563 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2564                                   const char* mesg) {
2565   assert(mesg != NULL, "mesg must be specified");
2566   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2567   if (err != 0) {
2568     // the caller wants all commit errors to exit with the specified mesg:
2569     warn_fail_commit_memory(addr, bytes, exec, err);
2570     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2571   }
2572 }
2573 
2574 size_t os::Solaris::page_size_for_alignment(size_t alignment) {
2575   assert(is_size_aligned(alignment, (size_t) vm_page_size()),
2576          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
2577                  alignment, (size_t) vm_page_size()));
2578 
2579   for (int i = 0; _page_sizes[i] != 0; i++) {
2580     if (is_size_aligned(alignment, _page_sizes[i])) {
2581       return _page_sizes[i];
2582     }
2583   }
2584 
2585   return (size_t) vm_page_size();
2586 }
2587 
2588 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2589                                     size_t alignment_hint, bool exec) {
2590   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2591   if (err == 0 && UseLargePages && alignment_hint > 0) {
2592     assert(is_size_aligned(bytes, alignment_hint),
2593            err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint));
2594 
2595     // The syscall memcntl requires an exact page size (see man memcntl for details).
2596     size_t page_size = page_size_for_alignment(alignment_hint);
2597     if (page_size > (size_t) vm_page_size()) {
2598       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2599     }
2600   }
2601   return err;
2602 }
2603 
2604 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2605                           bool exec) {
2606   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2607 }
2608 
2609 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2610                                   size_t alignment_hint, bool exec,
2611                                   const char* mesg) {
2612   assert(mesg != NULL, "mesg must be specified");
2613   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2614   if (err != 0) {
2615     // the caller wants all commit errors to exit with the specified mesg:
2616     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2617     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2618   }
2619 }
2620 
2621 // Uncommit the pages in a specified region.
2622 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2623   if (madvise(addr, bytes, MADV_FREE) < 0) {
2624     debug_only(warning("MADV_FREE failed."));
2625     return;
2626   }
2627 }
2628 
2629 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2630   return os::commit_memory(addr, size, !ExecMem);
2631 }
2632 
2633 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2634   return os::uncommit_memory(addr, size);
2635 }
2636 
2637 // Change the page size in a given range.
2638 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2639   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2640   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2641   if (UseLargePages) {
2642     Solaris::setup_large_pages(addr, bytes, alignment_hint);
2643   }
2644 }
2645 
2646 // Tell the OS to make the range local to the first-touching LWP
2647 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2648   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2649   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2650     debug_only(warning("MADV_ACCESS_LWP failed."));
2651   }
2652 }
2653 
2654 // Tell the OS that this range would be accessed from different LWPs.
2655 void os::numa_make_global(char *addr, size_t bytes) {
2656   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2657   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2658     debug_only(warning("MADV_ACCESS_MANY failed."));
2659   }
2660 }
2661 
2662 // Get the number of the locality groups.
2663 size_t os::numa_get_groups_num() {
2664   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2665   return n != -1 ? n : 1;
2666 }
2667 
2668 // Get a list of leaf locality groups. A leaf lgroup is a group that
2669 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
2670 // board. An LWP is assigned to one of these groups upon creation.
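// The ids array doubles as the traversal work queue (ids[cur..top-1]) and
// as the result buffer: only leaf groups that actually own memory
// (LGRP_RSRC_MEM) are compacted into ids[0..bottom-1].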
2671 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2672    if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2673      ids[0] = 0;
2674      return 1;
2675    }
2676    int result_size = 0, top = 1, bottom = 0, cur = 0;
2677    for (int k = 0; k < size; k++) {
2678      int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2679                                     (Solaris::lgrp_id_t*)&ids[top], size - top);
2680      if (r == -1) {
2681        ids[0] = 0;
2682        return 1;
2683      }
2684      if (!r) {
2685        // That's a leaf node.
2686        assert (bottom <= cur, "Sanity check");
2687        // Check if the node has memory
2688        if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2689                                    NULL, 0, LGRP_RSRC_MEM) > 0) {
2690          ids[bottom++] = ids[cur];
2691        }
2692      }
2693      top += r;
2694      cur++;
2695    }
2696    if (bottom == 0) {
2697      // Handle the situation when the OS reports no memory available.
2698      // Assume UMA architecture.
2699      ids[0] = 0;
2700      return 1;
2701    }
2702    return bottom;
2703 }
2704 
2705 // Detect the topology change. Typically happens during CPU plugging-unplugging.
2706 bool os::numa_topology_changed() {
2707   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2708   if (is_stale != -1 && is_stale) {
2709     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2710     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2711     assert(c != 0, "Failure to initialize LGRP API");
2712     Solaris::set_lgrp_cookie(c);
2713     return true;
2714   }
2715   return false;
2716 }
2717 
2718 // Get the group id of the current LWP.
2719 int os::numa_get_group_id() {
2720   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2721   if (lgrp_id == -1) {
2722     return 0;
2723   }
2724   const int size = os::numa_get_groups_num();
2725   int *ids = (int*)alloca(size * sizeof(int));
2726 
2727   // Get the ids of all lgroups with memory; r is the count.
2728   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2729                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2730   if (r <= 0) {
2731     return 0;
2732   }
2733   return ids[os::random() % r];
2734 }
2735 
2736 // Request information about the page.
2737 bool os::get_page_info(char *start, page_info* info) {
2738   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2739   uint64_t addr = (uintptr_t)start;
2740   uint64_t outdata[2];
2741   uint_t validity = 0;
2742 
2743   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2744     return false;
2745   }
2746 
2747   info->size = 0;
2748   info->lgrp_id = -1;
2749 
2750   if ((validity & 1) != 0) {
2751     if ((validity & 2) != 0) {
2752       info->lgrp_id = outdata[0];
2753     }
2754     if ((validity & 4) != 0) {
2755       info->size = outdata[1];
2756     }
2757     return true;
2758   }
2759   return false;
2760 }
2761 
2762 // Scan the pages from start to end until a page different than
2763 // the one described in the info parameter is encountered.
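// Pages are queried in batches of up to MAX_MEMINFO_CNT addresses per
// meminfo(2) call; the scan stops at the first page whose size or lgroup
// differs from page_expected, and that page is reported through page_found.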
2764 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2765   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2766   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2767   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2768   uint_t validity[MAX_MEMINFO_CNT];
2769 
2770   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2771   uint64_t p = (uint64_t)start;
2772   while (p < (uint64_t)end) {
2773     addrs[0] = p;
2774     size_t addrs_count = 1;
2775     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2776       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2777       addrs_count++;
2778     }
2779 
2780     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2781       return NULL;
2782     }
2783 
2784     size_t i = 0;
2785     for (; i < addrs_count; i++) {
2786       if ((validity[i] & 1) != 0) {
2787         if ((validity[i] & 4) != 0) {
2788           if (outdata[types * i + 1] != page_expected->size) {
2789             break;
2790           }
2791         } else
2792           if (page_expected->size != 0) {
2793             break;
2794           }
2795 
2796         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2797           if (outdata[types * i] != page_expected->lgrp_id) {
2798             break;
2799           }
2800         }
2801       } else {
2802         return NULL;
2803       }
2804     }
2805 
2806     if (i < addrs_count) {
2807       if ((validity[i] & 2) != 0) {
2808         page_found->lgrp_id = outdata[types * i];
2809       } else {
2810         page_found->lgrp_id = -1;
2811       }
2812       if ((validity[i] & 4) != 0) {
2813         page_found->size = outdata[types * i + 1];
2814       } else {
2815         page_found->size = 0;
2816       }
2817       return (char*)addrs[i];
2818     }
2819 
2820     p = addrs[addrs_count - 1] + page_size;
2821   }
2822   return end;
2823 }
2824 
2825 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2826   size_t size = bytes;
2827   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2828   // uncommitted page. Otherwise, the read/write might succeed if we
2829   // have enough swap space to back the physical page.
2830   return
2831     NULL != Solaris::mmap_chunk(addr, size,
2832                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2833                                 PROT_NONE);
2834 }
2835 
2836 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2837   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2838 
2839   if (b == MAP_FAILED) {
2840     return NULL;
2841   }
2842   return b;
2843 }
2844 
2845 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
2846   char* addr = requested_addr;
2847   int flags = MAP_PRIVATE | MAP_NORESERVE;
2848 
2849   assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
2850 
2851   if (fixed) {
2852     flags |= MAP_FIXED;
2853   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
2854     flags |= MAP_ALIGN;
2855     addr = (char*) alignment_hint;
2856   }
2857 
2858   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2859   // uncommitted page. Otherwise, the read/write might succeed if we
2860   // have enough swap space to back the physical page.
2861   return mmap_chunk(addr, bytes, flags, PROT_NONE);
2862 }
2863 
2864 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2865   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
2866 
2867   guarantee(requested_addr == NULL || requested_addr == addr,
2868             "OS failed to return requested mmap address.");
2869   return addr;
2870 }
2871 
2872 // Reserve memory at an arbitrary address, only if that area is
2873 // available (and not reserved for something else).
2874 
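// Strategy: first anon_mmap() with the requested address as a hint (honored
// by the kernel since snv_84).  If the mapping lands elsewhere, fall back to
// repeatedly reserving blocks and trimming the parts that overlap the
// requested range (compensating for the inter-mapping gap) until a block
// lands exactly at requested_addr, or max_tries is exhausted.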
2875 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2876   const int max_tries = 10;
2877   char* base[max_tries];
2878   size_t size[max_tries];
2879 
2880   // Solaris adds a gap between mmap'ed regions.  The size of the gap
2881   // is dependent on the requested size and the MMU.  Our initial gap
2882   // value here is just a guess and will be corrected later.
2883   bool had_top_overlap = false;
2884   bool have_adjusted_gap = false;
2885   size_t gap = 0x400000;
2886 
2887   // Assert only that the size is a multiple of the page size, since
2888   // that's all that mmap requires, and since that's all we really know
2889   // about at this low abstraction level.  If we need higher alignment,
2890   // we can either pass an alignment to this method or verify alignment
2891   // in one of the methods further up the call chain.  See bug 5044738.
2892   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2893 
2894   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2895   // Give it a try, if the kernel honors the hint we can return immediately.
2896   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2897 
2898   volatile int err = errno;
2899   if (addr == requested_addr) {
2900     return addr;
2901   } else if (addr != NULL) {
2902     pd_unmap_memory(addr, bytes);
2903   }
2904 
2905   if (PrintMiscellaneous && Verbose) {
2906     char buf[256];
2907     buf[0] = '\0';
2908     if (addr == NULL) {
2909       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
2910     }
2911     warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2912             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2913             "%s", bytes, requested_addr, addr, buf);
2914   }
2915 
2916   // Address hint method didn't work.  Fall back to the old method.
2917   // In theory, once SNV becomes our oldest supported platform, this
2918   // code will no longer be needed.
2919   //
2920   // Repeatedly allocate blocks until the block is allocated at the
2921   // right spot. Give up after max_tries.
2922   int i;
2923   for (i = 0; i < max_tries; ++i) {
2924     base[i] = reserve_memory(bytes);
2925 
2926     if (base[i] != NULL) {
2927       // Is this the block we wanted?
2928       if (base[i] == requested_addr) {
2929         size[i] = bytes;
2930         break;
2931       }
2932 
2933       // check that the gap value is right
2934       if (had_top_overlap && !have_adjusted_gap) {
2935         size_t actual_gap = base[i-1] - base[i] - bytes;
2936         if (gap != actual_gap) {
2937           // adjust the gap value and retry the last 2 allocations
2938           assert(i > 0, "gap adjustment code problem");
2939           have_adjusted_gap = true;  // adjust the gap only once, just in case
2940           gap = actual_gap;
2941           if (PrintMiscellaneous && Verbose) {
2942             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2943           }
2944           unmap_memory(base[i], bytes);
2945           unmap_memory(base[i-1], size[i-1]);
2946           i-=2;
2947           continue;
2948         }
2949       }
2950 
2951       // Does this overlap the block we wanted? Give back the overlapped
2952       // parts and try again.
2953       //
2954       // There is still a bug in this code: if top_overlap == bytes,
2955       // the overlap is offset from requested region by the value of gap.
2956       // In this case giving back the overlapped part will not work,
2957       // because we'll give back the entire block at base[i] and
2958       // therefore the subsequent allocation will not generate a new gap.
2959       // This could be fixed with a new algorithm that used larger
2960       // or variable size chunks to find the requested region -
2961       // but such a change would introduce additional complications.
2962       // It's rare enough that the planets align for this bug,
2963       // so we'll just wait for a fix for 6204603/5003415 which
2964       // will provide a mmap flag to allow us to avoid this business.
2965 
2966       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2967       if (top_overlap >= 0 && top_overlap < bytes) {
2968         had_top_overlap = true;
2969         unmap_memory(base[i], top_overlap);
2970         base[i] += top_overlap;
2971         size[i] = bytes - top_overlap;
2972       } else {
2973         size_t bottom_overlap = base[i] + bytes - requested_addr;
2974         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2975           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
2976             warning("attempt_reserve_memory_at: possible alignment bug");
2977           }
2978           unmap_memory(requested_addr, bottom_overlap);
2979           size[i] = bytes - bottom_overlap;
2980         } else {
2981           size[i] = bytes;
2982         }
2983       }
2984     }
2985   }
2986 
2987   // Give back the unused reserved pieces.
2988 
2989   for (int j = 0; j < i; ++j) {
2990     if (base[j] != NULL) {
2991       unmap_memory(base[j], size[j]);
2992     }
2993   }
2994 
2995   return (i < max_tries) ? requested_addr : NULL;
2996 }
2997 
2998 bool os::pd_release_memory(char* addr, size_t bytes) {
2999   size_t size = bytes;
3000   return munmap(addr, size) == 0;
3001 }
3002 
3003 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
3004   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
3005          "addr must be page aligned");
3006   int retVal = mprotect(addr, bytes, prot);
3007   return retVal == 0;
3008 }
3009 
3010 // Protect memory (Used to pass readonly pages through
3011 // JNI GetArray<type>Elements with empty arrays.)
3012 // Also, used for serialization page and for compressed oops null pointer
3013 // checking.
3014 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3015                         bool is_committed) {
3016   unsigned int p = 0;
3017   switch (prot) {
3018   case MEM_PROT_NONE: p = PROT_NONE; break;
3019   case MEM_PROT_READ: p = PROT_READ; break;
3020   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
3021   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3022   default:
3023     ShouldNotReachHere();
3024   }
3025   // is_committed is unused.
3026   return solaris_mprotect(addr, bytes, p);
3027 }
3028 
3029 // guard_memory and unguard_memory only happen within stack guard pages.
3030 // Since ISM pertains only to the heap, guard and unguard memory should not
3031 // happen with an ISM region.
3032 bool os::guard_memory(char* addr, size_t bytes) {
3033   return solaris_mprotect(addr, bytes, PROT_NONE);
3034 }
3035 
3036 bool os::unguard_memory(char* addr, size_t bytes) {
3037   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
3038 }
3039 
3040 // Large page support
3041 static size_t _large_page_size = 0;
3042 
3043 // Insertion sort for small arrays (descending order).
3044 static void insertion_sort_descending(size_t* array, int len) {
3045   for (int i = 0; i < len; i++) {
3046     size_t val = array[i];
3047     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
3048       size_t tmp = array[key];
3049       array[key] = array[key - 1];
3050       array[key - 1] = tmp;
3051     }
3052   }
3053 }
3054 
3055 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
3056   const unsigned int usable_count = VM_Version::page_size_count();
3057   if (usable_count == 1) {
3058     return false;
3059   }
3060 
3061   // Find the right getpagesizes interface.  When Solaris 11 is the minimum
3062   // build platform, getpagesizes() (without the '2') can be called directly.
3063   typedef int (*gps_t)(size_t[], int);
3064   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3065   if (gps_func == NULL) {
3066     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3067     if (gps_func == NULL) {
3068       if (warn) {
3069         warning("MPSS is not supported by the operating system.");
3070       }
3071       return false;
3072     }
3073   }
3074 
3075   // Fill the array of page sizes.
3076   int n = (*gps_func)(_page_sizes, page_sizes_max);
3077   assert(n > 0, "Solaris bug?");
3078 
3079   if (n == page_sizes_max) {
3080     // Add a sentinel value (necessary only if the array was completely filled
3081     // since it is static (zeroed at initialization)).
3082     _page_sizes[--n] = 0;
3083     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3084   }
3085   assert(_page_sizes[n] == 0, "missing sentinel");
3086   trace_page_sizes("available page sizes", _page_sizes, n);
3087 
3088   if (n == 1) return false;     // Only one page size available.
3089 
3090   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3091   // select up to usable_count elements.  First sort the array, find the first
3092   // acceptable value, then copy the usable sizes to the top of the array and
3093   // trim the rest.  Make sure to include the default page size :-).
3094   //
3095   // A better policy could get rid of the 4M limit by taking the sizes of the
3096   // important VM memory regions (java heap and possibly the code cache) into
3097   // account.
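       // Hypothetical example of the trimming below: with available sizes
       // {256M, 4M, 64K, 8K}, usable_count == 3, a 4M size_limit and an 8K
       // default page, the loop keeps {4M, 64K} and then appends the default
       // page, giving the usable set {4M, 64K, 8K}.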
3098   insertion_sort_descending(_page_sizes, n);
3099   const size_t size_limit =
3100     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3101   int beg;
3102   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
3103   const int end = MIN2((int)usable_count, n) - 1;
3104   for (int cur = 0; cur < end; ++cur, ++beg) {
3105     _page_sizes[cur] = _page_sizes[beg];
3106   }
3107   _page_sizes[end] = vm_page_size();
3108   _page_sizes[end + 1] = 0;
3109 
3110   if (_page_sizes[end] > _page_sizes[end - 1]) {
3111     // Default page size is not the smallest; sort again.
3112     insertion_sort_descending(_page_sizes, end + 1);
3113   }
3114   *page_size = _page_sizes[0];
3115 
3116   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3117   return true;
3118 }
3119 
3120 void os::large_page_init() {
3121   if (UseLargePages) {
3122     // print a warning if any large page related flag is specified on command line
3123     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
3124                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3125 
3126     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3127   }
3128 }
3129 
3130 bool os::Solaris::is_valid_page_size(size_t bytes) {
3131   for (int i = 0; _page_sizes[i] != 0; i++) {
3132     if (_page_sizes[i] == bytes) {
3133       return true;
3134     }
3135   }
3136   return false;
3137 }
3138 
3139 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3140   assert(is_valid_page_size(align), err_msg(SIZE_FORMAT " is not a valid page size", align));
3141   assert(is_ptr_aligned((void*) start, align),
3142          err_msg(PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align));
3143   assert(is_size_aligned(bytes, align),
3144          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align));
3145 
3146   // Signal to OS that we want large pages for addresses
3147   // from addr, addr + bytes
3148   struct memcntl_mha mpss_struct;
3149   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3150   mpss_struct.mha_pagesize = align;
3151   mpss_struct.mha_flags = 0;
3152   // Upon successful completion, memcntl() returns 0
3153   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3154     debug_only(warning("Attempt to use MPSS failed."));
3155     return false;
3156   }
3157   return true;
3158 }
3159 
3160 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3161   fatal("os::reserve_memory_special should not be called on Solaris.");
3162   return NULL;
3163 }
3164 
3165 bool os::release_memory_special(char* base, size_t bytes) {
3166   fatal("os::release_memory_special should not be called on Solaris.");
3167   return false;
3168 }
3169 
3170 size_t os::large_page_size() {
3171   return _large_page_size;
3172 }
3173 
3174 // MPSS allows application to commit large page memory on demand; with ISM
3175 // the entire memory region must be allocated as shared memory.
3176 bool os::can_commit_large_page_memory() {
3177   return true;
3178 }
3179 
3180 bool os::can_execute_large_page_memory() {
3181   return true;
3182 }
3183 
3184 static int os_sleep(jlong millis, bool interruptible) {
3185   const jlong limit = INT_MAX;
3186   jlong prevtime;
3187   int res;
3188 
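       // poll() takes an int timeout in milliseconds, so sleeps longer than
       // INT_MAX ms are broken into recursive chunks of at most "limit" ms.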
3189   while (millis > limit) {
3190     if ((res = os_sleep(limit, interruptible)) != OS_OK)
3191       return res;
3192     millis -= limit;
3193   }
3194 
3195   // Restart interrupted polls with new parameters until the proper delay
3196   // has been completed.
3197 
3198   prevtime = getTimeMillis();
3199 
3200   while (millis > 0) {
3201     jlong newtime;
3202 
3203     if (!interruptible) {
3204       // Following assert fails for os::yield_all:
3205       // assert(!thread->is_Java_thread(), "must not be java thread");
3206       res = poll(NULL, 0, millis);
3207     } else {
3208       JavaThread *jt = JavaThread::current();
3209 
3210       INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
3211         os::Solaris::clear_interrupted);
3212     }
3213 
3214     // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
3215     // thread.Interrupt.
3216 
3217     // See c/r 6751923. Poll can return 0 before time
3218     // has elapsed if time is set via clock_settime (as NTP does).
3219     // res == 0 means poll timed out (see man poll RETURN VALUES).
3220     // The logic below checks that we really did sleep for at
3221     // least "millis"; if not, we'll sleep again.
3222     if( ( res == 0 ) || ((res == OS_ERR) && (errno == EINTR))) {
3223       newtime = getTimeMillis();
3224       assert(newtime >= prevtime, "time moving backwards");
3225     /* Doing prevtime and newtime in microseconds doesn't help precision,
3226        and trying to round up to avoid lost milliseconds can result in a
3227        too-short delay. */
3228       millis -= newtime - prevtime;
3229       if(millis <= 0)
3230         return OS_OK;
3231       prevtime = newtime;
3232     } else
3233       return res;
3234   }
3235 
3236   return OS_OK;
3237 }
3238 
3239 // Read calls from inside the vm need to perform state transitions
3240 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3241   INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
3242 }
3243 
3244 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3245   INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
3246 }
3247 
3248 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
3249   assert(thread == Thread::current(),  "thread consistency check");
3250 
3251   // TODO-FIXME: this should be removed.
3252   // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock
3253   // situation with a JavaThread being starved out of a lwp. The kernel doesn't seem to generate
3254   // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving
3255   // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
3256   // is fooled into believing that the system is making progress. In the code below we block
3257   // the watcher thread while a safepoint is in progress so that it will not appear as though the
3258   // system is making progress.
3259   if (!Solaris::T2_libthread() &&
3260       thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
3261     // We now try to acquire the threads lock. Since this lock is held by the VM thread during
3262     // the entire safepoint, the watcher thread will  line up here during the safepoint.
3263     Threads_lock->lock_without_safepoint_check();
3264     Threads_lock->unlock();
3265   }
3266 
3267   if (thread->is_Java_thread()) {
3268     // This is a JavaThread so we honor the _thread_blocked protocol
3269     // even for sleeps of 0 milliseconds. This was originally done
3270     // as a workaround for bug 4338139. However, now we also do it
3271     // to honor the suspend-equivalent protocol.
3272 
3273     JavaThread *jt = (JavaThread *) thread;
3274     ThreadBlockInVM tbivm(jt);
3275 
3276     jt->set_suspend_equivalent();
3277     // cleared by handle_special_suspend_equivalent_condition() or
3278     // java_suspend_self() via check_and_wait_while_suspended()
3279 
3280     int ret_code;
3281     if (millis <= 0) {
3282       thr_yield();
3283       ret_code = 0;
3284     } else {
3285       // The original sleep() implementation did not create an
3286       // OSThreadWaitState helper for sleeps of 0 milliseconds.
3287       // I'm preserving that decision for now.
3288       OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
3289 
3290       ret_code = os_sleep(millis, interruptible);
3291     }
3292 
3293     // were we externally suspended while we were waiting?
3294     jt->check_and_wait_while_suspended();
3295 
3296     return ret_code;
3297   }
3298 
3299   // non-JavaThread from this point on:
3300 
3301   if (millis <= 0) {
3302     thr_yield();
3303     return 0;
3304   }
3305 
3306   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
3307 
3308   return os_sleep(millis, interruptible);
3309 }
3310 
3311 void os::naked_short_sleep(jlong ms) {
3312   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3313 
3314   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3315   // Solaris requires -lrt for this.
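       // usleep() takes microseconds, hence the ms * 1000 below.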
3316   usleep((ms * 1000));
3317 
3318   return;
3319 }
3320 
3321 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3322 void os::infinite_sleep() {
3323   while (true) {    // sleep forever ...
3324     ::sleep(100);   // ... 100 seconds at a time
3325   }
3326 }
3327 
3328 // Used to convert frequent JVM_Yield() to nops
3329 bool os::dont_yield() {
3330   if (DontYieldALot) {
3331     static hrtime_t last_time = 0;
3332     hrtime_t diff = getTimeNanos() - last_time;
3333 
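         // DontYieldALotInterval is specified in milliseconds while
         // getTimeNanos() returns nanoseconds, hence the factor of 1,000,000.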
3334     if (diff < DontYieldALotInterval * 1000000)
3335       return true;
3336 
3337     last_time += diff;
3338 
3339     return false;
3340   }
3341   else {
3342     return false;
3343   }
3344 }
3345 
3346 // Caveat: Solaris os::yield() causes a thread-state transition whereas
3347 // the Linux and Win32 implementations do not.  This should be checked.
3348 
3349 void os::yield() {
3350   // Yields to all threads with same or greater priority
3351   os::sleep(Thread::current(), 0, false);
3352 }
3353 
3354 // Note that yield semantics are defined by the scheduling class to which
3355 // the thread currently belongs.  Typically, yield will not yield to
3356 // other equal or higher priority threads that reside on the dispatch queues
3357 // of other CPUs.
3358 
3359 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
3360 
3361 
3362 // On Solaris we found that yield_all doesn't always yield to all other threads.
3363 // There have been cases where there is a thread ready to execute but it doesn't
3364 // get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
3365 // The 1 millisecond wait doesn't seem long enough for the kernel to issue a
3366 // SIGWAITING signal which will cause a new lwp to be created. So we count the
3367 // number of times yield_all is called in the one loop and increase the sleep
3368 // time after 8 attempts. If this fails too, we increase the concurrency level
3369 // so that the starving thread will get an lwp.
3370 
3371 void os::yield_all(int attempts) {
3372   // Yields to all threads, including threads with lower priorities
3373   if (attempts == 0) {
3374     os::sleep(Thread::current(), 1, false);
3375   } else {
3376     int iterations = attempts % 30;
3377     if (iterations == 0 && !os::Solaris::T2_libthread()) {
3378       // thr_setconcurrency and _getconcurrency make sense only under T1.
3379       int noofLWPS = thr_getconcurrency();
3380       if (noofLWPS < (Threads::number_of_threads() + 2)) {
3381         thr_setconcurrency(thr_getconcurrency() + 1);
3382       }
3383     } else if (iterations < 25) {
3384       os::sleep(Thread::current(), 1, false);
3385     } else {
3386       os::sleep(Thread::current(), 10, false);
3387     }
3388   }
3389 }
3390 
3391 // Called from the tight loops to possibly influence time-sharing heuristics
3392 void os::loop_breaker(int attempts) {
3393   os::yield_all(attempts);
3394 }
3395 
3396 
3397 // Interface for setting lwp priorities.  If we are using T2 libthread,
3398 // which forces the use of BoundThreads or we manually set UseBoundThreads,
3399 // all of our threads will be assigned to real lwp's.  Using the thr_setprio
3400 // function is meaningless in this mode, so we must adjust the real lwp's priority.
3401 // The routines below implement the getting and setting of lwp priorities.
3402 //
3403 // Note: There are three priority scales used on Solaris.  Java priorities
3404 //       which range from 1 to 10, the libthread "thr_setprio" scale which ranges
3405 //       from 0 to 127, and the current scheduling class of the process we
3406 //       are running in.  This is typically from -60 to +60.
3407 //       The setting of the lwp priorities is done after a call to thr_setprio
3408 //       so Java priorities are mapped to libthread priorities and we map from
3409 //       the latter to lwp priorities.  We don't keep priorities stored in
3410 //       Java priorities since some of our worker threads want to set priorities
3411 //       higher than all Java threads.
3412 //
3413 // For related information:
3414 // (1)  man -s 2 priocntl
3415 // (2)  man -s 4 priocntl
3416 // (3)  man dispadmin
3417 // =    librt.so
3418 // =    libthread/common/rtsched.c - thrp_setlwpprio().
3419 // =    ps -cL <pid> ... to validate priority.
3420 // =    sched_get_priority_min and _max
3421 //              pthread_create
3422 //              sched_setparam
3423 //              pthread_setschedparam
3424 //
3425 // Assumptions:
3426 // +    We assume that all threads in the process belong to the same
3427 //              scheduling class.   I.e., a homogeneous process.
3428 // +    Must be root or in the IA group to change the "interactive" attribute.
3429 //              Priocntl() will fail silently.  The only indication of failure is when
3430 //              we read back the value and notice that it hasn't changed.
3431 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3432 // +    For RT, change timeslice as well.  Invariant:
3433 //              constant "priority integral"
3434 //              Konst == TimeSlice * (60-Priority)
3435 //              Given a priority, compute appropriate timeslice.
3436 // +    Higher numerical values have higher priority.
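     // +    E.g., for the RT invariant above: with the same Konst, a thread at
     //              priority 50 gets a timeslice of Konst/10 while a thread at
     //              priority 40 gets Konst/20, i.e. half as long.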
3437 
3438 // sched class attributes
3439 typedef struct {
3440         int   schedPolicy;              // classID
3441         int   maxPrio;
3442         int   minPrio;
3443 } SchedInfo;
3444 
3445 
3446 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3447 
3448 #ifdef ASSERT
3449 static int  ReadBackValidate = 1;
3450 #endif
3451 static int  myClass     = 0;
3452 static int  myMin       = 0;
3453 static int  myMax       = 0;
3454 static int  myCur       = 0;
3455 static bool priocntl_enable = false;
3456 
3457 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
3458 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3459 
3460 
3461 // lwp_priocntl_init
3462 //
3463 // Try to determine the priority scale for our process.
3464 //
3465 // Return errno or 0 if OK.
3466 //
3467 static int lwp_priocntl_init () {
3468   int rslt;
3469   pcinfo_t ClassInfo;
3470   pcparms_t ParmInfo;
3471   int i;
3472 
3473   if (!UseThreadPriorities) return 0;
3474 
3475   // We are using Bound threads; we need to determine our priority ranges
3476   if (os::Solaris::T2_libthread() || UseBoundThreads) {
3477     // If ThreadPriorityPolicy is 1, switch tables
3478     if (ThreadPriorityPolicy == 1) {
3479       for (i = 0 ; i < CriticalPriority+1; i++)
3480         os::java_to_os_priority[i] = prio_policy1[i];
3481     }
3482     if (UseCriticalJavaThreadPriority) {
3483       // MaxPriority always maps to the FX scheduling class and criticalPrio.
3484       // See set_native_priority() and set_lwp_class_and_priority().
3485       // Save original MaxPriority mapping in case attempt to
3486       // use critical priority fails.
3487       java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3488       // Set negative to distinguish from other priorities
3489       os::java_to_os_priority[MaxPriority] = -criticalPrio;
3490     }
3491   }
3492   // Not using Bound Threads; use the ThreadPriorityPolicy==1 table
3493   else {
3494     for ( i = 0 ; i < CriticalPriority+1; i++ ) {
3495       os::java_to_os_priority[i] = prio_policy1[i];
3496     }
3497     return 0;
3498   }
3499 
3500   // Get IDs for a set of well-known scheduling classes.
3501   // TODO-FIXME: GETCLINFO returns the current # of classes in the
3502   // system.  We should have a loop that iterates over the
3503   // classID values, which are known to be "small" integers.
3504 
3505   strcpy(ClassInfo.pc_clname, "TS");
3506   ClassInfo.pc_cid = -1;
3507   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3508   if (rslt < 0) return errno;
3509   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3510   tsLimits.schedPolicy = ClassInfo.pc_cid;
3511   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3512   tsLimits.minPrio = -tsLimits.maxPrio;
3513 
3514   strcpy(ClassInfo.pc_clname, "IA");
3515   ClassInfo.pc_cid = -1;
3516   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3517   if (rslt < 0) return errno;
3518   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3519   iaLimits.schedPolicy = ClassInfo.pc_cid;
3520   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3521   iaLimits.minPrio = -iaLimits.maxPrio;
3522 
3523   strcpy(ClassInfo.pc_clname, "RT");
3524   ClassInfo.pc_cid = -1;
3525   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3526   if (rslt < 0) return errno;
3527   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3528   rtLimits.schedPolicy = ClassInfo.pc_cid;
3529   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3530   rtLimits.minPrio = 0;
3531 
3532   strcpy(ClassInfo.pc_clname, "FX");
3533   ClassInfo.pc_cid = -1;
3534   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3535   if (rslt < 0) return errno;
3536   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3537   fxLimits.schedPolicy = ClassInfo.pc_cid;
3538   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3539   fxLimits.minPrio = 0;
3540 
3541   // Query our "current" scheduling class.
3542   // This will normally be IA, TS or, rarely, FX or RT.
3543   memset(&ParmInfo, 0, sizeof(ParmInfo));
3544   ParmInfo.pc_cid = PC_CLNULL;
3545   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3546   if (rslt < 0) return errno;
3547   myClass = ParmInfo.pc_cid;
3548 
3549   // We now know our scheduling classId, get specific information
3550   // about the class.
3551   ClassInfo.pc_cid = myClass;
3552   ClassInfo.pc_clname[0] = 0;
3553   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3554   if (rslt < 0) return errno;
3555 
3556   if (ThreadPriorityVerbose) {
3557     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3558   }
3559 
3560   memset(&ParmInfo, 0, sizeof(pcparms_t));
3561   ParmInfo.pc_cid = PC_CLNULL;
3562   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3563   if (rslt < 0) return errno;
3564 
3565   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3566     myMin = rtLimits.minPrio;
3567     myMax = rtLimits.maxPrio;
3568   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3569     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3570     myMin = iaLimits.minPrio;
3571     myMax = iaLimits.maxPrio;
3572     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3573   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3574     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3575     myMin = tsLimits.minPrio;
3576     myMax = tsLimits.maxPrio;
3577     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3578   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3579     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3580     myMin = fxLimits.minPrio;
3581     myMax = fxLimits.maxPrio;
3582     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3583   } else {
3584     // No clue - punt
3585     if (ThreadPriorityVerbose)
3586       tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
3587     return EINVAL;      // no clue, punt
3588   }
3589 
3590   if (ThreadPriorityVerbose) {
3591     tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
3592   }
3593 
3594   priocntl_enable = true;  // Enable changing priorities
3595   return 0;
3596 }
3597 
3598 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3599 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3600 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3601 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3602 
3603 
3604 // scale_to_lwp_priority
3605 //
3606 // Convert from the libthread "thr_setprio" scale to our current
3607 // lwp scheduling class scale.
3608 //
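     // For example, assuming a hypothetical lwp range of [-60, 60]:
     //   x == 0   maps to -60
     //   x == 64  maps to   0
     //   x == 127 maps to  60 (returned directly to avoid round-down)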
3609 static
3610 int     scale_to_lwp_priority (int rMin, int rMax, int x)
3611 {
3612   int v;
3613 
3614   if (x == 127) return rMax;            // avoid round-down
3615   v = (((x * (rMax - rMin))) / 128) + rMin;
3616   return v;
3617 }
3618 
3619 
3620 // set_lwp_class_and_priority
3621 //
3622 // Set the class and priority of the lwp.  This call should only
3623 // be made when using bound threads (T2 threads are bound by default).
3624 //
3625 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3626                                int newPrio, int new_class, bool scale) {
3627   int rslt;
3628   int Actual, Expected, prv;
3629   pcparms_t ParmInfo;                   // for GET-SET
3630 #ifdef ASSERT
3631   pcparms_t ReadBack;                   // for readback
3632 #endif
3633 
3634   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3635   // Query current values.
3636   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3637   // Cache "pcparms_t" in global ParmCache.
3638   // TODO: elide set-to-same-value
3639 
3640   // If something went wrong on init, don't change priorities.
3641   if ( !priocntl_enable ) {
3642     if (ThreadPriorityVerbose)
3643       tty->print_cr("Trying to set priority but init failed, ignoring");
3644     return EINVAL;
3645   }
3646 
3647   // If the lwp hasn't started yet, just return;
3648   // the _start routine will call us again.
3649   if ( lwpid <= 0 ) {
3650     if (ThreadPriorityVerbose) {
3651       tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
3652                      INTPTR_FORMAT " to %d, lwpid not set",
3653                      ThreadID, newPrio);
3654     }
3655     return 0;
3656   }
3657 
3658   if (ThreadPriorityVerbose) {
3659     tty->print_cr ("set_lwp_class_and_priority("
3660                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3661                    ThreadID, lwpid, newPrio);
3662   }
3663 
3664   memset(&ParmInfo, 0, sizeof(pcparms_t));
3665   ParmInfo.pc_cid = PC_CLNULL;
3666   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3667   if (rslt < 0) return errno;
3668 
3669   int cur_class = ParmInfo.pc_cid;
3670   ParmInfo.pc_cid = (id_t)new_class;
3671 
3672   if (new_class == rtLimits.schedPolicy) {
3673     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3674     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3675                                                        rtLimits.maxPrio, newPrio)
3676                                : newPrio;
3677     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3678     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3679     if (ThreadPriorityVerbose) {
3680       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3681     }
3682   } else if (new_class == iaLimits.schedPolicy) {
3683     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3684     int maxClamped     = MIN2(iaLimits.maxPrio,
3685                               cur_class == new_class
3686                                 ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3687     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3688                                                        maxClamped, newPrio)
3689                                : newPrio;
3690     iaInfo->ia_uprilim = cur_class == new_class
3691                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3692     iaInfo->ia_mode    = IA_NOCHANGE;
3693     if (ThreadPriorityVerbose) {
3694       tty->print_cr("IA: [%d...%d] %d->%d\n",
3695                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3696     }
3697   } else if (new_class == tsLimits.schedPolicy) {
3698     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3699     int maxClamped     = MIN2(tsLimits.maxPrio,
3700                               cur_class == new_class
3701                                 ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3702     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3703                                                        maxClamped, newPrio)
3704                                : newPrio;
3705     tsInfo->ts_uprilim = cur_class == new_class
3706                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3707     if (ThreadPriorityVerbose) {
3708       tty->print_cr("TS: [%d...%d] %d->%d\n",
3709                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3710     }
3711   } else if (new_class == fxLimits.schedPolicy) {
3712     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3713     int maxClamped     = MIN2(fxLimits.maxPrio,
3714                               cur_class == new_class
3715                                 ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3716     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3717                                                        maxClamped, newPrio)
3718                                : newPrio;
3719     fxInfo->fx_uprilim = cur_class == new_class
3720                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3721     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3722     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3723     if (ThreadPriorityVerbose) {
3724       tty->print_cr("FX: [%d...%d] %d->%d\n",
3725                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3726     }
3727   } else {
3728     if (ThreadPriorityVerbose) {
3729       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3730     }
3731     return EINVAL;    // no clue, punt
3732   }
3733 
3734   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3735   if (ThreadPriorityVerbose && rslt) {
3736     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3737   }
3738   if (rslt < 0) return errno;
3739 
3740 #ifdef ASSERT
3741   // Sanity check: read back what we just attempted to set.
3742   // In theory it could have changed in the interim ...
3743   //
3744   // The priocntl system call is tricky.
3745   // Sometimes it'll validate the priority value argument and
3746   // return EINVAL if unhappy.  At other times it fails silently.
3747   // Readbacks are prudent.
3748 
3749   if (!ReadBackValidate) return 0;
3750 
3751   memset(&ReadBack, 0, sizeof(pcparms_t));
3752   ReadBack.pc_cid = PC_CLNULL;
3753   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3754   assert(rslt >= 0, "priocntl failed");
3755   Actual = Expected = 0xBAD;
3756   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3757   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3758     Actual   = RTPRI(ReadBack)->rt_pri;
3759     Expected = RTPRI(ParmInfo)->rt_pri;
3760   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3761     Actual   = IAPRI(ReadBack)->ia_upri;
3762     Expected = IAPRI(ParmInfo)->ia_upri;
3763   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3764     Actual   = TSPRI(ReadBack)->ts_upri;
3765     Expected = TSPRI(ParmInfo)->ts_upri;
3766   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3767     Actual   = FXPRI(ReadBack)->fx_upri;
3768     Expected = FXPRI(ParmInfo)->fx_upri;
3769   } else {
3770     if (ThreadPriorityVerbose) {
3771       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3772                     ParmInfo.pc_cid);
3773     }
3774   }
3775 
3776   if (Actual != Expected) {
3777     if (ThreadPriorityVerbose) {
3778       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3779                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3780     }
3781   }
3782 #endif
3783 
3784   return 0;
3785 }
3786 
3787 // Solaris only gives access to 128 real priorities at a time,
3788 // so we expand Java's ten to fill this range.  This would be better
3789 // if we dynamically adjusted relative priorities.
3790 //
3791 // The ThreadPriorityPolicy option allows us to select 2 different
3792 // priority scales.
3793 //
3794 // ThreadPriorityPolicy=0
3795 // Since the Solaris default priority is MaximumPriority, we do not
3796 // set a priority lower than Max unless a priority lower than
3797 // NormPriority is requested.
3798 //
3799 // ThreadPriorityPolicy=1
3800 // This mode causes the priority table to get filled with
3801 // linear values.  NormPriority gets mapped to 50% of the
3802 // Maximum priority and so on.  This will cause VM threads
3803 // to get unfair treatment against other Solaris processes
3804 // which do not explicitly alter their thread priorities.
3805 //
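     // With ThreadPriorityPolicy=1 the prio_policy1 table (defined elsewhere in
     // this file) replaces the mapping below, spreading Java priorities roughly
     // linearly across the 0..127 libthread range.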
3806 
3807 int os::java_to_os_priority[CriticalPriority + 1] = {
3808   -99999,         // 0 Entry should never be used
3809 
3810   0,              // 1 MinPriority
3811   32,             // 2
3812   64,             // 3
3813 
3814   96,             // 4
3815   127,            // 5 NormPriority
3816   127,            // 6
3817 
3818   127,            // 7
3819   127,            // 8
3820   127,            // 9 NearMaxPriority
3821 
3822   127,            // 10 MaxPriority
3823 
3824   -criticalPrio   // 11 CriticalPriority
3825 };
3826 
3827 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3828   OSThread* osthread = thread->osthread();
3829 
3830   // Save requested priority in case the thread hasn't been started
3831   osthread->set_native_priority(newpri);
3832 
3833   // Check for critical priority request
3834   bool fxcritical = false;
3835   if (newpri == -criticalPrio) {
3836     fxcritical = true;
3837     newpri = criticalPrio;
3838   }
3839 
3840   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3841   if (!UseThreadPriorities) return OS_OK;
3842 
3843   int status = 0;
3844 
3845   if (!fxcritical) {
3846     // Use thr_setprio only if we have a priority that thr_setprio understands
3847     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3848   }
3849 
3850   if (os::Solaris::T2_libthread() ||
3851       (UseBoundThreads && osthread->is_vm_created())) {
3852     int lwp_status =
3853       set_lwp_class_and_priority(osthread->thread_id(),
3854                                  osthread->lwp_id(),
3855                                  newpri,
3856                                  fxcritical ? fxLimits.schedPolicy : myClass,
3857                                  !fxcritical);
3858     if (lwp_status != 0 && fxcritical) {
3859       // Try again, this time without changing the scheduling class
3860       newpri = java_MaxPriority_to_os_priority;
3861       lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3862                                               osthread->lwp_id(),
3863                                               newpri, myClass, false);
3864     }
3865     status |= lwp_status;
3866   }
3867   return (status == 0) ? OS_OK : OS_ERR;
3868 }
3869 
3870 
3871 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
3872   int p;
3873   if ( !UseThreadPriorities ) {
3874     *priority_ptr = NormalPriority;
3875     return OS_OK;
3876   }
3877   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3878   if (status != 0) {
3879     return OS_ERR;
3880   }
3881   *priority_ptr = p;
3882   return OS_OK;
3883 }
3884 
3885 
3886 // Hint to the underlying OS that a task switch would not be good.
3887 // Void return because it's a hint and can fail.
3888 void os::hint_no_preempt() {
3889   schedctl_start(schedctl_init());
3890 }
3891 
3892 static void resume_clear_context(OSThread *osthread) {
3893   osthread->set_ucontext(NULL);
3894 }
3895 
3896 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3897   osthread->set_ucontext(context);
3898 }
3899 
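     // Semaphore used for the suspend/resume handshake between do_suspend()/
     // do_resume() and SR_handler().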
3900 static Semaphore sr_semaphore;
3901 
3902 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
3903   // Save and restore errno to avoid confusing native code with EINTR
3904   // after sigsuspend.
3905   int old_errno = errno;
3906 
3907   OSThread* osthread = thread->osthread();
3908   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3909 
3910   os::SuspendResume::State current = osthread->sr.state();
3911   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3912     suspend_save_context(osthread, uc);
3913 
3914     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3915     os::SuspendResume::State state = osthread->sr.suspended();
3916     if (state == os::SuspendResume::SR_SUSPENDED) {
3917       sigset_t suspend_set;  // signals for sigsuspend()
3918 
3919       // get current set of blocked signals and unblock resume signal
3920       thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
3921       sigdelset(&suspend_set, os::Solaris::SIGasync());
3922 
3923       sr_semaphore.signal();
3924       // wait here until we are resumed
3925       while (1) {
3926         sigsuspend(&suspend_set);
3927 
3928         os::SuspendResume::State result = osthread->sr.running();
3929         if (result == os::SuspendResume::SR_RUNNING) {
3930           sr_semaphore.signal();
3931           break;
3932         }
3933       }
3934 
3935     } else if (state == os::SuspendResume::SR_RUNNING) {
3936       // request was cancelled, continue
3937     } else {
3938       ShouldNotReachHere();
3939     }
3940 
3941     resume_clear_context(osthread);
3942   } else if (current == os::SuspendResume::SR_RUNNING) {
3943     // request was cancelled, continue
3944   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3945     // ignore
3946   } else {
3947     // ignore
3948   }
3949 
3950   errno = old_errno;
3951 }
3952 
3953 
3954 void os::interrupt(Thread* thread) {
3955   assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
3956 
3957   OSThread* osthread = thread->osthread();
3958 
3959   int isInterrupted = osthread->interrupted();
3960   if (!isInterrupted) {
3961       osthread->set_interrupted(true);
3962       OrderAccess::fence();
3963       // os::sleep() is implemented with either poll (NULL,0,timeout) or
3964       // by parking on _SleepEvent.  If the former, thr_kill will unwedge
3965       // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper.
3966       ParkEvent * const slp = thread->_SleepEvent ;
3967       if (slp != NULL) slp->unpark() ;
3968   }
3969 
3970   // For JSR166:  unpark after setting status but before thr_kill -dl
3971   if (thread->is_Java_thread()) {
3972     ((JavaThread*)thread)->parker()->unpark();
3973   }
3974 
3975   // Handle interruptible wait() ...
3976   ParkEvent * const ev = thread->_ParkEvent ;
3977   if (ev != NULL) ev->unpark() ;
3978 
3979   // When events are used everywhere for os::sleep, then this thr_kill
3980   // will only be needed if UseVMInterruptibleIO is true.
3981 
3982   if (!isInterrupted) {
3983     int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
3984     assert_status(status == 0, status, "thr_kill");
3985 
3986     // Bump thread interruption counter
3987     RuntimeService::record_thread_interrupt_signaled_count();
3988   }
3989 }
3990 
3991 
3992 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3993   assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
3994 
3995   OSThread* osthread = thread->osthread();
3996 
3997   bool res = osthread->interrupted();
3998 
3999   // NOTE that since there is no "lock" around these two operations,
4000   // there is the possibility that the interrupted flag will be
4001   // "false" but that the interrupt event will be set. This is
4002   // intentional. The effect of this is that Object.wait() will appear
4003   // to have a spurious wakeup, which is not harmful, and the
4004   // possibility is so rare that it is not worth the added complexity
4005   // to add yet another lock. It has also been recommended not to put
4006   // the interrupted flag into the os::Solaris::Event structure,
4007   // because it hides the issue.
4008   if (res && clear_interrupted) {
4009     osthread->set_interrupted(false);
4010   }
4011   return res;
4012 }
4013 
4014 
4015 void os::print_statistics() {
4016 }
4017 
4018 int os::message_box(const char* title, const char* message) {
4019   int i;
4020   fdStream err(defaultStream::error_fd());
4021   for (i = 0; i < 78; i++) err.print_raw("=");
4022   err.cr();
4023   err.print_raw_cr(title);
4024   for (i = 0; i < 78; i++) err.print_raw("-");
4025   err.cr();
4026   err.print_raw_cr(message);
4027   for (i = 0; i < 78; i++) err.print_raw("=");
4028   err.cr();
4029 
4030   char buf[16];
4031   // Prevent process from exiting upon "read error" without consuming all CPU
4032   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
4033 
4034   return buf[0] == 'y' || buf[0] == 'Y';
4035 }
4036 
4037 static int sr_notify(OSThread* osthread) {
4038   int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
4039   assert_status(status == 0, status, "thr_kill");
4040   return status;
4041 }
4042 
4043 // "Randomly" selected value for how long we want to spin
4044 // before bailing out on suspending a thread, also how often
4045 // we send a signal to a thread we want to resume
4046 static const int RANDOMLY_LARGE_INTEGER = 1000000;
4047 static const int RANDOMLY_LARGE_INTEGER2 = 100;
4048 
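     // do_suspend() moves the target thread to SR_SUSPEND_REQUEST, signals it via
     // sr_notify(), and then waits on sr_semaphore until SR_handler() reports
     // SR_SUSPENDED (cancelling the suspend request on timeout).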
4049 static bool do_suspend(OSThread* osthread) {
4050   assert(osthread->sr.is_running(), "thread should be running");
4051   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
4052 
4053   // mark as suspended and send signal
4054   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
4055     // failed to switch, state wasn't running?
4056     ShouldNotReachHere();
4057     return false;
4058   }
4059 
4060   if (sr_notify(osthread) != 0) {
4061     ShouldNotReachHere();
4062   }
4063 
4064   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
4065   while (true) {
4066     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
4067       break;
4068     } else {
4069       // timeout
4070       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
4071       if (cancelled == os::SuspendResume::SR_RUNNING) {
4072         return false;
4073       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
4074         // make sure that we consume the signal on the semaphore as well
4075         sr_semaphore.wait();
4076         break;
4077       } else {
4078         ShouldNotReachHere();
4079         return false;
4080       }
4081     }
4082   }
4083 
4084   guarantee(osthread->sr.is_suspended(), "Must be suspended");
4085   return true;
4086 }
4087 
4088 static void do_resume(OSThread* osthread) {
4089   assert(osthread->sr.is_suspended(), "thread should be suspended");
4090   assert(!sr_semaphore.trywait(), "invalid semaphore state");
4091 
4092   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
4093     // failed to switch to WAKEUP_REQUEST
4094     ShouldNotReachHere();
4095     return;
4096   }
4097 
4098   while (true) {
4099     if (sr_notify(osthread) == 0) {
4100       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
4101         if (osthread->sr.is_running()) {
4102           return;
4103         }
4104       }
4105     } else {
4106       ShouldNotReachHere();
4107     }
4108   }
4109 
4110   guarantee(osthread->sr.is_running(), "Must be running!");
4111 }
4112 
4113 void os::SuspendedThreadTask::internal_do_task() {
4114   if (do_suspend(_thread->osthread())) {
4115     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
4116     do_task(context);
4117     do_resume(_thread->osthread());
4118   }
4119 }
4120 
4121 class PcFetcher : public os::SuspendedThreadTask {
4122 public:
4123   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
4124   ExtendedPC result();
4125 protected:
4126   void do_task(const os::SuspendedThreadTaskContext& context);
4127 private:
4128   ExtendedPC _epc;
4129 };
4130 
4131 ExtendedPC PcFetcher::result() {
4132   guarantee(is_done(), "task is not done yet.");
4133   return _epc;
4134 }
4135 
4136 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
4137   Thread* thread = context.thread();
4138   OSThread* osthread = thread->osthread();
4139   if (osthread->ucontext() != NULL) {
4140     _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
4141   } else {
4142     // NULL context is unexpected, double-check this is the VMThread
4143     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
4144   }
4145 }
4146 
4147 // A lightweight implementation that does not suspend the target thread and
4148 // thus returns only a hint. Used for profiling only!
4149 ExtendedPC os::get_thread_pc(Thread* thread) {
4150   // Make sure that it is called by the watcher and the Threads lock is owned.
4151   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
4152   // For now, is only used to profile the VM Thread
4153   assert(thread->is_VM_thread(), "Can only be called for VMThread");
4154   PcFetcher fetcher(thread);
4155   fetcher.run();
4156   return fetcher.result();
4157 }
4158 
4159 
4160 // This does not do anything on Solaris. This is basically a hook for being
4161 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
4162 void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
4163   f(value, method, args, thread);
4164 }
4165 
4166 // This routine may be used by user applications as a "hook" to catch signals.
4167 // The user-defined signal handler must pass unrecognized signals to this
4168 // routine, and if it returns true (non-zero), then the signal handler must
4169 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
4170 // routine will never return false (zero), but instead will execute a VM panic
4171 // routine that kills the process.
4172 //
4173 // If this routine returns false, it is OK to call it again.  This allows
4174 // the user-defined signal handler to perform checks either before or after
4175 // the VM performs its own checks.  Naturally, the user code would be making
4176 // a serious error if it tried to handle an exception (such as a null check
4177 // or breakpoint) that the VM was generating for its own correct operation.
4178 //
4179 // This routine may recognize any of the following kinds of signals:
4180 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
4181 // os::Solaris::SIGasync
4182 // It should be consulted by handlers for any of those signals.
4183 // It explicitly does not recognize os::Solaris::SIGinterrupt
4184 //
4185 // The caller of this routine must pass in the three arguments supplied
4186 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
4187 // field of the structure passed to sigaction().  This routine assumes that
4188 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
4189 //
4190 // Note that the VM will print warnings if it detects conflicting signal
4191 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
4192 //
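     // A minimal sketch of such a user handler (illustrative only; the handler
     // name and the abort_if_unrecognized value of 0 are hypothetical):
     //
     //   static void my_signal_handler(int sig, siginfo_t* info, void* uc) {
     //     if (JVM_handle_solaris_signal(sig, info, uc, 0)) {
     //       return;  // the VM recognized and handled the signal
     //     }
     //     // ... application-specific handling of unrecognized signals ...
     //   }
     //
     // installed with sigaction() using sa_flags that include SA_SIGINFO and SA_RESTART.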
4193 extern "C" JNIEXPORT int
4194 JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
4195                           int abort_if_unrecognized);
4196 
4197 
4198 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
4199   int orig_errno = errno;  // Preserve errno value over signal handler.
4200   JVM_handle_solaris_signal(sig, info, ucVoid, true);
4201   errno = orig_errno;
4202 }
4203 
4204 /* Do not delete - if guarantee is ever removed, a signal handler (even empty)
4205    is needed to provoke threads blocked on IO to return with EINTR.
4206    Note: this explicitly does NOT call JVM_handle_solaris_signal and
4207    does NOT participate in signal chaining due to the requirement for
4208    NOT setting SA_RESTART to make EINTR work. */
4209 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
4210    if (UseSignalChaining) {
4211       struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
4212       if (actp && actp->sa_handler) {
4213         vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
4214       }
4215    }
4216 }
4217 
4218 // This boolean allows users to forward their own non-matching signals
4219 // to JVM_handle_solaris_signal, harmlessly.
4220 bool os::Solaris::signal_handlers_are_installed = false;
4221 
4222 // For signal-chaining
4223 bool os::Solaris::libjsig_is_loaded = false;
4224 typedef struct sigaction *(*get_signal_t)(int);
4225 get_signal_t os::Solaris::get_signal_action = NULL;
4226 
4227 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
4228   struct sigaction *actp = NULL;
4229 
4230   if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
4231     // Retrieve the old signal handler from libjsig
4232     actp = (*get_signal_action)(sig);
4233   }
4234   if (actp == NULL) {
4235     // Retrieve the preinstalled signal handler from jvm
4236     actp = get_preinstalled_handler(sig);
4237   }
4238 
4239   return actp;
4240 }
4241 
4242 static bool call_chained_handler(struct sigaction *actp, int sig,
4243                                  siginfo_t *siginfo, void *context) {
4244   // Call the old signal handler
4245   if (actp->sa_handler == SIG_DFL) {
4246     // It's more reasonable to let jvm treat it as an unexpected exception
4247     // instead of taking the default action.
4248     return false;
4249   } else if (actp->sa_handler != SIG_IGN) {
4250     if ((actp->sa_flags & SA_NODEFER) == 0) {
4251       // automatically block the signal
4252       sigaddset(&(actp->sa_mask), sig);
4253     }
4254 
4255     sa_handler_t hand;
4256     sa_sigaction_t sa;
4257     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
4258     // retrieve the chained handler
4259     if (siginfo_flag_set) {
4260       sa = actp->sa_sigaction;
4261     } else {
4262       hand = actp->sa_handler;
4263     }
4264 
4265     if ((actp->sa_flags & SA_RESETHAND) != 0) {
4266       actp->sa_handler = SIG_DFL;
4267     }
4268 
4269     // try to honor the signal mask
4270     sigset_t oset;
4271     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
4272 
4273     // call into the chained handler
4274     if (siginfo_flag_set) {
4275       (*sa)(sig, siginfo, context);
4276     } else {
4277       (*hand)(sig);
4278     }
4279 
4280     // restore the signal mask
4281     thr_sigsetmask(SIG_SETMASK, &oset, 0);
4282   }
4283   // Tell jvm's signal handler the signal is taken care of.
4284   return true;
4285 }
4286 
4287 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4288   bool chained = false;
4289   // signal-chaining
4290   if (UseSignalChaining) {
4291     struct sigaction *actp = get_chained_signal_action(sig);
4292     if (actp != NULL) {
4293       chained = call_chained_handler(actp, sig, siginfo, context);
4294     }
4295   }
4296   return chained;
4297 }
4298 
4299 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4300   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4301   if (preinstalled_sigs[sig] != 0) {
4302     return &chainedsigactions[sig];
4303   }
4304   return NULL;
4305 }
4306 
4307 void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
4308 
4309   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4310   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4311   chainedsigactions[sig] = oldAct;
4312   preinstalled_sigs[sig] = 1;
4313 }
4314 
4315 void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
4316   // Check for overwrite.
4317   struct sigaction oldAct;
4318   sigaction(sig, (struct sigaction*)NULL, &oldAct);
4319   void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
4320                                       : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
4321   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4322       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4323       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4324     if (AllowUserSignalHandlers || !set_installed) {
4325       // Do not overwrite; user takes responsibility to forward to us.
4326       return;
4327     } else if (UseSignalChaining) {
4328       if (oktochain) {
4329         // save the old handler in jvm
4330         save_preinstalled_handler(sig, oldAct);
4331       } else {
4332         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4333       }
4334       // libjsig also interposes the sigaction() call below and saves the
4335       // old sigaction on its own.
4336     } else {
4337       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4338                     "%#lx for signal %d.", (long)oldhand, sig));
4339     }
4340   }
4341 
4342   struct sigaction sigAct;
4343   sigfillset(&(sigAct.sa_mask));
4344   sigAct.sa_handler = SIG_DFL;
4345 
4346   sigAct.sa_sigaction = signalHandler;
4347   // Handle SIGSEGV on alternate signal stack if
4348   // not using stack banging
4349   if (!UseStackBanging && sig == SIGSEGV) {
4350     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4351   // Interruptible i/o requires SA_RESTART cleared so EINTR
4352   // is returned instead of restarting system calls
4353   } else if (sig == os::Solaris::SIGinterrupt()) {
4354     sigemptyset(&sigAct.sa_mask);
4355     sigAct.sa_handler = NULL;
4356     sigAct.sa_flags = SA_SIGINFO;
4357     sigAct.sa_sigaction = sigINTRHandler;
4358   } else {
4359     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4360   }
4361   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4362 
4363   sigaction(sig, &sigAct, &oldAct);
4364 
4365   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4366                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4367   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4368 }
4369 
4370 
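     // Check each signal's handler at most once; check_signal_handler() adds a
     // signal to check_signal_done once no further checking is needed.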
4371 #define DO_SIGNAL_CHECK(sig) \
4372   if (!sigismember(&check_signal_done, sig)) \
4373     os::Solaris::check_signal_handler(sig)
4374 
4375 // This method is a periodic task to check for misbehaving JNI applications
4376 // under CheckJNI; we can add any other periodic checks here.
4377 
4378 void os::run_periodic_checks() {
4379   // A big source of grief is hijacking virt. addr 0x0 on Solaris,
4380   // thereby preventing NULL checks.
4381   if(!check_addr0_done) check_addr0_done = check_addr0(tty);
4382 
4383   if (check_signals == false) return;
4384 
4385   // If SEGV or BUS handlers have been overridden they could potentially prevent
4386   // generation of the hs*.log in the event of a crash; debugging
4387   // such a case can be very challenging, so we absolutely
4388   // check the following for good measure:
4389   DO_SIGNAL_CHECK(SIGSEGV);
4390   DO_SIGNAL_CHECK(SIGILL);
4391   DO_SIGNAL_CHECK(SIGFPE);
4392   DO_SIGNAL_CHECK(SIGBUS);
4393   DO_SIGNAL_CHECK(SIGPIPE);
4394   DO_SIGNAL_CHECK(SIGXFSZ);
4395 
4396   // ReduceSignalUsage allows the user to override these handlers
4397   // see comments at the very top and jvm_solaris.h
4398   if (!ReduceSignalUsage) {
4399     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4400     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4401     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4402     DO_SIGNAL_CHECK(BREAK_SIGNAL);
4403   }
4404 
4405   // See comments above for using JVM1/JVM2 and UseAltSigs
4406   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4407   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4408 
4409 }
4410 
4411 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4412 
4413 static os_sigaction_t os_sigaction = NULL;
4414 
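     // Compare the handler currently installed for sig against the one the VM
     // expects, and warn (and stop checking that signal) if it has been replaced.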
4415 void os::Solaris::check_signal_handler(int sig) {
4416   char buf[O_BUFLEN];
4417   address jvmHandler = NULL;
4418 
4419   struct sigaction act;
4420   if (os_sigaction == NULL) {
4421     // only trust the default sigaction, in case it has been interposed
4422     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4423     if (os_sigaction == NULL) return;
4424   }
4425 
4426   os_sigaction(sig, (struct sigaction*)NULL, &act);
4427 
4428   address thisHandler = (act.sa_flags & SA_SIGINFO)
4429     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4430     : CAST_FROM_FN_PTR(address, act.sa_handler) ;
4431 
4432 
4433   switch(sig) {
4434     case SIGSEGV:
4435     case SIGBUS:
4436     case SIGFPE:
4437     case SIGPIPE:
4438     case SIGXFSZ:
4439     case SIGILL:
4440       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4441       break;
4442 
4443     case SHUTDOWN1_SIGNAL:
4444     case SHUTDOWN2_SIGNAL:
4445     case SHUTDOWN3_SIGNAL:
4446     case BREAK_SIGNAL:
4447       jvmHandler = (address)user_handler();
4448       break;
4449 
4450     default:
4451       int intrsig = os::Solaris::SIGinterrupt();
4452       int asynsig = os::Solaris::SIGasync();
4453 
4454       if (sig == intrsig) {
4455         jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4456       } else if (sig == asynsig) {
4457         jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4458       } else {
4459         return;
4460       }
4461       break;
4462   }
4463 
4464 
4465   if (thisHandler != jvmHandler) {
4466     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4467     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4468     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4469     // No need to check this sig any longer
4470     sigaddset(&check_signal_done, sig);
4471     // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned to SIG_IGN
4472     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
4473       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
4474                     exception_name(sig, buf, O_BUFLEN));
4475     }
4476   } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4477     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4478     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4479     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
4480     // No need to check this sig any longer
4481     sigaddset(&check_signal_done, sig);
4482   }
4483 
4484   // Print all the signal handler state
4485   if (sigismember(&check_signal_done, sig)) {
4486     print_signal_handlers(tty, buf, O_BUFLEN);
4487   }
4488 
4489 }
4490 
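     // Install the VM's handlers for all signals it uses.  If libjsig is loaded,
     // the installation is bracketed by JVM_begin_signal_setting() and
     // JVM_end_signal_setting() so libjsig knows which handlers belong to the VM.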
4491 void os::Solaris::install_signal_handlers() {
4492   bool libjsigdone = false;
4493   signal_handlers_are_installed = true;
4494 
4495   // signal-chaining
4496   typedef void (*signal_setting_t)();
4497   signal_setting_t begin_signal_setting = NULL;
4498   signal_setting_t end_signal_setting = NULL;
4499   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4500                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4501   if (begin_signal_setting != NULL) {
4502     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4503                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4504     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4505                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4506     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4507                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4508     libjsig_is_loaded = true;
4509     if (os::Solaris::get_libjsig_version != NULL) {
4510       libjsigversion =  (*os::Solaris::get_libjsig_version)();
4511     }
4512     assert(UseSignalChaining, "should enable signal-chaining");
4513   }
4514   if (libjsig_is_loaded) {
4515     // Tell libjsig jvm is setting signal handlers
4516     (*begin_signal_setting)();
4517   }
4518 
4519   set_signal_handler(SIGSEGV, true, true);
4520   set_signal_handler(SIGPIPE, true, true);
4521   set_signal_handler(SIGXFSZ, true, true);
4522   set_signal_handler(SIGBUS, true, true);
4523   set_signal_handler(SIGILL, true, true);
4524   set_signal_handler(SIGFPE, true, true);
4525 
4526 
4527   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4528 
4529     // Pre-1.4.1 libjsig limited signal chaining to signals <= 32, so it
4530     // cannot register overridable signals, which might be > 32.
4531     if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
4532       // Tell libjsig jvm has finished setting signal handlers
4533       (*end_signal_setting)();
4534       libjsigdone = true;
4535     }
4536   }
4537 
4538   // Never ok to chain our SIGinterrupt
4539   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4540   set_signal_handler(os::Solaris::SIGasync(), true, true);
4541 
4542   if (libjsig_is_loaded && !libjsigdone) {
4543     // Tell libjsig jvm has finished setting signal handlers
4544     (*end_signal_setting)();
4545   }
4546 
4547   // We don't activate the signal checker if libjsig is in place; we trust ourselves,
4548   // and if AllowUserSignalHandlers is set all bets are off anyway.
4549   // Log that signal checking is off only if -verbose:jni is specified.
4550   if (CheckJNICalls) {
4551     if (libjsig_is_loaded) {
4552       if (PrintJNIResolving) {
4553         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4554       }
4555       check_signals = false;
4556     }
4557     if (AllowUserSignalHandlers) {
4558       if (PrintJNIResolving) {
4559         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4560       }
4561       check_signals = false;
4562     }
4563   }
4564 }
4565 
4566 
4567 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
4568 
4569 const char * signames[] = {
4570   "SIG0",
4571   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4572   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4573   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4574   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4575   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4576   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4577   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4578   "SIGCANCEL", "SIGLOST"
4579 };
4580 
4581 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4582   if (0 < exception_code && exception_code <= SIGRTMAX) {
4583     // signal
4584     if (exception_code < sizeof(signames)/sizeof(const char*)) {
4585        jio_snprintf(buf, size, "%s", signames[exception_code]);
4586     } else {
4587        jio_snprintf(buf, size, "SIG%d", exception_code);
4588     }
4589     return buf;
4590   } else {
4591     return NULL;
4592   }
4593 }
4594 
4595 // (Static) wrappers for the new libthread API
4596 int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
4597 int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
4598 int_fnP_thread_t_i os::Solaris::_thr_setmutator;
4599 int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
4600 int_fnP_thread_t os::Solaris::_thr_continue_mutator;
4601 
4602 // (Static) wrapper for getisax(2) call.
4603 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4604 
4605 // (Static) wrappers for the liblgrp API
4606 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4607 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4608 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4609 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4610 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4611 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4612 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4613 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4614 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4615 
4616 // (Static) wrapper for meminfo() call.
4617 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4618 
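     // Symbol lookup helpers: resolve_symbol_lazy() searches the already loaded
     // objects and returns NULL on failure; resolve_symbol() treats failure as fatal.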
4619 static address resolve_symbol_lazy(const char* name) {
4620   address addr = (address) dlsym(RTLD_DEFAULT, name);
4621   if(addr == NULL) {
4622     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4623     addr = (address) dlsym(RTLD_NEXT, name);
4624   }
4625   return addr;
4626 }
4627 
4628 static address resolve_symbol(const char* name) {
4629   address addr = resolve_symbol_lazy(name);
4630   if(addr == NULL) {
4631     fatal(dlerror());
4632   }
4633   return addr;
4634 }
4635 
4636 
4637 
4638 // isT2_libthread()
4639 //
4640 // Routine to determine if we are currently using the new T2 libthread.
4641 //
4642 // We determine if we are using T2 by reading /proc/self/lstatus and
4643 // looking for a thread with the ASLWP bit set.  If we find this status
4644 // bit set, we must assume that we are NOT using T2.  The T2 team
4645 // has approved this algorithm.
4646 //
4647 // We need to determine if we are running with the new T2 libthread
4648 // since setting native thread priorities is handled differently
4649 // when using this library.  All threads created using T2 are bound
4650 // threads. Calling thr_setprio is meaningless in this case.
4651 //
4652 bool isT2_libthread() {
4653   static prheader_t * lwpArray = NULL;
4654   static int lwpSize = 0;
4655   static int lwpFile = -1;
4656   lwpstatus_t * that;
4657   char lwpName [128];
4658   bool isT2 = false;
4659 
4660 #define ADR(x)  ((uintptr_t)(x))
4661 #define LWPINDEX(ary,ix)   ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))
4662 
4663   lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
4664   if (lwpFile < 0) {
4665       if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
4666       return false;
4667   }
4668   lwpSize = 16*1024;
4669   for (;;) {
4670     ::lseek64 (lwpFile, 0, SEEK_SET);
4671     lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
4672     if (::read(lwpFile, lwpArray, lwpSize) < 0) {
4673       if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
4674       break;
4675     }
4676     if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
4677        // We got a good snapshot - now iterate over the list.
4678       int aslwpcount = 0;
4679       for (int i = 0; i < lwpArray->pr_nent; i++ ) {
4680         that = LWPINDEX(lwpArray,i);
4681         if (that->pr_flags & PR_ASLWP) {
4682           aslwpcount++;
4683         }
4684       }
4685       if (aslwpcount == 0) isT2 = true;
4686       break;
4687     }
4688     lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
4689     FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);  // retry.
4690   }
4691 
4692   FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
4693   ::close (lwpFile);
4694   if (ThreadPriorityVerbose) {
4695     if (isT2) tty->print_cr("We are running with a T2 libthread\n");
4696     else tty->print_cr("We are not running with a T2 libthread\n");
4697   }
4698   return isT2;
4699 }
4700 
4701 
4702 void os::Solaris::libthread_init() {
4703   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4704 
4705   // Determine if we are running with the new T2 libthread
4706   os::Solaris::set_T2_libthread(isT2_libthread());
4707 
4708   lwp_priocntl_init();
4709 
4710   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4711   if(func == NULL) {
4712     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
4713     // Guarantee that this VM is running on a new enough OS (5.6 or
4714     // later) that it will have a new enough libthread.so.
4715     guarantee(func != NULL, "libthread.so is too old.");
4716   }
4717 
4718   // Initialize the new libthread getstate API wrappers
4719   func = resolve_symbol("thr_getstate");
4720   os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));
4721 
4722   func = resolve_symbol("thr_setstate");
4723   os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));
4724 
4725   func = resolve_symbol("thr_setmutator");
4726   os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));
4727 
4728   func = resolve_symbol("thr_suspend_mutator");
4729   os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
4730 
4731   func = resolve_symbol("thr_continue_mutator");
4732   os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
4733 
4734   int size;
4735   void (*handler_info_func)(address *, int *);
4736   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4737   handler_info_func(&handler_start, &size);
4738   handler_end = handler_start + size;
4739 }
4740 
4741 
4742 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4743 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4744 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4745 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4746 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4747 int os::Solaris::_mutex_scope = USYNC_THREAD;
4748 
4749 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4750 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4751 int_fnP_cond_tP os::Solaris::_cond_signal;
4752 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4753 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4754 int_fnP_cond_tP os::Solaris::_cond_destroy;
4755 int os::Solaris::_cond_scope = USYNC_THREAD;
4756 
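     // Select the low-level mutex/condvar primitives the VM will use: the _lwp_*
     // calls when UseLWPSynchronization is set, pthread_* when UsePthreads is set,
     // and the default libthread mutex_*/cond_* entry points otherwise.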
4757 void os::Solaris::synchronization_init() {
4758   if(UseLWPSynchronization) {
4759     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4760     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4761     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4762     os::Solaris::set_mutex_init(lwp_mutex_init);
4763     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4764     os::Solaris::set_mutex_scope(USYNC_THREAD);
4765 
4766     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4767     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4768     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4769     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4770     os::Solaris::set_cond_init(lwp_cond_init);
4771     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4772     os::Solaris::set_cond_scope(USYNC_THREAD);
4773   }
4774   else {
4775     os::Solaris::set_mutex_scope(USYNC_THREAD);
4776     os::Solaris::set_cond_scope(USYNC_THREAD);
4777 
4778     if(UsePthreads) {
4779       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4780       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4781       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4782       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4783       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4784 
4785       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4786       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4787       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4788       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4789       os::Solaris::set_cond_init(pthread_cond_default_init);
4790       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4791     }
4792     else {
4793       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4794       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4795       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4796       os::Solaris::set_mutex_init(::mutex_init);
4797       os::Solaris::set_mutex_destroy(::mutex_destroy);
4798 
4799       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4800       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4801       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4802       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4803       os::Solaris::set_cond_init(::cond_init);
4804       os::Solaris::set_cond_destroy(::cond_destroy);
4805     }
4806   }
4807 }
4808 
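     // Load liblgrp.so.1 and wire up the lgrp_* wrappers used for NUMA support;
     // returns false if the library is unavailable (NUMA is then disabled in init_2).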
4809 bool os::Solaris::liblgrp_init() {
4810   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4811   if (handle != NULL) {
4812     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4813     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4814     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4815     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4816     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4817     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4818     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4819     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4820                                        dlsym(handle, "lgrp_cookie_stale")));
4821 
4822     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4823     set_lgrp_cookie(c);
4824     return true;
4825   }
4826   return false;
4827 }
4828 
4829 void os::Solaris::misc_sym_init() {
4830   address func;
4831 
4832   // getisax
4833   func = resolve_symbol_lazy("getisax");
4834   if (func != NULL) {
4835     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4836   }
4837 
4838   // meminfo
4839   func = resolve_symbol_lazy("meminfo");
4840   if (func != NULL) {
4841     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4842   }
4843 }
4844 
4845 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4846   assert(_getisax != NULL, "_getisax not set");
4847   return _getisax(array, n);
4848 }
4849 
4850 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4851 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4852 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4853 
4854 void init_pset_getloadavg_ptr(void) {
4855   pset_getloadavg_ptr =
4856     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4857   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4858     warning("pset_getloadavg function not found");
4859   }
4860 }
4861 
4862 int os::Solaris::_dev_zero_fd = -1;
4863 
4864 // this is called _before_ the global arguments have been parsed
4865 void os::init(void) {
4866   _initial_pid = getpid();
4867 
4868   max_hrtime = first_hrtime = gethrtime();
4869 
4870   init_random(1234567);
4871 
4872   page_size = sysconf(_SC_PAGESIZE);
4873   if (page_size == -1)
4874     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4875                   strerror(errno)));
4876   init_page_sizes((size_t) page_size);
4877 
4878   Solaris::initialize_system_info();
4879 
4880   // Initialize misc. symbols as soon as possible, so we can use them
4881   // if we need them.
4882   Solaris::misc_sym_init();
4883 
4884   int fd = ::open("/dev/zero", O_RDWR);
4885   if (fd < 0) {
4886     fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4887   } else {
4888     Solaris::set_dev_zero_fd(fd);
4889 
4890     // Close on exec, child won't inherit.
4891     fcntl(fd, F_SETFD, FD_CLOEXEC);
4892   }
4893 
4894   clock_tics_per_sec = CLK_TCK;
4895 
4896   // check if dladdr1() exists; dladdr1 can provide more information than
4897   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4898   // and is available on linker patches for 5.7 and 5.8.
4899   // libdl.so must already have been loaded, so this call is just an entry lookup
4900   void * hdl = dlopen("libdl.so", RTLD_NOW);
4901   if (hdl)
4902     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4903 
4904   // (Solaris only) this switches to calls that actually do locking.
4905   ThreadCritical::initialize();
4906 
4907   // main_thread points to the thread that created/loaded the JVM.
4908   main_thread = thr_self();
4909 
4910   // Constant minimum stack size allowed. It must be at least
4911   // the minimum of what the OS supports (thr_min_stack()), and
4912   // enough to allow the thread to get to user bytecode execution.
4913   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4914   // If the pagesize of the VM is greater than 8K determine the appropriate
4915   // number of initial guard pages.  The user can change this with the
4916   // command line arguments, if needed.
4917   if (vm_page_size() > 8*K) {
4918     StackYellowPages = 1;
4919     StackRedPages = 1;
4920     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4921   }
4922 }
4923 
4924 // To install functions for atexit system call
4925 extern "C" {
4926   static void perfMemory_exit_helper() {
4927     perfMemory_exit();
4928   }
4929 }
4930 
4931 // this is called _after_ the global arguments have been parsed
4932 jint os::init_2(void) {
4933   // try to enable extended file IO ASAP, see 6431278
4934   os::Solaris::try_enable_extended_io();
4935 
4936   // Allocate a single page and mark it as readable for safepoint polling.  Also
4937   // use this first mmap call to check support for MAP_ALIGN.
4938   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4939                                                       page_size,
4940                                                       MAP_PRIVATE | MAP_ALIGN,
4941                                                       PROT_READ);
4942   if (polling_page == NULL) {
4943     has_map_align = false;
4944     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
4945                                                 PROT_READ);
4946   }
4947 
4948   os::set_polling_page(polling_page);
4949 
4950 #ifndef PRODUCT
4951   if( Verbose && PrintMiscellaneous )
4952     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
4953 #endif
4954 
4955   if (!UseMembar) {
4956     address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
4957     guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
4958     os::set_memory_serialize_page( mem_serialize_page );
4959 
4960 #ifndef PRODUCT
4961     if(Verbose && PrintMiscellaneous)
4962       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
4963 #endif
4964   }
4965 
4966   // Check minimum allowable stack size for thread creation and to initialize
4967   // the java system classes, including StackOverflowError - depends on page
4968   // size.  Add a page for compiler2 recursion in main thread.
4969   // Add in 2*BytesPerWord times page size to account for VM stack during
4970   // class initialization depending on 32 or 64 bit VM.
4971   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
4972             (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
4973                     2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
4974 
4975   size_t threadStackSizeInBytes = ThreadStackSize * K;
4976   if (threadStackSizeInBytes != 0 &&
4977     threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
4978     tty->print_cr("\nThe stack size specified is too small; specify at least %dk",
4979                   os::Solaris::min_stack_allowed/K);
4980     return JNI_ERR;
4981   }
4982 
4983   // On systems with a 64kb page size the usable default stack size is
4984   // quite a bit less.  Increase the stack for 64kb (or any larger
4985   // than 8kb) pages; this increases virtual memory fragmentation
4986   // (since we're not creating the stack on a power of 2 boundary).
4987   // The real fix for this should be to fix the guard page
4988   // mechanism.
4989 
4990   if (vm_page_size() > 8*K) {
4991       threadStackSizeInBytes = (threadStackSizeInBytes != 0)
4992          ? threadStackSizeInBytes +
4993            ((StackYellowPages + StackRedPages) * vm_page_size())
4994          : 0;
4995       ThreadStackSize = threadStackSizeInBytes/K;
4996   }
4997 
4998   // Make the stack size a multiple of the page size so that
4999   // the yellow/red zones can be guarded.
5000   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
5001         vm_page_size()));
5002 
5003   Solaris::libthread_init();
5004 
5005   if (UseNUMA) {
5006     if (!Solaris::liblgrp_init()) {
5007       UseNUMA = false;
5008     } else {
5009       size_t lgrp_limit = os::numa_get_groups_num();
5010       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
5011       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
5012       FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
5013       if (lgrp_num < 2) {
5014         // There's only one locality group, disable NUMA.
5015         UseNUMA = false;
5016       }
5017     }
5018     if (!UseNUMA && ForceNUMA) {
5019       UseNUMA = true;
5020     }
5021   }
5022 
5023   Solaris::signal_sets_init();
5024   Solaris::init_signal_mem();
5025   Solaris::install_signal_handlers();
5026 
5027   if (libjsigversion < JSIG_VERSION_1_4_1) {
5028     Maxlibjsigsigs = OLDMAXSIGNUM;
5029   }
5030 
5031   // initialize synchronization primitives to use either thread or
5032   // lwp synchronization (controlled by UseLWPSynchronization)
5033   Solaris::synchronization_init();
5034 
5035   if (MaxFDLimit) {
5036     // set the number of file descriptors to max. print out error
5037     // if getrlimit/setrlimit fails but continue regardless.
5038     struct rlimit nbr_files;
5039     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
5040     if (status != 0) {
5041       if (PrintMiscellaneous && (Verbose || WizardMode))
5042         perror("os::init_2 getrlimit failed");
5043     } else {
5044       nbr_files.rlim_cur = nbr_files.rlim_max;
5045       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
5046       if (status != 0) {
5047         if (PrintMiscellaneous && (Verbose || WizardMode))
5048           perror("os::init_2 setrlimit failed");
5049       }
5050     }
5051   }
5052 
5053   // Calculate the theoretical max. number of threads to guard against
5054   // artificial out-of-memory situations, where all available address
5055   // space has been reserved by thread stacks. Default stack size is 1Mb.
5056   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
5057     JavaThread::stack_size_at_create() : (1*K*K);
5058   assert(pre_thread_stack_size != 0, "Must have a stack");
5059   // Solaris allows at most 4Gb of address space for user programs. Calculate the
5060   // thread limit at which we should start doing virtual memory banging; currently
5061   // that is when the thread stacks have used all but 200Mb of the space.
5062   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
5063   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
5064 
5065   // at-exit methods are called in the reverse order of their registration.
5066   // In Solaris 7 and earlier, atexit functions are called on return from
5067   // main or as a result of a call to exit(3C). There can be only 32 of
5068   // these functions registered and atexit() does not set errno. In Solaris
5069   // 8 and later, there is no limit to the number of functions registered
5070   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
5071   // functions are called upon dlclose(3DL) in addition to return from main
5072   // and exit(3C).
5073 
5074   if (PerfAllowAtExitRegistration) {
5075     // only register atexit functions if PerfAllowAtExitRegistration is set.
5076     // atexit functions can be delayed until process exit time, which
5077     // can be problematic for embedded VM situations. Embedded VMs should
5078     // call DestroyJavaVM() to assure that VM resources are released.
5079 
5080     // note: perfMemory_exit_helper atexit function may be removed in
5081     // the future if the appropriate cleanup code can be added to the
5082     // VM_Exit VMOperation's doit method.
5083     if (atexit(perfMemory_exit_helper) != 0) {
5084       warning("os::init2 atexit(perfMemory_exit_helper) failed");
5085     }
5086   }
5087 
5088   // Init pset_loadavg function pointer
5089   init_pset_getloadavg_ptr();
5090 
5091   return JNI_OK;
5092 }
5093 
5094 // Mark the polling page as unreadable
5095 void os::make_polling_page_unreadable(void) {
5096   if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
5097     fatal("Could not disable polling page");
5098 };
5099 
5100 // Mark the polling page as readable
5101 void os::make_polling_page_readable(void) {
5102   if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
5103     fatal("Could not enable polling page");
5104 };
5105 
5106 // OS interface.
5107 
5108 bool os::check_heap(bool force) { return true; }
5109 
5110 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
5111 static vsnprintf_t sol_vsnprintf = NULL;
5112 
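     // Resolve a vsnprintf implementation at runtime, preferring __vsnprintf and
     // searching the objects loaded after libjvm before falling back to RTLD_DEFAULT.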
5113 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
5114   if (!sol_vsnprintf) {
5115     // search for the named symbol in the objects that were loaded after libjvm
5116     void* where = RTLD_NEXT;
5117     if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
5118         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
5119     if (!sol_vsnprintf){
5120       // search for the named symbol in the objects that were loaded before libjvm
5121       where = RTLD_DEFAULT;
5122       if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
5123         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
5124       assert(sol_vsnprintf != NULL, "vsnprintf not found");
5125     }
5126   }
5127   return (*sol_vsnprintf)(buf, count, fmt, argptr);
5128 }
5129 
5130 
5131 // Is a (classpath) directory empty?
5132 bool os::dir_is_empty(const char* path) {
5133   DIR *dir = NULL;
5134   struct dirent *ptr;
5135 
5136   dir = opendir(path);
5137   if (dir == NULL) return true;
5138 
5139   /* Scan the directory */
5140   bool result = true;
5141   char buf[sizeof(struct dirent) + MAX_PATH];
5142   struct dirent *dbuf = (struct dirent *) buf;
5143   while (result && (ptr = readdir(dir, dbuf)) != NULL) {
5144     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
5145       result = false;
5146     }
5147   }
5148   closedir(dir);
5149   return result;
5150 }
5151 
5152 // This code originates from JDK's sysOpen and open64_w
5153 // from src/solaris/hpi/src/system_md.c
5154 
5155 #ifndef O_DELETE
5156 #define O_DELETE 0x10000
5157 #endif
5158 
5159 // Open a file. Unlink the file immediately after open returns
5160 // if the specified oflag has the O_DELETE flag set.
5161 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
5162 
5163 int os::open(const char *path, int oflag, int mode) {
5164   if (strlen(path) > MAX_PATH - 1) {
5165     errno = ENAMETOOLONG;
5166     return -1;
5167   }
5168   int fd;
5169   int o_delete = (oflag & O_DELETE);
5170   oflag = oflag & ~O_DELETE;
5171 
5172   fd = ::open64(path, oflag, mode);
5173   if (fd == -1) return -1;
5174 
5175   //If the open succeeded, the file might still be a directory
5176   {
5177     struct stat64 buf64;
5178     int ret = ::fstat64(fd, &buf64);
5179     int st_mode = buf64.st_mode;
5180 
5181     if (ret != -1) {
5182       if ((st_mode & S_IFMT) == S_IFDIR) {
5183         errno = EISDIR;
5184         ::close(fd);
5185         return -1;
5186       }
5187     } else {
5188       ::close(fd);
5189       return -1;
5190     }
5191   }
5192     /*
5193      * 32-bit Solaris systems suffer from:
5194      *
5195      * - an historical default soft limit of 256 per-process file
5196      *   descriptors that is too low for many Java programs.
5197      *
5198      * - a design flaw where file descriptors created using stdio
5199      *   fopen must be less than 256, _even_ when the first limit above
5200      *   has been raised.  This can cause calls to fopen (but not calls to
5201      *   open, for example) to fail mysteriously, perhaps in 3rd party
5202      *   native code (although the JDK itself uses fopen).  One can hardly
5203      *   criticize them for using this most standard of all functions.
5204      *
5205      * We attempt to make everything work anyway by:
5206      *
5207      * - raising the soft limit on per-process file descriptors beyond
5208      *   256
5209      *
5210      * - As of Solaris 10u4, we can request that Solaris raise the 256
5211      *   stdio fopen limit by calling function enable_extended_FILE_stdio.
5212      *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
5213      *
5214      * - If we are stuck on an old (pre 10u4) Solaris system, we can
5215      *   work around the bug by remapping non-stdio file descriptors below
5216      *   256 to ones beyond 256, which is done below.
5217      *
5218      * See:
5219      * 1085341: 32-bit stdio routines should support file descriptors >255
5220      * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
5221      * 6431278: Netbeans crash on 32 bit Solaris: need to call
5222      *          enable_extended_FILE_stdio() in VM initialisation
5223      * Giri Mandalika's blog
5224      * http://technopark02.blogspot.com/2005_05_01_archive.html
5225      */
5226 #ifndef  _LP64
5227      if ((!enabled_extended_FILE_stdio) && fd < 256) {
5228          int newfd = ::fcntl(fd, F_DUPFD, 256);
5229          if (newfd != -1) {
5230              ::close(fd);
5231              fd = newfd;
5232          }
5233      }
5234 #endif // 32-bit Solaris
5235     /*
5236      * All file descriptors that are opened in the JVM and not
5237      * specifically destined for a subprocess should have the
5238      * close-on-exec flag set.  If we don't set it, then careless 3rd
5239      * party native code might fork and exec without closing all
5240      * appropriate file descriptors (e.g. as we do in closeDescriptors in
5241      * UNIXProcess.c), and this in turn might:
5242      *
5243      * - cause end-of-file to fail to be detected on some file
5244      *   descriptors, resulting in mysterious hangs, or
5245      *
5246      * - might cause an fopen in the subprocess to fail on a system
5247      *   suffering from bug 1085341.
5248      *
5249      * (Yes, the default setting of the close-on-exec flag is a Unix
5250      * design flaw)
5251      *
5252      * See:
5253      * 1085341: 32-bit stdio routines should support file descriptors >255
5254      * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
5255      * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
5256      */
5257 #ifdef FD_CLOEXEC
5258     {
5259         int flags = ::fcntl(fd, F_GETFD);
5260         if (flags != -1)
5261             ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
5262     }
5263 #endif
5264 
5265   if (o_delete != 0) {
5266     ::unlink(path);
5267   }
5268   return fd;
5269 }
5270 
5271 // create binary file, rewriting existing file if required
5272 int os::create_binary_file(const char* path, bool rewrite_existing) {
5273   int oflags = O_WRONLY | O_CREAT;
5274   if (!rewrite_existing) {
5275     oflags |= O_EXCL;
5276   }
5277   return ::open64(path, oflags, S_IREAD | S_IWRITE);
5278 }
5279 
5280 // return current position of file pointer
5281 jlong os::current_file_offset(int fd) {
5282   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
5283 }
5284 
5285 // move file pointer to the specified offset
5286 jlong os::seek_to_file_offset(int fd, jlong offset) {
5287   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
5288 }
5289 
5290 jlong os::lseek(int fd, jlong offset, int whence) {
5291   return (jlong) ::lseek64(fd, offset, whence);
5292 }
5293 
5294 char * os::native_path(char *path) {
5295   return path;
5296 }
5297 
5298 int os::ftruncate(int fd, jlong length) {
5299   return ::ftruncate64(fd, length);
5300 }
5301 
5302 int os::fsync(int fd)  {
5303   RESTARTABLE_RETURN_INT(::fsync(fd));
5304 }
5305 
5306 int os::available(int fd, jlong *bytes) {
5307   jlong cur, end;
5308   int mode;
5309   struct stat64 buf64;
5310 
5311   if (::fstat64(fd, &buf64) >= 0) {
5312     mode = buf64.st_mode;
5313     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
5314       /*
5315       * XXX: is the following call interruptible? If so, this might
5316       * need to go through the INTERRUPT_IO() wrapper as for other
5317       * blocking, interruptible calls in this file.
5318       */
5319       int n, ioctl_return;
5320 
5321       INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n), ioctl_return, os::Solaris::clear_interrupted);
5322       if (ioctl_return >= 0) {
5323         *bytes = n;
5324         return 1;
5325       }
5326     }
5327   }
5328   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
5329     return 0;
5330   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
5331     return 0;
5332   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
5333     return 0;
5334   }
5335   *bytes = end - cur;
5336   return 1;
5337 }
5338 
5339 // Map a block of memory.
5340 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
5341                      char *addr, size_t bytes, bool read_only,
5342                      bool allow_exec) {
5343   int prot;
5344   int flags;
5345 
5346   if (read_only) {
5347     prot = PROT_READ;
5348     flags = MAP_SHARED;
5349   } else {
5350     prot = PROT_READ | PROT_WRITE;
5351     flags = MAP_PRIVATE;
5352   }
5353 
5354   if (allow_exec) {
5355     prot |= PROT_EXEC;
5356   }
5357 
5358   if (addr != NULL) {
5359     flags |= MAP_FIXED;
5360   }
5361 
5362   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5363                                      fd, file_offset);
5364   if (mapped_address == MAP_FAILED) {
5365     return NULL;
5366   }
5367   return mapped_address;
5368 }
5369 
5370 
5371 // Remap a block of memory.
5372 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5373                        char *addr, size_t bytes, bool read_only,
5374                        bool allow_exec) {
5375   // same as map_memory() on this OS
5376   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5377                         allow_exec);
5378 }
5379 
5380 
5381 // Unmap a block of memory.
5382 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5383   return munmap(addr, bytes) == 0;
5384 }
5385 
5386 void os::pause() {
5387   char filename[MAX_PATH];
5388   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5389     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
5390   } else {
5391     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5392   }
5393 
5394   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5395   if (fd != -1) {
5396     struct stat buf;
5397     ::close(fd);
5398     while (::stat(filename, &buf) == 0) {
5399       (void)::poll(NULL, 0, 100);
5400     }
5401   } else {
5402     jio_fprintf(stderr,
5403       "Could not open pause file '%s', continuing immediately.\n", filename);
5404   }
5405 }
5406 
5407 #ifndef PRODUCT
5408 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5409 // Turn this on if you need to trace synch operations.
5410 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5411 // and call record_synch_enable and record_synch_disable
5412 // around the computation of interest.
5413 
5414 void record_synch(char* name, bool returning);  // defined below
5415 
5416 class RecordSynch {
5417   char* _name;
5418  public:
5419   RecordSynch(char* name) :_name(name)
5420                  { record_synch(_name, false); }
5421   ~RecordSynch() { record_synch(_name,   true);  }
5422 };
5423 
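     // CHECK_SYNCH_OP interposes on a synchronization entry point: it resolves the
     // real implementation via dlsym(RTLD_NEXT), counts and records the call through
     // RecordSynch, runs the supplied check, and then delegates to the real function.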
5424 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
5425 extern "C" ret name params {                                    \
5426   typedef ret name##_t params;                                  \
5427   static name##_t* implem = NULL;                               \
5428   static int callcount = 0;                                     \
5429   if (implem == NULL) {                                         \
5430     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
5431     if (implem == NULL)  fatal(dlerror());                      \
5432   }                                                             \
5433   ++callcount;                                                  \
5434   RecordSynch _rs(#name);                                       \
5435   inner;                                                        \
5436   return implem args;                                           \
5437 }
5438 // in dbx, examine callcounts this way:
5439 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
5440 
5441 #define CHECK_POINTER_OK(p) \
5442   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
5443 #define CHECK_MU \
5444   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5445 #define CHECK_CV \
5446   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5447 #define CHECK_P(p) \
5448   if (!CHECK_POINTER_OK(p))  fatal("Pointer must be in C heap only.");
5449 
5450 #define CHECK_MUTEX(mutex_op) \
5451 CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5452 
5453 CHECK_MUTEX(   mutex_lock)
5454 CHECK_MUTEX(  _mutex_lock)
5455 CHECK_MUTEX( mutex_unlock)
5456 CHECK_MUTEX(_mutex_unlock)
5457 CHECK_MUTEX( mutex_trylock)
5458 CHECK_MUTEX(_mutex_trylock)
5459 
5460 #define CHECK_COND(cond_op) \
5461 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);
5462 
5463 CHECK_COND( cond_wait);
5464 CHECK_COND(_cond_wait);
5465 CHECK_COND(_cond_wait_cancel);
5466 
5467 #define CHECK_COND2(cond_op) \
5468 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);
5469 
5470 CHECK_COND2( cond_timedwait);
5471 CHECK_COND2(_cond_timedwait);
5472 CHECK_COND2(_cond_timedwait_cancel);
5473 
5474 // do the _lwp_* versions too
5475 #define mutex_t lwp_mutex_t
5476 #define cond_t  lwp_cond_t
5477 CHECK_MUTEX(  _lwp_mutex_lock)
5478 CHECK_MUTEX(  _lwp_mutex_unlock)
5479 CHECK_MUTEX(  _lwp_mutex_trylock)
5480 CHECK_MUTEX( __lwp_mutex_lock)
5481 CHECK_MUTEX( __lwp_mutex_unlock)
5482 CHECK_MUTEX( __lwp_mutex_trylock)
5483 CHECK_MUTEX(___lwp_mutex_lock)
5484 CHECK_MUTEX(___lwp_mutex_unlock)
5485 
5486 CHECK_COND(  _lwp_cond_wait);
5487 CHECK_COND( __lwp_cond_wait);
5488 CHECK_COND(___lwp_cond_wait);
5489 
5490 CHECK_COND2(  _lwp_cond_timedwait);
5491 CHECK_COND2( __lwp_cond_timedwait);
5492 #undef mutex_t
5493 #undef cond_t
5494 
5495 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5496 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5497 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5498 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5499 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5500 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5501 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5502 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5503 
5504 
5505 // recording machinery:
5506 
5507 enum { RECORD_SYNCH_LIMIT = 200 };
5508 char* record_synch_name[RECORD_SYNCH_LIMIT];
5509 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5510 bool record_synch_returning[RECORD_SYNCH_LIMIT];
5511 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5512 int record_synch_count = 0;
5513 bool record_synch_enabled = false;
5514 
5515 // in dbx, examine recorded data this way:
5516 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5517 
5518 void record_synch(char* name, bool returning) {
5519   if (record_synch_enabled) {
5520     if (record_synch_count < RECORD_SYNCH_LIMIT) {
5521       record_synch_name[record_synch_count] = name;
5522       record_synch_returning[record_synch_count] = returning;
5523       record_synch_thread[record_synch_count] = thr_self();
5524       record_synch_arg0ptr[record_synch_count] = &name;
5525       record_synch_count++;
5526     }
5527     // put more checking code here:
5528     // ...
5529   }
5530 }
5531 
5532 void record_synch_enable() {
5533   // start collecting trace data, if not already doing so
5534   if (!record_synch_enabled)  record_synch_count = 0;
5535   record_synch_enabled = true;
5536 }
5537 
5538 void record_synch_disable() {
5539   // stop collecting trace data
5540   record_synch_enabled = false;
5541 }
5542 
5543 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5544 #endif // PRODUCT
5545 
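     // Byte offset and length of the pr_utime/pr_stime fields within prusage_t,
     // used below to read just the CPU-time portion of a /proc lwpusage record.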
5546 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5547 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5548                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5549 
5550 
5551 // JVMTI & JVM monitoring and management support
5552 // The thread_cpu_time() and current_thread_cpu_time() are only
5553 // supported if is_thread_cpu_time_supported() returns true.
5554 // They are not supported on Solaris T1.
5555 
5556 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5557 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5558 // of a thread.
5559 //
5560 // current_thread_cpu_time() and thread_cpu_time(Thread *)
5561 // return the fast estimate available on the platform.
5562 
5563 // hrtime_t gethrvtime() return value includes
5564 // user time but does not include system time
5565 jlong os::current_thread_cpu_time() {
5566   return (jlong) gethrvtime();
5567 }
5568 
5569 jlong os::thread_cpu_time(Thread *thread) {
5570   // return user level CPU time only to be consistent with
5571   // what current_thread_cpu_time returns.
5572   // thread_cpu_time_info() must be changed if this changes
5573   return os::thread_cpu_time(thread, false /* user time only */);
5574 }
5575 
5576 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5577   if (user_sys_cpu_time) {
5578     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5579   } else {
5580     return os::current_thread_cpu_time();
5581   }
5582 }
5583 
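     // Read the per-LWP usage record from /proc/<pid>/lwp/<lwpid>/lwpusage and
     // return the thread's CPU time in nanoseconds (user only, or user + system),
     // or -1 if the record cannot be read.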
5584 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5585   char proc_name[64];
5586   int count;
5587   prusage_t prusage;
5588   jlong lwp_time;
5589   int fd;
5590 
5591   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5592                      getpid(),
5593                      thread->osthread()->lwp_id());
5594   fd = ::open(proc_name, O_RDONLY);
5595   if ( fd == -1 ) return -1;
5596 
5597   do {
5598     count = ::pread(fd,
5599                   (void *)&prusage.pr_utime,
5600                   thr_time_size,
5601                   thr_time_off);
5602   } while (count < 0 && errno == EINTR);
5603   ::close(fd);
5604   if ( count < 0 ) return -1;
5605 
5606   if (user_sys_cpu_time) {
5607     // user + system CPU time
5608     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5609                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5610                  (jlong)prusage.pr_stime.tv_nsec +
5611                  (jlong)prusage.pr_utime.tv_nsec;
5612   } else {
5613     // user level CPU time only
5614     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5615                 (jlong)prusage.pr_utime.tv_nsec;
5616   }
5617 
5618   return(lwp_time);
5619 }
5620 
5621 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5622   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5623   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5624   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5625   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5626 }
5627 
5628 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5629   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5630   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5631   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5632   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5633 }
5634 
5635 bool os::is_thread_cpu_time_supported() {
5636   if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
5637     return true;
5638   } else {
5639     return false;
5640   }
5641 }
5642 
5643 // System loadavg support.  Returns -1 if load average cannot be obtained.
5644 // Return the load average for our processor set if the primitive exists
5645 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
5646 int os::loadavg(double loadavg[], int nelem) {
5647   if (pset_getloadavg_ptr != NULL) {
5648     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5649   } else {
5650     return ::getloadavg(loadavg, nelem);
5651   }
5652 }
5653 
5654 //---------------------------------------------------------------------------------
5655 
5656 bool os::find(address addr, outputStream* st) {
5657   Dl_info dlinfo;
5658   memset(&dlinfo, 0, sizeof(dlinfo));
5659   if (dladdr(addr, &dlinfo) != 0) {
5660     st->print(PTR_FORMAT ": ", addr);
5661     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5662       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5663     } else if (dlinfo.dli_fbase != NULL)
5664       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5665     else
5666       st->print("<absolute address>");
5667     if (dlinfo.dli_fname != NULL) {
5668       st->print(" in %s", dlinfo.dli_fname);
5669     }
5670     if (dlinfo.dli_fbase != NULL) {
5671       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5672     }
5673     st->cr();
5674 
5675     if (Verbose) {
5676       // decode some bytes around the PC
5677       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5678       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5679       address       lowest = (address) dlinfo.dli_sname;
5680       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
5681       if (begin < lowest)  begin = lowest;
5682       Dl_info dlinfo2;
5683       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5684           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
5685         end = (address) dlinfo2.dli_saddr;
5686       Disassembler::decode(begin, end, st);
5687     }
5688     return true;
5689   }
5690   return false;
5691 }
5692 
5693 // The following function has been added to support HotSparc's libjvm.so running
5694 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
5695 // src/solaris/hpi/native_threads in the EVM codebase.
5696 //
5697 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5698 // libraries and should thus be removed. We will leave it behind for a while
5699 // until we no longer want to be able to run on top of 1.3.0 Solaris production
5700 // JDK. See 4341971.
5701 
5702 #define STACK_SLACK 0x800
5703 
5704 extern "C" {
5705   intptr_t sysThreadAvailableStackWithSlack() {
5706     stack_t st;
5707     intptr_t retval, stack_top;
5708     retval = thr_stksegment(&st);
5709     assert(retval == 0, "incorrect return value from thr_stksegment");
5710     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5711     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
5712     stack_top=(intptr_t)st.ss_sp-st.ss_size;
5713     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5714   }
5715 }
5716 
5717 // ObjectMonitor park-unpark infrastructure ...
5718 //
5719 // We implement Solaris and Linux PlatformEvents with the
5720 // obvious condvar-mutex-flag triple.
5721 // Another alternative that works quite well is pipes:
5722 // Each PlatformEvent consists of a pipe-pair.
5723 // The thread associated with the PlatformEvent
5724 // calls park(), which reads from the input end of the pipe.
5725 // Unpark() writes into the other end of the pipe.
5726 // The write-side of the pipe must be set NDELAY.
5727 // Unfortunately pipes consume a large # of handles.
5728 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5729 // Using pipes for the 1st few threads might be workable, however.
5730 //
5731 // park() is permitted to return spuriously.
5732 // Callers of park() should wrap the call to park() in
5733 // an appropriate loop.  A litmus test for the correct
5734 // usage of park is the following: if park() were modified
5735 // to immediately return 0 your code should still work,
5736 // albeit degenerating to a spin loop.
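//
// An illustrative caller pattern (a sketch only, not code used by the VM;
// "ev" is a hypothetical PlatformEvent* and "conditionOfInterest" a
// hypothetical predicate):
//
//   while (!conditionOfInterest()) {
//     ev->park();   // may return spuriously, so recheck the condition
//   }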
5737 //
5738 // An interesting optimization for park() is to use a trylock()
5739 // to attempt to acquire the mutex.  If the trylock() fails
// then we know that a concurrent unpark() operation is in-progress.
// In that case the park() code could simply set _count to 0
// and return immediately.  The subsequent park() operation *might*
// return immediately.  That's harmless as the caller of park() is
// expected to loop.  By using trylock() we will have avoided a
// context switch caused by contention on the per-thread mutex.
5746 //
5747 // TODO-FIXME:
5748 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
5749 //     objectmonitor implementation.
5750 // 2.  Collapse the JSR166 parker event, and the
5751 //     objectmonitor ParkEvent into a single "Event" construct.
5752 // 3.  In park() and unpark() add:
5753 //     assert (Thread::current() == AssociatedWith).
5754 // 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
5755 //     1-out-of-N park() operations will return immediately.
5756 //
5757 // _Event transitions in park()
5758 //   -1 => -1 : illegal
5759 //    1 =>  0 : pass - return immediately
5760 //    0 => -1 : block
5761 //
5762 // _Event serves as a restricted-range semaphore.
5763 //
5764 // Another possible encoding of _Event would be with
5765 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5766 //
5767 // TODO-FIXME: add DTRACE probes for:
5768 // 1.   Tx parks
5769 // 2.   Ty unparks Tx
5770 // 3.   Tx resumes from park
5771 
5772 
5773 // value determined through experimentation
5774 #define ROUNDINGFIX 11
5775 
5776 // utility to compute the abstime argument to timedwait.
5777 // TODO-FIXME: switch from compute_abstime() to unpackTime().
5778 
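//
// A worked example (illustrative only): with millis == 2500 and, say,
// now == { tv_sec = 100, tv_usec = 0 }, the code below yields
// abstime == { tv_sec = 102, tv_nsec = 500000000 }.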
5779 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5780   // millis is the relative timeout time
5781   // abstime will be the absolute timeout time
5782   if (millis < 0)  millis = 0;
5783   struct timeval now;
5784   int status = gettimeofday(&now, NULL);
5785   assert(status == 0, "gettimeofday");
5786   jlong seconds = millis / 1000;
5787   jlong max_wait_period;
5788 
5789   if (UseLWPSynchronization) {
5790     // forward port of fix for 4275818 (not sleeping long enough)
5791     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5792     // _lwp_cond_timedwait() used a round_down algorithm rather
5793     // than a round_up. For millis less than our roundfactor
5794     // it rounded down to 0 which doesn't meet the spec.
5795     // For millis > roundfactor we may return a bit sooner, but
5796     // since we can not accurately identify the patch level and
5797     // this has already been fixed in Solaris 9 and 8 we will
5798     // leave it alone rather than always rounding down.
5799 
    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
5804   } else {
5805     max_wait_period = 50000000;
5806   }
5807   millis %= 1000;
5808   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5809      seconds = max_wait_period;
5810   }
5811   abstime->tv_sec = now.tv_sec  + seconds;
5812   long       usec = now.tv_usec + millis * 1000;
5813   if (usec >= 1000000) {
5814     abstime->tv_sec += 1;
5815     usec -= 1000000;
5816   }
5817   abstime->tv_nsec = usec * 1000;
5818   return abstime;
5819 }
5820 
5821 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
5822 // Conceptually TryPark() should be equivalent to park(0).
5823 
5824 int os::PlatformEvent::TryPark() {
5825   for (;;) {
5826     const int v = _Event ;
5827     guarantee ((v == 0) || (v == 1), "invariant") ;
5828     if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
5829   }
5830 }
5831 
5832 void os::PlatformEvent::park() {           // AKA: down()
5833   // Invariant: Only the thread associated with the Event/PlatformEvent
5834   // may call park().
5835   int v ;
5836   for (;;) {
5837       v = _Event ;
5838       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5839   }
5840   guarantee (v >= 0, "invariant") ;
5841   if (v == 0) {
5842      // Do this the hard way by blocking ...
5843      // See http://monaco.sfbay/detail.jsf?cr=5094058.
5844      // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5845      // Only for SPARC >= V8PlusA
5846 #if defined(__sparc) && defined(COMPILER2)
5847      if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5848 #endif
5849      int status = os::Solaris::mutex_lock(_mutex);
5850      assert_status(status == 0, status,  "mutex_lock");
5851      guarantee (_nParked == 0, "invariant") ;
5852      ++ _nParked ;
5853      while (_Event < 0) {
        // For some reason, under Solaris 2.7 lwp_cond_wait() may return ETIME ...
        // Treat this the same as if the wait was interrupted.
        // With /usr/lib/lwp going to the kernel, always handle ETIME.
5857         status = os::Solaris::cond_wait(_cond, _mutex);
5858         if (status == ETIME) status = EINTR ;
5859         assert_status(status == 0 || status == EINTR, status, "cond_wait");
5860      }
5861      -- _nParked ;
5862      _Event = 0 ;
5863      status = os::Solaris::mutex_unlock(_mutex);
5864      assert_status(status == 0, status, "mutex_unlock");
5865     // Paranoia to ensure our locked and lock-free paths interact
5866     // correctly with each other.
5867     OrderAccess::fence();
5868   }
5869 }
5870 
5871 int os::PlatformEvent::park(jlong millis) {
5872   guarantee (_nParked == 0, "invariant") ;
5873   int v ;
5874   for (;;) {
5875       v = _Event ;
5876       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5877   }
5878   guarantee (v >= 0, "invariant") ;
5879   if (v != 0) return OS_OK ;
5880 
5881   int ret = OS_TIMEOUT;
5882   timestruc_t abst;
5883   compute_abstime (&abst, millis);
5884 
5885   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5886   // For Solaris SPARC set fprs.FEF=0 prior to parking.
5887   // Only for SPARC >= V8PlusA
5888 #if defined(__sparc) && defined(COMPILER2)
5889  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5890 #endif
5891   int status = os::Solaris::mutex_lock(_mutex);
5892   assert_status(status == 0, status, "mutex_lock");
5893   guarantee (_nParked == 0, "invariant") ;
5894   ++ _nParked ;
5895   while (_Event < 0) {
5896      int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5897      assert_status(status == 0 || status == EINTR ||
5898                    status == ETIME || status == ETIMEDOUT,
5899                    status, "cond_timedwait");
5900      if (!FilterSpuriousWakeups) break ;                // previous semantics
5901      if (status == ETIME || status == ETIMEDOUT) break ;
5902      // We consume and ignore EINTR and spurious wakeups.
5903   }
5904   -- _nParked ;
5905   if (_Event >= 0) ret = OS_OK ;
5906   _Event = 0 ;
5907   status = os::Solaris::mutex_unlock(_mutex);
5908   assert_status(status == 0, status, "mutex_unlock");
5909   // Paranoia to ensure our locked and lock-free paths interact
5910   // correctly with each other.
5911   OrderAccess::fence();
5912   return ret;
5913 }
5914 
5915 void os::PlatformEvent::unpark() {
5916   // Transitions for _Event:
5917   //    0 :=> 1
5918   //    1 :=> 1
5919   //   -1 :=> either 0 or 1; must signal target thread
5920   //          That is, we can safely transition _Event from -1 to either
5921   //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
5922   //          unpark() calls.
5923   // See also: "Semaphores in Plan 9" by Mullender & Cox
5924   //
5925   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5926   // that it will take two back-to-back park() calls for the owning
5927   // thread to block. This has the benefit of forcing a spurious return
5928   // from the first park() call after an unpark() call which will help
5929   // shake out uses of park() and unpark() without condition variables.
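  //
  // Illustrative back-to-back sequence (a sketch of the transitions above,
  // not code the VM executes literally):
  //   unpark();   // _Event: 0 -> 1
  //   park();     // _Event: 1 -> 0, returns immediately
  //   park();     // _Event: 0 -> -1, blocks until the next unpark()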
5930 
5931   if (Atomic::xchg(1, &_Event) >= 0) return;
5932 
  // If the thread associated with the event was parked, wake it.
  // Wait for the thread associated with the PlatformEvent to vacate.
5935   int status = os::Solaris::mutex_lock(_mutex);
5936   assert_status(status == 0, status, "mutex_lock");
5937   int AnyWaiters = _nParked;
5938   status = os::Solaris::mutex_unlock(_mutex);
5939   assert_status(status == 0, status, "mutex_unlock");
5940   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5941   if (AnyWaiters != 0) {
    // We intentionally signal *after* dropping the lock
    // to avoid a common class of futile wakeups.
5944     status = os::Solaris::cond_signal(_cond);
5945     assert_status(status == 0, status, "cond_signal");
5946   }
5947 }
5948 
5949 // JSR166
5950 // -------------------------------------------------------
5951 
/*
 * The Solaris and Linux implementations of park/unpark are fairly
 * conservative for now, but can be improved. They currently use a
 * mutex/condvar pair, plus _counter.
 * Park decrements _counter if > 0, else does a condvar wait.  Unpark
 * sets _counter to 1 and signals the condvar.  Only one thread ever waits
 * on the condvar. Contention seen when trying to park implies that someone
 * is unparking you, so don't wait. And spurious returns are fine, so there
 * is no need to track notifications.
 */
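
// Illustrative permit semantics (mirrors java.util.concurrent.locks.LockSupport;
// a sketch, not VM code):
//   unpark(t); unpark(t);   // at most one permit is recorded (_counter is capped at 1)
//   in t: park();           // consumes the permit and returns immediately
//   in t: park();           // no permit available, blocks until unpark or interrupt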
5962 
5963 #define MAX_SECS 100000000
/*
 * This code is common to Linux and Solaris and will be moved to a
 * common place in dolphin.
 *
 * The passed-in time value is either a relative time in nanoseconds
 * or an absolute time in milliseconds. Either way it has to be unpacked
 * into suitable seconds and nanoseconds components and stored in the
 * given timespec structure.
 * Given that time is a 64-bit value and the time_t used in the timespec is
 * only a signed 32-bit value (except on 64-bit Linux), we have to watch for
 * overflow if times far in the future are given. Further, on Solaris versions
 * prior to 10 there is a restriction (see cond_timedwait) that the specified
 * number of seconds, in abstime, is less than current_time + 100,000,000.
 * As it will be 28 years before "now + 100000000" will overflow, we can
 * ignore overflow and just impose a hard limit on seconds using the value
 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
 * years from "now".
 */
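// A worked example (illustrative): a relative time of 1500000000 ns, assuming
// now.tv_usec == 0, unpacks to tv_sec == now.tv_sec + 1 and tv_nsec == 500000000.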
5982 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5983   assert (time > 0, "convertTime");
5984 
5985   struct timeval now;
5986   int status = gettimeofday(&now, NULL);
5987   assert(status == 0, "gettimeofday");
5988 
5989   time_t max_secs = now.tv_sec + MAX_SECS;
5990 
5991   if (isAbsolute) {
5992     jlong secs = time / 1000;
5993     if (secs > max_secs) {
5994       absTime->tv_sec = max_secs;
5995     }
5996     else {
5997       absTime->tv_sec = secs;
5998     }
5999     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
6000   }
6001   else {
6002     jlong secs = time / NANOSECS_PER_SEC;
6003     if (secs >= MAX_SECS) {
6004       absTime->tv_sec = max_secs;
6005       absTime->tv_nsec = 0;
6006     }
6007     else {
6008       absTime->tv_sec = now.tv_sec + secs;
6009       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
6010       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
6011         absTime->tv_nsec -= NANOSECS_PER_SEC;
6012         ++absTime->tv_sec; // note: this must be <= max_secs
6013       }
6014     }
6015   }
6016   assert(absTime->tv_sec >= 0, "tv_sec < 0");
6017   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
6018   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
6019   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
6020 }
6021 
6022 void Parker::park(bool isAbsolute, jlong time) {
6023   // Ideally we'd do something useful while spinning, such
6024   // as calling unpackTime().
6025 
6026   // Optional fast-path check:
6027   // Return immediately if a permit is available.
6028   // We depend on Atomic::xchg() having full barrier semantics
6029   // since we are doing a lock-free update to _counter.
6030   if (Atomic::xchg(0, &_counter) > 0) return;
6031 
6032   // Optional fast-exit: Check interrupt before trying to wait
6033   Thread* thread = Thread::current();
6034   assert(thread->is_Java_thread(), "Must be JavaThread");
6035   JavaThread *jt = (JavaThread *)thread;
6036   if (Thread::is_interrupted(thread, false)) {
6037     return;
6038   }
6039 
6040   // First, demultiplex/decode time arguments
6041   timespec absTime;
6042   if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
6043     return;
6044   }
6045   if (time > 0) {
    // Warning: this code might be exposed to the old Solaris time
    // round-down bugs.  Grep "ROUNDINGFIX" for details.
6048     unpackTime(&absTime, isAbsolute, time);
6049   }
6050 
6051   // Enter safepoint region
6052   // Beware of deadlocks such as 6317397.
6053   // The per-thread Parker:: _mutex is a classic leaf-lock.
6054   // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex.  If safepoints are pending, both the
  // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
6057   ThreadBlockInVM tbivm(jt);
6058 
  // Don't wait if we cannot get the lock, since interference arises from
  // unblocking.  Also, check the interrupt state before trying to wait.
6061   if (Thread::is_interrupted(thread, false) ||
6062       os::Solaris::mutex_trylock(_mutex) != 0) {
6063     return;
6064   }
6065 
6066   int status ;
6067 
6068   if (_counter > 0)  { // no wait needed
6069     _counter = 0;
6070     status = os::Solaris::mutex_unlock(_mutex);
6071     assert (status == 0, "invariant") ;
6072     // Paranoia to ensure our locked and lock-free paths interact
6073     // correctly with each other and Java-level accesses.
6074     OrderAccess::fence();
6075     return;
6076   }
6077 
6078 #ifdef ASSERT
6079   // Don't catch signals while blocked; let the running threads have the signals.
6080   // (This allows a debugger to break into the running thread.)
6081   sigset_t oldsigs;
6082   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
6083   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
6084 #endif
6085 
6086   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
6087   jt->set_suspend_equivalent();
6088   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
6089 
6090   // Do this the hard way by blocking ...
6091   // See http://monaco.sfbay/detail.jsf?cr=5094058.
6092   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
6093   // Only for SPARC >= V8PlusA
6094 #if defined(__sparc) && defined(COMPILER2)
6095   if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
6096 #endif
6097 
6098   if (time == 0) {
6099     status = os::Solaris::cond_wait (_cond, _mutex) ;
6100   } else {
6101     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
6102   }
  // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of Solaris.
6105   assert_status(status == 0 || status == EINTR ||
6106                 status == ETIME || status == ETIMEDOUT,
6107                 status, "cond_timedwait");
6108 
6109 #ifdef ASSERT
6110   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
6111 #endif
6112   _counter = 0 ;
6113   status = os::Solaris::mutex_unlock(_mutex);
6114   assert_status(status == 0, status, "mutex_unlock") ;
6115   // Paranoia to ensure our locked and lock-free paths interact
6116   // correctly with each other and Java-level accesses.
6117   OrderAccess::fence();
6118 
6119   // If externally suspended while waiting, re-suspend
6120   if (jt->handle_special_suspend_equivalent_condition()) {
6121     jt->java_suspend_self();
6122   }
6123 }
6124 
6125 void Parker::unpark() {
6126   int s, status ;
6127   status = os::Solaris::mutex_lock (_mutex) ;
6128   assert (status == 0, "invariant") ;
6129   s = _counter;
6130   _counter = 1;
6131   status = os::Solaris::mutex_unlock (_mutex) ;
6132   assert (status == 0, "invariant") ;
6133 
6134   if (s < 1) {
6135     status = os::Solaris::cond_signal (_cond) ;
6136     assert (status == 0, "invariant") ;
6137   }
6138 }
6139 
6140 extern char** environ;
6141 
// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from a signal handler. It
// doesn't block SIGINT et al.
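//
// An illustrative call (a sketch, not taken from VM code):
//   int rc = os::fork_and_exec((char*)"ls /tmp");   // command's exit status, or -1 on failure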
6146 int os::fork_and_exec(char* cmd) {
6147   char * argv[4];
6148   argv[0] = (char *)"sh";
6149   argv[1] = (char *)"-c";
6150   argv[2] = cmd;
6151   argv[3] = NULL;
6152 
  // fork() is async-signal-safe; fork1() is not, so it can't be used in a signal handler.
6154   pid_t pid;
6155   Thread* t = ThreadLocalStorage::get_thread_slow();
6156   if (t != NULL && t->is_inside_signal_handler()) {
6157     pid = fork();
6158   } else {
6159     pid = fork1();
6160   }
6161 
6162   if (pid < 0) {
6163     // fork failed
6164     warning("fork failed: %s", strerror(errno));
6165     return -1;
6166 
6167   } else if (pid == 0) {
6168     // child process
6169 
6170     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
6171     execve("/usr/bin/sh", argv, environ);
6172 
6173     // execve failed
6174     _exit(-1);
6175 
6176   } else  {
6177     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
6178     // care about the actual exit code, for now.
6179 
6180     int status;
6181 
6182     // Wait for the child process to exit.  This returns immediately if
    // the child has already exited.
6184     while (waitpid(pid, &status, 0) < 0) {
6185         switch (errno) {
6186         case ECHILD: return 0;
6187         case EINTR: break;
6188         default: return -1;
6189         }
6190     }
6191 
6192     if (WIFEXITED(status)) {
6193        // The child exited normally; get its exit code.
6194        return WEXITSTATUS(status);
6195     } else if (WIFSIGNALED(status)) {
6196        // The child exited because of a signal
6197        // The best value to return is 0x80 + signal number,
6198        // because that is what all Unix shells do, and because
6199        // it allows callers to distinguish between process exit and
6200        // process death by signal.
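       // For example, a child killed by SIGKILL (signal 9) yields 0x80 + 9 == 137.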
6201        return 0x80 + WTERMSIG(status);
6202     } else {
6203        // Unknown exit code; pass it through
6204        return status;
6205     }
6206   }
6207 }
6208 
6209 // is_headless_jre()
6210 //
6211 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
6212 // in order to report if we are running in a headless jre
6213 //
// Since JDK 8, xawt/libmawt.so has been moved into the same directory
// as libawt.so and renamed libawt_xawt.so.
6216 //
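// For example (illustrative, assuming libjvm.so lives at
// <jre>/lib/sparcv9/server/libjvm.so): we probe <jre>/lib/sparcv9/xawt/libmawt.so
// and <jre>/lib/sparcv9/libawt_xawt.so, and report headless only if neither exists.
//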
6217 bool os::is_headless_jre() {
6218     struct stat statbuf;
6219     char buf[MAXPATHLEN];
6220     char libmawtpath[MAXPATHLEN];
6221     const char *xawtstr  = "/xawt/libmawt.so";
6222     const char *new_xawtstr = "/libawt_xawt.so";
6223     char *p;
6224 
6225     // Get path to libjvm.so
6226     os::jvm_path(buf, sizeof(buf));
6227 
6228     // Get rid of libjvm.so
6229     p = strrchr(buf, '/');
6230     if (p == NULL) return false;
6231     else *p = '\0';
6232 
6233     // Get rid of client or server
6234     p = strrchr(buf, '/');
6235     if (p == NULL) return false;
6236     else *p = '\0';
6237 
6238     // check xawt/libmawt.so
6239     strcpy(libmawtpath, buf);
6240     strcat(libmawtpath, xawtstr);
6241     if (::stat(libmawtpath, &statbuf) == 0) return false;
6242 
6243     // check libawt_xawt.so
6244     strcpy(libmawtpath, buf);
6245     strcat(libmawtpath, new_xawtstr);
6246     if (::stat(libmawtpath, &statbuf) == 0) return false;
6247 
6248     return true;
6249 }
6250 
6251 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
6252   INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted);
6253 }
6254 
6255 int os::close(int fd) {
6256   return ::close(fd);
6257 }
6258 
6259 int os::socket_close(int fd) {
6260   return ::close(fd);
6261 }
6262 
6263 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
6264   INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
6265 }
6266 
6267 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
6268   INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
6269 }
6270 
6271 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
6272   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
6273 }
6274 
6275 // As both poll and select can be interrupted by signals, we have to be
6276 // prepared to restart the system call after updating the timeout, unless
6277 // a poll() is done with timeout == -1, in which case we repeat with this
6278 // "wait forever" value.
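//
// For example (illustrative): with timeout == 1000 ms, if poll() is interrupted
// by a signal after roughly 300 ms, we retry with the remaining ~700 ms; with
// timeout == -1 we simply retry with -1 again.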
6279 
6280 int os::timeout(int fd, long timeout) {
6281   int res;
6282   struct timeval t;
6283   julong prevtime, newtime;
6284   static const char* aNull = 0;
6285   struct pollfd pfd;
6286   pfd.fd = fd;
6287   pfd.events = POLLIN;
6288 
6289   gettimeofday(&t, &aNull);
6290   prevtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;
6291 
6292   for(;;) {
6293     INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
6294     if(res == OS_ERR && errno == EINTR) {
6295         if(timeout != -1) {
6296           gettimeofday(&t, &aNull);
6297           newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec /1000;
6298           timeout -= newtime - prevtime;
6299           if(timeout <= 0)
6300             return OS_OK;
6301           prevtime = newtime;
6302         }
6303     } else return res;
6304   }
6305 }
6306 
6307 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
6308   int _result;
6309   INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\
6310                           os::Solaris::clear_interrupted);
6311 
6312   // Depending on when thread interruption is reset, _result could be
6313   // one of two values when errno == EINTR
6314 
6315   if (((_result == OS_INTRPT) || (_result == OS_ERR))
6316       && (errno == EINTR)) {
6317      /* restarting a connect() changes its errno semantics */
6318      INTERRUPTIBLE(::connect(fd, him, len), _result,\
6319                    os::Solaris::clear_interrupted);
6320      /* undo these changes */
6321      if (_result == OS_ERR) {
6322        if (errno == EALREADY) {
6323          errno = EINPROGRESS; /* fall through */
6324        } else if (errno == EISCONN) {
6325          errno = 0;
6326          return OS_OK;
6327        }
6328      }
6329    }
6330    return _result;
6331  }
6332 
6333 int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
6334   if (fd < 0) {
6335     return OS_ERR;
6336   }
6337   INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\
6338                            os::Solaris::clear_interrupted);
6339 }
6340 
6341 int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
6342                  sockaddr* from, socklen_t* fromlen) {
6343   INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\
6344                            os::Solaris::clear_interrupted);
6345 }
6346 
6347 int os::sendto(int fd, char* buf, size_t len, uint flags,
6348                struct sockaddr* to, socklen_t tolen) {
6349   INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\
6350                            os::Solaris::clear_interrupted);
6351 }
6352 
6353 int os::socket_available(int fd, jint *pbytes) {
6354   if (fd < 0) {
6355     return OS_OK;
6356   }
6357   int ret;
6358   RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
  // Note: ioctl() can return 0 when successful; JVM_SocketAvailable
  // is expected to return 0 on failure and 1 on success to the JDK.
6361   return (ret == OS_ERR) ? 0 : 1;
6362 }
6363 
6364 int os::bind(int fd, struct sockaddr* him, socklen_t len) {
6365    INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
6366                                       os::Solaris::clear_interrupted);
6367 }
6368 
6369 // Get the default path to the core file
6370 // Returns the length of the string
6371 int os::get_core_path(char* buffer, size_t bufferSize) {
6372   const char* p = get_current_directory(buffer, bufferSize);
6373 
6374   if (p == NULL) {
6375     assert(p != NULL, "failed to get current directory");
6376     return 0;
6377   }
6378 
6379   return strlen(buffer);
6380 }
6381 
6382 #ifndef PRODUCT
6383 void TestReserveMemorySpecial_test() {
6384   // No tests available for this platform
6385 }
6386 #endif