1 /*
   2  * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "jvm_solaris.h"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/filemap.hpp"
  37 #include "mutex_solaris.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "os_share_solaris.hpp"
  40 #include "prims/jniFastGetField.hpp"
  41 #include "prims/jvm.h"
  42 #include "prims/jvm_misc.hpp"
  43 #include "runtime/arguments.hpp"
  44 #include "runtime/extendedPC.hpp"
  45 #include "runtime/globals.hpp"
  46 #include "runtime/interfaceSupport.hpp"
  47 #include "runtime/java.hpp"
  48 #include "runtime/javaCalls.hpp"
  49 #include "runtime/mutexLocker.hpp"
  50 #include "runtime/objectMonitor.hpp"
  51 #include "runtime/orderAccess.inline.hpp"
  52 #include "runtime/osThread.hpp"
  53 #include "runtime/perfMemory.hpp"
  54 #include "runtime/sharedRuntime.hpp"
  55 #include "runtime/statSampler.hpp"
  56 #include "runtime/stubRoutines.hpp"
  57 #include "runtime/thread.inline.hpp"
  58 #include "runtime/threadCritical.hpp"
  59 #include "runtime/timer.hpp"
  60 #include "services/attachListener.hpp"
  61 #include "services/memTracker.hpp"
  62 #include "services/runtimeService.hpp"
  63 #include "utilities/decoder.hpp"
  64 #include "utilities/defaultStream.hpp"
  65 #include "utilities/events.hpp"
  66 #include "utilities/growableArray.hpp"
  67 #include "utilities/vmError.hpp"
  68 
  69 // put OS-includes here
  70 # include <dlfcn.h>
  71 # include <errno.h>
  72 # include <exception>
  73 # include <link.h>
  74 # include <poll.h>
  75 # include <pthread.h>
  76 # include <pwd.h>
  77 # include <schedctl.h>
  78 # include <setjmp.h>
  79 # include <signal.h>
  80 # include <stdio.h>
  81 # include <alloca.h>
  82 # include <sys/filio.h>
  83 # include <sys/ipc.h>
  84 # include <sys/lwp.h>
  85 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
  86 # include <sys/mman.h>
  87 # include <sys/processor.h>
  88 # include <sys/procset.h>
  89 # include <sys/pset.h>
  90 # include <sys/resource.h>
  91 # include <sys/shm.h>
  92 # include <sys/socket.h>
  93 # include <sys/stat.h>
  94 # include <sys/systeminfo.h>
  95 # include <sys/time.h>
  96 # include <sys/times.h>
  97 # include <sys/types.h>
  98 # include <sys/wait.h>
  99 # include <sys/utsname.h>
 100 # include <thread.h>
 101 # include <unistd.h>
 102 # include <sys/priocntl.h>
 103 # include <sys/rtpriocntl.h>
 104 # include <sys/tspriocntl.h>
 105 # include <sys/iapriocntl.h>
 106 # include <sys/fxpriocntl.h>
 107 # include <sys/loadavg.h>
 108 # include <string.h>
 110 
 111 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
 112 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
 113 
 114 #define MAX_PATH (2 * K)
 115 
 116 // for timer info max values which include all bits
 117 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 118 
 119 
 120 // Here are some liblgrp and madvise constants so we can compile on older
 121 // systems whose headers do not provide these definitions.
 122 
 123 #ifndef MADV_ACCESS_LWP
 124 # define  MADV_ACCESS_LWP         7       /* next LWP to access heavily */
 125 #endif
 126 #ifndef MADV_ACCESS_MANY
 127 # define  MADV_ACCESS_MANY        8       /* many processes to access heavily */
 128 #endif
 129 
 130 #ifndef LGRP_RSRC_CPU
 131 # define LGRP_RSRC_CPU           0       /* CPU resources */
 132 #endif
 133 #ifndef LGRP_RSRC_MEM
 134 # define LGRP_RSRC_MEM           1       /* memory resources */
 135 #endif
 136 
 137 // see thr_setprio(3T) for the basis of these numbers
 138 #define MinimumPriority 0
 139 #define NormalPriority  64
 140 #define MaximumPriority 127
 141 
 142 // Values for ThreadPriorityPolicy == 1
 143 int prio_policy1[CriticalPriority+1] = {
 144   -99999,  0, 16,  32,  48,  64,
 145           80, 96, 112, 124, 127, 127 };
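// The table is indexed by Java thread priority (like java_to_os_priority):
// e.g. NormPriority (5) maps to Solaris priority 64 and MaxPriority (10) to
// 127.  Entry 0 is a sentinel that should never be used.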
 146 
 147 // System parameters used internally
 148 static clock_t clock_tics_per_sec = 100;
 149 
 150 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
 151 static bool enabled_extended_FILE_stdio = false;
 152 
 153 // For diagnostics: print a message once.  See run_periodic_checks.
 154 static bool check_addr0_done = false;
 155 static sigset_t check_signal_done;
 156 static bool check_signals = true;
 157 
 158 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
 159 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
 160 
 161 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
 162 
 163 
 164 // "default" initializers for missing libc APIs
 165 extern "C" {
 166   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 167   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
 168 
 169   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 170   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
 171 }
 172 
 173 // "default" initializers for pthread-based synchronization
 174 extern "C" {
 175   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 176   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 177 }
 178 
 179 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 180 
 181 static inline size_t adjust_stack_size(address base, size_t size) {
 182   if ((ssize_t)size < 0) {
 183     // 4759953: Compensate for ridiculous stack size.
 184     size = max_intx;
 185   }
 186   if (size > (size_t)base) {
 187     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
 188     size = (size_t)base;
 189   }
 190   return size;
 191 }
 192 
 193 static inline stack_t get_stack_info() {
 194   stack_t st;
 195   int retval = thr_stksegment(&st);
 196   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
 197   assert(retval == 0, "incorrect return value from thr_stksegment");
 198   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
 199   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
 200   return st;
 201 }
 202 
 203 address os::current_stack_base() {
 204   int r = thr_main() ;
 205   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
 206   bool is_primordial_thread = r;
 207 
 208   // Workaround 4352906, avoid calls to thr_stksegment by
 209   // thr_main after the first one (it looks like we trash
 210   // some data, causing the value for ss_sp to be incorrect).
 211   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
 212     stack_t st = get_stack_info();
 213     if (is_primordial_thread) {
 214       // cache initial value of stack base
 215       os::Solaris::_main_stack_base = (address)st.ss_sp;
 216     }
 217     return (address)st.ss_sp;
 218   } else {
 219     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
 220     return os::Solaris::_main_stack_base;
 221   }
 222 }
 223 
 224 size_t os::current_stack_size() {
 225   size_t size;
 226 
 227   int r = thr_main() ;
 228   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
 229   if (!r) {
 230     size = get_stack_info().ss_size;
 231   } else {
 232     struct rlimit limits;
 233     getrlimit(RLIMIT_STACK, &limits);
 234     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
 235   }
 236   // base may not be page aligned
 237   address base = current_stack_base();
 238   address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
 239   return (size_t)(base - bottom);
 240 }
 241 
 242 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
 243   return localtime_r(clock, res);
 244 }
 245 
 246 // interruptible infrastructure
 247 
 248 // setup_interruptible saves the thread state before going into an
 249 // interruptible system call.
 250 // The saved state is used to restore the thread to
 251 // its former state whether or not an interrupt is received.
 252 // Used by the class loader via os::read().
 253 // os::restartable_read() calls skip this layer and stay in _thread_in_native.
 254 
 255 void os::Solaris::setup_interruptible(JavaThread* thread) {
 256 
 257   JavaThreadState thread_state = thread->thread_state();
 258 
 259   assert(thread_state != _thread_blocked, "Coming from the wrong thread");
 260   assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
 261   OSThread* osthread = thread->osthread();
 262   osthread->set_saved_interrupt_thread_state(thread_state);
 263   thread->frame_anchor()->make_walkable(thread);
 264   ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
 265 }
 266 
 267 // Version of setup_interruptible() for threads that are already in
 268 // _thread_blocked. Used by os_sleep().
 269 void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) {
 270   thread->frame_anchor()->make_walkable(thread);
 271 }
 272 
 273 JavaThread* os::Solaris::setup_interruptible() {
 274   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
 275   setup_interruptible(thread);
 276   return thread;
 277 }
 278 
 279 void os::Solaris::try_enable_extended_io() {
 280   typedef int (*enable_extended_FILE_stdio_t)(int, int);
 281 
 282   if (!UseExtendedFileIO) {
 283     return;
 284   }
 285 
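  // enable_extended_FILE_stdio() only exists in libc on Solaris 10u4 and later,
  // so resolve it dynamically instead of linking against it directly.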
 286   enable_extended_FILE_stdio_t enabler =
 287     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
 288                                          "enable_extended_FILE_stdio");
 289   if (enabler) {
 290     enabler(-1, -1);
 291   }
 292 }
 293 
 294 
 295 #ifdef ASSERT
 296 
 297 JavaThread* os::Solaris::setup_interruptible_native() {
 298   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
 299   JavaThreadState thread_state = thread->thread_state();
 300   assert(thread_state == _thread_in_native, "Assumed thread_in_native");
 301   return thread;
 302 }
 303 
 304 void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
 305   JavaThreadState thread_state = thread->thread_state();
 306   assert(thread_state == _thread_in_native, "Assumed thread_in_native");
 307 }
 308 #endif
 309 
 310 // cleanup_interruptible reverses the effects of setup_interruptible
 311 // setup_interruptible_already_blocked() does not need any cleanup.
 312 
 313 void os::Solaris::cleanup_interruptible(JavaThread* thread) {
 314   OSThread* osthread = thread->osthread();
 315 
 316   ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
 317 }
 318 
 319 // I/O interruption related counters called in _INTERRUPTIBLE
 320 
 321 void os::Solaris::bump_interrupted_before_count() {
 322   RuntimeService::record_interrupted_before_count();
 323 }
 324 
 325 void os::Solaris::bump_interrupted_during_count() {
 326   RuntimeService::record_interrupted_during_count();
 327 }
 328 
 329 static int _processors_online = 0;
 330 
 331          jint os::Solaris::_os_thread_limit = 0;
 332 volatile jint os::Solaris::_os_thread_count = 0;
 333 
 334 julong os::available_memory() {
 335   return Solaris::available_memory();
 336 }
 337 
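// _SC_AVPHYS_PAGES reports physical pages not currently in use, so this
// returns free physical memory rather than total physical memory.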
 338 julong os::Solaris::available_memory() {
 339   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
 340 }
 341 
 342 julong os::Solaris::_physical_memory = 0;
 343 
 344 julong os::physical_memory() {
 345    return Solaris::physical_memory();
 346 }
 347 
 348 static hrtime_t first_hrtime = 0;
 349 static const hrtime_t hrtime_hz = 1000*1000*1000;
 350 static volatile hrtime_t max_hrtime = 0;
 351 
 352 
 353 void os::Solaris::initialize_system_info() {
 354   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
 355   _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
 356   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
 357 }
 358 
 359 int os::active_processor_count() {
 360   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 361   pid_t pid = getpid();
 362   psetid_t pset = PS_NONE;
 363   // Are we running in a processor set or is there any processor set around?
 364   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
 365     uint_t pset_cpus;
 366     // Query the number of cpus available to us.
 367     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
 368       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
 369       _processors_online = pset_cpus;
 370       return pset_cpus;
 371     }
 372   }
 373   // Otherwise return number of online cpus
 374   return online_cpus;
 375 }
 376 
 377 static bool find_processors_in_pset(psetid_t        pset,
 378                                     processorid_t** id_array,
 379                                     uint_t*         id_length) {
 380   bool result = false;
 381   // Find the number of processors in the processor set.
 382   if (pset_info(pset, NULL, id_length, NULL) == 0) {
 383     // Make up an array to hold their ids.
 384     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 385     // Fill in the array with their processor ids.
 386     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
 387       result = true;
 388     }
 389   }
 390   return result;
 391 }
 392 
 393 // Callers of find_processors_online() must tolerate imprecise results --
 394 // the system configuration can change asynchronously because of DR
 395 // or explicit psradm operations.
 396 //
 397 // We also need to take care that the loop (below) terminates as the
 398 // number of processors online can change between the _SC_NPROCESSORS_ONLN
 399 // request and the loop that builds the list of processor ids.   Unfortunately
 400 // there's no reliable way to determine the maximum valid processor id,
 401 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
 402 // man pages, which claim the processor id set is "sparse, but
 403 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
 404 // exit the loop.
 405 //
 406 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
 407 // not available on S8.0.
 408 
 409 static bool find_processors_online(processorid_t** id_array,
 410                                    uint*           id_length) {
 411   const processorid_t MAX_PROCESSOR_ID = 100000 ;
 412   // Find the number of processors online.
 413   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
 414   // Make up an array to hold their ids.
 415   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 416   // Processors need not be numbered consecutively.
 417   long found = 0;
 418   processorid_t next = 0;
 419   while (found < *id_length && next < MAX_PROCESSOR_ID) {
 420     processor_info_t info;
 421     if (processor_info(next, &info) == 0) {
 422       // NB, PI_NOINTR processors are effectively online ...
 423       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
 424         (*id_array)[found] = next;
 425         found += 1;
 426       }
 427     }
 428     next += 1;
 429   }
 430   if (found < *id_length) {
 431       // The loop above didn't identify the expected number of processors.
 432       // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
 433       // and re-running the loop, above, but there's no guarantee of progress
 434       // if the system configuration is in flux.  Instead, we just return what
 435       // we've got.  Note that in the worst case find_processors_online() could
 436       // return an empty set.  (As a fall-back in the case of the empty set we
 437       // could just return the ID of the current processor).
 438       *id_length = found ;
 439   }
 440 
 441   return true;
 442 }
 443 
 444 static bool assign_distribution(processorid_t* id_array,
 445                                 uint           id_length,
 446                                 uint*          distribution,
 447                                 uint           distribution_length) {
 448   // We assume we can assign processorid_t's to uint's.
 449   assert(sizeof(processorid_t) == sizeof(uint),
 450          "can't convert processorid_t to uint");
 451   // Quick check to see if we won't succeed.
 452   if (id_length < distribution_length) {
 453     return false;
 454   }
 455   // Assign processor ids to the distribution.
 456   // Try to shuffle processors to distribute work across boards,
 457   // assuming 4 processors per board.
 458   const uint processors_per_board = ProcessDistributionStride;
 459   // Find the maximum processor id.
 460   processorid_t max_id = 0;
 461   for (uint m = 0; m < id_length; m += 1) {
 462     max_id = MAX2(max_id, id_array[m]);
 463   }
 464   // The next id, to limit loops.
 465   const processorid_t limit_id = max_id + 1;
 466   // Make up markers for available processors.
 467   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
 468   for (uint c = 0; c < limit_id; c += 1) {
 469     available_id[c] = false;
 470   }
 471   for (uint a = 0; a < id_length; a += 1) {
 472     available_id[id_array[a]] = true;
 473   }
 474   // Step by "boards", then by "slot", copying to "assigned".
 475   // NEEDS_CLEANUP: The assignment of processors should be stateful,
 476   //                remembering which processors have been assigned by
 477   //                previous calls, etc., so as to distribute several
 478   //                independent calls of this method.  It would also be
 479   //                nice to have an API that let us ask how many
 480   //                processes are bound to a processor, but we don't
 481   //                have that, either.
 482   //                In the short term, "board" is static so that
 483   //                subsequent distributions don't all start at board 0.
 484   static uint board = 0;
 485   uint assigned = 0;
 486   // Until we've found enough processors ....
 487   while (assigned < distribution_length) {
 488     // ... find the next available processor in the board.
 489     for (uint slot = 0; slot < processors_per_board; slot += 1) {
 490       uint try_id = board * processors_per_board + slot;
 491       if ((try_id < limit_id) && (available_id[try_id] == true)) {
 492         distribution[assigned] = try_id;
 493         available_id[try_id] = false;
 494         assigned += 1;
 495         break;
 496       }
 497     }
 498     board += 1;
 499     if (board * processors_per_board + 0 >= limit_id) {
 500       board = 0;
 501     }
 502   }
 503   if (available_id != NULL) {
 504     FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
 505   }
 506   return true;
 507 }
 508 
 509 void os::set_native_thread_name(const char *name) {
 510   // Not yet implemented.
 511   return;
 512 }
 513 
 514 bool os::distribute_processes(uint length, uint* distribution) {
 515   bool result = false;
 516   // Find the processor id's of all the available CPUs.
 517   processorid_t* id_array  = NULL;
 518   uint           id_length = 0;
 519   // There are some races between querying information and using it,
 520   // since processor sets can change dynamically.
 521   psetid_t pset = PS_NONE;
 522   // Are we running in a processor set?
 523   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
 524     result = find_processors_in_pset(pset, &id_array, &id_length);
 525   } else {
 526     result = find_processors_online(&id_array, &id_length);
 527   }
 528   if (result == true) {
 529     if (id_length >= length) {
 530       result = assign_distribution(id_array, id_length, distribution, length);
 531     } else {
 532       result = false;
 533     }
 534   }
 535   if (id_array != NULL) {
 536     FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
 537   }
 538   return result;
 539 }
 540 
 541 bool os::bind_to_processor(uint processor_id) {
 542   // We assume that a processorid_t can be stored in a uint.
 543   assert(sizeof(uint) == sizeof(processorid_t),
 544          "can't convert uint to processorid_t");
 545   int bind_result =
 546     processor_bind(P_LWPID,                       // bind LWP.
 547                    P_MYID,                        // bind current LWP.
 548                    (processorid_t) processor_id,  // id.
 549                    NULL);                         // don't return old binding.
 550   return (bind_result == 0);
 551 }
 552 
 553 bool os::getenv(const char* name, char* buffer, int len) {
 554   char* val = ::getenv( name );
 555   if ( val == NULL
 556   ||   strlen(val) + 1  >  len ) {
 557     if (len > 0)  buffer[0] = 0; // return a null string
 558     return false;
 559   }
 560   strcpy( buffer, val );
 561   return true;
 562 }
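// Example (hypothetical caller): the value is copied into a caller-supplied
// buffer, e.g.
//   char java_home[MAX_PATH];
//   if (os::getenv("JAVA_HOME", java_home, sizeof(java_home))) { /* use it */ }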
 563 
 564 
 565 // Return true if the process is running with special (setuid/setgid) privileges.
 566 
 567 bool os::have_special_privileges() {
 568   static bool init = false;
 569   static bool privileges = false;
 570   if (!init) {
 571     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 572     init = true;
 573   }
 574   return privileges;
 575 }
 576 
 577 
 578 void os::init_system_properties_values() {
 579   // The next steps are taken in the product version:
 580   //
 581   // Obtain the JAVA_HOME value from the location of libjvm.so.
 582   // This library should be located at:
 583   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
 584   //
 585   // If "/jre/lib/" appears at the right place in the path, then we
 586   // assume libjvm.so is installed in a JDK and we use this path.
 587   //
 588   // Otherwise exit with message: "Could not create the Java virtual machine."
 589   //
 590   // The following extra steps are taken in the debugging version:
 591   //
 592   // If "/jre/lib/" does NOT appear at the right place in the path,
 593   // then instead of exiting we check for the $JAVA_HOME environment variable.
 594   //
 595   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
 596   // then we append a fake suffix "hotspot/libjvm.so" to this path so
 597   // it looks like libjvm.so is installed there
 598   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
 599   //
 600   // Otherwise exit.
 601   //
 602   // Important note: if the location of libjvm.so changes this
 603   // code needs to be changed accordingly.
 604 
 605 // Base path of extensions installed on the system.
 606 #define SYS_EXT_DIR     "/usr/jdk/packages"
 607 #define EXTENSIONS_DIR  "/lib/ext"
 608 #define ENDORSED_DIR    "/lib/endorsed"
 609 
 610   char cpu_arch[12];
 611   // Buffer that fits several sprintfs.
 612   // Note that the space for the colon and the trailing null are provided
 613   // by the nulls included by the sizeof operator.
 614   const size_t bufsize =
 615     MAX4((size_t)MAXPATHLEN,  // For dll_dir & friends.
 616          sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(cpu_arch), // invariant ld_library_path; cpu_arch is not yet filled in, so use its full size
 617          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
 618          (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
 619   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 620 
 621   // sysclasspath, java_home, dll_dir
 622   {
 623     char *pslash;
 624     os::jvm_path(buf, bufsize);
 625 
 626     // Found the full path to libjvm.so.
 627     // Now cut the path to <java_home>/jre if we can.
 628     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 629     pslash = strrchr(buf, '/');
 630     if (pslash != NULL) {
 631       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 632     }
 633     Arguments::set_dll_dir(buf);
 634 
 635     if (pslash != NULL) {
 636       pslash = strrchr(buf, '/');
 637       if (pslash != NULL) {
 638         *pslash = '\0';          // Get rid of /<arch>.
 639         pslash = strrchr(buf, '/');
 640         if (pslash != NULL) {
 641           *pslash = '\0';        // Get rid of /lib.
 642         }
 643       }
 644     }
 645     Arguments::set_java_home(buf);
 646     set_boot_path('/', ':');
 647   }
 648 
 649   // Where to look for native libraries.
 650   {
 651     // Use dlinfo() to determine the correct java.library.path.
 652     //
 653     // If we're launched by the Java launcher, and the user
 654     // does not set java.library.path explicitly on the commandline,
 655     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
 656     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
 657     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
 658     // /usr/lib), which is exactly what we want.
 659     //
 660     // If the user does set java.library.path, it completely
 661     // overwrites this setting, and always has.
 662     //
 663     // If we're not launched by the Java launcher, we may
 664     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
 665     // settings.  Again, dlinfo does exactly what we want.
 666 
 667     Dl_serinfo     info_sz, *info = &info_sz;
 668     Dl_serpath     *path;
 669     char           *library_path;
 670     char           *common_path = buf;
 671 
 672     // Determine search path count and required buffer size.
 673     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
 674       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
 675       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
 676     }
 677 
 678     // Allocate new buffer and initialize.
 679     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
 680     info->dls_size = info_sz.dls_size;
 681     info->dls_cnt = info_sz.dls_cnt;
 682 
 683     // Obtain search path information.
 684     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
 685       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
 686       FREE_C_HEAP_ARRAY(char, info, mtInternal);
 687       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
 688     }
 689 
 690     path = &info->dls_serpath[0];
 691 
 692     // Note: Due to a legacy implementation, most of the library path
 693     // is set in the launcher. This was to accommodate linking restrictions
 694     // on legacy Solaris implementations (which are no longer supported).
 695     // Eventually, all the library path setting will be done here.
 696     //
 697     // However, to prevent the proliferation of improperly built native
 698     // libraries, the new path component /usr/jdk/packages is added here.
 699 
 700     // Determine the actual CPU architecture.
 701     sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
 702 #ifdef _LP64
 703     // If we are a 64-bit vm, perform the following translations:
 704     //   sparc   -> sparcv9
 705     //   i386    -> amd64
 706     if (strcmp(cpu_arch, "sparc") == 0) {
 707       strcat(cpu_arch, "v9");
 708     } else if (strcmp(cpu_arch, "i386") == 0) {
 709       strcpy(cpu_arch, "amd64");
 710     }
 711 #endif
 712 
 713     // Construct the invariant part of ld_library_path.
 714     sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
 715 
 716     // Struct size is more than sufficient for the path components obtained
 717     // through the dlinfo() call, so only add additional space for the path
 718     // components explicitly added here.
 719     size_t library_path_size = info->dls_size + strlen(common_path);
 720     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
 721     library_path[0] = '\0';
 722 
 723     // Construct the desired Java library path from the linker's library
 724     // search path.
 725     //
 726     // For compatibility, it is optimal that we insert the additional path
 727     // components specific to the Java VM after those components specified
 728     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
 729     // infrastructure.
 730     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
 731       strcpy(library_path, common_path);
 732     } else {
 733       int inserted = 0;
 734       int i;
 735       for (i = 0; i < info->dls_cnt; i++, path++) {
 736         uint_t flags = path->dls_flags & LA_SER_MASK;
 737         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
 738           strcat(library_path, common_path);
 739           strcat(library_path, os::path_separator());
 740           inserted = 1;
 741         }
 742         strcat(library_path, path->dls_name);
 743         strcat(library_path, os::path_separator());
 744       }
 745       // Eliminate trailing path separator.
 746       library_path[strlen(library_path)-1] = '\0';
 747     }
 748 
 749     // happens before argument parsing - can't use a trace flag
 750     // tty->print_raw("init_system_properties_values: native lib path: ");
 751     // tty->print_raw_cr(library_path);
 752 
 753     // Callee copies into its own buffer.
 754     Arguments::set_library_path(library_path);
 755 
 756     FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
 757     FREE_C_HEAP_ARRAY(char, info, mtInternal);
 758   }
 759 
 760   // Extensions directories.
 761   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
 762   Arguments::set_ext_dirs(buf);
 763 
 764   // Endorsed standards default directory.
 765   sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
 766   Arguments::set_endorsed_dirs(buf);
 767 
 768   FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 769 
 770 #undef SYS_EXT_DIR
 771 #undef EXTENSIONS_DIR
 772 #undef ENDORSED_DIR
 773 }
 774 
 775 void os::breakpoint() {
 776   BREAKPOINT;
 777 }
 778 
 779 bool os::obsolete_option(const JavaVMOption *option)
 780 {
 781   if (!strncmp(option->optionString, "-Xt", 3)) {
 782     return true;
 783   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
 784     return true;
 785   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
 786     return true;
 787   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
 788     return true;
 789   }
 790   return false;
 791 }
 792 
 793 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
 794   address  stackStart  = (address)thread->stack_base();
 795   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
 796   if (sp < stackStart && sp >= stackEnd ) return true;
 797   return false;
 798 }
 799 
 800 extern "C" void breakpoint() {
 801   // use debugger to set breakpoint here
 802 }
 803 
 804 static thread_t main_thread;
 805 
 806 // Thread start routine for all new Java threads
 807 extern "C" void* java_start(void* thread_addr) {
 808   // Try to randomize the cache line index of hot stack frames.
 809   // This helps when threads with the same stack traces evict each other's
 810   // cache lines. The threads can be either from the same JVM instance, or
 811   // from different JVM instances. The benefit is especially true for
 812   // processors with hyperthreading technology.
 813   static int counter = 0;
 814   int pid = os::current_process_id();
 815   alloca(((pid ^ counter++) & 7) * 128);
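  // (pid ^ counter) & 7 yields a value in [0, 7], so each new thread offsets
  // its stack frames by 0..896 bytes in 128-byte steps.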
 816 
 817   int prio;
 818   Thread* thread = (Thread*)thread_addr;
 819   OSThread* osthr = thread->osthread();
 820 
 821   osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
 822   thread->_schedctl = (void *) schedctl_init () ;
 823 
 824   if (UseNUMA) {
 825     int lgrp_id = os::numa_get_group_id();
 826     if (lgrp_id != -1) {
 827       thread->set_lgrp_id(lgrp_id);
 828     }
 829   }
 830 
 831   // If the creator called set priority before we started,
 832   // we need to call set_native_priority now that we have an lwp.
 833   // We used to get the priority from thr_getprio (we called
 834   // thr_setprio way back in create_thread) and pass it to
 835   // set_native_priority, but Solaris scales the priority
 836   // in java_to_os_priority, so when we read it back here,
 837   // we pass trash to set_native_priority instead of what's
 838   // in java_to_os_priority. So we save the native priority
 839   // in the osThread and recall it here.
 840 
 841   if ( osthr->thread_id() != -1 ) {
 842     if ( UseThreadPriorities ) {
 843       int prio = osthr->native_priority();
 844       if (ThreadPriorityVerbose) {
 845         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
 846                       INTPTR_FORMAT ", setting priority: %d\n",
 847                       osthr->thread_id(), osthr->lwp_id(), prio);
 848       }
 849       os::set_native_priority(thread, prio);
 850     }
 851   } else if (ThreadPriorityVerbose) {
 852     warning("Can't set priority in _start routine, thread id hasn't been set\n");
 853   }
 854 
 855   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 856 
 857   // initialize signal mask for this thread
 858   os::Solaris::hotspot_sigmask(thread);
 859 
 860   thread->run();
 861 
 862   // One less thread is executing
 863   // When the VMThread gets here, the main thread may have already exited
 864   // which frees the CodeHeap containing the Atomic::dec code
 865   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 866     Atomic::dec(&os::Solaris::_os_thread_count);
 867   }
 868 
 869   if (UseDetachedThreads) {
 870     thr_exit(NULL);
 871     ShouldNotReachHere();
 872   }
 873   return NULL;
 874 }
 875 
 876 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
 877   // Allocate the OSThread object
 878   OSThread* osthread = new OSThread(NULL, NULL);
 879   if (osthread == NULL) return NULL;
 880 
 881   // Store info on the Solaris thread into the OSThread
 882   osthread->set_thread_id(thread_id);
 883   osthread->set_lwp_id(_lwp_self());
 884   thread->_schedctl = (void *) schedctl_init () ;
 885 
 886   if (UseNUMA) {
 887     int lgrp_id = os::numa_get_group_id();
 888     if (lgrp_id != -1) {
 889       thread->set_lgrp_id(lgrp_id);
 890     }
 891   }
 892 
 893   if ( ThreadPriorityVerbose ) {
 894     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
 895                   osthread->thread_id(), osthread->lwp_id() );
 896   }
 897 
 898   // Initial thread state is INITIALIZED, not SUSPENDED
 899   osthread->set_state(INITIALIZED);
 900 
 901   return osthread;
 902 }
 903 
 904 void os::Solaris::hotspot_sigmask(Thread* thread) {
 905 
 906   //Save caller's signal mask
 907   sigset_t sigmask;
 908   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
 909   OSThread *osthread = thread->osthread();
 910   osthread->set_caller_sigmask(sigmask);
 911 
 912   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
 913   if (!ReduceSignalUsage) {
 914     if (thread->is_VM_thread()) {
 915       // Only the VM thread handles BREAK_SIGNAL ...
 916       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
 917     } else {
 918       // ... all other threads block BREAK_SIGNAL
 919       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
 920       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
 921     }
 922   }
 923 }
 924 
 925 bool os::create_attached_thread(JavaThread* thread) {
 926 #ifdef ASSERT
 927   thread->verify_not_published();
 928 #endif
 929   OSThread* osthread = create_os_thread(thread, thr_self());
 930   if (osthread == NULL) {
 931      return false;
 932   }
 933 
 934   // Initial thread state is RUNNABLE
 935   osthread->set_state(RUNNABLE);
 936   thread->set_osthread(osthread);
 937 
 938   // initialize signal mask for this thread
 939   // and save the caller's signal mask
 940   os::Solaris::hotspot_sigmask(thread);
 941 
 942   return true;
 943 }
 944 
 945 bool os::create_main_thread(JavaThread* thread) {
 946 #ifdef ASSERT
 947   thread->verify_not_published();
 948 #endif
 949   if (_starting_thread == NULL) {
 950     _starting_thread = create_os_thread(thread, main_thread);
 951      if (_starting_thread == NULL) {
 952         return false;
 953      }
 954   }
 955 
 956   // The primordial thread is runnable from the start
 957   _starting_thread->set_state(RUNNABLE);
 958 
 959   thread->set_osthread(_starting_thread);
 960 
 961   // initialize signal mask for this thread
 962   // and save the caller's signal mask
 963   os::Solaris::hotspot_sigmask(thread);
 964 
 965   return true;
 966 }
 967 
 968 // _T2_libthread is true if we believe we are running with the newer
 969 // SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
 970 bool os::Solaris::_T2_libthread = false;
 971 
 972 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 973   // Allocate the OSThread object
 974   OSThread* osthread = new OSThread(NULL, NULL);
 975   if (osthread == NULL) {
 976     return false;
 977   }
 978 
 979   if ( ThreadPriorityVerbose ) {
 980     char *thrtyp;
 981     switch ( thr_type ) {
 982       case vm_thread:
 983         thrtyp = (char *)"vm";
 984         break;
 985       case cgc_thread:
 986         thrtyp = (char *)"cgc";
 987         break;
 988       case pgc_thread:
 989         thrtyp = (char *)"pgc";
 990         break;
 991       case java_thread:
 992         thrtyp = (char *)"java";
 993         break;
 994       case compiler_thread:
 995         thrtyp = (char *)"compiler";
 996         break;
 997       case watcher_thread:
 998         thrtyp = (char *)"watcher";
 999         break;
1000       default:
1001         thrtyp = (char *)"unknown";
1002         break;
1003     }
1004     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
1005   }
1006 
1007   // Calculate stack size if it's not specified by caller.
1008   if (stack_size == 0) {
1009     // The default stack size is 1M (2M for LP64).
1010     stack_size = (BytesPerWord >> 2) * K * K;
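    // (BytesPerWord >> 2) is 1 on 32-bit and 2 on 64-bit VMs, giving 1M or 2M.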
1011 
1012     switch (thr_type) {
1013     case os::java_thread:
1014       // Java threads use ThreadStackSize, whose default value can be changed with the flag -Xss
1015       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
1016       break;
1017     case os::compiler_thread:
1018       if (CompilerThreadStackSize > 0) {
1019         stack_size = (size_t)(CompilerThreadStackSize * K);
1020         break;
1021       } // else fall through:
1022         // use VMThreadStackSize if CompilerThreadStackSize is not defined
1023     case os::vm_thread:
1024     case os::pgc_thread:
1025     case os::cgc_thread:
1026     case os::watcher_thread:
1027       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1028       break;
1029     }
1030   }
1031   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1032 
1033   // Initial state is ALLOCATED but not INITIALIZED
1034   osthread->set_state(ALLOCATED);
1035 
1036   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
1037     // We got lots of threads. Check if we still have some address space left.
1038     // Need at least 20 MB of unreserved address space, which we check for by
1039     // trying to reserve some.
1040     const size_t VirtualMemoryBangSize = 20*K*K;
1041     char* mem = os::reserve_memory(VirtualMemoryBangSize);
1042     if (mem == NULL) {
1043       delete osthread;
1044       return false;
1045     } else {
1046       // Release the memory again
1047       os::release_memory(mem, VirtualMemoryBangSize);
1048     }
1049   }
1050 
1051   // Setup osthread because the child thread may need it.
1052   thread->set_osthread(osthread);
1053 
1054   // Create the Solaris thread.
1055   // We request THR_BOUND explicitly for the T2_libthread case in case that
1056   // assumption is not accurate; our alternate signal stack handling is based
1057   // on it and requires bound threads.
1058   thread_t tid = 0;
1059   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
1060                    | ((UseBoundThreads || os::Solaris::T2_libthread() ||
1061                        (thr_type == vm_thread) ||
1062                        (thr_type == cgc_thread) ||
1063                        (thr_type == pgc_thread) ||
1064                        (thr_type == compiler_thread && BackgroundCompilation)) ?
1065                       THR_BOUND : 0);
1066   int      status;
1067 
1068   // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
1069   //
1070   // On multiprocessor systems, libthread sometimes under-provisions our
1071   // process with LWPs.  On a 30-way system, for instance, we could have
1072   // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
1073   // to our process.  This can result in underutilization of PEs.
1074   // I suspect the problem is related to libthread's LWP
1075   // pool management and to the kernel's SIGBLOCKING "last LWP parked"
1076   // upcall policy.
1077   //
1078   // The following code is palliative -- it attempts to ensure that our
1079   // process has sufficient LWPs to take advantage of multiple PEs.
1080   // Proper long-term cures include using user-level threads bound to LWPs
1081   // (THR_BOUND) or using LWP-based synchronization.  Note that there is a
1082   // slight timing window with respect to sampling _os_thread_count, but
1083   // the race is benign.  Also, we should periodically recompute
1084   // _processors_online as the min of SC_NPROCESSORS_ONLN and the
1085   // number of PEs in our partition.  You might be tempted to use
1086   // THR_NEW_LWP here, but I'd recommend against it as that could
1087   // result in undesirable growth of the libthread's LWP pool.
1088   // The fix below isn't sufficient; for instance, it doesn't take into account
1089   // LWPs parked on IO.  It does, however, help certain CPU-bound benchmarks.
1090   //
1091   // Some pathologies this scheme doesn't handle:
1092   // *  Threads can block, releasing the LWPs.  The LWPs can age out.
1093   //    When a large number of threads become ready again there aren't
1094   //    enough LWPs available to service them.  This can occur when the
1095   //    number of ready threads oscillates.
1096   // *  LWPs/Threads park on IO, thus taking the LWP out of circulation.
1097   //
1098   // Finally, we should call thr_setconcurrency() periodically to refresh
1099   // the LWP pool and thwart the LWP age-out mechanism.
1100   // The "+3" term provides a little slop -- we want to slightly overprovision.
1101 
1102   if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
1103     if (!(flags & THR_BOUND)) {
1104       thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
1105     }
1106   }
1107   // Although this doesn't hurt, we should warn of undefined behavior
1108   // when using unbound T1 threads with schedctl().  This should never
1109   // happen, as the compiler and VM threads are always created bound
1110   DEBUG_ONLY(
1111       if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
1112           (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
1113           ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
1114            (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
1115          warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
1116       }
1117   );
1118 
1119 
1120   // Mark that we don't have an lwp or thread id yet.
1121   // In case we attempt to set the priority before the thread starts.
1122   osthread->set_lwp_id(-1);
1123   osthread->set_thread_id(-1);
1124 
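  // Passing NULL as the stack base lets libthread allocate the stack itself;
  // java_start() is the entry point and receives the Thread* as its argument.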
1125   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1126   if (status != 0) {
1127     if (PrintMiscellaneous && (Verbose || WizardMode)) {
1128       perror("os::create_thread");
1129     }
1130     thread->set_osthread(NULL);
1131     // Need to clean up stuff we've allocated so far
1132     delete osthread;
1133     return false;
1134   }
1135 
1136   Atomic::inc(&os::Solaris::_os_thread_count);
1137 
1138   // Store info on the Solaris thread into the OSThread
1139   osthread->set_thread_id(tid);
1140 
1141   // Remember that we created this thread so we can set priority on it
1142   osthread->set_vm_created();
1143 
1144   // Set the default thread priority.  If using bound threads, setting
1145   // lwp priority will be delayed until thread start.
1146   set_native_priority(thread,
1147                       DefaultThreadPriority == -1 ?
1148                         java_to_os_priority[NormPriority] :
1149                         DefaultThreadPriority);
1150 
1151   // Initial thread state is INITIALIZED, not SUSPENDED
1152   osthread->set_state(INITIALIZED);
1153 
1154   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1155   return true;
1156 }
1157 
1158 /* SIGJVM1 and SIGJVM2 are defined on Solaris 10 and later. Defining them here
1159  *  allows builds on earlier versions of Solaris to take advantage of the newly
1160  *  reserved Solaris JVM signals. With SIGJVM1/SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1,
1161  *  ASYNC_SIGNAL is SIGJVM2, and -XX:+UseAltSigs does nothing since these should have no conflict.
1162  */
1163 #if !defined(SIGJVM1)
1164 #define SIGJVM1 39
1165 #define SIGJVM2 40
1166 #endif
1167 
1168 debug_only(static bool signal_sets_initialized = false);
1169 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1170 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1171 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1172 
1173 bool os::Solaris::is_sig_ignored(int sig) {
1174       struct sigaction oact;
1175       sigaction(sig, (struct sigaction*)NULL, &oact);
1176       void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
1177                                      : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
1178       if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
1179            return true;
1180       else
1181            return false;
1182 }
1183 
1184 // Note: SIGRTMIN is a macro that calls sysconf() so it will
1185 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime
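// On releases that actually reserve SIGJVM1/SIGJVM2 their values lie below the
// realtime signal range, while on older releases the fallback values defined
// above land at or above SIGRTMIN, so this comparison detects at runtime
// whether the reserved JVM signals are available.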
1186 static bool isJVM1available() {
1187   return SIGJVM1 < SIGRTMIN;
1188 }
1189 
1190 void os::Solaris::signal_sets_init() {
1191   // Should also have an assertion stating we are still single-threaded.
1192   assert(!signal_sets_initialized, "Already initialized");
1193   // Fill in signals that are necessarily unblocked for all threads in
1194   // the VM. Currently, we unblock the following signals:
1195   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1196   //                         by -Xrs (=ReduceSignalUsage));
1197   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1198   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1199   // the dispositions or masks wrt these signals.
1200   // Programs embedding the VM that want to use the above signals for their
1201   // own purposes must, at this time, use the "-Xrs" option to prevent
1202   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1203   // (See bug 4345157, and other related bugs).
1204   // In reality, though, unblocking these signals is really a nop, since
1205   // these signals are not blocked by default.
1206   sigemptyset(&unblocked_sigs);
1207   sigemptyset(&allowdebug_blocked_sigs);
1208   sigaddset(&unblocked_sigs, SIGILL);
1209   sigaddset(&unblocked_sigs, SIGSEGV);
1210   sigaddset(&unblocked_sigs, SIGBUS);
1211   sigaddset(&unblocked_sigs, SIGFPE);
1212 
1213   if (isJVM1available()) {
1214     os::Solaris::set_SIGinterrupt(SIGJVM1);
1215     os::Solaris::set_SIGasync(SIGJVM2);
1216   } else if (UseAltSigs) {
1217     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1218     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1219   } else {
1220     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1221     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1222   }
1223 
1224   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1225   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1226 
1227   if (!ReduceSignalUsage) {
1228    if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1229       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1230       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1231    }
1232    if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1233       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1234       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1235    }
1236    if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1237       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1238       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1239    }
1240   }
1241   // Fill in signals that are blocked by all but the VM thread.
1242   sigemptyset(&vm_sigs);
1243   if (!ReduceSignalUsage)
1244     sigaddset(&vm_sigs, BREAK_SIGNAL);
1245   debug_only(signal_sets_initialized = true);
1246 
1247   // For diagnostics only used in run_periodic_checks
1248   sigemptyset(&check_signal_done);
1249 }
1250 
1251 // These are signals that are unblocked while a thread is running Java.
1252 // (For some reason, they get blocked by default.)
1253 sigset_t* os::Solaris::unblocked_signals() {
1254   assert(signal_sets_initialized, "Not initialized");
1255   return &unblocked_sigs;
1256 }
1257 
1258 // These are the signals that are blocked while a (non-VM) thread is
1259 // running Java. Only the VM thread handles these signals.
1260 sigset_t* os::Solaris::vm_signals() {
1261   assert(signal_sets_initialized, "Not initialized");
1262   return &vm_sigs;
1263 }
1264 
1265 // These are signals that are blocked during cond_wait to allow debugger in
1266 sigset_t* os::Solaris::allowdebug_blocked_signals() {
1267   assert(signal_sets_initialized, "Not initialized");
1268   return &allowdebug_blocked_sigs;
1269 }
1270 
1271 
1272 void _handle_uncaught_cxx_exception() {
1273   VMError err("An uncaught C++ exception");
1274   err.report_and_die();
1275 }
1276 
1277 
1278 // First crack at OS-specific initialization, from inside the new thread.
1279 void os::initialize_thread(Thread* thr) {
1280   int r = thr_main() ;
1281   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
1282   if (r) {
1283     JavaThread* jt = (JavaThread *)thr;
1284     assert(jt != NULL,"Sanity check");
1285     size_t stack_size;
1286     address base = jt->stack_base();
1287     if (Arguments::created_by_java_launcher()) {
1288       // Use 2MB to allow for Solaris 7 64 bit mode.
1289       stack_size = JavaThread::stack_size_at_create() == 0
1290         ? 2048*K : JavaThread::stack_size_at_create();
1291 
1292       // There are rare cases when we may have already used more than
1293       // the basic stack size allotment before this method is invoked.
1294       // Attempt to allow for a normally sized java_stack.
1295       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1296       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1297     } else {
1298       // 6269555: If we were not created by a Java launcher, i.e. if we are
1299       // running embedded in a native application, treat the primordial thread
1300       // as much like a native attached thread as possible.  This means using
1301       // the current stack size from thr_stksegment(), unless it is too large
1302       // to reliably setup guard pages.  A reasonable max size is 8MB.
1303       size_t current_size = current_stack_size();
1304       // This should never happen, but just in case....
1305       if (current_size == 0) current_size = 2 * K * K;
1306       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1307     }
1308     address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
1309     stack_size = (size_t)(base - bottom);
1310 
1311     assert(stack_size > 0, "Stack size calculation problem");
1312 
1313     if (stack_size > jt->stack_size()) {
1314       NOT_PRODUCT(
1315         struct rlimit limits;
1316         getrlimit(RLIMIT_STACK, &limits);
1317         size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1318         assert(size >= jt->stack_size(), "Stack size problem in main thread");
1319       )
1320       tty->print_cr(
1321         "Stack size of " SIZE_FORMAT " Kb exceeds current limit of " SIZE_FORMAT " Kb.\n"
1322         "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1323         "See limit(1) to increase the stack size limit.",
1324         stack_size / K, jt->stack_size() / K);
1325       vm_exit(1);
1326     }
1327     assert(jt->stack_size() >= stack_size,
1328           "Attempt to map more stack than was allocated");
1329     jt->set_stack_size(stack_size);
1330   }
1331 
1332    // 5/22/01: Right now alternate signal stacks do not handle
1333    // throwing stack overflow exceptions, see bug 4463178
1334    // Until a fix is found for this, T2 will NOT imply alternate signal
1335    // stacks.
1336    // If using T2 libthread threads, install an alternate signal stack.
1337    // Because alternate stacks associate with LWPs on Solaris,
1338    // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
1339    // we prefer to explicitly stack bang.
1340    // If not using T2 libthread, but using UseBoundThreads any threads
1341    // (primordial thread, jni_attachCurrentThread) we do not create,
1342    // probably are not bound, therefore they can not have an alternate
1343    // signal stack. Since our stack banging code is generated and
1344    // is shared across threads, all threads must be bound to allow
1345    // using alternate signal stacks.  The alternative is to interpose
1346    // on _lwp_create to associate an alt sig stack with each LWP,
1347    // and this could be a problem when the JVM is embedded.
1348    // We would prefer to use alternate signal stacks with T2
1349    // Since there is currently no accurate way to detect T2
1350    // we do not. Assuming T2 when running T1 causes sig 11s or assertions
1351    // on installing alternate signal stacks
1352 
1353 
1354    // 05/09/03: removed alternate signal stack support for Solaris
1355    // The alternate signal stack mechanism is no longer needed to
1356    // handle stack overflow. This is now handled by allocating
1357    // guard pages (red zone) and stackbanging.
1358    // Initially the alternate signal stack mechanism was removed because
1359    // it did not work with T1 libthread. Alternate
1360    // signal stacks MUST have all threads bound to lwps. Applications
1361    // can create their own threads and attach them without their being
1362    // bound under T1. This is frequently the case for the primordial thread.
1363    // If we were ever to reenable this mechanism we would need to
1364    // use the dynamic check for T2 libthread.
1365 
1366   os::Solaris::init_thread_fpu_state();
1367   std::set_terminate(_handle_uncaught_cxx_exception);
1368 }
1369 
1370 
1371 
1372 // Free Solaris resources related to the OSThread
1373 void os::free_thread(OSThread* osthread) {
1374   assert(osthread != NULL, "os::free_thread but osthread not set");
1375 
1376 
1377   // We are told to free resources of the argument thread,
1378   // but we can only really operate on the current thread.
1379   // The main thread must take the VMThread down synchronously
1380   // before the main thread exits and frees up CodeHeap
1381   guarantee((Thread::current()->osthread() == osthread
1382      || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1383   if (Thread::current()->osthread() == osthread) {
1384     // Restore caller's signal mask
1385     sigset_t sigmask = osthread->caller_sigmask();
1386     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1387   }
1388   delete osthread;
1389 }
1390 
1391 void os::pd_start_thread(Thread* thread) {
1392   int status = thr_continue(thread->osthread()->thread_id());
1393   assert_status(status == 0, status, "thr_continue failed");
1394 }
1395 
1396 
1397 intx os::current_thread_id() {
1398   return (intx)thr_self();
1399 }
1400 
1401 static pid_t _initial_pid = 0;
1402 
1403 int os::current_process_id() {
1404   return (int)(_initial_pid ? _initial_pid : getpid());
1405 }
1406 
1407 // gethrtime() should be monotonic according to the documentation,
1408 // but some virtualized platforms are known to break this guarantee.
1409 // getTimeNanos() must be guaranteed not to move backwards, so we
1410 // are forced to add a check here.
1411 inline hrtime_t getTimeNanos() {
1412   const hrtime_t now = gethrtime();
1413   const hrtime_t prev = max_hrtime;
1414   if (now <= prev) {
1415     return prev;   // same or retrograde time;
1416   }
1417   const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1418   assert(obsv >= prev, "invariant");   // Monotonicity
1419   // If the CAS succeeded then we're done and return "now".
1420   // If the CAS failed and the observed value "obsv" is >= now then
1421   // we should return "obsv".  If the CAS failed and now > obsv > prev then
1422   // some other thread raced this thread and installed a new value, in which case
1423   // we could either (a) retry the entire operation, (b) retry trying to install now
1424   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1425   // we might discard a higher "now" value in deference to a slightly lower but freshly
1426   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1427   // to (a) or (b) -- and greatly reduces coherence traffic.
1428   // We might also condition (c) on the magnitude of the delta between obsv and now.
1429   // Avoiding excessive CAS operations to hot RW locations is critical.
1430   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1431   return (prev == obsv) ? now : obsv;
1432 }
1433 
1434 // Time since start-up in seconds to a fine granularity.
1435 // Used by VMSelfDestructTimer and the MemProfiler.
1436 double os::elapsedTime() {
1437   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1438 }
1439 
1440 jlong os::elapsed_counter() {
1441   return (jlong)(getTimeNanos() - first_hrtime);
1442 }
1443 
1444 jlong os::elapsed_frequency() {
1445    return hrtime_hz;
1446 }
1447 
1448 // Return the real, user, and system times in seconds from an
1449 // arbitrary fixed point in the past.
1450 bool os::getTimesSecs(double* process_real_time,
1451                   double* process_user_time,
1452                   double* process_system_time) {
1453   struct tms ticks;
1454   clock_t real_ticks = times(&ticks);
1455 
1456   if (real_ticks == (clock_t) (-1)) {
1457     return false;
1458   } else {
1459     double ticks_per_second = (double) clock_tics_per_sec;
1460     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1461     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1462     // For consistency return the real time from getTimeNanos()
1463     // converted to seconds.
1464     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1465 
1466     return true;
1467   }
1468 }
1469 
1470 bool os::supports_vtime() { return true; }
1471 
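     // Enable per-LWP micro-state accounting for this process by writing a
     // PCSET/PR_MSACCT control message to /proc/self/ctl (see proc(4)).
     // Micro-state accounting is what allows gethrvtime(), and hence
     // os::elapsedVTime() below, to report accurate per-thread CPU time.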
1472 bool os::enable_vtime() {
1473   int fd = ::open("/proc/self/ctl", O_WRONLY);
1474   if (fd == -1)
1475     return false;
1476 
1477   long cmd[] = { PCSET, PR_MSACCT };
1478   int res = ::write(fd, cmd, sizeof(long) * 2);
1479   ::close(fd);
1480   if (res != sizeof(long) * 2)
1481     return false;
1482 
1483   return true;
1484 }
1485 
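     // Report whether micro-state accounting is already enabled by checking
     // the PR_MSACCT bit in the pr_flags field of /proc/self/status.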
1486 bool os::vtime_enabled() {
1487   int fd = ::open("/proc/self/status", O_RDONLY);
1488   if (fd == -1)
1489     return false;
1490 
1491   pstatus_t status;
1492   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1493   ::close(fd);
1494   if (res != sizeof(pstatus_t))
1495     return false;
1496 
1497   return status.pr_flags & PR_MSACCT;
1498 }
1499 
1500 double os::elapsedVTime() {
1501   return (double)gethrvtime() / (double)hrtime_hz;
1502 }
1503 
1504 // Used internally for comparisons only
1505 // getTimeMillis guaranteed to not move backwards on Solaris
1506 jlong getTimeMillis() {
1507   jlong nanotime = getTimeNanos();
1508   return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1509 }
1510 
1511 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1512 jlong os::javaTimeMillis() {
1513   timeval t;
1514   if (gettimeofday( &t, NULL) == -1)
1515     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1516   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1517 }
1518 
1519 jlong os::javaTimeNanos() {
1520   return (jlong)getTimeNanos();
1521 }
1522 
1523 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1524   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1525   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1526   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1527   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1528 }
1529 
1530 char * os::local_time_string(char *buf, size_t buflen) {
1531   struct tm t;
1532   time_t long_time;
1533   time(&long_time);
1534   localtime_r(&long_time, &t);
1535   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1536                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1537                t.tm_hour, t.tm_min, t.tm_sec);
1538   return buf;
1539 }
1540 
1541 // Note: os::shutdown() might be called very early during initialization, or
1542 // called from signal handler. Before adding something to os::shutdown(), make
1543 // sure it is async-safe and can handle partially initialized VM.
1544 void os::shutdown() {
1545 
1546   // allow PerfMemory to attempt cleanup of any persistent resources
1547   perfMemory_exit();
1548 
1549   // needs to remove object in file system
1550   AttachListener::abort();
1551 
1552   // flush buffered output, finish log files
1553   ostream_abort();
1554 
1555   // Check for abort hook
1556   abort_hook_t abort_hook = Arguments::abort_hook();
1557   if (abort_hook != NULL) {
1558     abort_hook();
1559   }
1560 }
1561 
1562 // Note: os::abort() might be called very early during initialization, or
1563 // called from signal handler. Before adding something to os::abort(), make
1564 // sure it is async-safe and can handle partially initialized VM.
1565 void os::abort(bool dump_core) {
1566   os::shutdown();
1567   if (dump_core) {
1568 #ifndef PRODUCT
1569     fdStream out(defaultStream::output_fd());
1570     out.print_raw("Current thread is ");
1571     char buf[16];
1572     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1573     out.print_raw_cr(buf);
1574     out.print_raw_cr("Dumping core ...");
1575 #endif
1576     ::abort(); // dump core (for debugging)
1577   }
1578 
1579   ::exit(1);
1580 }
1581 
1582 // Die immediately, no exit hook, no abort hook, no cleanup.
1583 void os::die() {
1584   ::abort(); // dump core (for debugging)
1585 }
1586 
1587 // DLL functions
1588 
1589 const char* os::dll_file_extension() { return ".so"; }
1590 
1591 // This must be hard coded because it's the system's temporary
1592 // directory, not the java application's temp directory, a la java.io.tmpdir.
1593 const char* os::get_temp_directory() { return "/tmp"; }
1594 
1595 static bool file_exists(const char* filename) {
1596   struct stat statbuf;
1597   if (filename == NULL || strlen(filename) == 0) {
1598     return false;
1599   }
1600   return os::stat(filename, &statbuf) == 0;
1601 }
1602 
1603 bool os::dll_build_name(char* buffer, size_t buflen,
1604                         const char* pname, const char* fname) {
1605   bool retval = false;
1606   const size_t pnamelen = pname ? strlen(pname) : 0;
1607 
1608   // Return error on buffer overflow.
1609   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1610     return retval;
1611   }
1612 
1613   if (pnamelen == 0) {
1614     snprintf(buffer, buflen, "lib%s.so", fname);
1615     retval = true;
1616   } else if (strchr(pname, *os::path_separator()) != NULL) {
1617     int n;
1618     char** pelements = split_path(pname, &n);
1619     if (pelements == NULL) {
1620       return false;
1621     }
1622     for (int i = 0 ; i < n ; i++) {
1623       // really shouldn't be NULL but what the heck, check can't hurt
1624       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1625         continue; // skip the empty path values
1626       }
1627       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1628       if (file_exists(buffer)) {
1629         retval = true;
1630         break;
1631       }
1632     }
1633     // release the storage
1634     for (int i = 0 ; i < n ; i++) {
1635       if (pelements[i] != NULL) {
1636         FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1637       }
1638     }
1639     if (pelements != NULL) {
1640       FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1641     }
1642   } else {
1643     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1644     retval = true;
1645   }
1646   return retval;
1647 }
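     // For example (purely illustrative values): with pname = "/usr/lib:/opt/lib"
     // and fname = "verify", each path element is tried in turn and buffer ends up
     // as, say, "/usr/lib/libverify.so" if that file exists; with an empty pname
     // the result is simply "libverify.so".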
1648 
1649 // check if addr is inside libjvm.so
1650 bool os::address_is_in_vm(address addr) {
1651   static address libjvm_base_addr;
1652   Dl_info dlinfo;
1653 
1654   if (libjvm_base_addr == NULL) {
1655     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1656       libjvm_base_addr = (address)dlinfo.dli_fbase;
1657     }
1658     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1659   }
1660 
1661   if (dladdr((void *)addr, &dlinfo) != 0) {
1662     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1663   }
1664 
1665   return false;
1666 }
1667 
1668 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
1669 static dladdr1_func_type dladdr1_func = NULL;
1670 
1671 bool os::dll_address_to_function_name(address addr, char *buf,
1672                                       int buflen, int * offset) {
1673   // buf is not optional, but offset is optional
1674   assert(buf != NULL, "sanity check");
1675 
1676   Dl_info dlinfo;
1677 
1678   // dladdr1_func was initialized in os::init()
1679   if (dladdr1_func != NULL) {
1680     // yes, we have dladdr1
1681 
1682     // Support for dladdr1 is checked at runtime; it may be
1683     // available even if the vm is built on a machine that does
1684     // not have dladdr1 support.  Make sure there is a value for
1685     // RTLD_DL_SYMENT.
1686     #ifndef RTLD_DL_SYMENT
1687     #define RTLD_DL_SYMENT 1
1688     #endif
1689 #ifdef _LP64
1690     Elf64_Sym * info;
1691 #else
1692     Elf32_Sym * info;
1693 #endif
1694     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1695                      RTLD_DL_SYMENT) != 0) {
1696       // see if we have a matching symbol that covers our address
1697       if (dlinfo.dli_saddr != NULL &&
1698           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1699         if (dlinfo.dli_sname != NULL) {
1700           if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1701             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1702           }
1703           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1704           return true;
1705         }
1706       }
1707       // no matching symbol so try for just file info
1708       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1709         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1710                             buf, buflen, offset, dlinfo.dli_fname)) {
1711           return true;
1712         }
1713       }
1714     }
1715     buf[0] = '\0';
1716     if (offset != NULL) *offset  = -1;
1717     return false;
1718   }
1719 
1720   // no, only dladdr is available
1721   if (dladdr((void *)addr, &dlinfo) != 0) {
1722     // see if we have a matching symbol
1723     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1724       if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1725         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1726       }
1727       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1728       return true;
1729     }
1730     // no matching symbol so try for just file info
1731     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1732       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1733                           buf, buflen, offset, dlinfo.dli_fname)) {
1734         return true;
1735       }
1736     }
1737   }
1738   buf[0] = '\0';
1739   if (offset != NULL) *offset  = -1;
1740   return false;
1741 }
1742 
1743 bool os::dll_address_to_library_name(address addr, char* buf,
1744                                      int buflen, int* offset) {
1745   // buf is not optional, but offset is optional
1746   assert(buf != NULL, "sanity check");
1747 
1748   Dl_info dlinfo;
1749 
1750   if (dladdr((void*)addr, &dlinfo) != 0) {
1751     if (dlinfo.dli_fname != NULL) {
1752       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1753     }
1754     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1755       *offset = addr - (address)dlinfo.dli_fbase;
1756     }
1757     return true;
1758   }
1759 
1760   buf[0] = '\0';
1761   if (offset) *offset = -1;
1762   return false;
1763 }
1764 
1765 // Prints the names and full paths of all opened dynamic libraries
1766 // for current process
1767 void os::print_dll_info(outputStream * st) {
1768   Dl_info dli;
1769   void *handle;
1770   Link_map *map;
1771   Link_map *p;
1772 
1773   st->print_cr("Dynamic libraries:"); st->flush();
1774 
1775   if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
1776       dli.dli_fname == NULL) {
1777     st->print_cr("Error: Cannot print dynamic libraries.");
1778     return;
1779   }
1780   handle = dlopen(dli.dli_fname, RTLD_LAZY);
1781   if (handle == NULL) {
1782     st->print_cr("Error: Cannot print dynamic libraries.");
1783     return;
1784   }
1785   dlinfo(handle, RTLD_DI_LINKMAP, &map);
1786   if (map == NULL) {
1787     st->print_cr("Error: Cannot print dynamic libraries.");
1788     return;
1789   }
1790 
1791   while (map->l_prev != NULL)
1792     map = map->l_prev;
1793 
1794   while (map != NULL) {
1795     st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
1796     map = map->l_next;
1797   }
1798 
1799   dlclose(handle);
1800 }
1801 
1802 // Loads a .dll/.so and, in case of error, checks whether the
1803 // .dll/.so was built for the same architecture that HotSpot
1804 // is running on.
1805 
1806 void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
1807 {
1808   void * result= ::dlopen(filename, RTLD_LAZY);
1809   if (result != NULL) {
1810     // Successful loading
1811     return result;
1812   }
1813 
1814   Elf32_Ehdr elf_head;
1815 
1816   // Read system error message into ebuf
1817   // It may or may not be overwritten below
1818   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1819   ebuf[ebuflen-1] = '\0';
1820   int diag_msg_max_length = ebuflen - strlen(ebuf);
1821   char* diag_msg_buf = ebuf + strlen(ebuf);
1822 
1823   if (diag_msg_max_length==0) {
1824     // No more space in ebuf for additional diagnostics message
1825     return NULL;
1826   }
1827 
1828 
1829   int file_descriptor = ::open(filename, O_RDONLY | O_NONBLOCK);
1830 
1831   if (file_descriptor < 0) {
1832     // Can't open library, report dlerror() message
1833     return NULL;
1834   }
1835 
1836   bool failed_to_read_elf_head =
1837     (sizeof(elf_head) !=
1838         (::read(file_descriptor, &elf_head, sizeof(elf_head))));
1839 
1840   ::close(file_descriptor);
1841   if (failed_to_read_elf_head) {
1842     // file i/o error - report dlerror() msg
1843     return NULL;
1844   }
1845 
1846   typedef struct {
1847     Elf32_Half  code;         // Actual value as defined in elf.h
1848     Elf32_Half  compat_class; // Compatibility class of the arch from the VM's point of view
1849     char        elf_class;    // 32 or 64 bit
1850     char        endianess;    // MSB or LSB
1851     char*       name;         // String representation
1852   } arch_t;
1853 
1854   static const arch_t arch_array[]={
1855     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1856     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1857     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1858     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1859     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1860     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1861     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1862     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1863     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1864     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1865   };
1866 
1867   #if  (defined IA32)
1868     static  Elf32_Half running_arch_code=EM_386;
1869   #elif   (defined AMD64)
1870     static  Elf32_Half running_arch_code=EM_X86_64;
1871   #elif  (defined IA64)
1872     static  Elf32_Half running_arch_code=EM_IA_64;
1873   #elif  (defined __sparc) && (defined _LP64)
1874     static  Elf32_Half running_arch_code=EM_SPARCV9;
1875   #elif  (defined __sparc) && (!defined _LP64)
1876     static  Elf32_Half running_arch_code=EM_SPARC;
1877   #elif  (defined __powerpc64__)
1878     static  Elf32_Half running_arch_code=EM_PPC64;
1879   #elif  (defined __powerpc__)
1880     static  Elf32_Half running_arch_code=EM_PPC;
1881   #elif (defined ARM)
1882     static  Elf32_Half running_arch_code=EM_ARM;
1883   #else
1884     #error Method os::dll_load requires that one of the following is defined:\
1885          IA32, AMD64, IA64, __sparc, __powerpc__, ARM
1886   #endif
1887 
1888   // Identify the compatibility class for the VM's architecture and the library's architecture
1889   // Obtain string descriptions for architectures
1890 
1891   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1892   int running_arch_index=-1;
1893 
1894   for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
1895     if (running_arch_code == arch_array[i].code) {
1896       running_arch_index    = i;
1897     }
1898     if (lib_arch.code == arch_array[i].code) {
1899       lib_arch.compat_class = arch_array[i].compat_class;
1900       lib_arch.name         = arch_array[i].name;
1901     }
1902   }
1903 
1904   assert(running_arch_index != -1,
1905     "Didn't find running architecture code (running_arch_code) in arch_array");
1906   if (running_arch_index == -1) {
1907     // Even though running architecture detection failed
1908     // we may still continue with reporting dlerror() message
1909     return NULL;
1910   }
1911 
1912   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1913     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1914     return NULL;
1915   }
1916 
1917   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1918     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1919     return NULL;
1920   }
1921 
1922   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1923     if (lib_arch.name != NULL) {
1924       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1925         " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1926         lib_arch.name, arch_array[running_arch_index].name);
1927     } else {
1928       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1929       " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1930         lib_arch.code,
1931         arch_array[running_arch_index].name);
1932     }
1933   }
1934 
1935   return NULL;
1936 }
1937 
1938 void* os::dll_lookup(void* handle, const char* name) {
1939   return dlsym(handle, name);
1940 }
1941 
1942 void* os::get_default_process_handle() {
1943   return (void*)::dlopen(NULL, RTLD_LAZY);
1944 }
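     // dlopen(NULL, ...) returns a handle for the main program; dlsym() on that
     // handle searches the global symbol scope of the whole process.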
1945 
1946 int os::stat(const char *path, struct stat *sbuf) {
1947   char pathbuf[MAX_PATH];
1948   if (strlen(path) > MAX_PATH - 1) {
1949     errno = ENAMETOOLONG;
1950     return -1;
1951   }
1952   os::native_path(strcpy(pathbuf, path));
1953   return ::stat(pathbuf, sbuf);
1954 }
1955 
1956 static bool _print_ascii_file(const char* filename, outputStream* st) {
1957   int fd = ::open(filename, O_RDONLY);
1958   if (fd == -1) {
1959      return false;
1960   }
1961 
1962   char buf[32];
1963   int bytes;
1964   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
1965     st->print_raw(buf, bytes);
1966   }
1967 
1968   ::close(fd);
1969 
1970   return true;
1971 }
1972 
1973 void os::print_os_info_brief(outputStream* st) {
1974   os::Solaris::print_distro_info(st);
1975 
1976   os::Posix::print_uname_info(st);
1977 
1978   os::Solaris::print_libversion_info(st);
1979 }
1980 
1981 void os::print_os_info(outputStream* st) {
1982   st->print("OS:");
1983 
1984   os::Solaris::print_distro_info(st);
1985 
1986   os::Posix::print_uname_info(st);
1987 
1988   os::Solaris::print_libversion_info(st);
1989 
1990   os::Posix::print_rlimit_info(st);
1991 
1992   os::Posix::print_load_average(st);
1993 }
1994 
1995 void os::Solaris::print_distro_info(outputStream* st) {
1996   if (!_print_ascii_file("/etc/release", st)) {
1997     st->print("Solaris");
1998   }
1999   st->cr();
2000 }
2001 
2002 void os::Solaris::print_libversion_info(outputStream* st) {
2003   if (os::Solaris::T2_libthread()) {
2004     st->print("  (T2 libthread)");
2005   }
2006   else {
2007     st->print("  (T1 libthread)");
2008   }
2009   st->cr();
2010 }
2011 
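     // Scan /proc/self/map for a mapping that starts at virtual address 0.
     // Such a mapping can hide null pointer dereferences, so it is worth
     // calling out explicitly in the memory diagnostics below.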
2012 static bool check_addr0(outputStream* st) {
2013   jboolean status = false;
2014   int fd = ::open("/proc/self/map", O_RDONLY);
2015   if (fd >= 0) {
2016     prmap_t p;
2017     while (::read(fd, &p, sizeof(p)) > 0) {
2018       if (p.pr_vaddr == 0x0) {
2019         st->print("Warning: Address: " PTR_FORMAT ", Size: " SIZE_FORMAT "K, ", p.pr_vaddr, p.pr_size/1024);
2020         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
2021         st->print("Access:");
2022         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
2023         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
2024         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
2025         st->cr();
2026         status = true;
2027       }
2028     }
2029     ::close(fd);
2030   }
2031   return status;
2032 }
2033 
2034 void os::pd_print_cpu_info(outputStream* st) {
2035   // Nothing to do for now.
2036 }
2037 
2038 void os::print_memory_info(outputStream* st) {
2039   st->print("Memory:");
2040   st->print(" %dk page", os::vm_page_size()>>10);
2041   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
2042   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
2043   st->cr();
2044   (void) check_addr0(st);
2045 }
2046 
2047 void os::print_siginfo(outputStream* st, void* siginfo) {
2048   const siginfo_t* si = (const siginfo_t*)siginfo;
2049 
2050   os::Posix::print_siginfo_brief(st, si);
2051 
2052   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2053       UseSharedSpaces) {
2054     FileMapInfo* mapinfo = FileMapInfo::current_info();
2055     if (mapinfo->is_in_shared_space(si->si_addr)) {
2056       st->print("\n\nError accessing class data sharing archive."   \
2057                 " Mapped file inaccessible during execution, "      \
2058                 "possible disk/network problem.");
2059     }
2060   }
2061   st->cr();
2062 }
2063 
2064 // Moved here from the signal handling code below because we need these
2065 // for diagnostic prints.
2066 #define OLDMAXSIGNUM 32
2067 static int Maxsignum = 0;
2068 static int *ourSigFlags = NULL;
2069 
2070 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2071 
2072 int os::Solaris::get_our_sigflags(int sig) {
2073   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2074   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2075   return ourSigFlags[sig];
2076 }
2077 
2078 void os::Solaris::set_our_sigflags(int sig, int flags) {
2079   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2080   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2081   ourSigFlags[sig] = flags;
2082 }
2083 
2084 
2085 static const char* get_signal_handler_name(address handler,
2086                                            char* buf, int buflen) {
2087   int offset;
2088   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2089   if (found) {
2090     // skip directory names
2091     const char *p1, *p2;
2092     p1 = buf;
2093     size_t len = strlen(os::file_separator());
2094     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2095     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2096   } else {
2097     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2098   }
2099   return buf;
2100 }
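     // The result is typically of the form "libfoo.so+0x1234" (library basename
     // plus offset) when the handler address can be resolved; "libfoo.so" here is
     // only an illustrative name. Otherwise the raw handler address is printed.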
2101 
2102 static void print_signal_handler(outputStream* st, int sig,
2103                                   char* buf, size_t buflen) {
2104   struct sigaction sa;
2105 
2106   sigaction(sig, NULL, &sa);
2107 
2108   st->print("%s: ", os::exception_name(sig, buf, buflen));
2109 
2110   address handler = (sa.sa_flags & SA_SIGINFO)
2111                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2112                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
2113 
2114   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2115     st->print("SIG_DFL");
2116   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2117     st->print("SIG_IGN");
2118   } else {
2119     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2120   }
2121 
2122   st->print(", sa_mask[0]=");
2123   os::Posix::print_signal_set_short(st, &sa.sa_mask);
2124 
2125   address rh = VMError::get_resetted_sighandler(sig);
2126   // The handler may have been reset by VMError.
2127   if (rh != NULL) {
2128     handler = rh;
2129     sa.sa_flags = VMError::get_resetted_sigflags(sig);
2130   }
2131 
2132   st->print(", sa_flags=");
2133   os::Posix::print_sa_flags(st, sa.sa_flags);
2134 
2135   // Check: is it our handler?
2136   if (handler == CAST_FROM_FN_PTR(address, signalHandler) ||
2137       handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
2138     // It is our signal handler
2139     // check for flags
2140     if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
2141       st->print(
2142         ", flags were changed from " PTR32_FORMAT ", consider using jsig library",
2143         os::Solaris::get_our_sigflags(sig));
2144     }
2145   }
2146   st->cr();
2147 }
2148 
2149 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2150   st->print_cr("Signal Handlers:");
2151   print_signal_handler(st, SIGSEGV, buf, buflen);
2152   print_signal_handler(st, SIGBUS , buf, buflen);
2153   print_signal_handler(st, SIGFPE , buf, buflen);
2154   print_signal_handler(st, SIGPIPE, buf, buflen);
2155   print_signal_handler(st, SIGXFSZ, buf, buflen);
2156   print_signal_handler(st, SIGILL , buf, buflen);
2157   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2158   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2159   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2160   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2161   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2162   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2163   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2164   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2165 }
2166 
2167 static char saved_jvm_path[MAXPATHLEN] = { 0 };
2168 
2169 // Find the full path to the current module, libjvm.so
2170 void os::jvm_path(char *buf, jint buflen) {
2171   // Error checking.
2172   if (buflen < MAXPATHLEN) {
2173     assert(false, "must use a large-enough buffer");
2174     buf[0] = '\0';
2175     return;
2176   }
2177   // Lazy resolve the path to current module.
2178   if (saved_jvm_path[0] != 0) {
2179     strcpy(buf, saved_jvm_path);
2180     return;
2181   }
2182 
2183   Dl_info dlinfo;
2184   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2185   assert(ret != 0, "cannot locate libjvm");
2186   if (ret != 0 && dlinfo.dli_fname != NULL) {
2187     realpath((char *)dlinfo.dli_fname, buf);
2188   } else {
2189     buf[0] = '\0';
2190     return;
2191   }
2192 
2193   if (Arguments::created_by_gamma_launcher()) {
2194     // Support for the gamma launcher.  Typical value for buf is
2195     // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
2196     // the right place in the string, then assume we are installed in a JDK and
2197     // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
2198     // up the path so it looks like libjvm.so is installed there (append a
2199     // fake suffix hotspot/libjvm.so).
2200     const char *p = buf + strlen(buf) - 1;
2201     for (int count = 0; p > buf && count < 5; ++count) {
2202       for (--p; p > buf && *p != '/'; --p)
2203         /* empty */ ;
2204     }
2205 
2206     if (strncmp(p, "/jre/lib/", 9) != 0) {
2207       // Look for JAVA_HOME in the environment.
2208       char* java_home_var = ::getenv("JAVA_HOME");
2209       if (java_home_var != NULL && java_home_var[0] != 0) {
2210         char cpu_arch[12];
2211         char* jrelib_p;
2212         int   len;
2213         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2214 #ifdef _LP64
2215         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2216         if (strcmp(cpu_arch, "sparc") == 0) {
2217           strcat(cpu_arch, "v9");
2218         } else if (strcmp(cpu_arch, "i386") == 0) {
2219           strcpy(cpu_arch, "amd64");
2220         }
2221 #endif
2222         // Check the current module name "libjvm.so".
2223         p = strrchr(buf, '/');
2224         assert(strstr(p, "/libjvm") == p, "invalid library name");
2225 
2226         realpath(java_home_var, buf);
2227         // determine if this is a legacy image or modules image
2228         // modules image doesn't have "jre" subdirectory
2229         len = strlen(buf);
2230         assert(len < buflen, "Ran out of buffer space");
2231         jrelib_p = buf + len;
2232         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2233         if (0 != access(buf, F_OK)) {
2234           snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2235         }
2236 
2237         if (0 == access(buf, F_OK)) {
2238           // Use current module name "libjvm.so"
2239           len = strlen(buf);
2240           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2241         } else {
2242           // Go back to path of .so
2243           realpath((char *)dlinfo.dli_fname, buf);
2244         }
2245       }
2246     }
2247   }
2248 
2249   strncpy(saved_jvm_path, buf, MAXPATHLEN);
2250 }
2251 
2252 
2253 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2254   // no prefix required, not even "_"
2255 }
2256 
2257 
2258 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2259   // no suffix required
2260 }
2261 
2262 // This method is a copy of JDK's sysGetLastErrorString
2263 // from src/solaris/hpi/src/system_md.c
2264 
2265 size_t os::lasterror(char *buf, size_t len) {
2266 
2267   if (errno == 0)  return 0;
2268 
2269   const char *s = ::strerror(errno);
2270   size_t n = ::strlen(s);
2271   if (n >= len) {
2272     n = len - 1;
2273   }
2274   ::strncpy(buf, s, n);
2275   buf[n] = '\0';
2276   return n;
2277 }
2278 
2279 
2280 // sun.misc.Signal
2281 
2282 extern "C" {
2283   static void UserHandler(int sig, void *siginfo, void *context) {
2284     // Ctrl-C was pressed during error reporting, likely because the error
2285     // handler failed to abort. Let the VM die immediately.
2286     if (sig == SIGINT && is_error_reported()) {
2287        os::die();
2288     }
2289 
2290     os::signal_notify(sig);
2291     // We do not need to reinstate the signal handler each time...
2292   }
2293 }
2294 
2295 void* os::user_handler() {
2296   return CAST_FROM_FN_PTR(void*, UserHandler);
2297 }
2298 
2299 class Semaphore : public StackObj {
2300   public:
2301     Semaphore();
2302     ~Semaphore();
2303     void signal();
2304     void wait();
2305     bool trywait();
2306     bool timedwait(unsigned int sec, int nsec);
2307   private:
2308     sema_t _semaphore;
2309 };
2310 
2311 
2312 Semaphore::Semaphore() {
2313   sema_init(&_semaphore, 0, NULL, NULL);
2314 }
2315 
2316 Semaphore::~Semaphore() {
2317   sema_destroy(&_semaphore);
2318 }
2319 
2320 void Semaphore::signal() {
2321   sema_post(&_semaphore);
2322 }
2323 
2324 void Semaphore::wait() {
2325   sema_wait(&_semaphore);
2326 }
2327 
2328 bool Semaphore::trywait() {
2329   return sema_trywait(&_semaphore) == 0;
2330 }
2331 
2332 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2333   struct timespec ts;
2334   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2335 
2336   while (1) {
2337     int result = sema_timedwait(&_semaphore, &ts);
2338     if (result == 0) {
2339       return true;
2340     } else if (errno == EINTR) {
2341       continue;
2342     } else if (errno == ETIME) {
2343       return false;
2344     } else {
2345       return false;
2346     }
2347   }
2348 }
2349 
2350 extern "C" {
2351   typedef void (*sa_handler_t)(int);
2352   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2353 }
2354 
2355 void* os::signal(int signal_number, void* handler) {
2356   struct sigaction sigAct, oldSigAct;
2357   sigfillset(&(sigAct.sa_mask));
2358   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2359   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2360 
2361   if (sigaction(signal_number, &sigAct, &oldSigAct))
2362     // -1 means registration failed
2363     return (void *)-1;
2364 
2365   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2366 }
2367 
2368 void os::signal_raise(int signal_number) {
2369   raise(signal_number);
2370 }
2371 
2372 /*
2373  * The following code is moved from os.cpp for making this
2374  * code platform specific, which it is by its very nature.
2375  */
2376 
2377 // a counter for each possible signal value
2378 static int Sigexit = 0;
2379 static int Maxlibjsigsigs;
2380 static jint *pending_signals = NULL;
2381 static int *preinstalled_sigs = NULL;
2382 static struct sigaction *chainedsigactions = NULL;
2383 static sema_t sig_sem;
2384 typedef int (*version_getting_t)();
2385 version_getting_t os::Solaris::get_libjsig_version = NULL;
2386 static int libjsigversion = 0;
2387 
2388 int os::sigexitnum_pd() {
2389   assert(Sigexit > 0, "signal memory not yet initialized");
2390   return Sigexit;
2391 }
2392 
2393 void os::Solaris::init_signal_mem() {
2394   // Initialize signal structures
2395   Maxsignum = SIGRTMAX;
2396   Sigexit = Maxsignum+1;
2397   assert(Maxsignum >0, "Unable to obtain max signal number");
2398 
2399   Maxlibjsigsigs = Maxsignum;
2400 
2401   // pending_signals has one int per signal
2402   // The additional signal is for SIGEXIT - exit signal to signal_thread
2403   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2404   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2405 
2406   if (UseSignalChaining) {
2407      chainedsigactions = (struct sigaction *)os::malloc(sizeof(struct sigaction)
2408        * (Maxsignum + 1), mtInternal);
2409      memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2410      preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2411      memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2412   }
2413   ourSigFlags = (int*)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2414   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2415 }
2416 
2417 void os::signal_init_pd() {
2418   int ret;
2419 
2420   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2421   assert(ret == 0, "sema_init() failed");
2422 }
2423 
2424 void os::signal_notify(int signal_number) {
2425   int ret;
2426 
2427   Atomic::inc(&pending_signals[signal_number]);
2428   ret = ::sema_post(&sig_sem);
2429   assert(ret == 0, "sema_post() failed");
2430 }
2431 
2432 static int check_pending_signals(bool wait_for_signal) {
2433   int ret;
2434   while (true) {
2435     for (int i = 0; i < Sigexit + 1; i++) {
2436       jint n = pending_signals[i];
2437       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2438         return i;
2439       }
2440     }
2441     if (!wait_for_signal) {
2442       return -1;
2443     }
2444     JavaThread *thread = JavaThread::current();
2445     ThreadBlockInVM tbivm(thread);
2446 
2447     bool threadIsSuspended;
2448     do {
2449       thread->set_suspend_equivalent();
2450       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2451       while((ret = ::sema_wait(&sig_sem)) == EINTR)
2452           ;
2453       assert(ret == 0, "sema_wait() failed");
2454 
2455       // were we externally suspended while we were waiting?
2456       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2457       if (threadIsSuspended) {
2458         //
2459         // The semaphore has been incremented, but while we were waiting
2460         // another thread suspended us. We don't want to continue running
2461         // while suspended because that would surprise the thread that
2462         // suspended us.
2463         //
2464         ret = ::sema_post(&sig_sem);
2465         assert(ret == 0, "sema_post() failed");
2466 
2467         thread->java_suspend_self();
2468       }
2469     } while (threadIsSuspended);
2470   }
2471 }
2472 
2473 int os::signal_lookup() {
2474   return check_pending_signals(false);
2475 }
2476 
2477 int os::signal_wait() {
2478   return check_pending_signals(true);
2479 }
2480 
2481 ////////////////////////////////////////////////////////////////////////////////
2482 // Virtual Memory
2483 
2484 static int page_size = -1;
2485 
2486 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2487 // clear this var if support is not available.
2488 static bool has_map_align = true;
2489 
2490 int os::vm_page_size() {
2491   assert(page_size != -1, "must call os::init");
2492   return page_size;
2493 }
2494 
2495 // Solaris allocates memory by pages.
2496 int os::vm_allocation_granularity() {
2497   assert(page_size != -1, "must call os::init");
2498   return page_size;
2499 }
2500 
2501 static bool recoverable_mmap_error(int err) {
2502   // See if the error is one we can let the caller handle. This
2503   // list of errno values comes from the Solaris mmap(2) man page.
2504   switch (err) {
2505   case EBADF:
2506   case EINVAL:
2507   case ENOTSUP:
2508     // let the caller deal with these errors
2509     return true;
2510 
2511   default:
2512     // Any remaining errors on this OS can cause our reserved mapping
2513     // to be lost. That can cause confusion where different data
2514     // structures think they have the same memory mapped. The worst
2515     // scenario is if both the VM and a library think they have the
2516     // same memory mapped.
2517     return false;
2518   }
2519 }
2520 
2521 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2522                                     int err) {
2523   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2524           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2525           strerror(err), err);
2526 }
2527 
2528 static void warn_fail_commit_memory(char* addr, size_t bytes,
2529                                     size_t alignment_hint, bool exec,
2530                                     int err) {
2531   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2532           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2533           alignment_hint, exec, strerror(err), err);
2534 }
2535 
2536 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2537   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2538   size_t size = bytes;
2539   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2540   if (res != NULL) {
2541     if (UseNUMAInterleaving) {
2542       numa_make_global(addr, bytes);
2543     }
2544     return 0;
2545   }
2546 
2547   int err = errno;  // save errno from mmap() call in mmap_chunk()
2548 
2549   if (!recoverable_mmap_error(err)) {
2550     warn_fail_commit_memory(addr, bytes, exec, err);
2551     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2552   }
2553 
2554   return err;
2555 }
2556 
2557 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2558   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2559 }
2560 
2561 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2562                                   const char* mesg) {
2563   assert(mesg != NULL, "mesg must be specified");
2564   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2565   if (err != 0) {
2566     // the caller wants all commit errors to exit with the specified mesg:
2567     warn_fail_commit_memory(addr, bytes, exec, err);
2568     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2569   }
2570 }
2571 
2572 size_t os::Solaris::page_size_for_alignment(size_t alignment) {
2573   assert(is_size_aligned(alignment, (size_t) vm_page_size()),
2574          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
2575                  alignment, (size_t) vm_page_size()));
2576 
2577   for (int i = 0; _page_sizes[i] != 0; i++) {
2578     if (is_size_aligned(alignment, _page_sizes[i])) {
2579       return _page_sizes[i];
2580     }
2581   }
2582 
2583   return (size_t) vm_page_size();
2584 }
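     // For example, if _page_sizes held { 4M, 64K, 8K, 0 } (illustrative values
     // only), an alignment of 2M is not a multiple of 4M but is a multiple of
     // 64K, so 64K would be returned; if nothing matches, the base page size is
     // used.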
2585 
2586 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2587                                     size_t alignment_hint, bool exec) {
2588   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2589   if (err == 0 && UseLargePages && alignment_hint > 0) {
2590     assert(is_size_aligned(bytes, alignment_hint),
2591            err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint));
2592 
2593     // The syscall memcntl requires an exact page size (see man memcntl for details).
2594     size_t page_size = page_size_for_alignment(alignment_hint);
2595     if (page_size > (size_t) vm_page_size()) {
2596       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2597     }
2598   }
2599   return err;
2600 }
2601 
2602 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2603                           bool exec) {
2604   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2605 }
2606 
2607 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2608                                   size_t alignment_hint, bool exec,
2609                                   const char* mesg) {
2610   assert(mesg != NULL, "mesg must be specified");
2611   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2612   if (err != 0) {
2613     // the caller wants all commit errors to exit with the specified mesg:
2614     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2615     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2616   }
2617 }
2618 
2619 // Uncommit the pages in a specified region.
2620 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2621   if (madvise(addr, bytes, MADV_FREE) < 0) {
2622     debug_only(warning("MADV_FREE failed."));
2623     return;
2624   }
2625 }
2626 
2627 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2628   return os::commit_memory(addr, size, !ExecMem);
2629 }
2630 
2631 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2632   return os::uncommit_memory(addr, size);
2633 }
2634 
2635 // Change the page size in a given range.
2636 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2637   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2638   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2639   if (UseLargePages) {
2640     Solaris::setup_large_pages(addr, bytes, alignment_hint);
2641   }
2642 }
2643 
2644 // Tell the OS to make the range local to the first-touching LWP
2645 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2646   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2647   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2648     debug_only(warning("MADV_ACCESS_LWP failed."));
2649   }
2650 }
2651 
2652 // Tell the OS that this range would be accessed from different LWPs.
2653 void os::numa_make_global(char *addr, size_t bytes) {
2654   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2655   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2656     debug_only(warning("MADV_ACCESS_MANY failed."));
2657   }
2658 }
2659 
2660 // Get the number of the locality groups.
2661 size_t os::numa_get_groups_num() {
2662   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2663   return n != -1 ? n : 1;
2664 }
2665 
2666 // Get a list of leaf locality groups. A leaf lgroup is group that
2667 // doesn't have any children. Typical leaf group is a CPU or a CPU/memory
2668 // board. An LWP is assigned to one of these groups upon creation.
2669 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2670    if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2671      ids[0] = 0;
2672      return 1;
2673    }
2674    int result_size = 0, top = 1, bottom = 0, cur = 0;
2675    for (int k = 0; k < size; k++) {
2676      int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2677                                     (Solaris::lgrp_id_t*)&ids[top], size - top);
2678      if (r == -1) {
2679        ids[0] = 0;
2680        return 1;
2681      }
2682      if (!r) {
2683        // That's a leaf node.
2684        assert (bottom <= cur, "Sanity check");
2685        // Check if the node has memory
2686        if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2687                                    NULL, 0, LGRP_RSRC_MEM) > 0) {
2688          ids[bottom++] = ids[cur];
2689        }
2690      }
2691      top += r;
2692      cur++;
2693    }
2694    if (bottom == 0) {
2695      // Handle a situation, when the OS reports no memory available.
2696      // Assume UMA architecture.
2697      ids[0] = 0;
2698      return 1;
2699    }
2700    return bottom;
2701 }
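     // Note: ids[] above doubles as the work queue of a breadth-first walk over
     // the lgroup hierarchy: newly discovered children are appended at 'top',
     // while leaves that actually have memory are compacted to the front of the
     // array (indices [0, bottom)).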
2702 
2703 // Detect the topology change. Typically happens during CPU plugging-unplugging.
2704 bool os::numa_topology_changed() {
2705   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2706   if (is_stale != -1 && is_stale) {
2707     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2708     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2709     assert(c != 0, "Failure to initialize LGRP API");
2710     Solaris::set_lgrp_cookie(c);
2711     return true;
2712   }
2713   return false;
2714 }
2715 
2716 // Get the group id of the current LWP.
2717 int os::numa_get_group_id() {
2718   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2719   if (lgrp_id == -1) {
2720     return 0;
2721   }
2722   const int size = os::numa_get_groups_num();
2723   int *ids = (int*)alloca(size * sizeof(int));
2724 
2725   // Get the ids of all lgroups with memory; r is the count.
2726   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2727                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2728   if (r <= 0) {
2729     return 0;
2730   }
2731   return ids[os::random() % r];
2732 }
2733 
2734 // Request information about the page.
2735 bool os::get_page_info(char *start, page_info* info) {
2736   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2737   uint64_t addr = (uintptr_t)start;
2738   uint64_t outdata[2];
2739   uint_t validity = 0;
2740 
2741   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2742     return false;
2743   }
2744 
2745   info->size = 0;
2746   info->lgrp_id = -1;
2747 
2748   if ((validity & 1) != 0) {
2749     if ((validity & 2) != 0) {
2750       info->lgrp_id = outdata[0];
2751     }
2752     if ((validity & 4) != 0) {
2753       info->size = outdata[1];
2754     }
2755     return true;
2756   }
2757   return false;
2758 }
2759 
2760 // Scan the pages from start to end until a page different than
2761 // the one described in the info parameter is encountered.
2762 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2763   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2764   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2765   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2766   uint_t validity[MAX_MEMINFO_CNT];
2767 
2768   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2769   uint64_t p = (uint64_t)start;
2770   while (p < (uint64_t)end) {
2771     addrs[0] = p;
2772     size_t addrs_count = 1;
2773     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2774       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2775       addrs_count++;
2776     }
2777 
2778     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2779       return NULL;
2780     }
2781 
2782     size_t i = 0;
2783     for (; i < addrs_count; i++) {
2784       if ((validity[i] & 1) != 0) {
2785         if ((validity[i] & 4) != 0) {
2786           if (outdata[types * i + 1] != page_expected->size) {
2787             break;
2788           }
2789         } else
2790           if (page_expected->size != 0) {
2791             break;
2792           }
2793 
2794         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2795           if (outdata[types * i] != page_expected->lgrp_id) {
2796             break;
2797           }
2798         }
2799       } else {
2800         return NULL;
2801       }
2802     }
2803 
2804     if (i < addrs_count) {
2805       if ((validity[i] & 2) != 0) {
2806         page_found->lgrp_id = outdata[types * i];
2807       } else {
2808         page_found->lgrp_id = -1;
2809       }
2810       if ((validity[i] & 4) != 0) {
2811         page_found->size = outdata[types * i + 1];
2812       } else {
2813         page_found->size = 0;
2814       }
2815       return (char*)addrs[i];
2816     }
2817 
2818     p = addrs[addrs_count - 1] + page_size;
2819   }
2820   return end;
2821 }
2822 
2823 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2824   size_t size = bytes;
2825   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2826   // uncommitted page. Otherwise, the read/write might succeed if we
2827   // have enough swap space to back the physical page.
2828   return
2829     NULL != Solaris::mmap_chunk(addr, size,
2830                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2831                                 PROT_NONE);
2832 }
2833 
2834 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2835   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2836 
2837   if (b == MAP_FAILED) {
2838     return NULL;
2839   }
2840   return b;
2841 }
2842 
2843 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
2844   char* addr = requested_addr;
2845   int flags = MAP_PRIVATE | MAP_NORESERVE;
2846 
2847   assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
2848 
2849   if (fixed) {
2850     flags |= MAP_FIXED;
2851   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
2852     flags |= MAP_ALIGN;
2853     addr = (char*) alignment_hint;
2854   }
2855 
2856   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2857   // uncommitted page. Otherwise, the read/write might succeed if we
2858   // have enough swap space to back the physical page.
2859   return mmap_chunk(addr, bytes, flags, PROT_NONE);
2860 }
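     // With MAP_ALIGN, mmap() interprets its address argument as the requested
     // alignment rather than as a placement hint, which is why alignment_hint is
     // passed in place of an address above.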
2861 
2862 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2863   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
2864 
2865   guarantee(requested_addr == NULL || requested_addr == addr,
2866             "OS failed to return requested mmap address.");
2867   return addr;
2868 }
2869 
2870 // Reserve memory at an arbitrary address, only if that area is
2871 // available (and not reserved for something else).
2872 
2873 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2874   const int max_tries = 10;
2875   char* base[max_tries];
2876   size_t size[max_tries];
2877 
2878   // Solaris adds a gap between mmap'ed regions.  The size of the gap
2879   // is dependent on the requested size and the MMU.  Our initial gap
2880   // value here is just a guess and will be corrected later.
2881   bool had_top_overlap = false;
2882   bool have_adjusted_gap = false;
2883   size_t gap = 0x400000;
2884 
2885   // Assert only that the size is a multiple of the page size, since
2886   // that's all that mmap requires, and since that's all we really know
2887   // about at this low abstraction level.  If we need higher alignment,
2888   // we can either pass an alignment to this method or verify alignment
2889   // in one of the methods further up the call chain.  See bug 5044738.
2890   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2891 
2892   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2893   // Give it a try, if the kernel honors the hint we can return immediately.
2894   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2895 
2896   volatile int err = errno;
2897   if (addr == requested_addr) {
2898     return addr;
2899   } else if (addr != NULL) {
2900     pd_unmap_memory(addr, bytes);
2901   }
2902 
2903   if (PrintMiscellaneous && Verbose) {
2904     char buf[256];
2905     buf[0] = '\0';
2906     if (addr == NULL) {
2907       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
2908     }
2909     warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2910             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2911             "%s", bytes, requested_addr, addr, buf);
2912   }
2913 
2914   // Address hint method didn't work.  Fall back to the old method.
2915   // In theory, once SNV becomes our oldest supported platform, this
2916   // code will no longer be needed.
2917   //
2918   // Repeatedly allocate blocks until the block is allocated at the
2919   // right spot. Give up after max_tries.
2920   int i;
2921   for (i = 0; i < max_tries; ++i) {
2922     base[i] = reserve_memory(bytes);
2923 
2924     if (base[i] != NULL) {
2925       // Is this the block we wanted?
2926       if (base[i] == requested_addr) {
2927         size[i] = bytes;
2928         break;
2929       }
2930 
2931       // check that the gap value is right
2932       if (had_top_overlap && !have_adjusted_gap) {
2933         size_t actual_gap = base[i-1] - base[i] - bytes;
2934         if (gap != actual_gap) {
2935           // adjust the gap value and retry the last 2 allocations
2936           assert(i > 0, "gap adjustment code problem");
2937           have_adjusted_gap = true;  // adjust the gap only once, just in case
2938           gap = actual_gap;
2939           if (PrintMiscellaneous && Verbose) {
2940             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2941           }
2942           unmap_memory(base[i], bytes);
2943           unmap_memory(base[i-1], size[i-1]);
2944           i-=2;
2945           continue;
2946         }
2947       }
2948 
2949       // Does this overlap the block we wanted? Give back the overlapped
2950       // parts and try again.
2951       //
2952       // There is still a bug in this code: if top_overlap == bytes,
2953       // the overlap is offset from requested region by the value of gap.
2954       // In this case giving back the overlapped part will not work,
2955       // because we'll give back the entire block at base[i] and
2956       // therefore the subsequent allocation will not generate a new gap.
2957       // This could be fixed with a new algorithm that used larger
2958       // or variable size chunks to find the requested region -
2959       // but such a change would introduce additional complications.
2960       // It's rare enough that the planets align for this bug,
2961       // so we'll just wait for a fix for 6204603/5003415 which
2962       // will provide a mmap flag to allow us to avoid this business.
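      //
      // Illustrative example of the top-overlap adjustment (hypothetical
      // addresses, assuming mmap hands out blocks at decreasing addresses
      // separated by 'gap' as described above):  requested_addr = 0x10000000,
      // bytes = 0x400000, gap = 0x400000, and this iteration's mmap returned
      // base[i] = 0x10600000.  Then top_overlap = 0x10000000 + 0x800000 -
      // 0x10600000 = 0x200000, which is < bytes, so we unmap the low 0x200000
      // of the block and advance base[i] to 0x10800000.  The next allocation
      // should then land at 0x10800000 - gap - bytes = 0x10000000, which is
      // exactly requested_addr.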
2963 
2964       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2965       if (top_overlap >= 0 && top_overlap < bytes) {
2966         had_top_overlap = true;
2967         unmap_memory(base[i], top_overlap);
2968         base[i] += top_overlap;
2969         size[i] = bytes - top_overlap;
2970       } else {
2971         size_t bottom_overlap = base[i] + bytes - requested_addr;
2972         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2973           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
2974             warning("attempt_reserve_memory_at: possible alignment bug");
2975           }
2976           unmap_memory(requested_addr, bottom_overlap);
2977           size[i] = bytes - bottom_overlap;
2978         } else {
2979           size[i] = bytes;
2980         }
2981       }
2982     }
2983   }
2984 
2985   // Give back the unused reserved pieces.
2986 
2987   for (int j = 0; j < i; ++j) {
2988     if (base[j] != NULL) {
2989       unmap_memory(base[j], size[j]);
2990     }
2991   }
2992 
2993   return (i < max_tries) ? requested_addr : NULL;
2994 }
2995 
2996 bool os::pd_release_memory(char* addr, size_t bytes) {
2997   size_t size = bytes;
2998   return munmap(addr, size) == 0;
2999 }
3000 
3001 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
3002   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
3003          "addr must be page aligned");
3004   int retVal = mprotect(addr, bytes, prot);
3005   return retVal == 0;
3006 }
3007 
3008 // Protect memory (Used to pass readonly pages through
3009 // JNI GetArray<type>Elements with empty arrays.)
3010 // Also, used for serialization page and for compressed oops null pointer
3011 // checking.
3012 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3013                         bool is_committed) {
3014   unsigned int p = 0;
3015   switch (prot) {
3016   case MEM_PROT_NONE: p = PROT_NONE; break;
3017   case MEM_PROT_READ: p = PROT_READ; break;
3018   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
3019   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3020   default:
3021     ShouldNotReachHere();
3022   }
3023   // is_committed is unused.
3024   return solaris_mprotect(addr, bytes, p);
3025 }
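// Illustrative use (hypothetical call, not taken from an actual call site):
// a caller that has committed a page and wants it read-only can do
//   os::protect_memory(page, os::vm_page_size(), os::MEM_PROT_READ, true);
// and later restore write access by protecting it with os::MEM_PROT_RW.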
3026 
// guard_memory and unguard_memory only happen within stack guard pages.
// Since ISM pertains only to the heap, guard and unguard memory should not
// happen within an ISM region.
3030 bool os::guard_memory(char* addr, size_t bytes) {
3031   return solaris_mprotect(addr, bytes, PROT_NONE);
3032 }
3033 
3034 bool os::unguard_memory(char* addr, size_t bytes) {
3035   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
3036 }
3037 
3038 // Large page support
3039 static size_t _large_page_size = 0;
3040 
3041 // Insertion sort for small arrays (descending order).
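// For example, an input of {8K, 4M, 64K} is rearranged to {4M, 64K, 8K}.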
3042 static void insertion_sort_descending(size_t* array, int len) {
3043   for (int i = 0; i < len; i++) {
3044     size_t val = array[i];
3045     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
3046       size_t tmp = array[key];
3047       array[key] = array[key - 1];
3048       array[key - 1] = tmp;
3049     }
3050   }
3051 }
3052 
3053 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
3054   const unsigned int usable_count = VM_Version::page_size_count();
3055   if (usable_count == 1) {
3056     return false;
3057   }
3058 
3059   // Find the right getpagesizes interface.  When solaris 11 is the minimum
3060   // build platform, getpagesizes() (without the '2') can be called directly.
3061   typedef int (*gps_t)(size_t[], int);
3062   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3063   if (gps_func == NULL) {
3064     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3065     if (gps_func == NULL) {
3066       if (warn) {
3067         warning("MPSS is not supported by the operating system.");
3068       }
3069       return false;
3070     }
3071   }
3072 
3073   // Fill the array of page sizes.
3074   int n = (*gps_func)(_page_sizes, page_sizes_max);
3075   assert(n > 0, "Solaris bug?");
3076 
3077   if (n == page_sizes_max) {
3078     // Add a sentinel value (necessary only if the array was completely filled
3079     // since it is static (zeroed at initialization)).
3080     _page_sizes[--n] = 0;
3081     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3082   }
3083   assert(_page_sizes[n] == 0, "missing sentinel");
3084   trace_page_sizes("available page sizes", _page_sizes, n);
3085 
3086   if (n == 1) return false;     // Only one page size available.
3087 
3088   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3089   // select up to usable_count elements.  First sort the array, find the first
3090   // acceptable value, then copy the usable sizes to the top of the array and
3091   // trim the rest.  Make sure to include the default page size :-).
3092   //
3093   // A better policy could get rid of the 4M limit by taking the sizes of the
3094   // important VM memory regions (java heap and possibly the code cache) into
3095   // account.
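  //
  // Worked example (hypothetical values): suppose getpagesizes() reported
  // {8K, 64K, 512K, 4M, 32M, 256M} (n == 6), usable_count is 4, the default
  // page size is 8K, and LargePageSizeInBytes was left at its default.
  // After the descending sort the array is {256M, 32M, 4M, 512K, 64K, 8K};
  // the 4M limit skips 256M and 32M (beg == 2) and end == MIN2(4, 6) - 1 == 3.
  // The copy loop keeps {4M, 512K, 64K}, the default 8K page is stored at
  // index 3 followed by the 0 sentinel, no re-sort is needed (8K is already
  // the smallest), and *page_size becomes 4M.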
3096   insertion_sort_descending(_page_sizes, n);
3097   const size_t size_limit =
3098     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3099   int beg;
3100   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
3101   const int end = MIN2((int)usable_count, n) - 1;
3102   for (int cur = 0; cur < end; ++cur, ++beg) {
3103     _page_sizes[cur] = _page_sizes[beg];
3104   }
3105   _page_sizes[end] = vm_page_size();
3106   _page_sizes[end + 1] = 0;
3107 
3108   if (_page_sizes[end] > _page_sizes[end - 1]) {
3109     // Default page size is not the smallest; sort again.
3110     insertion_sort_descending(_page_sizes, end + 1);
3111   }
3112   *page_size = _page_sizes[0];
3113 
3114   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3115   return true;
3116 }
3117 
3118 void os::large_page_init() {
3119   if (UseLargePages) {
3120     // print a warning if any large page related flag is specified on command line
3121     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
3122                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3123 
3124     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3125   }
3126 }
3127 
3128 bool os::Solaris::is_valid_page_size(size_t bytes) {
3129   for (int i = 0; _page_sizes[i] != 0; i++) {
3130     if (_page_sizes[i] == bytes) {
3131       return true;
3132     }
3133   }
3134   return false;
3135 }
3136 
3137 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3138   assert(is_valid_page_size(align), err_msg(SIZE_FORMAT " is not a valid page size", align));
3139   assert(is_ptr_aligned((void*) start, align),
3140          err_msg(PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align));
3141   assert(is_size_aligned(bytes, align),
3142          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align));
3143 
  // Signal to the OS that we want large pages for addresses
  // in the range [start, start + bytes).
3146   struct memcntl_mha mpss_struct;
3147   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3148   mpss_struct.mha_pagesize = align;
3149   mpss_struct.mha_flags = 0;
3150   // Upon successful completion, memcntl() returns 0
3151   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3152     debug_only(warning("Attempt to use MPSS failed."));
3153     return false;
3154   }
3155   return true;
3156 }
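// For illustration only (not an actual call site): a caller that has
// reserved and committed a region whose base and size are aligned to a
// supported 4M page size could advise large pages for it with
//   os::Solaris::setup_large_pages(base, size, 4 * M);
// which issues the MC_HAT_ADVISE memcntl() above; the kernel is then free,
// but not obliged, to back the range with 4M mappings.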
3157 
3158 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3159   fatal("os::reserve_memory_special should not be called on Solaris.");
3160   return NULL;
3161 }
3162 
3163 bool os::release_memory_special(char* base, size_t bytes) {
3164   fatal("os::release_memory_special should not be called on Solaris.");
3165   return false;
3166 }
3167 
3168 size_t os::large_page_size() {
3169   return _large_page_size;
3170 }
3171 
3172 // MPSS allows application to commit large page memory on demand; with ISM
3173 // the entire memory region must be allocated as shared memory.
3174 bool os::can_commit_large_page_memory() {
3175   return true;
3176 }
3177 
3178 bool os::can_execute_large_page_memory() {
3179   return true;
3180 }
3181 
3182 static int os_sleep(jlong millis, bool interruptible) {
3183   const jlong limit = INT_MAX;
3184   jlong prevtime;
3185   int res;
3186 
3187   while (millis > limit) {
3188     if ((res = os_sleep(limit, interruptible)) != OS_OK)
3189       return res;
3190     millis -= limit;
3191   }
3192 
3193   // Restart interrupted polls with new parameters until the proper delay
3194   // has been completed.
3195 
3196   prevtime = getTimeMillis();
3197 
3198   while (millis > 0) {
3199     jlong newtime;
3200 
3201     if (!interruptible) {
3202       // Following assert fails for os::yield_all:
3203       // assert(!thread->is_Java_thread(), "must not be java thread");
3204       res = poll(NULL, 0, millis);
3205     } else {
3206       JavaThread *jt = JavaThread::current();
3207 
3208       INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
3209         os::Solaris::clear_interrupted);
3210     }
3211 
    // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
    // Thread.interrupt().
3214 
3215     // See c/r 6751923. Poll can return 0 before time
3216     // has elapsed if time is set via clock_settime (as NTP does).
3217     // res == 0 if poll timed out (see man poll RETURN VALUES)
    // The logic below checks that we really did sleep for at least
    // "millis"; if not, we sleep again.
    if ((res == 0) || ((res == OS_ERR) && (errno == EINTR))) {
      newtime = getTimeMillis();
      assert(newtime >= prevtime, "time moving backwards");
      // Doing prevtime and newtime in microseconds doesn't help precision,
      // and trying to round up to avoid lost milliseconds can result in a
      // too-short delay.
      millis -= newtime - prevtime;
      if (millis <= 0)
        return OS_OK;
      prevtime = newtime;
3230     } else
3231       return res;
3232   }
3233 
3234   return OS_OK;
3235 }
3236 
3237 // Read calls from inside the vm need to perform state transitions
3238 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3239   INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
3240 }
3241 
3242 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3243   INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
3244 }
3245 
3246 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
3247   assert(thread == Thread::current(),  "thread consistency check");
3248 
3249   // TODO-FIXME: this should be removed.
  // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock
  // situation with a JavaThread being starved out of an lwp. The kernel doesn't seem to generate
  // a SIGWAITING signal, which would enable the threads library to create a new lwp for the starving
  // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
  // is fooled into believing that the system is making progress. In the code below we block
  // the watcher thread while a safepoint is in progress so that it will not appear as though the
  // system is making progress.
3257   if (!Solaris::T2_libthread() &&
3258       thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
3259     // We now try to acquire the threads lock. Since this lock is held by the VM thread during
3260     // the entire safepoint, the watcher thread will  line up here during the safepoint.
3261     Threads_lock->lock_without_safepoint_check();
3262     Threads_lock->unlock();
3263   }
3264 
3265   if (thread->is_Java_thread()) {
3266     // This is a JavaThread so we honor the _thread_blocked protocol
3267     // even for sleeps of 0 milliseconds. This was originally done
3268     // as a workaround for bug 4338139. However, now we also do it
3269     // to honor the suspend-equivalent protocol.
3270 
3271     JavaThread *jt = (JavaThread *) thread;
3272     ThreadBlockInVM tbivm(jt);
3273 
3274     jt->set_suspend_equivalent();
3275     // cleared by handle_special_suspend_equivalent_condition() or
3276     // java_suspend_self() via check_and_wait_while_suspended()
3277 
3278     int ret_code;
3279     if (millis <= 0) {
3280       thr_yield();
3281       ret_code = 0;
3282     } else {
3283       // The original sleep() implementation did not create an
3284       // OSThreadWaitState helper for sleeps of 0 milliseconds.
3285       // I'm preserving that decision for now.
3286       OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
3287 
3288       ret_code = os_sleep(millis, interruptible);
3289     }
3290 
3291     // were we externally suspended while we were waiting?
3292     jt->check_and_wait_while_suspended();
3293 
3294     return ret_code;
3295   }
3296 
3297   // non-JavaThread from this point on:
3298 
3299   if (millis <= 0) {
3300     thr_yield();
3301     return 0;
3302   }
3303 
3304   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
3305 
3306   return os_sleep(millis, interruptible);
3307 }
3308 
3309 void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptible sleep, short time use only");
3311 
3312   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3313   // Solaris requires -lrt for this.
3314   usleep((ms * 1000));
3315 
3316   return;
3317 }
3318 
3319 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3320 void os::infinite_sleep() {
3321   while (true) {    // sleep forever ...
3322     ::sleep(100);   // ... 100 seconds at a time
3323   }
3324 }
3325 
3326 // Used to convert frequent JVM_Yield() to nops
3327 bool os::dont_yield() {
3328   if (DontYieldALot) {
3329     static hrtime_t last_time = 0;
3330     hrtime_t diff = getTimeNanos() - last_time;
3331 
3332     if (diff < DontYieldALotInterval * 1000000)
3333       return true;
3334 
3335     last_time += diff;
3336 
3337     return false;
3338   }
3339   else {
3340     return false;
3341   }
3342 }
3343 
3344 // Caveat: Solaris os::yield() causes a thread-state transition whereas
3345 // the linux and win32 implementations do not.  This should be checked.
3346 
3347 void os::yield() {
3348   // Yields to all threads with same or greater priority
3349   os::sleep(Thread::current(), 0, false);
3350 }
3351 
3352 // Note that yield semantics are defined by the scheduling class to which
// the thread currently belongs.  Typically, yield will _not_ yield to
3354 // other equal or higher priority threads that reside on the dispatch queues
3355 // of other CPUs.
3356 
3357 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
3358 
3359 
3360 // On Solaris we found that yield_all doesn't always yield to all other threads.
3361 // There have been cases where there is a thread ready to execute but it doesn't
3362 // get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
3363 // The 1 millisecond wait doesn't seem long enough for the kernel to issue a
3364 // SIGWAITING signal which will cause a new lwp to be created. So we count the
// number of times yield_all is called in one loop and increase the sleep
// time after 8 attempts. If this fails too, we increase the concurrency level
// so that the starving thread will get an lwp.
3368 
3369 void os::yield_all(int attempts) {
3370   // Yields to all threads, including threads with lower priorities
3371   if (attempts == 0) {
3372     os::sleep(Thread::current(), 1, false);
3373   } else {
3374     int iterations = attempts % 30;
3375     if (iterations == 0 && !os::Solaris::T2_libthread()) {
3376       // thr_setconcurrency and _getconcurrency make sense only under T1.
3377       int noofLWPS = thr_getconcurrency();
3378       if (noofLWPS < (Threads::number_of_threads() + 2)) {
3379         thr_setconcurrency(thr_getconcurrency() + 1);
3380       }
3381     } else if (iterations < 25) {
3382       os::sleep(Thread::current(), 1, false);
3383     } else {
3384       os::sleep(Thread::current(), 10, false);
3385     }
3386   }
3387 }
3388 
3389 // Called from the tight loops to possibly influence time-sharing heuristics
3390 void os::loop_breaker(int attempts) {
3391   os::yield_all(attempts);
3392 }
3393 
3394 
// Interface for setting lwp priorities.  If we are using T2 libthread,
// which forces the use of bound threads, or we manually set UseBoundThreads,
// all of our threads will be assigned to real lwp's.  Using the thr_setprio
// function is meaningless in this mode, so we must adjust the real lwp's
// priority.  The routines below implement the getting and setting of lwp
// priorities.
//
// Note: There are three priority scales used on Solaris.  Java priorities,
//       which range from 1 to 10; the libthread "thr_setprio" scale, which
//       ranges from 0 to 127; and the priority range of the scheduling class
//       the process is running in, which is typically from -60 to +60.
//       The setting of the lwp priority is done after a call to thr_setprio,
//       so Java priorities are mapped to libthread priorities and we map from
//       the latter to lwp priorities.  We don't keep priorities stored in
//       Java priorities since some of our worker threads want to set priorities
//       higher than all Java threads.
3410 //
3411 // For related information:
3412 // (1)  man -s 2 priocntl
3413 // (2)  man -s 4 priocntl
3414 // (3)  man dispadmin
3415 // =    librt.so
3416 // =    libthread/common/rtsched.c - thrp_setlwpprio().
3417 // =    ps -cL <pid> ... to validate priority.
3418 // =    sched_get_priority_min and _max
3419 //              pthread_create
3420 //              sched_setparam
3421 //              pthread_setschedparam
3422 //
3423 // Assumptions:
// +    We assume that all threads in the process belong to the same
//              scheduling class, i.e., a homogeneous process.
// +    Must be root or in the IA group to change the "interactive" attribute.
3427 //              Priocntl() will fail silently.  The only indication of failure is when
3428 //              we read-back the value and notice that it hasn't changed.
3429 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3430 // +    For RT, change timeslice as well.  Invariant:
3431 //              constant "priority integral"
3432 //              Konst == TimeSlice * (60-Priority)
3433 //              Given a priority, compute appropriate timeslice.
3434 // +    Higher numerical values have higher priority.
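//
// Putting the three scales together (illustrative only; assumes the default
// java_to_os_priority table below and a TS class whose user priority range
// is -60..60): a request for Java priority 3 first maps to 64 on the
// libthread 0..127 scale, thr_setprio() is called with 64, and, for a bound
// thread, set_lwp_class_and_priority() then rescales 64 into the class range
// via scale_to_lwp_priority(-60, 60, 64) == 0 and applies it with
// priocntl(PC_SETPARMS).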
3435 
3436 // sched class attributes
3437 typedef struct {
3438         int   schedPolicy;              // classID
3439         int   maxPrio;
3440         int   minPrio;
3441 } SchedInfo;
3442 
3443 
3444 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3445 
3446 #ifdef ASSERT
3447 static int  ReadBackValidate = 1;
3448 #endif
3449 static int  myClass     = 0;
3450 static int  myMin       = 0;
3451 static int  myMax       = 0;
3452 static int  myCur       = 0;
3453 static bool priocntl_enable = false;
3454 
3455 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
3456 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3457 
3458 
3459 // lwp_priocntl_init
3460 //
3461 // Try to determine the priority scale for our process.
3462 //
3463 // Return errno or 0 if OK.
3464 //
3465 static int lwp_priocntl_init () {
3466   int rslt;
3467   pcinfo_t ClassInfo;
3468   pcparms_t ParmInfo;
3469   int i;
3470 
3471   if (!UseThreadPriorities) return 0;
3472 
3473   // We are using Bound threads, we need to determine our priority ranges
3474   if (os::Solaris::T2_libthread() || UseBoundThreads) {
3475     // If ThreadPriorityPolicy is 1, switch tables
3476     if (ThreadPriorityPolicy == 1) {
3477       for (i = 0 ; i < CriticalPriority+1; i++)
3478         os::java_to_os_priority[i] = prio_policy1[i];
3479     }
3480     if (UseCriticalJavaThreadPriority) {
3481       // MaxPriority always maps to the FX scheduling class and criticalPrio.
3482       // See set_native_priority() and set_lwp_class_and_priority().
3483       // Save original MaxPriority mapping in case attempt to
3484       // use critical priority fails.
3485       java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3486       // Set negative to distinguish from other priorities
3487       os::java_to_os_priority[MaxPriority] = -criticalPrio;
3488     }
3489   }
  // Not using bound threads; fall back to the ThreadPriorityPolicy==1 table.
3491   else {
3492     for ( i = 0 ; i < CriticalPriority+1; i++ ) {
3493       os::java_to_os_priority[i] = prio_policy1[i];
3494     }
3495     return 0;
3496   }
3497 
3498   // Get IDs for a set of well-known scheduling classes.
  // TODO-FIXME: GETCLINFO returns the current # of classes in
  // the system.  We should have a loop that iterates over the
3501   // classID values, which are known to be "small" integers.
3502 
3503   strcpy(ClassInfo.pc_clname, "TS");
3504   ClassInfo.pc_cid = -1;
3505   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3506   if (rslt < 0) return errno;
3507   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3508   tsLimits.schedPolicy = ClassInfo.pc_cid;
3509   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3510   tsLimits.minPrio = -tsLimits.maxPrio;
3511 
3512   strcpy(ClassInfo.pc_clname, "IA");
3513   ClassInfo.pc_cid = -1;
3514   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3515   if (rslt < 0) return errno;
3516   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3517   iaLimits.schedPolicy = ClassInfo.pc_cid;
3518   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3519   iaLimits.minPrio = -iaLimits.maxPrio;
3520 
3521   strcpy(ClassInfo.pc_clname, "RT");
3522   ClassInfo.pc_cid = -1;
3523   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3524   if (rslt < 0) return errno;
3525   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3526   rtLimits.schedPolicy = ClassInfo.pc_cid;
3527   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3528   rtLimits.minPrio = 0;
3529 
3530   strcpy(ClassInfo.pc_clname, "FX");
3531   ClassInfo.pc_cid = -1;
3532   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3533   if (rslt < 0) return errno;
3534   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3535   fxLimits.schedPolicy = ClassInfo.pc_cid;
3536   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3537   fxLimits.minPrio = 0;
3538 
3539   // Query our "current" scheduling class.
3540   // This will normally be IA, TS or, rarely, FX or RT.
3541   memset(&ParmInfo, 0, sizeof(ParmInfo));
3542   ParmInfo.pc_cid = PC_CLNULL;
3543   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3544   if (rslt < 0) return errno;
3545   myClass = ParmInfo.pc_cid;
3546 
3547   // We now know our scheduling classId, get specific information
3548   // about the class.
3549   ClassInfo.pc_cid = myClass;
3550   ClassInfo.pc_clname[0] = 0;
3551   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3552   if (rslt < 0) return errno;
3553 
3554   if (ThreadPriorityVerbose) {
3555     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3556   }
3557 
3558   memset(&ParmInfo, 0, sizeof(pcparms_t));
3559   ParmInfo.pc_cid = PC_CLNULL;
3560   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3561   if (rslt < 0) return errno;
3562 
3563   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3564     myMin = rtLimits.minPrio;
3565     myMax = rtLimits.maxPrio;
3566   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3567     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3568     myMin = iaLimits.minPrio;
3569     myMax = iaLimits.maxPrio;
3570     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3571   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3572     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3573     myMin = tsLimits.minPrio;
3574     myMax = tsLimits.maxPrio;
3575     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3576   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3577     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3578     myMin = fxLimits.minPrio;
3579     myMax = fxLimits.maxPrio;
3580     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3581   } else {
3582     // No clue - punt
3583     if (ThreadPriorityVerbose)
3584       tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
3585     return EINVAL;      // no clue, punt
3586   }
3587 
3588   if (ThreadPriorityVerbose) {
3589     tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
3590   }
3591 
3592   priocntl_enable = true;  // Enable changing priorities
3593   return 0;
3594 }
3595 
3596 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3597 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3598 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3599 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3600 
3601 
3602 // scale_to_lwp_priority
3603 //
3604 // Convert from the libthread "thr_setprio" scale to our current
3605 // lwp scheduling class scale.
3606 //
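// For example, with a hypothetical class range of rMin == -60 and rMax == 60:
// x == 0 maps to -60, x == 64 maps to (64 * 120) / 128 - 60 == 0, and
// x == 127 returns rMax (60) directly, avoiding the round-down the formula
// would otherwise produce ((127 * 120) / 128 - 60 == 59).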
3607 static
3608 int     scale_to_lwp_priority (int rMin, int rMax, int x)
3609 {
3610   int v;
3611 
  if (x == 127) return rMax;            // avoid round-down
  v = ((x * (rMax - rMin)) / 128) + rMin;
3614   return v;
3615 }
3616 
3617 
3618 // set_lwp_class_and_priority
3619 //
3620 // Set the class and priority of the lwp.  This call should only
3621 // be made when using bound threads (T2 threads are bound by default).
3622 //
3623 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3624                                int newPrio, int new_class, bool scale) {
3625   int rslt;
3626   int Actual, Expected, prv;
3627   pcparms_t ParmInfo;                   // for GET-SET
3628 #ifdef ASSERT
3629   pcparms_t ReadBack;                   // for readback
3630 #endif
3631 
3632   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3633   // Query current values.
3634   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3635   // Cache "pcparms_t" in global ParmCache.
3636   // TODO: elide set-to-same-value
3637 
3638   // If something went wrong on init, don't change priorities.
3639   if ( !priocntl_enable ) {
3640     if (ThreadPriorityVerbose)
3641       tty->print_cr("Trying to set priority but init failed, ignoring");
3642     return EINVAL;
3643   }
3644 
3645   // If lwp hasn't started yet, just return
3646   // the _start routine will call us again.
3647   if ( lwpid <= 0 ) {
3648     if (ThreadPriorityVerbose) {
3649       tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
3650                      INTPTR_FORMAT " to %d, lwpid not set",
3651                      ThreadID, newPrio);
3652     }
3653     return 0;
3654   }
3655 
3656   if (ThreadPriorityVerbose) {
3657     tty->print_cr ("set_lwp_class_and_priority("
3658                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3659                    ThreadID, lwpid, newPrio);
3660   }
3661 
3662   memset(&ParmInfo, 0, sizeof(pcparms_t));
3663   ParmInfo.pc_cid = PC_CLNULL;
3664   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3665   if (rslt < 0) return errno;
3666 
3667   int cur_class = ParmInfo.pc_cid;
3668   ParmInfo.pc_cid = (id_t)new_class;
3669 
3670   if (new_class == rtLimits.schedPolicy) {
3671     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3672     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3673                                                        rtLimits.maxPrio, newPrio)
3674                                : newPrio;
3675     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3676     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3677     if (ThreadPriorityVerbose) {
3678       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3679     }
3680   } else if (new_class == iaLimits.schedPolicy) {
3681     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3682     int maxClamped     = MIN2(iaLimits.maxPrio,
3683                               cur_class == new_class
3684                                 ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3685     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3686                                                        maxClamped, newPrio)
3687                                : newPrio;
3688     iaInfo->ia_uprilim = cur_class == new_class
3689                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3690     iaInfo->ia_mode    = IA_NOCHANGE;
3691     if (ThreadPriorityVerbose) {
3692       tty->print_cr("IA: [%d...%d] %d->%d\n",
3693                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3694     }
3695   } else if (new_class == tsLimits.schedPolicy) {
3696     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3697     int maxClamped     = MIN2(tsLimits.maxPrio,
3698                               cur_class == new_class
3699                                 ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3700     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3701                                                        maxClamped, newPrio)
3702                                : newPrio;
3703     tsInfo->ts_uprilim = cur_class == new_class
3704                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3705     if (ThreadPriorityVerbose) {
3706       tty->print_cr("TS: [%d...%d] %d->%d\n",
3707                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3708     }
3709   } else if (new_class == fxLimits.schedPolicy) {
3710     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3711     int maxClamped     = MIN2(fxLimits.maxPrio,
3712                               cur_class == new_class
3713                                 ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3714     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3715                                                        maxClamped, newPrio)
3716                                : newPrio;
3717     fxInfo->fx_uprilim = cur_class == new_class
3718                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3719     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3720     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3721     if (ThreadPriorityVerbose) {
3722       tty->print_cr("FX: [%d...%d] %d->%d\n",
3723                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3724     }
3725   } else {
3726     if (ThreadPriorityVerbose) {
3727       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3728     }
3729     return EINVAL;    // no clue, punt
3730   }
3731 
3732   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3733   if (ThreadPriorityVerbose && rslt) {
3734     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3735   }
3736   if (rslt < 0) return errno;
3737 
3738 #ifdef ASSERT
3739   // Sanity check: read back what we just attempted to set.
3740   // In theory it could have changed in the interim ...
3741   //
3742   // The priocntl system call is tricky.
3743   // Sometimes it'll validate the priority value argument and
3744   // return EINVAL if unhappy.  At other times it fails silently.
3745   // Readbacks are prudent.
3746 
3747   if (!ReadBackValidate) return 0;
3748 
3749   memset(&ReadBack, 0, sizeof(pcparms_t));
3750   ReadBack.pc_cid = PC_CLNULL;
3751   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3752   assert(rslt >= 0, "priocntl failed");
3753   Actual = Expected = 0xBAD;
3754   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3755   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3756     Actual   = RTPRI(ReadBack)->rt_pri;
3757     Expected = RTPRI(ParmInfo)->rt_pri;
3758   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3759     Actual   = IAPRI(ReadBack)->ia_upri;
3760     Expected = IAPRI(ParmInfo)->ia_upri;
3761   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3762     Actual   = TSPRI(ReadBack)->ts_upri;
3763     Expected = TSPRI(ParmInfo)->ts_upri;
3764   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3765     Actual   = FXPRI(ReadBack)->fx_upri;
3766     Expected = FXPRI(ParmInfo)->fx_upri;
3767   } else {
3768     if (ThreadPriorityVerbose) {
3769       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3770                     ParmInfo.pc_cid);
3771     }
3772   }
3773 
3774   if (Actual != Expected) {
3775     if (ThreadPriorityVerbose) {
3776       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3777                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3778     }
3779   }
3780 #endif
3781 
3782   return 0;
3783 }
3784 
3785 // Solaris only gives access to 128 real priorities at a time,
3786 // so we expand Java's ten to fill this range.  This would be better
3787 // if we dynamically adjusted relative priorities.
3788 //
3789 // The ThreadPriorityPolicy option allows us to select 2 different
3790 // priority scales.
3791 //
3792 // ThreadPriorityPolicy=0
// Since the Solaris default priority is MaximumPriority, we do not
3794 // set a priority lower than Max unless a priority lower than
3795 // NormPriority is requested.
3796 //
3797 // ThreadPriorityPolicy=1
3798 // This mode causes the priority table to get filled with
// linear values.  NormPriority gets mapped to 50% of the
// maximum priority and so on.  This will cause VM threads
// to receive unfair treatment relative to other Solaris processes
3802 // which do not explicitly alter their thread priorities.
3803 //
3804 
3805 int os::java_to_os_priority[CriticalPriority + 1] = {
3806   -99999,         // 0 Entry should never be used
3807 
3808   0,              // 1 MinPriority
3809   32,             // 2
3810   64,             // 3
3811 
3812   96,             // 4
3813   127,            // 5 NormPriority
3814   127,            // 6
3815 
3816   127,            // 7
3817   127,            // 8
3818   127,            // 9 NearMaxPriority
3819 
3820   127,            // 10 MaxPriority
3821 
3822   -criticalPrio   // 11 CriticalPriority
3823 };
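// Read together with the ThreadPriorityPolicy comment above: under the
// default policy 0, Java priorities 5 (NormPriority) through 10 (MaxPriority)
// all map to the top libthread priority (127), so only priorities below Norm
// are actually lowered; policy 1 replaces these entries with the linear
// prio_policy1 values in lwp_priocntl_init().  The CriticalPriority entry is
// negative (-criticalPrio) so that set_native_priority() can recognize a
// critical request and route it to the FX class.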
3824 
3825 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3826   OSThread* osthread = thread->osthread();
3827 
3828   // Save requested priority in case the thread hasn't been started
3829   osthread->set_native_priority(newpri);
3830 
3831   // Check for critical priority request
3832   bool fxcritical = false;
3833   if (newpri == -criticalPrio) {
3834     fxcritical = true;
3835     newpri = criticalPrio;
3836   }
3837 
3838   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3839   if (!UseThreadPriorities) return OS_OK;
3840 
3841   int status = 0;
3842 
3843   if (!fxcritical) {
3844     // Use thr_setprio only if we have a priority that thr_setprio understands
3845     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3846   }
3847 
3848   if (os::Solaris::T2_libthread() ||
3849       (UseBoundThreads && osthread->is_vm_created())) {
3850     int lwp_status =
3851       set_lwp_class_and_priority(osthread->thread_id(),
3852                                  osthread->lwp_id(),
3853                                  newpri,
3854                                  fxcritical ? fxLimits.schedPolicy : myClass,
3855                                  !fxcritical);
3856     if (lwp_status != 0 && fxcritical) {
3857       // Try again, this time without changing the scheduling class
3858       newpri = java_MaxPriority_to_os_priority;
3859       lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3860                                               osthread->lwp_id(),
3861                                               newpri, myClass, false);
3862     }
3863     status |= lwp_status;
3864   }
3865   return (status == 0) ? OS_OK : OS_ERR;
3866 }
3867 
3868 
3869 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
3870   int p;
3871   if ( !UseThreadPriorities ) {
3872     *priority_ptr = NormalPriority;
3873     return OS_OK;
3874   }
3875   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3876   if (status != 0) {
3877     return OS_ERR;
3878   }
3879   *priority_ptr = p;
3880   return OS_OK;
3881 }
3882 
3883 
3884 // Hint to the underlying OS that a task switch would not be good.
3885 // Void return because it's a hint and can fail.
3886 void os::hint_no_preempt() {
3887   schedctl_start(schedctl_init());
3888 }
3889 
3890 static void resume_clear_context(OSThread *osthread) {
3891   osthread->set_ucontext(NULL);
3892 }
3893 
3894 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3895   osthread->set_ucontext(context);
3896 }
3897 
3898 static Semaphore sr_semaphore;
3899 
3900 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
3901   // Save and restore errno to avoid confusing native code with EINTR
3902   // after sigsuspend.
3903   int old_errno = errno;
3904 
3905   OSThread* osthread = thread->osthread();
3906   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3907 
3908   os::SuspendResume::State current = osthread->sr.state();
3909   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3910     suspend_save_context(osthread, uc);
3911 
3912     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3913     os::SuspendResume::State state = osthread->sr.suspended();
3914     if (state == os::SuspendResume::SR_SUSPENDED) {
3915       sigset_t suspend_set;  // signals for sigsuspend()
3916 
3917       // get current set of blocked signals and unblock resume signal
3918       thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
3919       sigdelset(&suspend_set, os::Solaris::SIGasync());
3920 
3921       sr_semaphore.signal();
3922       // wait here until we are resumed
3923       while (1) {
3924         sigsuspend(&suspend_set);
3925 
3926         os::SuspendResume::State result = osthread->sr.running();
3927         if (result == os::SuspendResume::SR_RUNNING) {
3928           sr_semaphore.signal();
3929           break;
3930         }
3931       }
3932 
3933     } else if (state == os::SuspendResume::SR_RUNNING) {
3934       // request was cancelled, continue
3935     } else {
3936       ShouldNotReachHere();
3937     }
3938 
3939     resume_clear_context(osthread);
3940   } else if (current == os::SuspendResume::SR_RUNNING) {
3941     // request was cancelled, continue
3942   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3943     // ignore
3944   } else {
3945     // ignore
3946   }
3947 
3948   errno = old_errno;
3949 }
3950 
3951 
3952 void os::interrupt(Thread* thread) {
3953   assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
3954 
3955   OSThread* osthread = thread->osthread();
3956 
3957   int isInterrupted = osthread->interrupted();
3958   if (!isInterrupted) {
3959       osthread->set_interrupted(true);
3960       OrderAccess::fence();
3961       // os::sleep() is implemented with either poll (NULL,0,timeout) or
3962       // by parking on _SleepEvent.  If the former, thr_kill will unwedge
3963       // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper.
3964       ParkEvent * const slp = thread->_SleepEvent ;
3965       if (slp != NULL) slp->unpark() ;
3966   }
3967 
3968   // For JSR166:  unpark after setting status but before thr_kill -dl
3969   if (thread->is_Java_thread()) {
3970     ((JavaThread*)thread)->parker()->unpark();
3971   }
3972 
3973   // Handle interruptible wait() ...
3974   ParkEvent * const ev = thread->_ParkEvent ;
3975   if (ev != NULL) ev->unpark() ;
3976 
3977   // When events are used everywhere for os::sleep, then this thr_kill
3978   // will only be needed if UseVMInterruptibleIO is true.
3979 
3980   if (!isInterrupted) {
3981     int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
3982     assert_status(status == 0, status, "thr_kill");
3983 
3984     // Bump thread interruption counter
3985     RuntimeService::record_thread_interrupt_signaled_count();
3986   }
3987 }
3988 
3989 
3990 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3991   assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
3992 
3993   OSThread* osthread = thread->osthread();
3994 
3995   bool res = osthread->interrupted();
3996 
3997   // NOTE that since there is no "lock" around these two operations,
3998   // there is the possibility that the interrupted flag will be
3999   // "false" but that the interrupt event will be set. This is
4000   // intentional. The effect of this is that Object.wait() will appear
4001   // to have a spurious wakeup, which is not harmful, and the
4002   // possibility is so rare that it is not worth the added complexity
4003   // to add yet another lock. It has also been recommended not to put
4004   // the interrupted flag into the os::Solaris::Event structure,
4005   // because it hides the issue.
4006   if (res && clear_interrupted) {
4007     osthread->set_interrupted(false);
4008   }
4009   return res;
4010 }
4011 
4012 
4013 void os::print_statistics() {
4014 }
4015 
4016 int os::message_box(const char* title, const char* message) {
4017   int i;
4018   fdStream err(defaultStream::error_fd());
4019   for (i = 0; i < 78; i++) err.print_raw("=");
4020   err.cr();
4021   err.print_raw_cr(title);
4022   for (i = 0; i < 78; i++) err.print_raw("-");
4023   err.cr();
4024   err.print_raw_cr(message);
4025   for (i = 0; i < 78; i++) err.print_raw("=");
4026   err.cr();
4027 
4028   char buf[16];
4029   // Prevent process from exiting upon "read error" without consuming all CPU
4030   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
4031 
4032   return buf[0] == 'y' || buf[0] == 'Y';
4033 }
4034 
4035 static int sr_notify(OSThread* osthread) {
4036   int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
4037   assert_status(status == 0, status, "thr_kill");
4038   return status;
4039 }
4040 
4041 // "Randomly" selected value for how long we want to spin
4042 // before bailing out on suspending a thread, also how often
4043 // we send a signal to a thread we want to resume
4044 static const int RANDOMLY_LARGE_INTEGER = 1000000;
4045 static const int RANDOMLY_LARGE_INTEGER2 = 100;
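
// Outline of the suspend/resume handshake implemented by do_suspend(),
// do_resume() and os::Solaris::SR_handler() above: the requester moves the
// target's SuspendResume state to SR_SUSPEND_REQUEST and sends SIGasync via
// sr_notify(); the target's SR_handler() saves its ucontext, moves to
// SR_SUSPENDED, posts sr_semaphore and parks in sigsuspend().  do_suspend()
// returns true once it has consumed that post; on timeout it tries to cancel
// the request and returns false if the target never suspended.  do_resume()
// moves the state to SR_WAKEUP_REQUEST and keeps signalling until the handler
// wakes from sigsuspend(), observes SR_RUNNING, posts the semaphore again and
// returns from the signal handler.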
4046 
4047 static bool do_suspend(OSThread* osthread) {
4048   assert(osthread->sr.is_running(), "thread should be running");
4049   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
4050 
4051   // mark as suspended and send signal
4052   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
4053     // failed to switch, state wasn't running?
4054     ShouldNotReachHere();
4055     return false;
4056   }
4057 
4058   if (sr_notify(osthread) != 0) {
4059     ShouldNotReachHere();
4060   }
4061 
4062   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
4063   while (true) {
4064     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
4065       break;
4066     } else {
4067       // timeout
4068       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
4069       if (cancelled == os::SuspendResume::SR_RUNNING) {
4070         return false;
4071       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
4072         // make sure that we consume the signal on the semaphore as well
4073         sr_semaphore.wait();
4074         break;
4075       } else {
4076         ShouldNotReachHere();
4077         return false;
4078       }
4079     }
4080   }
4081 
4082   guarantee(osthread->sr.is_suspended(), "Must be suspended");
4083   return true;
4084 }
4085 
4086 static void do_resume(OSThread* osthread) {
4087   assert(osthread->sr.is_suspended(), "thread should be suspended");
4088   assert(!sr_semaphore.trywait(), "invalid semaphore state");
4089 
4090   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
4091     // failed to switch to WAKEUP_REQUEST
4092     ShouldNotReachHere();
4093     return;
4094   }
4095 
4096   while (true) {
4097     if (sr_notify(osthread) == 0) {
4098       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
4099         if (osthread->sr.is_running()) {
4100           return;
4101         }
4102       }
4103     } else {
4104       ShouldNotReachHere();
4105     }
4106   }
4107 
4108   guarantee(osthread->sr.is_running(), "Must be running!");
4109 }
4110 
4111 void os::SuspendedThreadTask::internal_do_task() {
4112   if (do_suspend(_thread->osthread())) {
4113     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
4114     do_task(context);
4115     do_resume(_thread->osthread());
4116   }
4117 }
4118 
4119 class PcFetcher : public os::SuspendedThreadTask {
4120 public:
4121   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
4122   ExtendedPC result();
4123 protected:
4124   void do_task(const os::SuspendedThreadTaskContext& context);
4125 private:
4126   ExtendedPC _epc;
4127 };
4128 
4129 ExtendedPC PcFetcher::result() {
4130   guarantee(is_done(), "task is not done yet.");
4131   return _epc;
4132 }
4133 
4134 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
4135   Thread* thread = context.thread();
4136   OSThread* osthread = thread->osthread();
4137   if (osthread->ucontext() != NULL) {
4138     _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
4139   } else {
4140     // NULL context is unexpected, double-check this is the VMThread
4141     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
4142   }
4143 }
4144 
4145 // A lightweight implementation that does not suspend the target thread and
4146 // thus returns only a hint. Used for profiling only!
4147 ExtendedPC os::get_thread_pc(Thread* thread) {
4148   // Make sure that it is called by the watcher and the Threads lock is owned.
4149   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
4150   // For now, is only used to profile the VM Thread
4151   assert(thread->is_VM_thread(), "Can only be called for VMThread");
4152   PcFetcher fetcher(thread);
4153   fetcher.run();
4154   return fetcher.result();
4155 }
4156 
4157 
4158 // This does not do anything on Solaris. This is basically a hook for being
4159 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
4160 void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
4161   f(value, method, args, thread);
4162 }
4163 
4164 // This routine may be used by user applications as a "hook" to catch signals.
4165 // The user-defined signal handler must pass unrecognized signals to this
4166 // routine, and if it returns true (non-zero), then the signal handler must
4167 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
4170 //
4171 // If this routine returns false, it is OK to call it again.  This allows
4172 // the user-defined signal handler to perform checks either before or after
4173 // the VM performs its own checks.  Naturally, the user code would be making
4174 // a serious error if it tried to handle an exception (such as a null check
4175 // or breakpoint) that the VM was generating for its own correct operation.
4176 //
4177 // This routine may recognize any of the following kinds of signals:
4178 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
4179 // os::Solaris::SIGasync
4180 // It should be consulted by handlers for any of those signals.
4181 // It explicitly does not recognize os::Solaris::SIGinterrupt
4182 //
4183 // The caller of this routine must pass in the three arguments supplied
4184 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
4185 // field of the structure passed to sigaction().  This routine assumes that
4186 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
4187 //
4188 // Note that the VM will print warnings if it detects conflicting signal
4189 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
4190 //
4191 extern "C" JNIEXPORT int
4192 JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
4193                           int abort_if_unrecognized);
4194 
4195 
4196 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
4197   int orig_errno = errno;  // Preserve errno value over signal handler.
4198   JVM_handle_solaris_signal(sig, info, ucVoid, true);
4199   errno = orig_errno;
4200 }
4201 
/* Do not delete - if the guarantee is ever removed, a signal handler (even an
   empty one) is needed to provoke threads blocked on IO to return EINTR.
   Note: this explicitly does NOT call JVM_handle_solaris_signal and
   does NOT participate in signal chaining, because SA_RESTART must NOT
   be set for EINTR to work. */
4207 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
4208    if (UseSignalChaining) {
4209       struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
4210       if (actp && actp->sa_handler) {
4211         vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
4212       }
4213    }
4214 }
4215 
4216 // This boolean allows users to forward their own non-matching signals
4217 // to JVM_handle_solaris_signal, harmlessly.
4218 bool os::Solaris::signal_handlers_are_installed = false;
4219 
4220 // For signal-chaining
4221 bool os::Solaris::libjsig_is_loaded = false;
4222 typedef struct sigaction *(*get_signal_t)(int);
4223 get_signal_t os::Solaris::get_signal_action = NULL;
4224 
4225 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
4226   struct sigaction *actp = NULL;
4227 
4228   if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
4229     // Retrieve the old signal handler from libjsig
4230     actp = (*get_signal_action)(sig);
4231   }
4232   if (actp == NULL) {
4233     // Retrieve the preinstalled signal handler from jvm
4234     actp = get_preinstalled_handler(sig);
4235   }
4236 
4237   return actp;
4238 }
4239 
4240 static bool call_chained_handler(struct sigaction *actp, int sig,
4241                                  siginfo_t *siginfo, void *context) {
4242   // Call the old signal handler
4243   if (actp->sa_handler == SIG_DFL) {
4244     // It's more reasonable to let jvm treat it as an unexpected exception
4245     // instead of taking the default action.
4246     return false;
4247   } else if (actp->sa_handler != SIG_IGN) {
4248     if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
4250       sigaddset(&(actp->sa_mask), sig);
4251     }
4252 
4253     sa_handler_t hand;
4254     sa_sigaction_t sa;
4255     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
4256     // retrieve the chained handler
4257     if (siginfo_flag_set) {
4258       sa = actp->sa_sigaction;
4259     } else {
4260       hand = actp->sa_handler;
4261     }
4262 
4263     if ((actp->sa_flags & SA_RESETHAND) != 0) {
4264       actp->sa_handler = SIG_DFL;
4265     }
4266 
4267     // try to honor the signal mask
4268     sigset_t oset;
4269     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
4270 
4271     // call into the chained handler
4272     if (siginfo_flag_set) {
4273       (*sa)(sig, siginfo, context);
4274     } else {
4275       (*hand)(sig);
4276     }
4277 
4278     // restore the signal mask
4279     thr_sigsetmask(SIG_SETMASK, &oset, 0);
4280   }
4281   // Tell jvm's signal handler the signal is taken care of.
4282   return true;
4283 }
4284 
4285 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4286   bool chained = false;
4287   // signal-chaining
4288   if (UseSignalChaining) {
4289     struct sigaction *actp = get_chained_signal_action(sig);
4290     if (actp != NULL) {
4291       chained = call_chained_handler(actp, sig, siginfo, context);
4292     }
4293   }
4294   return chained;
4295 }
4296 
4297 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4298   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4299   if (preinstalled_sigs[sig] != 0) {
4300     return &chainedsigactions[sig];
4301   }
4302   return NULL;
4303 }
4304 
4305 void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
4306 
4307   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4308   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4309   chainedsigactions[sig] = oldAct;
4310   preinstalled_sigs[sig] = 1;
4311 }
4312 
4313 void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
4314   // Check for overwrite.
4315   struct sigaction oldAct;
4316   sigaction(sig, (struct sigaction*)NULL, &oldAct);
4317   void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
4318                                       : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
4319   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4320       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4321       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4322     if (AllowUserSignalHandlers || !set_installed) {
4323       // Do not overwrite; user takes responsibility to forward to us.
4324       return;
4325     } else if (UseSignalChaining) {
4326       if (oktochain) {
4327         // save the old handler in jvm
4328         save_preinstalled_handler(sig, oldAct);
4329       } else {
4330         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4331       }
4332       // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
4334     } else {
4335       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4336                     "%#lx for signal %d.", (long)oldhand, sig));
4337     }
4338   }
4339 
4340   struct sigaction sigAct;
4341   sigfillset(&(sigAct.sa_mask));
4342   sigAct.sa_handler = SIG_DFL;
4343 
4344   sigAct.sa_sigaction = signalHandler;
4345   // Handle SIGSEGV on alternate signal stack if
4346   // not using stack banging
4347   if (!UseStackBanging && sig == SIGSEGV) {
4348     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4349   // Interruptible i/o requires SA_RESTART cleared so EINTR
4350   // is returned instead of restarting system calls
4351   } else if (sig == os::Solaris::SIGinterrupt()) {
4352     sigemptyset(&sigAct.sa_mask);
4353     sigAct.sa_handler = NULL;
4354     sigAct.sa_flags = SA_SIGINFO;
4355     sigAct.sa_sigaction = sigINTRHandler;
4356   } else {
4357     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4358   }
4359   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4360 
4361   sigaction(sig, &sigAct, &oldAct);
4362 
4363   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4364                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4365   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4366 }
4367 
4368 
4369 #define DO_SIGNAL_CHECK(sig) \
4370   if (!sigismember(&check_signal_done, sig)) \
4371     os::Solaris::check_signal_handler(sig)
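
// For example, DO_SIGNAL_CHECK(SIGSEGV) expands to (roughly):
//
//   if (!sigismember(&check_signal_done, SIGSEGV))
//     os::Solaris::check_signal_handler(SIGSEGV);
//
// so each signal is re-verified only until it has been flagged in check_signal_done.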
4372 
4373 // This method is a periodic task used to check for misbehaving JNI applications
4374 // under CheckJNI; any other periodic checks can be added here.
4375 
4376 void os::run_periodic_checks() {
4377   // A big source of grief is hijacking virtual address 0x0 on Solaris,
4378   // thereby defeating NULL pointer checks.
4379   if(!check_addr0_done) check_addr0_done = check_addr0(tty);
4380 
4381   if (check_signals == false) return;
4382 
4383   // If the SEGV or BUS handler is overridden it could prevent
4384   // generation of the hs*.log file in the event of a crash; debugging
4385   // such a case can be very challenging, so we check the following
4386   // for good measure:
4387   DO_SIGNAL_CHECK(SIGSEGV);
4388   DO_SIGNAL_CHECK(SIGILL);
4389   DO_SIGNAL_CHECK(SIGFPE);
4390   DO_SIGNAL_CHECK(SIGBUS);
4391   DO_SIGNAL_CHECK(SIGPIPE);
4392   DO_SIGNAL_CHECK(SIGXFSZ);
4393 
4394   // ReduceSignalUsage allows the user to override these handlers
4395   // see comments at the very top and jvm_solaris.h
4396   if (!ReduceSignalUsage) {
4397     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4398     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4399     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4400     DO_SIGNAL_CHECK(BREAK_SIGNAL);
4401   }
4402 
4403   // See comments above for using JVM1/JVM2 and UseAltSigs
4404   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4405   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4406 
4407 }
4408 
4409 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4410 
4411 static os_sigaction_t os_sigaction = NULL;
4412 
4413 void os::Solaris::check_signal_handler(int sig) {
4414   char buf[O_BUFLEN];
4415   address jvmHandler = NULL;
4416 
4417   struct sigaction act;
4418   if (os_sigaction == NULL) {
4419     // only trust the default sigaction, in case it has been interposed
4420     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4421     if (os_sigaction == NULL) return;
4422   }
4423 
4424   os_sigaction(sig, (struct sigaction*)NULL, &act);
4425 
4426   address thisHandler = (act.sa_flags & SA_SIGINFO)
4427     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4428     : CAST_FROM_FN_PTR(address, act.sa_handler);
4429 
4430 
4431   switch(sig) {
4432     case SIGSEGV:
4433     case SIGBUS:
4434     case SIGFPE:
4435     case SIGPIPE:
4436     case SIGXFSZ:
4437     case SIGILL:
4438       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4439       break;
4440 
4441     case SHUTDOWN1_SIGNAL:
4442     case SHUTDOWN2_SIGNAL:
4443     case SHUTDOWN3_SIGNAL:
4444     case BREAK_SIGNAL:
4445       jvmHandler = (address)user_handler();
4446       break;
4447 
4448     default:
4449       int intrsig = os::Solaris::SIGinterrupt();
4450       int asynsig = os::Solaris::SIGasync();
4451 
4452       if (sig == intrsig) {
4453         jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4454       } else if (sig == asynsig) {
4455         jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4456       } else {
4457         return;
4458       }
4459       break;
4460   }
4461 
4462 
4463   if (thisHandler != jvmHandler) {
4464     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4465     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4466     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4467     // No need to check this sig any longer
4468     sigaddset(&check_signal_done, sig);
4469     // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned to SIG_IGN
4470     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
4471       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
4472                     exception_name(sig, buf, O_BUFLEN));
4473     }
4474   } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4475     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4476     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4477     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
4478     // No need to check this sig any longer
4479     sigaddset(&check_signal_done, sig);
4480   }
4481 
4482   // Print all the signal handler state
4483   if (sigismember(&check_signal_done, sig)) {
4484     print_signal_handlers(tty, buf, O_BUFLEN);
4485   }
4486 
4487 }
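
// When the check above fires - for instance because a native library has quietly
// replaced the SIGSEGV handler - the output is roughly of the form (handler names
// and addresses below are illustrative only):
//
//   Warning: SIGSEGV handler expected:signalHandler  found:0xff1234abcd
//
// followed by a dump of the full signal handler state via print_signal_handlers().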
4488 
4489 void os::Solaris::install_signal_handlers() {
4490   bool libjsigdone = false;
4491   signal_handlers_are_installed = true;
4492 
4493   // signal-chaining
4494   typedef void (*signal_setting_t)();
4495   signal_setting_t begin_signal_setting = NULL;
4496   signal_setting_t end_signal_setting = NULL;
4497   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4498                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4499   if (begin_signal_setting != NULL) {
4500     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4501                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4502     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4503                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4504     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4505                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4506     libjsig_is_loaded = true;
4507     if (os::Solaris::get_libjsig_version != NULL) {
4508       libjsigversion =  (*os::Solaris::get_libjsig_version)();
4509     }
4510     assert(UseSignalChaining, "should enable signal-chaining");
4511   }
4512   if (libjsig_is_loaded) {
4513     // Tell libjsig that the jvm is setting signal handlers
4514     (*begin_signal_setting)();
4515   }
4516 
4517   set_signal_handler(SIGSEGV, true, true);
4518   set_signal_handler(SIGPIPE, true, true);
4519   set_signal_handler(SIGXFSZ, true, true);
4520   set_signal_handler(SIGBUS, true, true);
4521   set_signal_handler(SIGILL, true, true);
4522   set_signal_handler(SIGFPE, true, true);
4523 
4524 
4525   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4526 
4527     // Pre-1.4.1 libjsig limits signal chaining to signals <= 32, so it
4528     // cannot register overridable signals, which might be > 32
4529     if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
4530       // Tell libjsig that the jvm has finished setting signal handlers
4531       (*end_signal_setting)();
4532       libjsigdone = true;
4533     }
4534   }
4535 
4536   // Never ok to chain our SIGinterrupt
4537   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4538   set_signal_handler(os::Solaris::SIGasync(), true, true);
4539 
4540   if (libjsig_is_loaded && !libjsigdone) {
4541     // Tell libjsig that the jvm has finished setting signal handlers
4542     (*end_signal_setting)();
4543   }
4544 
4545   // We don't activate the signal checker if libjsig is in place (we trust
4546   // ourselves), and if AllowUserSignalHandlers is set all bets are off anyway.
4547   // Log that signal checking is off only if -verbose:jni is specified.
4548   if (CheckJNICalls) {
4549     if (libjsig_is_loaded) {
4550       if (PrintJNIResolving) {
4551         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4552       }
4553       check_signals = false;
4554     }
4555     if (AllowUserSignalHandlers) {
4556       if (PrintJNIResolving) {
4557         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4558       }
4559       check_signals = false;
4560     }
4561   }
4562 }
4563 
4564 
4565 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
4566 
4567 const char * signames[] = {
4568   "SIG0",
4569   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4570   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4571   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4572   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4573   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4574   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4575   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4576   "SIGCANCEL", "SIGLOST"
4577 };
4578 
4579 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4580   if (0 < exception_code && exception_code <= SIGRTMAX) {
4581     // signal
4582     if (exception_code < sizeof(signames)/sizeof(const char*)) {
4583        jio_snprintf(buf, size, "%s", signames[exception_code]);
4584     } else {
4585        jio_snprintf(buf, size, "SIG%d", exception_code);
4586     }
4587     return buf;
4588   } else {
4589     return NULL;
4590   }
4591 }
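
// Example (Solaris signal numbering): os::exception_name(SIGSEGV, buf, sizeof(buf))
// copies "SIGSEGV" into buf (signames[11]); a signal number beyond the table but
// still <= SIGRTMAX is formatted as, e.g., "SIG41"; anything outside the range
// (0, SIGRTMAX] yields NULL.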
4592 
4593 // (Static) wrappers for the new libthread API
4594 int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
4595 int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
4596 int_fnP_thread_t_i os::Solaris::_thr_setmutator;
4597 int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
4598 int_fnP_thread_t os::Solaris::_thr_continue_mutator;
4599 
4600 // (Static) wrapper for getisax(2) call.
4601 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4602 
4603 // (Static) wrappers for the liblgrp API
4604 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4605 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4606 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4607 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4608 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4609 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4610 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4611 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4612 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4613 
4614 // (Static) wrapper for meminfo() call.
4615 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4616 
4617 static address resolve_symbol_lazy(const char* name) {
4618   address addr = (address) dlsym(RTLD_DEFAULT, name);
4619   if(addr == NULL) {
4620     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4621     addr = (address) dlsym(RTLD_NEXT, name);
4622   }
4623   return addr;
4624 }
4625 
4626 static address resolve_symbol(const char* name) {
4627   address addr = resolve_symbol_lazy(name);
4628   if(addr == NULL) {
4629     fatal(dlerror());
4630   }
4631   return addr;
4632 }
4633 
4634 
4635 
4636 // isT2_libthread()
4637 //
4638 // Routine to determine if we are currently using the new T2 libthread.
4639 //
4640 // We determine if we are using T2 by reading /proc/self/lstatus and
4641 // looking for a thread with the ASLWP bit set.  If we find this status
4642 // bit set, we must assume that we are NOT using T2.  The T2 team
4643 // has approved this algorithm.
4644 //
4645 // We need to determine if we are running with the new T2 libthread
4646 // since setting native thread priorities is handled differently
4647 // when using this library.  All threads created using T2 are bound
4648 // threads. Calling thr_setprio is meaningless in this case.
4649 //
4650 bool isT2_libthread() {
4651   static prheader_t * lwpArray = NULL;
4652   static int lwpSize = 0;
4653   static int lwpFile = -1;
4654   lwpstatus_t * that;
4655   char lwpName [128];
4656   bool isT2 = false;
4657 
4658 #define ADR(x)  ((uintptr_t)(x))
4659 #define LWPINDEX(ary,ix)   ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))
4660 
4661   lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
4662   if (lwpFile < 0) {
4663       if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
4664       return false;
4665   }
4666   lwpSize = 16*1024;
4667   for (;;) {
4668     ::lseek64 (lwpFile, 0, SEEK_SET);
4669     lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
4670     if (::read(lwpFile, lwpArray, lwpSize) < 0) {
4671       if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
4672       break;
4673     }
4674     if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
4675        // We got a good snapshot - now iterate over the list.
4676       int aslwpcount = 0;
4677       for (int i = 0; i < lwpArray->pr_nent; i++ ) {
4678         that = LWPINDEX(lwpArray,i);
4679         if (that->pr_flags & PR_ASLWP) {
4680           aslwpcount++;
4681         }
4682       }
4683       if (aslwpcount == 0) isT2 = true;
4684       break;
4685     }
4686     lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
4687     FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);  // retry.
4688   }
4689 
4690   FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
4691   ::close (lwpFile);
4692   if (ThreadPriorityVerbose) {
4693     if (isT2) tty->print_cr("We are running with a T2 libthread\n");
4694     else tty->print_cr("We are not running with a T2 libthread\n");
4695   }
4696   return isT2;
4697 }
4698 
4699 
4700 void os::Solaris::libthread_init() {
4701   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4702 
4703   // Determine if we are running with the new T2 libthread
4704   os::Solaris::set_T2_libthread(isT2_libthread());
4705 
4706   lwp_priocntl_init();
4707 
4708   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4709   if(func == NULL) {
4710     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
4711     // Guarantee that this VM is running on a new enough OS (5.6 or
4712     // later) that it will have a new enough libthread.so.
4713     guarantee(func != NULL, "libthread.so is too old.");
4714   }
4715 
4716   // Initialize the new libthread getstate API wrappers
4717   func = resolve_symbol("thr_getstate");
4718   os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));
4719 
4720   func = resolve_symbol("thr_setstate");
4721   os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));
4722 
4723   func = resolve_symbol("thr_setmutator");
4724   os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));
4725 
4726   func = resolve_symbol("thr_suspend_mutator");
4727   os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
4728 
4729   func = resolve_symbol("thr_continue_mutator");
4730   os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
4731 
4732   int size;
4733   void (*handler_info_func)(address *, int *);
4734   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4735   handler_info_func(&handler_start, &size);
4736   handler_end = handler_start + size;
4737 }
4738 
4739 
4740 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4741 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4742 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4743 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4744 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4745 int os::Solaris::_mutex_scope = USYNC_THREAD;
4746 
4747 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4748 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4749 int_fnP_cond_tP os::Solaris::_cond_signal;
4750 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4751 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4752 int_fnP_cond_tP os::Solaris::_cond_destroy;
4753 int os::Solaris::_cond_scope = USYNC_THREAD;
4754 
4755 void os::Solaris::synchronization_init() {
4756   if(UseLWPSynchronization) {
4757     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4758     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4759     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4760     os::Solaris::set_mutex_init(lwp_mutex_init);
4761     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4762     os::Solaris::set_mutex_scope(USYNC_THREAD);
4763 
4764     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4765     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4766     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4767     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4768     os::Solaris::set_cond_init(lwp_cond_init);
4769     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4770     os::Solaris::set_cond_scope(USYNC_THREAD);
4771   }
4772   else {
4773     os::Solaris::set_mutex_scope(USYNC_THREAD);
4774     os::Solaris::set_cond_scope(USYNC_THREAD);
4775 
4776     if(UsePthreads) {
4777       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4778       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4779       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4780       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4781       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4782 
4783       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4784       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4785       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4786       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4787       os::Solaris::set_cond_init(pthread_cond_default_init);
4788       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4789     }
4790     else {
4791       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4792       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4793       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4794       os::Solaris::set_mutex_init(::mutex_init);
4795       os::Solaris::set_mutex_destroy(::mutex_destroy);
4796 
4797       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4798       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4799       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4800       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4801       os::Solaris::set_cond_init(::cond_init);
4802       os::Solaris::set_cond_destroy(::cond_destroy);
4803     }
4804   }
4805 }
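
// A minimal usage sketch, assuming the usual os::Solaris wrapper accessors that
// simply dispatch through the function pointers selected above (the mutex object
// and calling code are illustrative):
//
//   mutex_t m;                     // initialized via the selected mutex_init
//   os::Solaris::mutex_lock(&m);   // ends up in _lwp_mutex_lock, pthread_mutex_lock
//   /* critical section */         // or mutex_lock, depending on the flags above
//   os::Solaris::mutex_unlock(&m);
//
// Callers never need to know which of the three families was chosen here.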
4806 
4807 bool os::Solaris::liblgrp_init() {
4808   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4809   if (handle != NULL) {
4810     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4811     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4812     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4813     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4814     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4815     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4816     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4817     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4818                                        dlsym(handle, "lgrp_cookie_stale")));
4819 
4820     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4821     set_lgrp_cookie(c);
4822     return true;
4823   }
4824   return false;
4825 }
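
// Once liblgrp_init() has succeeded, locality groups can be queried through the
// resolved entry points, e.g. (a sketch, assuming the corresponding os::Solaris
// accessors that forward to the pointers set above):
//
//   lgrp_id_t home = os::Solaris::lgrp_home(P_LWPID, P_MYID);  // home lgrp of this LWP
//   int ngroups    = os::Solaris::lgrp_nlgrps(os::Solaris::lgrp_cookie());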
4826 
4827 void os::Solaris::misc_sym_init() {
4828   address func;
4829 
4830   // getisax
4831   func = resolve_symbol_lazy("getisax");
4832   if (func != NULL) {
4833     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4834   }
4835 
4836   // meminfo
4837   func = resolve_symbol_lazy("meminfo");
4838   if (func != NULL) {
4839     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4840   }
4841 }
4842 
4843 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4844   assert(_getisax != NULL, "_getisax not set");
4845   return _getisax(array, n);
4846 }
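
// Usage sketch (hypothetical caller; the AV_* feature bits come from <sys/auxv.h>
// and are shown only as an example). Callers are expected to have verified that
// the getisax symbol was resolved before calling this wrapper.
//
//   uint32_t avs[2];
//   uint_t n = os::Solaris::getisax(avs, 2);
//   if (n > 0 && (avs[0] & AV_SPARC_VIS)) {
//     // the CPU advertises the VIS instruction set extension (SPARC only)
//   }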
4847 
4848 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4849 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4850 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4851 
4852 void init_pset_getloadavg_ptr(void) {
4853   pset_getloadavg_ptr =
4854     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4855   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4856     warning("pset_getloadavg function not found");
4857   }
4858 }
4859 
4860 int os::Solaris::_dev_zero_fd = -1;
4861 
4862 // this is called _before_ the global arguments have been parsed
4863 void os::init(void) {
4864   _initial_pid = getpid();
4865 
4866   max_hrtime = first_hrtime = gethrtime();
4867 
4868   init_random(1234567);
4869 
4870   page_size = sysconf(_SC_PAGESIZE);
4871   if (page_size == -1)
4872     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4873                   strerror(errno)));
4874   init_page_sizes((size_t) page_size);
4875 
4876   Solaris::initialize_system_info();
4877 
4878   // Initialize misc. symbols as soon as possible, so we can use them
4879   // if we need them.
4880   Solaris::misc_sym_init();
4881 
4882   int fd = ::open("/dev/zero", O_RDWR);
4883   if (fd < 0) {
4884     fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4885   } else {
4886     Solaris::set_dev_zero_fd(fd);
4887 
4888     // Close on exec, child won't inherit.
4889     fcntl(fd, F_SETFD, FD_CLOEXEC);
4890   }
4891 
4892   clock_tics_per_sec = CLK_TCK;
4893 
4894   // check if dladdr1() exists; dladdr1 can provide more information than
4895   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4896   // and is available on linker patches for 5.7 and 5.8.
4897   // libdl.so must already have been loaded; this call is just an entry lookup
4898   void * hdl = dlopen("libdl.so", RTLD_NOW);
4899   if (hdl)
4900     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4901 
4902   // (Solaris only) this switches to calls that actually do locking.
4903   ThreadCritical::initialize();
4904 
4905   main_thread = thr_self();
4906 
4907   // Constant minimum stack size allowed. It must be at least
4908   // the minimum of what the OS supports (thr_min_stack()), and
4909   // enough to allow the thread to get to user bytecode execution.
4910   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4911   // If the pagesize of the VM is greater than 8K determine the appropriate
4912   // number of initial guard pages.  The user can change this with the
4913   // command line arguments, if needed.
4914   if (vm_page_size() > 8*K) {
4915     StackYellowPages = 1;
4916     StackRedPages = 1;
4917     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4918   }
4919 }
4920 
4921 // To install functions for atexit system call
4922 extern "C" {
4923   static void perfMemory_exit_helper() {
4924     perfMemory_exit();
4925   }
4926 }
4927 
4928 // this is called _after_ the global arguments have been parsed
4929 jint os::init_2(void) {
4930   // try to enable extended file IO ASAP, see 6431278
4931   os::Solaris::try_enable_extended_io();
4932 
4933   // Allocate a single page and mark it as readable for safepoint polling.  Also
4934   // use this first mmap call to check support for MAP_ALIGN.
4935   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4936                                                       page_size,
4937                                                       MAP_PRIVATE | MAP_ALIGN,
4938                                                       PROT_READ);
4939   if (polling_page == NULL) {
4940     has_map_align = false;
4941     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
4942                                                 PROT_READ);
4943   }
4944 
4945   os::set_polling_page(polling_page);
4946 
4947 #ifndef PRODUCT
4948   if( Verbose && PrintMiscellaneous )
4949     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
4950 #endif
4951 
4952   if (!UseMembar) {
4953     address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
4954     guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
4955     os::set_memory_serialize_page( mem_serialize_page );
4956 
4957 #ifndef PRODUCT
4958     if(Verbose && PrintMiscellaneous)
4959       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
4960 #endif
4961   }
4962 
4963   // Check minimum allowable stack size for thread creation and to initialize
4964   // the java system classes, including StackOverflowError - depends on page
4965   // size.  Add a page for compiler2 recursion in main thread.
4966   // Add in 2*BytesPerWord times page size to account for VM stack during
4967   // class initialization depending on 32 or 64 bit VM.
4968   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
4969             (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
4970                     2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
4971 
4972   size_t threadStackSizeInBytes = ThreadStackSize * K;
4973   if (threadStackSizeInBytes != 0 &&
4974     threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
4975     tty->print_cr("\nThe stack size specified is too small. Specify at least %dk",
4976                   os::Solaris::min_stack_allowed/K);
4977     return JNI_ERR;
4978   }
4979 
4980   // On systems with a 64kb page size the usable default stack size is
4981   // quite a bit less.  Increase the stack for 64kb (or any larger than
4982   // 8kb) pages; this increases virtual memory fragmentation (since we're
4983   // not creating the stack on a power of 2 boundary).  The real fix
4984   // should be to fix the guard page mechanism.
4986 
4987   if (vm_page_size() > 8*K) {
4988       threadStackSizeInBytes = (threadStackSizeInBytes != 0)
4989          ? threadStackSizeInBytes +
4990            ((StackYellowPages + StackRedPages) * vm_page_size())
4991          : 0;
4992       ThreadStackSize = threadStackSizeInBytes/K;
4993   }
4994 
4995   // Make the stack size a multiple of the page size so that
4996   // the yellow/red zones can be guarded.
4997   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
4998         vm_page_size()));
4999 
5000   Solaris::libthread_init();
5001 
5002   if (UseNUMA) {
5003     if (!Solaris::liblgrp_init()) {
5004       UseNUMA = false;
5005     } else {
5006       size_t lgrp_limit = os::numa_get_groups_num();
5007       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
5008       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
5009       FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
5010       if (lgrp_num < 2) {
5011         // There's only one locality group, disable NUMA.
5012         UseNUMA = false;
5013       }
5014     }
5015     if (!UseNUMA && ForceNUMA) {
5016       UseNUMA = true;
5017     }
5018   }
5019 
5020   Solaris::signal_sets_init();
5021   Solaris::init_signal_mem();
5022   Solaris::install_signal_handlers();
5023 
5024   if (libjsigversion < JSIG_VERSION_1_4_1) {
5025     Maxlibjsigsigs = OLDMAXSIGNUM;
5026   }
5027 
5028   // initialize synchronization primitives to use either thread or
5029   // lwp synchronization (controlled by UseLWPSynchronization)
5030   Solaris::synchronization_init();
5031 
5032   if (MaxFDLimit) {
5033     // Set the number of file descriptors to the maximum; print an error
5034     // if getrlimit/setrlimit fails, but continue regardless.
5035     struct rlimit nbr_files;
5036     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
5037     if (status != 0) {
5038       if (PrintMiscellaneous && (Verbose || WizardMode))
5039         perror("os::init_2 getrlimit failed");
5040     } else {
5041       nbr_files.rlim_cur = nbr_files.rlim_max;
5042       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
5043       if (status != 0) {
5044         if (PrintMiscellaneous && (Verbose || WizardMode))
5045           perror("os::init_2 setrlimit failed");
5046       }
5047     }
5048   }
5049 
5050   // Calculate the theoretical max. number of threads to guard against
5051   // artificial out-of-memory situations, where all available address
5052   // space has been reserved by thread stacks. Default stack size is 1Mb.
5053   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
5054     JavaThread::stack_size_at_create() : (1*K*K);
5055   assert(pre_thread_stack_size != 0, "Must have a stack");
5056   // Solaris limits a user program to a maximum of 4Gb of address space. Calculate
5057   // the thread limit at which we should start doing virtual memory banging;
5058   // currently that is when the threads have used all but 200Mb of space.
5059   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
5060   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
5061 
5062   // at-exit methods are called in the reverse order of their registration.
5063   // In Solaris 7 and earlier, atexit functions are called on return from
5064   // main or as a result of a call to exit(3C). There can be only 32 of
5065   // these functions registered and atexit() does not set errno. In Solaris
5066   // 8 and later, there is no limit to the number of functions registered
5067   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
5068   // functions are called upon dlclose(3DL) in addition to return from main
5069   // and exit(3C).
5070 
5071   if (PerfAllowAtExitRegistration) {
5072     // only register atexit functions if PerfAllowAtExitRegistration is set.
5073     // atexit functions can be delayed until process exit time, which
5074     // can be problematic for embedded VM situations. Embedded VMs should
5075     // call DestroyJavaVM() to assure that VM resources are released.
5076 
5077     // note: perfMemory_exit_helper atexit function may be removed in
5078     // the future if the appropriate cleanup code can be added to the
5079     // VM_Exit VMOperation's doit method.
5080     if (atexit(perfMemory_exit_helper) != 0) {
5081       warning("os::init2 atexit(perfMemory_exit_helper) failed");
5082     }
5083   }
5084 
5085   // Init pset_loadavg function pointer
5086   init_pset_getloadavg_ptr();
5087 
5088   return JNI_OK;
5089 }
5090 
5091 // Mark the polling page as unreadable
5092 void os::make_polling_page_unreadable(void) {
5093   if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
5094     fatal("Could not disable polling page");
5095 };
5096 
5097 // Mark the polling page as readable
5098 void os::make_polling_page_readable(void) {
5099   if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
5100     fatal("Could not enable polling page");
5101 };
5102 
5103 // OS interface.
5104 
5105 bool os::check_heap(bool force) { return true; }
5106 
5107 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
5108 static vsnprintf_t sol_vsnprintf = NULL;
5109 
5110 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
5111   if (!sol_vsnprintf) {
5112     // search for the named symbol in the objects that were loaded after libjvm
5113     void* where = RTLD_NEXT;
5114     if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
5115         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
5116     if (!sol_vsnprintf) {
5117       // search for the named symbol in the objects that were loaded before libjvm
5118       where = RTLD_DEFAULT;
5119       if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
5120         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
5121       assert(sol_vsnprintf != NULL, "vsnprintf not found");
5122     }
5123   }
5124   return (*sol_vsnprintf)(buf, count, fmt, argptr);
5125 }
5126 
5127 
5128 // Is a (classpath) directory empty?
5129 bool os::dir_is_empty(const char* path) {
5130   DIR *dir = NULL;
5131   struct dirent *ptr;
5132 
5133   dir = opendir(path);
5134   if (dir == NULL) return true;
5135 
5136   /* Scan the directory */
5137   bool result = true;
5138   char buf[sizeof(struct dirent) + MAX_PATH];
5139   struct dirent *dbuf = (struct dirent *) buf;
5140   while (result && (ptr = readdir(dir, dbuf)) != NULL) {
5141     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
5142       result = false;
5143     }
5144   }
5145   closedir(dir);
5146   return result;
5147 }
5148 
5149 // This code originates from JDK's sysOpen and open64_w
5150 // from src/solaris/hpi/src/system_md.c
5151 
5152 #ifndef O_DELETE
5153 #define O_DELETE 0x10000
5154 #endif
5155 
5156 // Open a file. Unlink the file immediately after open returns
5157 // if the specified oflag has the O_DELETE flag set.
5158 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
5159 
5160 int os::open(const char *path, int oflag, int mode) {
5161   if (strlen(path) > MAX_PATH - 1) {
5162     errno = ENAMETOOLONG;
5163     return -1;
5164   }
5165   int fd;
5166   int o_delete = (oflag & O_DELETE);
5167   oflag = oflag & ~O_DELETE;
5168 
5169   fd = ::open64(path, oflag, mode);
5170   if (fd == -1) return -1;
5171 
5172   // If the open succeeded, the file might still be a directory
5173   {
5174     struct stat64 buf64;
5175     int ret = ::fstat64(fd, &buf64);
5176     int st_mode = buf64.st_mode;
5177 
5178     if (ret != -1) {
5179       if ((st_mode & S_IFMT) == S_IFDIR) {
5180         errno = EISDIR;
5181         ::close(fd);
5182         return -1;
5183       }
5184     } else {
5185       ::close(fd);
5186       return -1;
5187     }
5188   }
5189     /*
5190      * 32-bit Solaris systems suffer from:
5191      *
5192      * - an historical default soft limit of 256 per-process file
5193      *   descriptors that is too low for many Java programs.
5194      *
5195      * - a design flaw where file descriptors created using stdio
5196      *   fopen must be less than 256, _even_ when the first limit above
5197      *   has been raised.  This can cause calls to fopen (but not calls to
5198      *   open, for example) to fail mysteriously, perhaps in 3rd party
5199      *   native code (although the JDK itself uses fopen).  One can hardly
5200      *   criticize them for using this most standard of all functions.
5201      *
5202      * We attempt to make everything work anyways by:
5203      *
5204      * - raising the soft limit on per-process file descriptors beyond
5205      *   256
5206      *
5207      * - As of Solaris 10u4, we can request that Solaris raise the 256
5208      *   stdio fopen limit by calling function enable_extended_FILE_stdio.
5209      *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
5210      *
5211      * - If we are stuck on an old (pre 10u4) Solaris system, we can
5212      *   workaround the bug by remapping non-stdio file descriptors below
5213      *   256 to ones beyond 256, which is done below.
5214      *
5215      * See:
5216      * 1085341: 32-bit stdio routines should support file descriptors >255
5217      * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
5218      * 6431278: Netbeans crash on 32 bit Solaris: need to call
5219      *          enable_extended_FILE_stdio() in VM initialisation
5220      * Giri Mandalika's blog
5221      * http://technopark02.blogspot.com/2005_05_01_archive.html
5222      */
5223 #ifndef  _LP64
5224      if ((!enabled_extended_FILE_stdio) && fd < 256) {
5225          int newfd = ::fcntl(fd, F_DUPFD, 256);
5226          if (newfd != -1) {
5227              ::close(fd);
5228              fd = newfd;
5229          }
5230      }
5231 #endif // 32-bit Solaris
5232     /*
5233      * All file descriptors that are opened in the JVM and not
5234      * specifically destined for a subprocess should have the
5235      * close-on-exec flag set.  If we don't set it, then careless 3rd
5236      * party native code might fork and exec without closing all
5237      * appropriate file descriptors (e.g. as we do in closeDescriptors in
5238      * UNIXProcess.c), and this in turn might:
5239      *
5240      * - cause end-of-file to fail to be detected on some file
5241      *   descriptors, resulting in mysterious hangs, or
5242      *
5243      * - might cause an fopen in the subprocess to fail on a system
5244      *   suffering from bug 1085341.
5245      *
5246      * (Yes, the default setting of the close-on-exec flag is a Unix
5247      * design flaw)
5248      *
5249      * See:
5250      * 1085341: 32-bit stdio routines should support file descriptors >255
5251      * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
5252      * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
5253      */
5254 #ifdef FD_CLOEXEC
5255     {
5256         int flags = ::fcntl(fd, F_GETFD);
5257         if (flags != -1)
5258             ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
5259     }
5260 #endif
5261 
5262   if (o_delete != 0) {
5263     ::unlink(path);
5264   }
5265   return fd;
5266 }
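
// Usage sketch of the O_DELETE convention described above (the path is hypothetical):
// the file is unlinked as soon as open returns, so its storage is reclaimed
// automatically once the descriptor is closed.
//
//   int fd = os::open("/tmp/scratch.tmp", O_RDWR | O_CREAT | O_DELETE, 0666);
//   if (fd >= 0) {
//     // ... use the anonymous scratch file ...
//     ::close(fd);   // no unlink needed
//   }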
5267 
5268 // create binary file, rewriting existing file if required
5269 int os::create_binary_file(const char* path, bool rewrite_existing) {
5270   int oflags = O_WRONLY | O_CREAT;
5271   if (!rewrite_existing) {
5272     oflags |= O_EXCL;
5273   }
5274   return ::open64(path, oflags, S_IREAD | S_IWRITE);
5275 }
5276 
5277 // return current position of file pointer
5278 jlong os::current_file_offset(int fd) {
5279   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
5280 }
5281 
5282 // move file pointer to the specified offset
5283 jlong os::seek_to_file_offset(int fd, jlong offset) {
5284   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
5285 }
5286 
5287 jlong os::lseek(int fd, jlong offset, int whence) {
5288   return (jlong) ::lseek64(fd, offset, whence);
5289 }
5290 
5291 char * os::native_path(char *path) {
5292   return path;
5293 }
5294 
5295 int os::ftruncate(int fd, jlong length) {
5296   return ::ftruncate64(fd, length);
5297 }
5298 
5299 int os::fsync(int fd)  {
5300   RESTARTABLE_RETURN_INT(::fsync(fd));
5301 }
5302 
5303 int os::available(int fd, jlong *bytes) {
5304   jlong cur, end;
5305   int mode;
5306   struct stat64 buf64;
5307 
5308   if (::fstat64(fd, &buf64) >= 0) {
5309     mode = buf64.st_mode;
5310     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
5311       /*
5312        * XXX: is the following call interruptible? If so, this might
5313        * need to go through the INTERRUPT_IO() wrapper as for other
5314        * blocking, interruptible calls in this file.
5315        */
5316       int n, ioctl_return;
5317 
5318       INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n), ioctl_return, os::Solaris::clear_interrupted);
5319       if (ioctl_return >= 0) {
5320         *bytes = n;
5321         return 1;
5322       }
5323     }
5324   }
5325   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
5326     return 0;
5327   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
5328     return 0;
5329   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
5330     return 0;
5331   }
5332   *bytes = end - cur;
5333   return 1;
5334 }
5335 
5336 // Map a block of memory.
5337 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
5338                      char *addr, size_t bytes, bool read_only,
5339                      bool allow_exec) {
5340   int prot;
5341   int flags;
5342 
5343   if (read_only) {
5344     prot = PROT_READ;
5345     flags = MAP_SHARED;
5346   } else {
5347     prot = PROT_READ | PROT_WRITE;
5348     flags = MAP_PRIVATE;
5349   }
5350 
5351   if (allow_exec) {
5352     prot |= PROT_EXEC;
5353   }
5354 
5355   if (addr != NULL) {
5356     flags |= MAP_FIXED;
5357   }
5358 
5359   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5360                                      fd, file_offset);
5361   if (mapped_address == MAP_FAILED) {
5362     return NULL;
5363   }
5364   return mapped_address;
5365 }
5366 
5367 
5368 // Remap a block of memory.
5369 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5370                        char *addr, size_t bytes, bool read_only,
5371                        bool allow_exec) {
5372   // same as map_memory() on this OS
5373   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5374                         allow_exec);
5375 }
5376 
5377 
5378 // Unmap a block of memory.
5379 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5380   return munmap(addr, bytes) == 0;
5381 }
5382 
5383 void os::pause() {
5384   char filename[MAX_PATH];
5385   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5386     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5387   } else {
5388     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5389   }
5390 
5391   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5392   if (fd != -1) {
5393     struct stat buf;
5394     ::close(fd);
5395     while (::stat(filename, &buf) == 0) {
5396       (void)::poll(NULL, 0, 100);
5397     }
5398   } else {
5399     jio_fprintf(stderr,
5400       "Could not open pause file '%s', continuing immediately.\n", filename);
5401   }
5402 }
5403 
5404 #ifndef PRODUCT
5405 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5406 // Turn this on if you need to trace synch operations.
5407 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5408 // and call record_synch_enable and record_synch_disable
5409 // around the computation of interest.
5410 
5411 void record_synch(char* name, bool returning);  // defined below
5412 
5413 class RecordSynch {
5414   char* _name;
5415  public:
5416   RecordSynch(char* name) :_name(name)
5417                  { record_synch(_name, false); }
5418   ~RecordSynch() { record_synch(_name,   true);  }
5419 };
5420 
5421 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
5422 extern "C" ret name params {                                    \
5423   typedef ret name##_t params;                                  \
5424   static name##_t* implem = NULL;                               \
5425   static int callcount = 0;                                     \
5426   if (implem == NULL) {                                         \
5427     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
5428     if (implem == NULL)  fatal(dlerror());                      \
5429   }                                                             \
5430   ++callcount;                                                  \
5431   RecordSynch _rs(#name);                                       \
5432   inner;                                                        \
5433   return implem args;                                           \
5434 }
5435 // in dbx, examine callcounts this way:
5436 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
5437 
5438 #define CHECK_POINTER_OK(p) \
5439   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
5440 #define CHECK_MU \
5441   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5442 #define CHECK_CV \
5443   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5444 #define CHECK_P(p) \
5445   if (!CHECK_POINTER_OK(p))  fatal("Pointer must be in C heap only.");
5446 
5447 #define CHECK_MUTEX(mutex_op) \
5448 CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5449 
5450 CHECK_MUTEX(   mutex_lock)
5451 CHECK_MUTEX(  _mutex_lock)
5452 CHECK_MUTEX( mutex_unlock)
5453 CHECK_MUTEX(_mutex_unlock)
5454 CHECK_MUTEX( mutex_trylock)
5455 CHECK_MUTEX(_mutex_trylock)
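
// As an illustration, CHECK_MUTEX(mutex_lock) above expands to approximately:
//
//   extern "C" int mutex_lock(mutex_t *mu) {
//     typedef int mutex_lock_t(mutex_t *mu);
//     static mutex_lock_t* implem = NULL;
//     static int callcount = 0;
//     if (implem == NULL) {
//       implem = (mutex_lock_t*) dlsym(RTLD_NEXT, "mutex_lock");
//       if (implem == NULL)  fatal(dlerror());
//     }
//     ++callcount;
//     RecordSynch _rs("mutex_lock");
//     if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
//     return implem(mu);
//   }
//
// i.e. each interposer finds the real libthread entry with dlsym(RTLD_NEXT, ...),
// records the call, sanity-checks the argument, and then forwards to it.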
5456 
5457 #define CHECK_COND(cond_op) \
5458 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);
5459 
5460 CHECK_COND( cond_wait);
5461 CHECK_COND(_cond_wait);
5462 CHECK_COND(_cond_wait_cancel);
5463 
5464 #define CHECK_COND2(cond_op) \
5465 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);
5466 
5467 CHECK_COND2( cond_timedwait);
5468 CHECK_COND2(_cond_timedwait);
5469 CHECK_COND2(_cond_timedwait_cancel);
5470 
5471 // do the _lwp_* versions too
5472 #define mutex_t lwp_mutex_t
5473 #define cond_t  lwp_cond_t
5474 CHECK_MUTEX(  _lwp_mutex_lock)
5475 CHECK_MUTEX(  _lwp_mutex_unlock)
5476 CHECK_MUTEX(  _lwp_mutex_trylock)
5477 CHECK_MUTEX( __lwp_mutex_lock)
5478 CHECK_MUTEX( __lwp_mutex_unlock)
5479 CHECK_MUTEX( __lwp_mutex_trylock)
5480 CHECK_MUTEX(___lwp_mutex_lock)
5481 CHECK_MUTEX(___lwp_mutex_unlock)
5482 
5483 CHECK_COND(  _lwp_cond_wait);
5484 CHECK_COND( __lwp_cond_wait);
5485 CHECK_COND(___lwp_cond_wait);
5486 
5487 CHECK_COND2(  _lwp_cond_timedwait);
5488 CHECK_COND2( __lwp_cond_timedwait);
5489 #undef mutex_t
5490 #undef cond_t
5491 
5492 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5493 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5494 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5495 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5496 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5497 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5498 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5499 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5500 
5501 
5502 // recording machinery:
5503 
5504 enum { RECORD_SYNCH_LIMIT = 200 };
5505 char* record_synch_name[RECORD_SYNCH_LIMIT];
5506 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5507 bool record_synch_returning[RECORD_SYNCH_LIMIT];
5508 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5509 int record_synch_count = 0;
5510 bool record_synch_enabled = false;
5511 
5512 // in dbx, examine recorded data this way:
5513 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5514 
5515 void record_synch(char* name, bool returning) {
5516   if (record_synch_enabled) {
5517     if (record_synch_count < RECORD_SYNCH_LIMIT) {
5518       record_synch_name[record_synch_count] = name;
5519       record_synch_returning[record_synch_count] = returning;
5520       record_synch_thread[record_synch_count] = thr_self();
5521       record_synch_arg0ptr[record_synch_count] = &name;
5522       record_synch_count++;
5523     }
5524     // put more checking code here:
5525     // ...
5526   }
5527 }
5528 
5529 void record_synch_enable() {
5530   // start collecting trace data, if not already doing so
5531   if (!record_synch_enabled)  record_synch_count = 0;
5532   record_synch_enabled = true;
5533 }
5534 
5535 void record_synch_disable() {
5536   // stop collecting trace data
5537   record_synch_enabled = false;
5538 }
5539 
5540 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5541 #endif // PRODUCT
5542 
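// thr_time_off and thr_time_size below use null-pointer offset arithmetic
// (equivalent to offsetof()) to compute the byte offset of pr_utime within
// prusage_t and the length of the window from pr_utime up to pr_ttime, i.e. the
// pr_utime and pr_stime fields. os::thread_cpu_time() uses them to pread() just
// the user and system CPU times from /proc/<pid>/lwp/<lwpid>/lwpusage instead of
// reading the whole prusage_t structure.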
5543 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5544 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5545                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5546 
5547 
5548 // JVMTI & JVM monitoring and management support
5549 // The thread_cpu_time() and current_thread_cpu_time() are only
5550 // supported if is_thread_cpu_time_supported() returns true.
5551 // They are not supported on Solaris T1.
5552 
5553 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5554 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5555 // of a thread.
5556 //
5557 // current_thread_cpu_time() and thread_cpu_time(Thread *)
5558 // return the fast estimate available on the platform.
5559 
5560 // The hrtime_t value returned by gethrvtime() includes
5561 // user time but does not include system time.
5562 jlong os::current_thread_cpu_time() {
5563   return (jlong) gethrvtime();
5564 }
5565 
5566 jlong os::thread_cpu_time(Thread *thread) {
5567   // return user level CPU time only to be consistent with
5568   // what current_thread_cpu_time returns.
5569   // thread_cpu_time_info() must be changed if this changes
5570   return os::thread_cpu_time(thread, false /* user time only */);
5571 }
5572 
5573 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5574   if (user_sys_cpu_time) {
5575     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5576   } else {
5577     return os::current_thread_cpu_time();
5578   }
5579 }
5580 
5581 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5582   char proc_name[64];
5583   int count;
5584   prusage_t prusage;
5585   jlong lwp_time;
5586   int fd;
5587 
5588   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5589                      getpid(),
5590                      thread->osthread()->lwp_id());
5591   fd = ::open(proc_name, O_RDONLY);
5592   if ( fd == -1 ) return -1;
5593 
5594   do {
5595     count = ::pread(fd,
5596                   (void *)&prusage.pr_utime,
5597                   thr_time_size,
5598                   thr_time_off);
5599   } while (count < 0 && errno == EINTR);
5600   ::close(fd);
5601   if ( count < 0 ) return -1;
5602 
5603   if (user_sys_cpu_time) {
5604     // user + system CPU time
5605     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5606                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5607                  (jlong)prusage.pr_stime.tv_nsec +
5608                  (jlong)prusage.pr_utime.tv_nsec;
5609   } else {
5610     // user level CPU time only
5611     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5612                 (jlong)prusage.pr_utime.tv_nsec;
5613   }
5614 
5615   return(lwp_time);
5616 }
5617 
5618 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5619   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5620   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5621   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5622   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5623 }
5624 
5625 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5626   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5627   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5628   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5629   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5630 }
5631 
5632 bool os::is_thread_cpu_time_supported() {
5633   if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
5634     return true;
5635   } else {
5636     return false;
5637   }
5638 }
5639 
5640 // System loadavg support.  Returns -1 if load average cannot be obtained.
5641 // Return the load average for our processor set if the primitive exists
5642 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
5643 int os::loadavg(double loadavg[], int nelem) {
5644   if (pset_getloadavg_ptr != NULL) {
5645     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5646   } else {
5647     return ::getloadavg(loadavg, nelem);
5648   }
5649 }
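
// Usage sketch (three elements conventionally correspond to the 1, 5 and 15 minute
// averages, following getloadavg()):
//
//   double avg[3];
//   if (os::loadavg(avg, 3) != -1) {
//     // avg[0] now holds the 1-minute load average for our processor set,
//     // or for the whole system if pset_getloadavg is unavailable
//   }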
5650 
5651 //---------------------------------------------------------------------------------
5652 
5653 bool os::find(address addr, outputStream* st) {
5654   Dl_info dlinfo;
5655   memset(&dlinfo, 0, sizeof(dlinfo));
5656   if (dladdr(addr, &dlinfo) != 0) {
5657     st->print(PTR_FORMAT ": ", addr);
5658     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5659       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5660     } else if (dlinfo.dli_fbase != NULL)
5661       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5662     else
5663       st->print("<absolute address>");
5664     if (dlinfo.dli_fname != NULL) {
5665       st->print(" in %s", dlinfo.dli_fname);
5666     }
5667     if (dlinfo.dli_fbase != NULL) {
5668       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5669     }
5670     st->cr();
5671 
5672     if (Verbose) {
5673       // decode some bytes around the PC
5674       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5675       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5676       address       lowest = (address) dlinfo.dli_sname;
5677       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
5678       if (begin < lowest)  begin = lowest;
5679       Dl_info dlinfo2;
5680       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5681           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
5682         end = (address) dlinfo2.dli_saddr;
5683       Disassembler::decode(begin, end, st);
5684     }
5685     return true;
5686   }
5687   return false;
5688 }
5689 
5690 // Following function has been added to support HotSparc's libjvm.so running
5691 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
5692 // src/solaris/hpi/native_threads in the EVM codebase.
5693 //
5694 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5695 // libraries and should thus be removed. We will leave it behind for a while
5696 // until we no longer want to be able to run on top of 1.3.0 Solaris production
5697 // JDK. See 4341971.
5698 
5699 #define STACK_SLACK 0x800
5700 
5701 extern "C" {
5702   intptr_t sysThreadAvailableStackWithSlack() {
5703     stack_t st;
5704     intptr_t retval, stack_top;
5705     retval = thr_stksegment(&st);
5706     assert(retval == 0, "incorrect return value from thr_stksegment");
5707     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5708     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
5709     stack_top=(intptr_t)st.ss_sp-st.ss_size;
5710     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5711   }
5712 }
5713 
5714 // ObjectMonitor park-unpark infrastructure ...
5715 //
5716 // We implement Solaris and Linux PlatformEvents with the
5717 // obvious condvar-mutex-flag triple.
5718 // Another alternative that works quite well is pipes:
5719 // Each PlatformEvent consists of a pipe-pair.
5720 // The thread associated with the PlatformEvent
5721 // calls park(), which reads from the input end of the pipe.
5722 // Unpark() writes into the other end of the pipe.
5723 // The write-side of the pipe must be set NDELAY.
5724 // Unfortunately pipes consume a large # of handles.
5725 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5726 // Using pipes for the 1st few threads might be workable, however.
5727 //
5728 // park() is permitted to return spuriously.
5729 // Callers of park() should wrap the call to park() in
5730 // an appropriate loop (see the illustrative caller loop sketched
5731 // after this comment block).  A litmus test for the correct usage
5732 // of park() is the following: if park() were modified to immediately
5733 // return 0 your code should still work, albeit degenerating to a spin loop.
5734 //
5735 // An interesting optimization for park() is to use a trylock()
5736 // to attempt to acquire the mutex.  If the trylock() fails
5737 // then we know that a concurrent unpark() operation is in progress.
5738 // In that case the park() code could simply set _count to 0
5739 // and return immediately.  The subsequent park() operation *might*
5740 // return immediately.  That's harmless as the caller of park() is
5741 // expected to loop.  By using trylock() we will have avoided a
5742 // context switch caused by contention on the per-thread mutex.
5743 //
5744 // TODO-FIXME:
5745 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
5746 //     objectmonitor implementation.
5747 // 2.  Collapse the JSR166 parker event, and the
5748 //     objectmonitor ParkEvent into a single "Event" construct.
5749 // 3.  In park() and unpark() add:
5750 //     assert (Thread::current() == AssociatedWith).
5751 // 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
5752 //     1-out-of-N park() operations will return immediately.
5753 //
5754 // _Event transitions in park()
5755 //   -1 => -1 : illegal
5756 //    1 =>  0 : pass - return immediately
5757 //    0 => -1 : block
5758 //
5759 // _Event serves as a restricted-range semaphore.
5760 //
5761 // Another possible encoding of _Event would be with
5762 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5763 //
5764 // TODO-FIXME: add DTRACE probes for:
5765 // 1.   Tx parks
5766 // 2.   Ty unparks Tx
5767 // 3.   Tx resumes from park
5768 
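// Illustrative caller-side pattern (a sketch only, not code from this file):
// because park() may return spuriously, callers re-check their own wait
// condition in a loop around the call.  Here 'ev' and 'condition_is_true'
// are placeholders for whatever ParkEvent and state the caller actually uses:
//
//   while (!condition_is_true()) {
//     ev->park();    // may block, or may return spuriously
//   }                // the loop re-checks the condition and parks again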
5769 
5770 // value determined through experimentation
5771 #define ROUNDINGFIX 11
5772 
5773 // utility to compute the abstime argument to timedwait.
5774 // TODO-FIXME: switch from compute_abstime() to unpackTime().
5775 
5776 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5777   // millis is the relative timeout time
5778   // abstime will be the absolute timeout time
5779   if (millis < 0)  millis = 0;
5780   struct timeval now;
5781   int status = gettimeofday(&now, NULL);
5782   assert(status == 0, "gettimeofday");
5783   jlong seconds = millis / 1000;
5784   jlong max_wait_period;
5785 
5786   if (UseLWPSynchronization) {
5787     // forward port of fix for 4275818 (not sleeping long enough)
5788     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5789     // _lwp_cond_timedwait() used a round_down algorithm rather
5790     // than a round_up. For millis less than our roundfactor
5791     // it rounded down to 0 which doesn't meet the spec.
5792     // For millis > roundfactor we may return a bit sooner, but
5793     // since we cannot accurately identify the patch level, and
5794     // this has already been fixed in Solaris 9 and 8, we will
5795     // leave it alone rather than always rounding down.
5796 
5797     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
5798     // It appears that when we go directly through Solaris _lwp_cond_timedwait()
5799     // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6.
5800     max_wait_period = 21000000;
5801   } else {
5802     max_wait_period = 50000000;
5803   }
5804   millis %= 1000;
5805   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5806      seconds = max_wait_period;
5807   }
5808   abstime->tv_sec = now.tv_sec  + seconds;
5809   long       usec = now.tv_usec + millis * 1000;
5810   if (usec >= 1000000) {
5811     abstime->tv_sec += 1;
5812     usec -= 1000000;
5813   }
5814   abstime->tv_nsec = usec * 1000;
5815   return abstime;
5816 }
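
// Worked example for compute_abstime() (illustrative only): with
// millis == 2500 and !UseLWPSynchronization, seconds == 2 and millis
// becomes 500 after the modulo, so abstime->tv_sec = now.tv_sec + 2 and
// usec = now.tv_usec + 500 * 1000; if usec reaches 1,000,000 the extra
// second is carried into tv_sec before usec is scaled to nanoseconds
// for tv_nsec.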
5817 
5818 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
5819 // Conceptually TryPark() should be equivalent to park(0).
5820 
5821 int os::PlatformEvent::TryPark() {
5822   for (;;) {
5823     const int v = _Event ;
5824     guarantee ((v == 0) || (v == 1), "invariant") ;
5825     if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
5826   }
5827 }
5828 
5829 void os::PlatformEvent::park() {           // AKA: down()
5830   // Invariant: Only the thread associated with the Event/PlatformEvent
5831   // may call park().
5832   int v ;
5833   for (;;) {
5834       v = _Event ;
5835       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5836   }
5837   guarantee (v >= 0, "invariant") ;
5838   if (v == 0) {
5839      // Do this the hard way by blocking ...
5840      // See http://monaco.sfbay/detail.jsf?cr=5094058.
5841      // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5842      // Only for SPARC >= V8PlusA
5843 #if defined(__sparc) && defined(COMPILER2)
5844      if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5845 #endif
5846      int status = os::Solaris::mutex_lock(_mutex);
5847      assert_status(status == 0, status,  "mutex_lock");
5848      guarantee (_nParked == 0, "invariant") ;
5849      ++ _nParked ;
5850      while (_Event < 0) {
5851         // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5852         // Treat this the same as if the wait was interrupted
5853         // With usr/lib/lwp going to kernel, always handle ETIME
5854         status = os::Solaris::cond_wait(_cond, _mutex);
5855         if (status == ETIME) status = EINTR ;
5856         assert_status(status == 0 || status == EINTR, status, "cond_wait");
5857      }
5858      -- _nParked ;
5859      _Event = 0 ;
5860      status = os::Solaris::mutex_unlock(_mutex);
5861      assert_status(status == 0, status, "mutex_unlock");
5862      // Paranoia to ensure our locked and lock-free paths interact
5863      // correctly with each other.
5864      OrderAccess::fence();
5865   }
5866 }
5867 
5868 int os::PlatformEvent::park(jlong millis) {
5869   guarantee (_nParked == 0, "invariant") ;
5870   int v ;
5871   for (;;) {
5872       v = _Event ;
5873       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5874   }
5875   guarantee (v >= 0, "invariant") ;
5876   if (v != 0) return OS_OK ;
5877 
5878   int ret = OS_TIMEOUT;
5879   timestruc_t abst;
5880   compute_abstime (&abst, millis);
5881 
5882   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5883   // For Solaris SPARC set fprs.FEF=0 prior to parking.
5884   // Only for SPARC >= V8PlusA
5885 #if defined(__sparc) && defined(COMPILER2)
5886  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5887 #endif
5888   int status = os::Solaris::mutex_lock(_mutex);
5889   assert_status(status == 0, status, "mutex_lock");
5890   guarantee (_nParked == 0, "invariant") ;
5891   ++ _nParked ;
5892   while (_Event < 0) {
5893      int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5894      assert_status(status == 0 || status == EINTR ||
5895                    status == ETIME || status == ETIMEDOUT,
5896                    status, "cond_timedwait");
5897      if (!FilterSpuriousWakeups) break ;                // previous semantics
5898      if (status == ETIME || status == ETIMEDOUT) break ;
5899      // We consume and ignore EINTR and spurious wakeups.
5900   }
5901   -- _nParked ;
5902   if (_Event >= 0) ret = OS_OK ;
5903   _Event = 0 ;
5904   status = os::Solaris::mutex_unlock(_mutex);
5905   assert_status(status == 0, status, "mutex_unlock");
5906   // Paranoia to ensure our locked and lock-free paths interact
5907   // correctly with each other.
5908   OrderAccess::fence();
5909   return ret;
5910 }
5911 
5912 void os::PlatformEvent::unpark() {
5913   // Transitions for _Event:
5914   //    0 :=> 1
5915   //    1 :=> 1
5916   //   -1 :=> either 0 or 1; must signal target thread
5917   //          That is, we can safely transition _Event from -1 to either
5918   //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
5919   //          unpark() calls.
5920   // See also: "Semaphores in Plan 9" by Mullender & Cox
5921   //
5922   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5923   // that it will take two back-to-back park() calls for the owning
5924   // thread to block. This has the benefit of forcing a spurious return
5925   // from the first park() call after an unpark() call which will help
5926   // shake out uses of park() and unpark() without condition variables.
5927 
5928   if (Atomic::xchg(1, &_Event) >= 0) return;
5929 
5930   // If the thread associated with the event was parked, wake it.
5931   // Wait for the thread assoc with the PlatformEvent to vacate.
5932   int status = os::Solaris::mutex_lock(_mutex);
5933   assert_status(status == 0, status, "mutex_lock");
5934   int AnyWaiters = _nParked;
5935   status = os::Solaris::mutex_unlock(_mutex);
5936   assert_status(status == 0, status, "mutex_unlock");
5937   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5938   if (AnyWaiters != 0) {
5939     // We intentionally signal *after* dropping the lock
5940     // to avoid a common class of futile wakeups.
5941     status = os::Solaris::cond_signal(_cond);
5942     assert_status(status == 0, status, "cond_signal");
5943   }
5944 }
5945 
5946 // JSR166
5947 // -------------------------------------------------------
5948 
5949 /*
5950  * The Solaris and Linux implementations of park/unpark are fairly
5951  * conservative for now, but can be improved. They currently use a
5952  * mutex/condvar pair, plus _counter.
5953  * Park decrements _counter if > 0, else does a condvar wait.  Unpark
5954  * sets count to 1 and signals condvar.  Only one thread ever waits
5955  * on the condvar. Contention seen when trying to park implies that someone
5956  * is unparking you, so don't wait. And spurious returns are fine, so there
5957  * is no need to track notifications.
5958  */
5959 
5960 #define MAX_SECS 100000000
5961 /*
5962  * This code is common to linux and solaris and will be moved to a
5963  * common place in dolphin.
5964  *
5965  * The passed in time value is either a relative time in nanoseconds
5966  * or an absolute time in milliseconds. Either way it has to be unpacked
5967  * into suitable seconds and nanoseconds components and stored in the
5968  * given timespec structure.
5969  * Given that the time value is 64-bit and the time_t used in the timespec is
5970  * only a signed 32-bit value (except on 64-bit Linux), we have to watch for
5971  * overflow if times far in the future are given. Further, on Solaris versions
5972  * prior to 10 there is a restriction (see cond_timedwait) that the specified
5973  * number of seconds, in abstime, must be less than current_time + 100,000,000.
5974  * As it will be 28 years before "now + 100000000" will overflow we can
5975  * ignore overflow and just impose a hard-limit on seconds using the value
5976  * of "now + 100,000,000". This places a limit on the timeout of about 3.17
5977  * years from "now".
5978  */
5979 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5980   assert (time > 0, "convertTime");
5981 
5982   struct timeval now;
5983   int status = gettimeofday(&now, NULL);
5984   assert(status == 0, "gettimeofday");
5985 
5986   time_t max_secs = now.tv_sec + MAX_SECS;
5987 
5988   if (isAbsolute) {
5989     jlong secs = time / 1000;
5990     if (secs > max_secs) {
5991       absTime->tv_sec = max_secs;
5992     }
5993     else {
5994       absTime->tv_sec = secs;
5995     }
5996     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5997   }
5998   else {
5999     jlong secs = time / NANOSECS_PER_SEC;
6000     if (secs >= MAX_SECS) {
6001       absTime->tv_sec = max_secs;
6002       absTime->tv_nsec = 0;
6003     }
6004     else {
6005       absTime->tv_sec = now.tv_sec + secs;
6006       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
6007       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
6008         absTime->tv_nsec -= NANOSECS_PER_SEC;
6009         ++absTime->tv_sec; // note: this must be <= max_secs
6010       }
6011     }
6012   }
6013   assert(absTime->tv_sec >= 0, "tv_sec < 0");
6014   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
6015   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
6016   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
6017 }
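
// Worked examples for unpackTime() (illustrative only):
//  - a relative timeout (isAbsolute == false) of 1,500,000,000 ns gives
//    secs == 1, so absTime->tv_sec = now.tv_sec + 1 and
//    absTime->tv_nsec = 500,000,000 + now.tv_usec * 1000, with the usual
//    carry into tv_sec if tv_nsec reaches NANOSECS_PER_SEC;
//  - an absolute timeout (isAbsolute == true) whose second count exceeds
//    now + MAX_SECS is simply clamped to max_secs.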
6018 
6019 void Parker::park(bool isAbsolute, jlong time) {
6020   // Ideally we'd do something useful while spinning, such
6021   // as calling unpackTime().
6022 
6023   // Optional fast-path check:
6024   // Return immediately if a permit is available.
6025   // We depend on Atomic::xchg() having full barrier semantics
6026   // since we are doing a lock-free update to _counter.
6027   if (Atomic::xchg(0, &_counter) > 0) return;
6028 
6029   // Optional fast-exit: Check interrupt before trying to wait
6030   Thread* thread = Thread::current();
6031   assert(thread->is_Java_thread(), "Must be JavaThread");
6032   JavaThread *jt = (JavaThread *)thread;
6033   if (Thread::is_interrupted(thread, false)) {
6034     return;
6035   }
6036 
6037   // First, demultiplex/decode time arguments
6038   timespec absTime;
6039   if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
6040     return;
6041   }
6042   if (time > 0) {
6043     // Warning: this code might be exposed to the old Solaris time
6044     // round-down bugs.  Grep "ROUNDINGFIX" for details.
6045     unpackTime(&absTime, isAbsolute, time);
6046   }
6047 
6048   // Enter safepoint region
6049   // Beware of deadlocks such as 6317397.
6050   // The per-thread Parker:: _mutex is a classic leaf-lock.
6051   // In particular a thread must never block on the Threads_lock while
6052   // holding the Parker:: mutex.  If safepoints are pending, both the
6053   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
6054   ThreadBlockInVM tbivm(jt);
6055 
6056   // Don't wait if we cannot get the lock, since interference arises from
6057   // unblocking.  Also, check interrupt before trying to wait.
6058   if (Thread::is_interrupted(thread, false) ||
6059       os::Solaris::mutex_trylock(_mutex) != 0) {
6060     return;
6061   }
6062 
6063   int status ;
6064 
6065   if (_counter > 0)  { // no wait needed
6066     _counter = 0;
6067     status = os::Solaris::mutex_unlock(_mutex);
6068     assert (status == 0, "invariant") ;
6069     // Paranoia to ensure our locked and lock-free paths interact
6070     // correctly with each other and Java-level accesses.
6071     OrderAccess::fence();
6072     return;
6073   }
6074 
6075 #ifdef ASSERT
6076   // Don't catch signals while blocked; let the running threads have the signals.
6077   // (This allows a debugger to break into the running thread.)
6078   sigset_t oldsigs;
6079   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
6080   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
6081 #endif
6082 
6083   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
6084   jt->set_suspend_equivalent();
6085   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
6086 
6087   // Do this the hard way by blocking ...
6088   // See http://monaco.sfbay/detail.jsf?cr=5094058.
6089   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
6090   // Only for SPARC >= V8PlusA
6091 #if defined(__sparc) && defined(COMPILER2)
6092   if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
6093 #endif
6094 
6095   if (time == 0) {
6096     status = os::Solaris::cond_wait (_cond, _mutex) ;
6097   } else {
6098     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
6099   }
6100   // Note that an untimed cond_wait() can sometimes return ETIME on older
6101   // versions of Solaris.
6102   assert_status(status == 0 || status == EINTR ||
6103                 status == ETIME || status == ETIMEDOUT,
6104                 status, "cond_timedwait");
6105 
6106 #ifdef ASSERT
6107   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
6108 #endif
6109   _counter = 0 ;
6110   status = os::Solaris::mutex_unlock(_mutex);
6111   assert_status(status == 0, status, "mutex_unlock") ;
6112   // Paranoia to ensure our locked and lock-free paths interact
6113   // correctly with each other and Java-level accesses.
6114   OrderAccess::fence();
6115 
6116   // If externally suspended while waiting, re-suspend
6117   if (jt->handle_special_suspend_equivalent_condition()) {
6118     jt->java_suspend_self();
6119   }
6120 }
6121 
6122 void Parker::unpark() {
6123   int s, status ;
6124   status = os::Solaris::mutex_lock (_mutex) ;
6125   assert (status == 0, "invariant") ;
6126   s = _counter;
6127   _counter = 1;
6128   status = os::Solaris::mutex_unlock (_mutex) ;
6129   assert (status == 0, "invariant") ;
6130 
6131   if (s < 1) {
6132     status = os::Solaris::cond_signal (_cond) ;
6133     assert (status == 0, "invariant") ;
6134   }
6135 }
6136 
6137 extern char** environ;
6138 
6139 // Run the specified command in a separate process. Return its exit value,
6140 // or -1 on failure (e.g. can't fork a new process).
6141 // Unlike system(), this function can be called from a signal handler. It
6142 // doesn't block SIGINT et al.
6143 int os::fork_and_exec(char* cmd) {
6144   char * argv[4];
6145   argv[0] = (char *)"sh";
6146   argv[1] = (char *)"-c";
6147   argv[2] = cmd;
6148   argv[3] = NULL;
6149 
6150   // fork() is async-signal-safe; fork1() is not, so it can't be used in a signal handler
6151   pid_t pid;
6152   Thread* t = ThreadLocalStorage::get_thread_slow();
6153   if (t != NULL && t->is_inside_signal_handler()) {
6154     pid = fork();
6155   } else {
6156     pid = fork1();
6157   }
6158 
6159   if (pid < 0) {
6160     // fork failed
6161     warning("fork failed: %s", strerror(errno));
6162     return -1;
6163 
6164   } else if (pid == 0) {
6165     // child process
6166 
6167     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
6168     execve("/usr/bin/sh", argv, environ);
6169 
6170     // execve failed
6171     _exit(-1);
6172 
6173   } else  {
6174     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
6175     // care about the actual exit code, for now.
6176 
6177     int status;
6178 
6179     // Wait for the child process to exit.  This returns immediately if
6180     // the child has already exited.
6181     while (waitpid(pid, &status, 0) < 0) {
6182         switch (errno) {
6183         case ECHILD: return 0;
6184         case EINTR: break;
6185         default: return -1;
6186         }
6187     }
6188 
6189     if (WIFEXITED(status)) {
6190        // The child exited normally; get its exit code.
6191        return WEXITSTATUS(status);
6192     } else if (WIFSIGNALED(status)) {
6193        // The child exited because of a signal
6194        // The best value to return is 0x80 + signal number,
6195        // because that is what all Unix shells do, and because
6196        // it allows callers to distinguish between process exit and
6197        // process death by signal.
6198        return 0x80 + WTERMSIG(status);
6199     } else {
6200        // Unknown exit code; pass it through
6201        return status;
6202     }
6203   }
6204 }
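
// Illustrative usage of os::fork_and_exec() (a sketch only; the command
// string is a placeholder):
//
//   int exit_code = os::fork_and_exec((char*)"echo hello");
//   if (exit_code < 0) {
//     warning("command could not be started or waited for");
//   }
//
// Per the convention above, a return value of 0x80 + N indicates that the
// child was killed by signal N.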
6205 
6206 // is_headless_jre()
6207 //
6208 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
6209 // in order to report if we are running in a headless JRE.
6210 //
6211 // Since JDK 8, xawt/libmawt.so has been moved into the same directory
6212 // as libawt.so and renamed libawt_xawt.so.
6213 //
6214 bool os::is_headless_jre() {
6215     struct stat statbuf;
6216     char buf[MAXPATHLEN];
6217     char libmawtpath[MAXPATHLEN];
6218     const char *xawtstr  = "/xawt/libmawt.so";
6219     const char *new_xawtstr = "/libawt_xawt.so";
6220     char *p;
6221 
6222     // Get path to libjvm.so
6223     os::jvm_path(buf, sizeof(buf));
6224 
6225     // Get rid of libjvm.so
6226     p = strrchr(buf, '/');
6227     if (p == NULL) return false;
6228     else *p = '\0';
6229 
6230     // Get rid of client or server
6231     p = strrchr(buf, '/');
6232     if (p == NULL) return false;
6233     else *p = '\0';
6234 
6235     // check xawt/libmawt.so
6236     strcpy(libmawtpath, buf);
6237     strcat(libmawtpath, xawtstr);
6238     if (::stat(libmawtpath, &statbuf) == 0) return false;
6239 
6240     // check libawt_xawt.so
6241     strcpy(libmawtpath, buf);
6242     strcat(libmawtpath, new_xawtstr);
6243     if (::stat(libmawtpath, &statbuf) == 0) return false;
6244 
6245     return true;
6246 }
6247 
6248 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
6249   INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted);
6250 }
6251 
6252 int os::close(int fd) {
6253   return ::close(fd);
6254 }
6255 
6256 int os::socket_close(int fd) {
6257   return ::close(fd);
6258 }
6259 
6260 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
6261   INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
6262 }
6263 
6264 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
6265   INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
6266 }
6267 
6268 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
6269   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
6270 }
6271 
6272 // As both poll and select can be interrupted by signals, we have to be
6273 // prepared to restart the system call after updating the timeout, unless
6274 // a poll() is done with timeout == -1, in which case we repeat with this
6275 // "wait forever" value.
6276 
6277 int os::timeout(int fd, long timeout) {
6278   int res;
6279   struct timeval t;
6280   julong prevtime, newtime;
6281   static const char* aNull = 0;
6282   struct pollfd pfd;
6283   pfd.fd = fd;
6284   pfd.events = POLLIN;
6285 
6286   gettimeofday(&t, &aNull);
6287   prevtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;
6288 
6289   for(;;) {
6290     INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
6291     if(res == OS_ERR && errno == EINTR) {
6292         if(timeout != -1) {
6293           gettimeofday(&t, &aNull);
6294           newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec /1000;
6295           timeout -= newtime - prevtime;
6296           if(timeout <= 0)
6297             return OS_OK;
6298           prevtime = newtime;
6299         }
6300     } else return res;
6301   }
6302 }
6303 
6304 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
6305   int _result;
6306   INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\
6307                           os::Solaris::clear_interrupted);
6308 
6309   // Depending on when thread interruption is reset, _result could be
6310   // one of two values when errno == EINTR
6311 
6312   if (((_result == OS_INTRPT) || (_result == OS_ERR))
6313       && (errno == EINTR)) {
6314      /* restarting a connect() changes its errno semantics */
6315      INTERRUPTIBLE(::connect(fd, him, len), _result,\
6316                    os::Solaris::clear_interrupted);
6317      /* undo these changes */
6318      if (_result == OS_ERR) {
6319        if (errno == EALREADY) {
6320          errno = EINPROGRESS; /* fall through */
6321        } else if (errno == EISCONN) {
6322          errno = 0;
6323          return OS_OK;
6324        }
6325      }
6326    }
6327    return _result;
6328  }
6329 
6330 int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
6331   if (fd < 0) {
6332     return OS_ERR;
6333   }
6334   INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\
6335                            os::Solaris::clear_interrupted);
6336 }
6337 
6338 int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
6339                  sockaddr* from, socklen_t* fromlen) {
6340   INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\
6341                            os::Solaris::clear_interrupted);
6342 }
6343 
6344 int os::sendto(int fd, char* buf, size_t len, uint flags,
6345                struct sockaddr* to, socklen_t tolen) {
6346   INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\
6347                            os::Solaris::clear_interrupted);
6348 }
6349 
6350 int os::socket_available(int fd, jint *pbytes) {
6351   if (fd < 0) {
6352     return OS_OK;
6353   }
6354   int ret;
6355   RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
6356   // Note: ioctl() can return 0 when successful; JVM_SocketAvailable
6357   // is expected to return 0 on failure and 1 on success to the JDK.
6358   return (ret == OS_ERR) ? 0 : 1;
6359 }
6360 
6361 int os::bind(int fd, struct sockaddr* him, socklen_t len) {
6362    INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
6363                                       os::Solaris::clear_interrupted);
6364 }
6365 
6366 // Get the default path to the core file
6367 // Returns the length of the string
6368 int os::get_core_path(char* buffer, size_t bufferSize) {
6369   const char* p = get_current_directory(buffer, bufferSize);
6370 
6371   if (p == NULL) {
6372     assert(p != NULL, "failed to get current directory");
6373     return 0;
6374   }
6375 
6376   return strlen(buffer);
6377 }
6378 
6379 #ifndef PRODUCT
6380 void TestReserveMemorySpecial_test() {
6381   // No tests available for this platform
6382 }
6383 #endif