1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "jvm_solaris.h"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/filemap.hpp"
  37 #include "mutex_solaris.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "os_share_solaris.hpp"
  40 #include "prims/jniFastGetField.hpp"
  41 #include "prims/jvm.h"
  42 #include "prims/jvm_misc.hpp"
  43 #include "runtime/arguments.hpp"
  44 #include "runtime/extendedPC.hpp"
  45 #include "runtime/globals.hpp"
  46 #include "runtime/interfaceSupport.hpp"
  47 #include "runtime/java.hpp"
  48 #include "runtime/javaCalls.hpp"
  49 #include "runtime/mutexLocker.hpp"
  50 #include "runtime/objectMonitor.hpp"
  51 #include "runtime/orderAccess.inline.hpp"
  52 #include "runtime/osThread.hpp"
  53 #include "runtime/perfMemory.hpp"
  54 #include "runtime/sharedRuntime.hpp"
  55 #include "runtime/statSampler.hpp"
  56 #include "runtime/stubRoutines.hpp"
  57 #include "runtime/thread.inline.hpp"
  58 #include "runtime/threadCritical.hpp"
  59 #include "runtime/timer.hpp"
  60 #include "services/attachListener.hpp"
  61 #include "services/memTracker.hpp"
  62 #include "services/runtimeService.hpp"
  63 #include "utilities/decoder.hpp"
  64 #include "utilities/defaultStream.hpp"
  65 #include "utilities/events.hpp"
  66 #include "utilities/growableArray.hpp"
  67 #include "utilities/vmError.hpp"
  68 
  69 // put OS-includes here
  70 # include <dlfcn.h>
  71 # include <errno.h>
  72 # include <exception>
  73 # include <link.h>
  74 # include <poll.h>
  75 # include <pthread.h>
  76 # include <pwd.h>
  77 # include <schedctl.h>
  78 # include <setjmp.h>
  79 # include <signal.h>
  80 # include <stdio.h>
  81 # include <alloca.h>
  82 # include <sys/filio.h>
  83 # include <sys/ipc.h>
  84 # include <sys/lwp.h>
  85 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
  86 # include <sys/mman.h>
  87 # include <sys/processor.h>
  88 # include <sys/procset.h>
  89 # include <sys/pset.h>
  90 # include <sys/resource.h>
  91 # include <sys/shm.h>
  92 # include <sys/socket.h>
  93 # include <sys/stat.h>
  94 # include <sys/systeminfo.h>
  95 # include <sys/time.h>
  96 # include <sys/times.h>
  97 # include <sys/types.h>
  98 # include <sys/wait.h>
  99 # include <sys/utsname.h>
 100 # include <thread.h>
 101 # include <unistd.h>
 102 # include <sys/priocntl.h>
 103 # include <sys/rtpriocntl.h>
 104 # include <sys/tspriocntl.h>
 105 # include <sys/iapriocntl.h>
 106 # include <sys/fxpriocntl.h>
 107 # include <sys/loadavg.h>
 108 # include <string.h>
 109 # include <stdio.h>
 110 
 111 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
 112 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
 113 
 114 #define MAX_PATH (2 * K)
 115 
 116 // for timer info max values which include all bits
 117 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 118 
 119 
 120 // Here are some liblgrp types from sys/lgrp_user.h so that we can still
 121 // compile on older systems that lack this header file.
 122 
 123 #ifndef MADV_ACCESS_LWP
 124 # define  MADV_ACCESS_LWP         7       /* next LWP to access heavily */
 125 #endif
 126 #ifndef MADV_ACCESS_MANY
 127 # define  MADV_ACCESS_MANY        8       /* many processes to access heavily */
 128 #endif
 129 
 130 #ifndef LGRP_RSRC_CPU
 131 # define LGRP_RSRC_CPU           0       /* CPU resources */
 132 #endif
 133 #ifndef LGRP_RSRC_MEM
 134 # define LGRP_RSRC_MEM           1       /* memory resources */
 135 #endif
 136 
 137 // see thr_setprio(3T) for the basis of these numbers
 138 #define MinimumPriority 0
 139 #define NormalPriority  64
 140 #define MaximumPriority 127
 141 
 142 // Values for ThreadPriorityPolicy == 1
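     // Entry 0 is a filler (Java priorities start at 1); entries 1-10 map the
     // ten Java thread priorities onto thr_setprio's 0..127 range, and the
     // last entry covers CriticalPriority.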
 143 int prio_policy1[CriticalPriority+1] = {
 144   -99999,  0, 16,  32,  48,  64,
 145           80, 96, 112, 124, 127, 127 };
 146 
 147 // System parameters used internally
 148 static clock_t clock_tics_per_sec = 100;
 149 
 150 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
 151 static bool enabled_extended_FILE_stdio = false;
 152 
 153 // For diagnostics to print a message once; see run_periodic_checks
 154 static bool check_addr0_done = false;
 155 static sigset_t check_signal_done;
 156 static bool check_signals = true;
 157 
 158 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
 159 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
 160 
 161 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
 162 
 163 
 164 // "default" initializers for missing libc APIs
 165 extern "C" {
 166   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 167   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
 168 
 169   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 170   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
 171 }
 172 
 173 // "default" initializers for pthread-based synchronization
 174 extern "C" {
 175   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 176   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 177 }
 178 
 179 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 180 
 181 static inline size_t adjust_stack_size(address base, size_t size) {
 182   if ((ssize_t)size < 0) {
 183     // 4759953: Compensate for ridiculous stack size.
 184     size = max_intx;
 185   }
 186   if (size > (size_t)base) {
 187     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
 188     size = (size_t)base;
 189   }
 190   return size;
 191 }
 192 
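     // Query the current thread's stack via thr_stksegment(): ss_sp is the stack
     // base (the high end, since stacks grow down) and ss_size is the stack size,
     // clamped by adjust_stack_size() above.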
 193 static inline stack_t get_stack_info() {
 194   stack_t st;
 195   int retval = thr_stksegment(&st);
 196   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
 197   assert(retval == 0, "incorrect return value from thr_stksegment");
 198   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
 199   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
 200   return st;
 201 }
 202 
 203 bool os::is_primordial_thread(void) {
 204   int r = thr_main() ;
 205   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
 206   return r == 1;
 207 }
 208 
 209 address os::current_stack_base() {
 210   bool _is_primordial_thread = is_primordial_thread();
 211 
 212   // Workaround 4352906, avoid calls to thr_stksegment by
 213   // thr_main after the first one (it looks like we trash
 214   // some data, causing the value for ss_sp to be incorrect).
 215   if (!_is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
 216     stack_t st = get_stack_info();
 217     if (_is_primordial_thread) {
 218       // cache initial value of stack base
 219       os::Solaris::_main_stack_base = (address)st.ss_sp;
 220     }
 221     return (address)st.ss_sp;
 222   } else {
 223     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
 224     return os::Solaris::_main_stack_base;
 225   }
 226 }
 227 
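     // Returns the usable stack size of the current thread.  For the primordial
     // thread the size is derived from RLIMIT_STACK rather than thr_stksegment()
     // (see the 4352906 workaround above).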
 228 size_t os::current_stack_size() {
 229   size_t size;
 230 
 231   if (!is_primordial_thread()) {
 232     size = get_stack_info().ss_size;
 233   } else {
 234     struct rlimit limits;
 235     getrlimit(RLIMIT_STACK, &limits);
 236     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
 237   }
 238   // base may not be page aligned
 239   address base = current_stack_base();
 240   address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
 241   return (size_t)(base - bottom);
 242 }
 243 
 244 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
 245   return localtime_r(clock, res);
 246 }
 247 
 248 // interruptible infrastructure
 249 
 250 // setup_interruptible saves the thread state before going into an
 251 // interruptible system call.
 252 // The saved state is used to restore the thread to
 253 // its former state whether or not an interrupt is received.
 254 // Used by classloader os::read
 255 // os::restartable_read calls skip this layer and stay in _thread_in_native
 256 
 257 void os::Solaris::setup_interruptible(JavaThread* thread) {
 258 
 259   JavaThreadState thread_state = thread->thread_state();
 260 
 261   assert(thread_state != _thread_blocked, "Coming from the wrong thread");
 262   assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
 263   OSThread* osthread = thread->osthread();
 264   osthread->set_saved_interrupt_thread_state(thread_state);
 265   thread->frame_anchor()->make_walkable(thread);
 266   ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
 267 }
 268 
 269 // Version of setup_interruptible() for threads that are already in
 270 // _thread_blocked. Used by os_sleep().
 271 void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) {
 272   thread->frame_anchor()->make_walkable(thread);
 273 }
 274 
 275 JavaThread* os::Solaris::setup_interruptible() {
 276   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
 277   setup_interruptible(thread);
 278   return thread;
 279 }
 280 
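     // On Solaris 10u4 and later, enable_extended_FILE_stdio(3C) lets stdio use
     // file descriptors above the historical 255 limit.  Look the function up with
     // dlsym() so the VM still runs on older releases where it is absent.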
 281 void os::Solaris::try_enable_extended_io() {
 282   typedef int (*enable_extended_FILE_stdio_t)(int, int);
 283 
 284   if (!UseExtendedFileIO) {
 285     return;
 286   }
 287 
 288   enable_extended_FILE_stdio_t enabler =
 289     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
 290                                          "enable_extended_FILE_stdio");
 291   if (enabler) {
 292     enabler(-1, -1);
 293   }
 294 }
 295 
 296 
 297 #ifdef ASSERT
 298 
 299 JavaThread* os::Solaris::setup_interruptible_native() {
 300   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
 301   JavaThreadState thread_state = thread->thread_state();
 302   assert(thread_state == _thread_in_native, "Assumed thread_in_native");
 303   return thread;
 304 }
 305 
 306 void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
 307   JavaThreadState thread_state = thread->thread_state();
 308   assert(thread_state == _thread_in_native, "Assumed thread_in_native");
 309 }
 310 #endif
 311 
 312 // cleanup_interruptible reverses the effects of setup_interruptible
 313 // setup_interruptible_already_blocked() does not need any cleanup.
 314 
 315 void os::Solaris::cleanup_interruptible(JavaThread* thread) {
 316   OSThread* osthread = thread->osthread();
 317 
 318   ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
 319 }
 320 
 321 // I/O interruption related counters called in _INTERRUPTIBLE
 322 
 323 void os::Solaris::bump_interrupted_before_count() {
 324   RuntimeService::record_interrupted_before_count();
 325 }
 326 
 327 void os::Solaris::bump_interrupted_during_count() {
 328   RuntimeService::record_interrupted_during_count();
 329 }
 330 
 331 static int _processors_online = 0;
 332 
 333          jint os::Solaris::_os_thread_limit = 0;
 334 volatile jint os::Solaris::_os_thread_count = 0;
 335 
 336 julong os::available_memory() {
 337   return Solaris::available_memory();
 338 }
 339 
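     // _SC_AVPHYS_PAGES reports the number of physical pages not currently in use.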
 340 julong os::Solaris::available_memory() {
 341   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
 342 }
 343 
 344 julong os::Solaris::_physical_memory = 0;
 345 
 346 julong os::physical_memory() {
 347    return Solaris::physical_memory();
 348 }
 349 
 350 static hrtime_t first_hrtime = 0;
 351 static const hrtime_t hrtime_hz = 1000*1000*1000;
 352 static volatile hrtime_t max_hrtime = 0;
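     // max_hrtime above records the highest gethrtime() value observed so far;
     // getTimeNanos() (below) uses it to keep returned times monotonically
     // non-decreasing.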
 353 
 354 
 355 void os::Solaris::initialize_system_info() {
 356   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
 357   _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
 358   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
 359 }
 360 
 361 int os::active_processor_count() {
 362   // User has overridden the number of active processors
 363   if (ActiveProcessorCount > 0) {
 364     if (Verbose) {
 365       tty->print_cr("active_processor_count: "
 366                     "active processor count set by user : %d",
 367                      ActiveProcessorCount);
 368     }
 369     return ActiveProcessorCount;
 370   }
 371 
 372   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 373   pid_t pid = getpid();
 374   psetid_t pset = PS_NONE;
 375   // Are we running in a processor set or is there any processor set around?
 376   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
 377     uint_t pset_cpus;
 378     // Query the number of cpus available to us.
 379     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
 380       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
 381       _processors_online = pset_cpus;
 382       return pset_cpus;
 383     }
 384   }
 385   // Otherwise return number of online cpus
 386   return online_cpus;
 387 }
 388 
 389 static bool find_processors_in_pset(psetid_t        pset,
 390                                     processorid_t** id_array,
 391                                     uint_t*         id_length) {
 392   bool result = false;
 393   // Find the number of processors in the processor set.
 394   if (pset_info(pset, NULL, id_length, NULL) == 0) {
 395     // Make up an array to hold their ids.
 396     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 397     // Fill in the array with their processor ids.
 398     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
 399       result = true;
 400     }
 401   }
 402   return result;
 403 }
 404 
 405 // Callers of find_processors_online() must tolerate imprecise results --
 406 // the system configuration can change asynchronously because of DR
 407 // or explicit psradm operations.
 408 //
 409 // We also need to take care that the loop (below) terminates as the
 410 // number of processors online can change between the _SC_NPROCESSORS_ONLN
 411 // request and the loop that builds the list of processor ids.   Unfortunately
 412 // there's no reliable way to determine the maximum valid processor id,
 413 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
 414 // man pages, which claim the processor id set is "sparse, but
 415 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
 416 // exit the loop.
 417 //
 418 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
 419 // not available on S8.0.
 420 
 421 static bool find_processors_online(processorid_t** id_array,
 422                                    uint*           id_length) {
 423   const processorid_t MAX_PROCESSOR_ID = 100000 ;
 424   // Find the number of processors online.
 425   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
 426   // Make up an array to hold their ids.
 427   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 428   // Processors need not be numbered consecutively.
 429   long found = 0;
 430   processorid_t next = 0;
 431   while (found < *id_length && next < MAX_PROCESSOR_ID) {
 432     processor_info_t info;
 433     if (processor_info(next, &info) == 0) {
 434       // NB, PI_NOINTR processors are effectively online ...
 435       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
 436         (*id_array)[found] = next;
 437         found += 1;
 438       }
 439     }
 440     next += 1;
 441   }
 442   if (found < *id_length) {
 443       // The loop above didn't identify the expected number of processors.
 444       // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
 445       // and re-running the loop, above, but there's no guarantee of progress
 446       // if the system configuration is in flux.  Instead, we just return what
 447       // we've got.  Note that in the worst case find_processors_online() could
 448       // return an empty set.  (As a fall-back in the case of the empty set we
 449       // could just return the ID of the current processor).
 450       *id_length = found ;
 451   }
 452 
 453   return true;
 454 }
 455 
 456 static bool assign_distribution(processorid_t* id_array,
 457                                 uint           id_length,
 458                                 uint*          distribution,
 459                                 uint           distribution_length) {
 460   // We assume we can assign processorid_t's to uint's.
 461   assert(sizeof(processorid_t) == sizeof(uint),
 462          "can't convert processorid_t to uint");
 463   // Quick check to see if we won't succeed.
 464   if (id_length < distribution_length) {
 465     return false;
 466   }
 467   // Assign processor ids to the distribution.
 468   // Try to shuffle processors to distribute work across boards,
 469   // assuming 4 processors per board.
 470   const uint processors_per_board = ProcessDistributionStride;
 471   // Find the maximum processor id.
 472   processorid_t max_id = 0;
 473   for (uint m = 0; m < id_length; m += 1) {
 474     max_id = MAX2(max_id, id_array[m]);
 475   }
 476   // The next id, to limit loops.
 477   const processorid_t limit_id = max_id + 1;
 478   // Make up markers for available processors.
 479   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
 480   for (uint c = 0; c < limit_id; c += 1) {
 481     available_id[c] = false;
 482   }
 483   for (uint a = 0; a < id_length; a += 1) {
 484     available_id[id_array[a]] = true;
 485   }
 486   // Step by "boards", then by "slot", copying to "assigned".
 487   // NEEDS_CLEANUP: The assignment of processors should be stateful,
 488   //                remembering which processors have been assigned by
 489   //                previous calls, etc., so as to distribute several
 490   //                independent calls of this method.  What we'd like is
 491   //                independent calls of this method.  It would be nice
 492   //                to have an API that let us ask how many processes
 493   //                are bound to a processor, but we don't have that
 494   //                either.
 495   //                subsequent distributions don't all start at board 0.
 496   static uint board = 0;
 497   uint assigned = 0;
 498   // Until we've found enough processors ....
 499   while (assigned < distribution_length) {
 500     // ... find the next available processor in the board.
 501     for (uint slot = 0; slot < processors_per_board; slot += 1) {
 502       uint try_id = board * processors_per_board + slot;
 503       if ((try_id < limit_id) && (available_id[try_id] == true)) {
 504         distribution[assigned] = try_id;
 505         available_id[try_id] = false;
 506         assigned += 1;
 507         break;
 508       }
 509     }
 510     board += 1;
 511     if (board * processors_per_board + 0 >= limit_id) {
 512       board = 0;
 513     }
 514   }
 515   if (available_id != NULL) {
 516     FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
 517   }
 518   return true;
 519 }
 520 
 521 void os::set_native_thread_name(const char *name) {
 522   // Not yet implemented.
 523   return;
 524 }
 525 
 526 bool os::distribute_processes(uint length, uint* distribution) {
 527   bool result = false;
 528   // Find the processor id's of all the available CPUs.
 529   processorid_t* id_array  = NULL;
 530   uint           id_length = 0;
 531   // There are some races between querying information and using it,
 532   // since processor sets can change dynamically.
 533   psetid_t pset = PS_NONE;
 534   // Are we running in a processor set?
 535   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
 536     result = find_processors_in_pset(pset, &id_array, &id_length);
 537   } else {
 538     result = find_processors_online(&id_array, &id_length);
 539   }
 540   if (result == true) {
 541     if (id_length >= length) {
 542       result = assign_distribution(id_array, id_length, distribution, length);
 543     } else {
 544       result = false;
 545     }
 546   }
 547   if (id_array != NULL) {
 548     FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
 549   }
 550   return result;
 551 }
 552 
 553 bool os::bind_to_processor(uint processor_id) {
 554   // We assume that a processorid_t can be stored in a uint.
 555   assert(sizeof(uint) == sizeof(processorid_t),
 556          "can't convert uint to processorid_t");
 557   int bind_result =
 558     processor_bind(P_LWPID,                       // bind LWP.
 559                    P_MYID,                        // bind current LWP.
 560                    (processorid_t) processor_id,  // id.
 561                    NULL);                         // don't return old binding.
 562   return (bind_result == 0);
 563 }
 564 
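     // Copy the value of environment variable 'name' into 'buffer' (at most 'len'
     // bytes including the terminating NUL).  Returns false and stores an empty
     // string if the variable is unset or the value does not fit.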
 565 bool os::getenv(const char* name, char* buffer, int len) {
 566   char* val = ::getenv( name );
 567   if ( val == NULL
 568   ||   strlen(val) + 1  >  len ) {
 569     if (len > 0)  buffer[0] = 0; // return a null string
 570     return false;
 571   }
 572   strcpy( buffer, val );
 573   return true;
 574 }
 575 
 576 
 577 // Return true if the process is running with special privileges, i.e. the
 578 // effective user or group id differs from the real id (setuid/setgid).
 579 bool os::have_special_privileges() {
 580   static bool init = false;
 581   static bool privileges = false;
 582   if (!init) {
 583     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 584     init = true;
 585   }
 586   return privileges;
 587 }
 588 
 589 
 590 void os::init_system_properties_values() {
 591   // The next steps are taken in the product version:
 592   //
 593   // Obtain the JAVA_HOME value from the location of libjvm.so.
 594   // This library should be located at:
 595   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
 596   //
 597   // If "/jre/lib/" appears at the right place in the path, then we
 598   // assume libjvm.so is installed in a JDK and we use this path.
 599   //
 600   // Otherwise exit with message: "Could not create the Java virtual machine."
 601   //
 602   // The following extra steps are taken in the debugging version:
 603   //
 604   // If "/jre/lib/" does NOT appear at the right place in the path
 605   // instead of exit check for $JAVA_HOME environment variable.
 606   //
 607   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
 608   // then we append a fake suffix "hotspot/libjvm.so" to this path so
 609   // it looks like libjvm.so is installed there
 610   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
 611   //
 612   // Otherwise exit.
 613   //
 614   // Important note: if the location of libjvm.so changes this
 615   // code needs to be changed accordingly.
 616 
 617 // Base path of extensions installed on the system.
 618 #define SYS_EXT_DIR     "/usr/jdk/packages"
 619 #define EXTENSIONS_DIR  "/lib/ext"
 620 #define ENDORSED_DIR    "/lib/endorsed"
 621 
 622   char cpu_arch[12];
 623   // Buffer that fits several sprintfs.
 624   // Note that the space for the colon and the trailing null are provided
 625   // by the nulls included by the sizeof operator.
 626   const size_t bufsize =
 627     MAX4((size_t)MAXPATHLEN,  // For dll_dir & friends.
 628          sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(cpu_arch), // invariant ld_library_path; sizeof, since cpu_arch is not filled in yet
 629          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
 630          (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
 631   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 632 
 633   // sysclasspath, java_home, dll_dir
 634   {
 635     char *pslash;
 636     os::jvm_path(buf, bufsize);
 637 
 638     // Found the full path to libjvm.so.
 639     // Now cut the path to <java_home>/jre if we can.
 640     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 641     pslash = strrchr(buf, '/');
 642     if (pslash != NULL) {
 643       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 644     }
 645     Arguments::set_dll_dir(buf);
 646 
 647     if (pslash != NULL) {
 648       pslash = strrchr(buf, '/');
 649       if (pslash != NULL) {
 650         *pslash = '\0';          // Get rid of /<arch>.
 651         pslash = strrchr(buf, '/');
 652         if (pslash != NULL) {
 653           *pslash = '\0';        // Get rid of /lib.
 654         }
 655       }
 656     }
 657     Arguments::set_java_home(buf);
 658     set_boot_path('/', ':');
 659   }
 660 
 661   // Where to look for native libraries.
 662   {
 663     // Use dlinfo() to determine the correct java.library.path.
 664     //
 665     // If we're launched by the Java launcher, and the user
 666     // does not set java.library.path explicitly on the commandline,
 667     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
 668     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
 669     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
 670     // /usr/lib), which is exactly what we want.
 671     //
 672     // If the user does set java.library.path, it completely
 673     // overwrites this setting, and always has.
 674     //
 675     // If we're not launched by the Java launcher, we may
 676     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
 677     // settings.  Again, dlinfo does exactly what we want.
 678 
 679     Dl_serinfo     info_sz, *info = &info_sz;
 680     Dl_serpath     *path;
 681     char           *library_path;
 682     char           *common_path = buf;
 683 
 684     // Determine search path count and required buffer size.
 685     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
 686       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
 687       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
 688     }
 689 
 690     // Allocate new buffer and initialize.
 691     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
 692     info->dls_size = info_sz.dls_size;
 693     info->dls_cnt = info_sz.dls_cnt;
 694 
 695     // Obtain search path information.
 696     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
 697       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
 698       FREE_C_HEAP_ARRAY(char, info, mtInternal);
 699       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
 700     }
 701 
 702     path = &info->dls_serpath[0];
 703 
 704     // Note: Due to a legacy implementation, most of the library path
 705     // is set in the launcher. This was to accommodate linking restrictions
 706     // on legacy Solaris implementations (which are no longer supported).
 707     // Eventually, all the library path setting will be done here.
 708     //
 709     // However, to prevent the proliferation of improperly built native
 710     // libraries, the new path component /usr/jdk/packages is added here.
 711 
 712     // Determine the actual CPU architecture.
 713     sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
 714 #ifdef _LP64
 715     // If we are a 64-bit vm, perform the following translations:
 716     //   sparc   -> sparcv9
 717     //   i386    -> amd64
 718     if (strcmp(cpu_arch, "sparc") == 0) {
 719       strcat(cpu_arch, "v9");
 720     } else if (strcmp(cpu_arch, "i386") == 0) {
 721       strcpy(cpu_arch, "amd64");
 722     }
 723 #endif
 724 
 725     // Construct the invariant part of ld_library_path.
 726     sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
 727 
 728     // Struct size is more than sufficient for the path components obtained
 729     // through the dlinfo() call, so only add additional space for the path
 730     // components explicitly added here.
 731     size_t library_path_size = info->dls_size + strlen(common_path);
 732     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
 733     library_path[0] = '\0';
 734 
 735     // Construct the desired Java library path from the linker's library
 736     // search path.
 737     //
 738     // For compatibility, it is optimal that we insert the additional path
 739     // components specific to the Java VM after those components specified
 740     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
 741     // infrastructure.
 742     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
 743       strcpy(library_path, common_path);
 744     } else {
 745       int inserted = 0;
 746       int i;
 747       for (i = 0; i < info->dls_cnt; i++, path++) {
 748         uint_t flags = path->dls_flags & LA_SER_MASK;
 749         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
 750           strcat(library_path, common_path);
 751           strcat(library_path, os::path_separator());
 752           inserted = 1;
 753         }
 754         strcat(library_path, path->dls_name);
 755         strcat(library_path, os::path_separator());
 756       }
 757       // Eliminate trailing path separator.
 758       library_path[strlen(library_path)-1] = '\0';
 759     }
 760 
 761     // happens before argument parsing - can't use a trace flag
 762     // tty->print_raw("init_system_properties_values: native lib path: ");
 763     // tty->print_raw_cr(library_path);
 764 
 765     // Callee copies into its own buffer.
 766     Arguments::set_library_path(library_path);
 767 
 768     FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
 769     FREE_C_HEAP_ARRAY(char, info, mtInternal);
 770   }
 771 
 772   // Extensions directories.
 773   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
 774   Arguments::set_ext_dirs(buf);
 775 
 776   // Endorsed standards default directory.
 777   sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
 778   Arguments::set_endorsed_dirs(buf);
 779 
 780   FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 781 
 782 #undef SYS_EXT_DIR
 783 #undef EXTENSIONS_DIR
 784 #undef ENDORSED_DIR
 785 }
 786 
 787 void os::breakpoint() {
 788   BREAKPOINT;
 789 }
 790 
 791 bool os::obsolete_option(const JavaVMOption *option)
 792 {
 793   if (!strncmp(option->optionString, "-Xt", 3)) {
 794     return true;
 795   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
 796     return true;
 797   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
 798     return true;
 799   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
 800     return true;
 801   }
 802   return false;
 803 }
 804 
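     // Returns true if sp lies within the thread's stack, i.e. in
     // [stack_base - stack_size, stack_base).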
 805 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
 806   address  stackStart  = (address)thread->stack_base();
 807   address  stackEnd    = stackStart - thread->stack_size();
 808   return sp < stackStart && sp >= stackEnd;
 810 }
 811 
 812 extern "C" void breakpoint() {
 813   // use debugger to set breakpoint here
 814 }
 815 
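     // The thread id of the primordial thread, used by os::create_main_thread() below.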
 816 static thread_t main_thread;
 817 
 818 // Thread start routine for all new Java threads
 819 extern "C" void* java_start(void* thread_addr) {
 820   // Try to randomize the cache line index of hot stack frames.
 821   // This helps when threads of the same stack traces evict each other's
 822   // cache lines. The threads can be either from the same JVM instance, or
 823   // from different JVM instances. The benefit is especially true for
 824   // processors with hyperthreading technology.
 825   static int counter = 0;
 826   int pid = os::current_process_id();
 827   alloca(((pid ^ counter++) & 7) * 128);
 828 
 829   int prio;
 830   Thread* thread = (Thread*)thread_addr;
 831   OSThread* osthr = thread->osthread();
 832 
 833   osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
 834   thread->_schedctl = (void *) schedctl_init () ;
 835 
 836   if (UseNUMA) {
 837     int lgrp_id = os::numa_get_group_id();
 838     if (lgrp_id != -1) {
 839       thread->set_lgrp_id(lgrp_id);
 840     }
 841   }
 842 
 843   // If the creator called set priority before we started,
 844   // we need to call set_native_priority now that we have an lwp.
 845   // We used to get the priority from thr_getprio (we called
 846   // thr_setprio way back in create_thread) and pass it to
 847   // set_native_priority, but Solaris scales the priority
 848   // in java_to_os_priority, so when we read it back here,
 849   // we pass trash to set_native_priority instead of what's
 850   // in java_to_os_priority. So we save the native priority
 851   // in the osThread and recall it here.
 852 
 853   if ( osthr->thread_id() != -1 ) {
 854     if ( UseThreadPriorities ) {
 855       prio = osthr->native_priority();
 856       if (ThreadPriorityVerbose) {
 857         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
 858                       INTPTR_FORMAT ", setting priority: %d\n",
 859                       osthr->thread_id(), osthr->lwp_id(), prio);
 860       }
 861       os::set_native_priority(thread, prio);
 862     }
 863   } else if (ThreadPriorityVerbose) {
 864     warning("Can't set priority in _start routine, thread id hasn't been set\n");
 865   }
 866 
 867   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 868 
 869   // initialize signal mask for this thread
 870   os::Solaris::hotspot_sigmask(thread);
 871 
 872   thread->run();
 873 
 874   // One less thread is executing
 875   // When the VMThread gets here, the main thread may have already exited
 876   // which frees the CodeHeap containing the Atomic::dec code
 877   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 878     Atomic::dec(&os::Solaris::_os_thread_count);
 879   }
 880 
 881   if (UseDetachedThreads) {
 882     thr_exit(NULL);
 883     ShouldNotReachHere();
 884   }
 885   return NULL;
 886 }
 887 
 888 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
 889   // Allocate the OSThread object
 890   OSThread* osthread = new OSThread(NULL, NULL);
 891   if (osthread == NULL) return NULL;
 892 
 893   // Store info on the Solaris thread into the OSThread
 894   osthread->set_thread_id(thread_id);
 895   osthread->set_lwp_id(_lwp_self());
 896   thread->_schedctl = (void *) schedctl_init () ;
 897 
 898   if (UseNUMA) {
 899     int lgrp_id = os::numa_get_group_id();
 900     if (lgrp_id != -1) {
 901       thread->set_lgrp_id(lgrp_id);
 902     }
 903   }
 904 
 905   if ( ThreadPriorityVerbose ) {
 906     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
 907                   osthread->thread_id(), osthread->lwp_id() );
 908   }
 909 
 910   // Initial thread state is INITIALIZED, not SUSPENDED
 911   osthread->set_state(INITIALIZED);
 912 
 913   return osthread;
 914 }
 915 
 916 void os::Solaris::hotspot_sigmask(Thread* thread) {
 917 
 918   //Save caller's signal mask
 919   sigset_t sigmask;
 920   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
 921   OSThread *osthread = thread->osthread();
 922   osthread->set_caller_sigmask(sigmask);
 923 
 924   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
 925   if (!ReduceSignalUsage) {
 926     if (thread->is_VM_thread()) {
 927       // Only the VM thread handles BREAK_SIGNAL ...
 928       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
 929     } else {
 930       // ... all other threads block BREAK_SIGNAL
 931       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
 932       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
 933     }
 934   }
 935 }
 936 
 937 bool os::create_attached_thread(JavaThread* thread) {
 938 #ifdef ASSERT
 939   thread->verify_not_published();
 940 #endif
 941   OSThread* osthread = create_os_thread(thread, thr_self());
 942   if (osthread == NULL) {
 943      return false;
 944   }
 945 
 946   // Initial thread state is RUNNABLE
 947   osthread->set_state(RUNNABLE);
 948   thread->set_osthread(osthread);
 949 
 950   // initialize signal mask for this thread
 951   // and save the caller's signal mask
 952   os::Solaris::hotspot_sigmask(thread);
 953 
 954   return true;
 955 }
 956 
 957 bool os::create_main_thread(JavaThread* thread) {
 958 #ifdef ASSERT
 959   thread->verify_not_published();
 960 #endif
 961   if (_starting_thread == NULL) {
 962     _starting_thread = create_os_thread(thread, main_thread);
 963      if (_starting_thread == NULL) {
 964         return false;
 965      }
 966   }
 967 
 968   // The primordial thread is runnable from the start
 969   _starting_thread->set_state(RUNNABLE);
 970 
 971   thread->set_osthread(_starting_thread);
 972 
 973   // initialize signal mask for this thread
 974   // and save the caller's signal mask
 975   os::Solaris::hotspot_sigmask(thread);
 976 
 977   return true;
 978 }
 979 
 980 // _T2_libthread is true if we believe we are running with the newer
 981 // SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
 982 bool os::Solaris::_T2_libthread = false;
 983 
 984 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 985   // Allocate the OSThread object
 986   OSThread* osthread = new OSThread(NULL, NULL);
 987   if (osthread == NULL) {
 988     return false;
 989   }
 990 
 991   if ( ThreadPriorityVerbose ) {
 992     char *thrtyp;
 993     switch ( thr_type ) {
 994       case vm_thread:
 995         thrtyp = (char *)"vm";
 996         break;
 997       case cgc_thread:
 998         thrtyp = (char *)"cgc";
 999         break;
1000       case pgc_thread:
1001         thrtyp = (char *)"pgc";
1002         break;
1003       case java_thread:
1004         thrtyp = (char *)"java";
1005         break;
1006       case compiler_thread:
1007         thrtyp = (char *)"compiler";
1008         break;
1009       case watcher_thread:
1010         thrtyp = (char *)"watcher";
1011         break;
1012       default:
1013         thrtyp = (char *)"unknown";
1014         break;
1015     }
1016     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
1017   }
1018 
1019   // Calculate stack size if it's not specified by caller.
1020   if (stack_size == 0) {
1021     // The default stack size is 1M (2M for LP64).
1022     stack_size = (BytesPerWord >> 2) * K * K;
1023 
1024     switch (thr_type) {
1025     case os::java_thread:
1026       // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
1027       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
1028       break;
1029     case os::compiler_thread:
1030       if (CompilerThreadStackSize > 0) {
1031         stack_size = (size_t)(CompilerThreadStackSize * K);
1032         break;
1033       } // else fall through:
1034         // use VMThreadStackSize if CompilerThreadStackSize is not defined
1035     case os::vm_thread:
1036     case os::pgc_thread:
1037     case os::cgc_thread:
1038     case os::watcher_thread:
1039       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1040       break;
1041     }
1042   }
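       // Never use less than the platform's minimum allowed stack size.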
1043   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1044 
1045   // Initial state is ALLOCATED but not INITIALIZED
1046   osthread->set_state(ALLOCATED);
1047 
1048   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
1049     // We got lots of threads. Check if we still have some address space left.
1050     // Need to have at least 20Mb of unreserved address space. We check by
1051     // trying to reserve some.
1052     const size_t VirtualMemoryBangSize = 20*K*K;
1053     char* mem = os::reserve_memory(VirtualMemoryBangSize);
1054     if (mem == NULL) {
1055       delete osthread;
1056       return false;
1057     } else {
1058       // Release the memory again
1059       os::release_memory(mem, VirtualMemoryBangSize);
1060     }
1061   }
1062 
1063   // Setup osthread because the child thread may need it.
1064   thread->set_osthread(osthread);
1065 
1066   // Create the Solaris thread
1067   // Pass THR_BOUND explicitly for the T2_libthread case, in case that
1068   // assumption is not accurate; our alternate signal stack handling
1069   // depends on it and requires bound threads.
1070   thread_t tid = 0;
1071   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
1072                    | ((UseBoundThreads || os::Solaris::T2_libthread() ||
1073                        (thr_type == vm_thread) ||
1074                        (thr_type == cgc_thread) ||
1075                        (thr_type == pgc_thread) ||
1076                        (thr_type == compiler_thread && BackgroundCompilation)) ?
1077                       THR_BOUND : 0);
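       // THR_SUSPENDED: the new thread starts suspended and is released later by
       // os::pd_start_thread() via thr_continue(); THR_DETACHED means its resources
       // are reclaimed at exit without a thr_join().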
1078   int      status;
1079 
1080   // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
1081   //
1082   // On multiprocessor systems, libthread sometimes under-provisions our
1083   // process with LWPs.  On a 30-way system, for instance, we could have
1084   // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
1085   // to our process.  This can result in underutilization of PEs.
1086   // I suspect the problem is related to libthread's LWP
1087   // pool management and to the kernel's SIGBLOCKING "last LWP parked"
1088   // upcall policy.
1089   //
1090   // The following code is palliative -- it attempts to ensure that our
1091   // process has sufficient LWPs to take advantage of multiple PEs.
1092   // Proper long-term cures include using user-level threads bound to LWPs
1093   // (THR_BOUND) or using LWP-based synchronization.  Note that there is a
1094   // slight timing window with respect to sampling _os_thread_count, but
1095   // the race is benign.  Also, we should periodically recompute
1096   // _processors_online as the min of SC_NPROCESSORS_ONLN and
1097   // the number of PEs in our partition.  You might be tempted to use
1098   // THR_NEW_LWP here, but I'd recommend against it as that could
1099   // result in undesirable growth of the libthread's LWP pool.
1100   // The fix below isn't sufficient; for instance, it doesn't take into account
1101   // LWPs parked on IO.  It does, however, help certain CPU-bound benchmarks.
1102   //
1103   // Some pathologies this scheme doesn't handle:
1104   // *  Threads can block, releasing the LWPs.  The LWPs can age out.
1105   //    When a large number of threads become ready again there aren't
1106   //    enough LWPs available to service them.  This can occur when the
1107   //    number of ready threads oscillates.
1108   // *  LWPs/Threads park on IO, thus taking the LWP out of circulation.
1109   //
1110   // Finally, we should call thr_setconcurrency() periodically to refresh
1111   // the LWP pool and thwart the LWP age-out mechanism.
1112   // The "+3" term provides a little slop -- we want to slightly overprovision.
1113 
1114   if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
1115     if (!(flags & THR_BOUND)) {
1116       thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
1117     }
1118   }
1119   // Although this doesn't hurt, we should warn of undefined behavior
1120   // when using unbound T1 threads with schedctl().  This should never
1121   // happen, as the compiler and VM threads are always created bound
1122   DEBUG_ONLY(
1123       if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
1124           (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
1125           ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
1126            (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
1127          warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
1128       }
1129   );
1130 
1131 
1132   // Mark that we don't have an lwp or thread id yet.
1133   // In case we attempt to set the priority before the thread starts.
1134   osthread->set_lwp_id(-1);
1135   osthread->set_thread_id(-1);
1136 
1137   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1138   if (status != 0) {
1139     if (PrintMiscellaneous && (Verbose || WizardMode)) {
1140       perror("os::create_thread");
1141     }
1142     thread->set_osthread(NULL);
1143     // Need to clean up stuff we've allocated so far
1144     delete osthread;
1145     return false;
1146   }
1147 
1148   Atomic::inc(&os::Solaris::_os_thread_count);
1149 
1150   // Store info on the Solaris thread into the OSThread
1151   osthread->set_thread_id(tid);
1152 
1153   // Remember that we created this thread so we can set priority on it
1154   osthread->set_vm_created();
1155 
1156   // Set the default thread priority.  If using bound threads, setting
1157   // lwp priority will be delayed until thread start.
1158   set_native_priority(thread,
1159                       DefaultThreadPriority == -1 ?
1160                         java_to_os_priority[NormPriority] :
1161                         DefaultThreadPriority);
1162 
1163   // Initial thread state is INITIALIZED, not SUSPENDED
1164   osthread->set_state(INITIALIZED);
1165 
1166   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1167   return true;
1168 }
1169 
1170 /* SIGJVM1/SIGJVM2 are defined for Solaris 10 and later. Defining them here
1171  * allows builds on earlier Solaris versions to take advantage of the newly
1172  * reserved Solaris JVM signals: INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is
1173  * SIGJVM2, and -XX:+UseAltSigs does nothing since these should have no conflict.
1174  */
1175 #if !defined(SIGJVM1)
1176 #define SIGJVM1 39
1177 #define SIGJVM2 40
1178 #endif
1179 
1180 debug_only(static bool signal_sets_initialized = false);
1181 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1182 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1183 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1184 
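     // Returns true if the current disposition of 'sig' is SIG_IGN.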
1185 bool os::Solaris::is_sig_ignored(int sig) {
1186   struct sigaction oact;
1187   sigaction(sig, (struct sigaction*)NULL, &oact);
1188   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
1189                                  : CAST_FROM_FN_PTR(void*, oact.sa_handler);
1190   return ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN);
1194 }
1195 
1196 // Note: SIGRTMIN is a macro that calls sysconf(), so it dynamically
1197 // detects the SIGRTMIN value for the system at runtime, not at build time.
1198 static bool isJVM1available() {
1199   return SIGJVM1 < SIGRTMIN;
1200 }
1201 
1202 void os::Solaris::signal_sets_init() {
1203   // Should also have an assertion stating we are still single-threaded.
1204   assert(!signal_sets_initialized, "Already initialized");
1205   // Fill in signals that are necessarily unblocked for all threads in
1206   // the VM. Currently, we unblock the following signals:
1207   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1208   //                         by -Xrs (=ReduceSignalUsage));
1209   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1210   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1211   // the dispositions or masks wrt these signals.
1212   // Programs embedding the VM that want to use the above signals for their
1213   // own purposes must, at this time, use the "-Xrs" option to prevent
1214   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1215   // (See bug 4345157, and other related bugs).
1216   // In reality, though, unblocking these signals is really a nop, since
1217   // these signals are not blocked by default.
1218   sigemptyset(&unblocked_sigs);
1219   sigemptyset(&allowdebug_blocked_sigs);
1220   sigaddset(&unblocked_sigs, SIGILL);
1221   sigaddset(&unblocked_sigs, SIGSEGV);
1222   sigaddset(&unblocked_sigs, SIGBUS);
1223   sigaddset(&unblocked_sigs, SIGFPE);
1224 
1225   if (isJVM1available()) {
1226     os::Solaris::set_SIGinterrupt(SIGJVM1);
1227     os::Solaris::set_SIGasync(SIGJVM2);
1228   } else if (UseAltSigs) {
1229     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1230     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1231   } else {
1232     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1233     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1234   }
1235 
1236   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1237   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1238 
1239   if (!ReduceSignalUsage) {
1240    if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1241       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1242       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1243    }
1244    if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1245       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1246       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1247    }
1248    if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1249       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1250       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1251    }
1252   }
1253   // Fill in signals that are blocked by all but the VM thread.
1254   sigemptyset(&vm_sigs);
1255   if (!ReduceSignalUsage)
1256     sigaddset(&vm_sigs, BREAK_SIGNAL);
1257   debug_only(signal_sets_initialized = true);
1258 
1259   // For diagnostics only used in run_periodic_checks
1260   sigemptyset(&check_signal_done);
1261 }
1262 
1263 // These are signals that are unblocked while a thread is running Java.
1264 // (For some reason, they get blocked by default.)
1265 sigset_t* os::Solaris::unblocked_signals() {
1266   assert(signal_sets_initialized, "Not initialized");
1267   return &unblocked_sigs;
1268 }
1269 
1270 // These are the signals that are blocked while a (non-VM) thread is
1271 // running Java. Only the VM thread handles these signals.
1272 sigset_t* os::Solaris::vm_signals() {
1273   assert(signal_sets_initialized, "Not initialized");
1274   return &vm_sigs;
1275 }
1276 
1277 // These are signals that are blocked during cond_wait to allow debugger in
1278 sigset_t* os::Solaris::allowdebug_blocked_signals() {
1279   assert(signal_sets_initialized, "Not initialized");
1280   return &allowdebug_blocked_sigs;
1281 }
1282 
1283 
1284 void _handle_uncaught_cxx_exception() {
1285   VMError err("An uncaught C++ exception");
1286   err.report_and_die();
1287 }
1288 
1289 
1290 // First crack at OS-specific initialization, from inside the new thread.
1291 void os::initialize_thread(Thread* thr) {
1292   if (is_primordial_thread()) {
1293     JavaThread* jt = (JavaThread *)thr;
1294     assert(jt != NULL,"Sanity check");
1295     size_t stack_size;
1296     address base = jt->stack_base();
1297     if (Arguments::created_by_java_launcher()) {
1298       // Use 2MB to allow for Solaris 7 64 bit mode.
1299       stack_size = JavaThread::stack_size_at_create() == 0
1300         ? 2048*K : JavaThread::stack_size_at_create();
1301 
1302       // There are rare cases when we may have already used more than
1303       // the basic stack size allotment before this method is invoked.
1304       // Attempt to allow for a normally sized java_stack.
1305       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1306       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1307     } else {
1308       // 6269555: If we were not created by a Java launcher, i.e. if we are
1309       // running embedded in a native application, treat the primordial thread
1310       // as much like a native attached thread as possible.  This means using
1311       // the current stack size from thr_stksegment(), unless it is too large
1312       // to reliably setup guard pages.  A reasonable max size is 8MB.
1313       size_t current_size = current_stack_size();
1314       // This should never happen, but just in case....
1315       if (current_size == 0) current_size = 2 * K * K;
1316       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1317     }
1318     address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
1319     stack_size = (size_t)(base - bottom);
1320 
1321     assert(stack_size > 0, "Stack size calculation problem");
1322 
1323     if (stack_size > jt->stack_size()) {
1324       NOT_PRODUCT(
1325         struct rlimit limits;
1326         getrlimit(RLIMIT_STACK, &limits);
1327         size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1328         assert(size >= jt->stack_size(), "Stack size problem in main thread");
1329       )
1330       tty->print_cr(
1331         "Stack size of %d Kb exceeds current limit of %d Kb.\n"
1332         "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1333         "See limit(1) to increase the stack size limit.",
1334         stack_size / K, jt->stack_size() / K);
1335       vm_exit(1);
1336     }
1337     assert(jt->stack_size() >= stack_size,
1338           "Attempt to map more stack than was allocated");
1339     jt->set_stack_size(stack_size);
1340   }
1341 
1342    // 5/22/01: Right now alternate signal stacks do not handle
1343    // throwing stack overflow exceptions, see bug 4463178
1344    // Until a fix is found for this, T2 will NOT imply alternate signal
1345    // stacks.
1346    // If using T2 libthread threads, install an alternate signal stack.
1347    // Because alternate stacks associate with LWPs on Solaris,
1348    // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
1349    // we prefer to explicitly stack bang.
1350    // If not using T2 libthread, but using UseBoundThreads any threads
1351    // (primordial thread, jni_attachCurrentThread) we do not create,
1352    // probably are not bound, therefore they can not have an alternate
1353    // signal stack. Since our stack banging code is generated and
1354    // is shared across threads, all threads must be bound to allow
1355    // using alternate signal stacks.  The alternative is to interpose
1356    // on _lwp_create to associate an alt sig stack with each LWP,
1357    // and this could be a problem when the JVM is embedded.
1358    // We would prefer to use alternate signal stacks with T2
1359    // Since there is currently no accurate way to detect T2
1360    // we do not. Assuming T2 when running T1 causes sig 11s or assertions
1361    // on installing alternate signal stacks
1362 
1363 
1364    // 05/09/03: removed alternate signal stack support for Solaris
1365    // The alternate signal stack mechanism is no longer needed to
1366    // handle stack overflow. This is now handled by allocating
1367    // guard pages (red zone) and stackbanging.
1368    // Initially the alternate signal stack mechanism was removed because
1369    // it did not work with T1 libthread. Alternate
1370    // signal stacks MUST have all threads bound to lwps. Applications
1371    // can create their own threads and attach them without their being
1372    // bound under T1. This is frequently the case for the primordial thread.
1373    // If we were ever to reenable this mechanism we would need to
1374    // use the dynamic check for T2 libthread.
1375 
1376   os::Solaris::init_thread_fpu_state();
1377   std::set_terminate(_handle_uncaught_cxx_exception);
1378 }
1379 
1380 
1381 
1382 // Free Solaris resources related to the OSThread
1383 void os::free_thread(OSThread* osthread) {
1384   assert(osthread != NULL, "os::free_thread but osthread not set");
1385 
1386 
1387   // We are told to free resources of the argument thread,
1388   // but we can only really operate on the current thread.
1389   // The main thread must take the VMThread down synchronously
1390   // before the main thread exits and frees up CodeHeap
1391   guarantee((Thread::current()->osthread() == osthread
1392      || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1393   if (Thread::current()->osthread() == osthread) {
1394     // Restore caller's signal mask
1395     sigset_t sigmask = osthread->caller_sigmask();
1396     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1397   }
1398   delete osthread;
1399 }
1400 
1401 void os::pd_start_thread(Thread* thread) {
1402   int status = thr_continue(thread->osthread()->thread_id());
1403   assert_status(status == 0, status, "thr_continue failed");
1404 }
1405 
1406 
1407 intx os::current_thread_id() {
1408   return (intx)thr_self();
1409 }
1410 
1411 static pid_t _initial_pid = 0;
1412 
1413 int os::current_process_id() {
1414   return (int)(_initial_pid ? _initial_pid : getpid());
1415 }
1416 
1417 // gethrtime() should be monotonic according to the documentation,
1418 // but some virtualized platforms are known to break this guarantee.
1419 // getTimeNanos() must be guaranteed not to move backwards, so we
1420 // are forced to add a check here.
1421 inline hrtime_t getTimeNanos() {
1422   const hrtime_t now = gethrtime();
1423   const hrtime_t prev = max_hrtime;
1424   if (now <= prev) {
1425     return prev;   // same or retrograde time;
1426   }
1427   const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1428   assert(obsv >= prev, "invariant");   // Monotonicity
1429   // If the CAS succeeded then we're done and return "now".
1430   // If the CAS failed and the observed value "obsv" is >= now then
1431   // we should return "obsv".  If the CAS failed and now > obsv > prev then
1432   // some other thread raced this thread and installed a new value, in which case
1433   // we could either (a) retry the entire operation, (b) retry trying to install now
1434   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1435   // we might discard a higher "now" value in deference to a slightly lower but freshly
1436   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1437   // to (a) or (b) -- and greatly reduces coherence traffic.
1438   // We might also condition (c) on the magnitude of the delta between obsv and now.
1439   // Avoiding excessive CAS operations to hot RW locations is critical.
1440   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1441   return (prev == obsv) ? now : obsv;
1442 }
1443 
1444 // Time since start-up in seconds to a fine granularity.
1445 // Used by VMSelfDestructTimer and the MemProfiler.
1446 double os::elapsedTime() {
1447   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1448 }
1449 
1450 jlong os::elapsed_counter() {
1451   return (jlong)(getTimeNanos() - first_hrtime);
1452 }
1453 
1454 jlong os::elapsed_frequency() {
1455    return hrtime_hz;
1456 }
1457 
1458 // Return the real, user, and system times in seconds from an
1459 // arbitrary fixed point in the past.
1460 bool os::getTimesSecs(double* process_real_time,
1461                   double* process_user_time,
1462                   double* process_system_time) {
1463   struct tms ticks;
1464   clock_t real_ticks = times(&ticks);
1465 
1466   if (real_ticks == (clock_t) (-1)) {
1467     return false;
1468   } else {
1469     double ticks_per_second = (double) clock_tics_per_sec;
1470     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1471     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1472     // For consistency return the real time from getTimeNanos()
1473     // converted to seconds.
1474     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1475 
1476     return true;
1477   }
1478 }
1479 
1480 bool os::supports_vtime() { return true; }
1481 
1482 bool os::enable_vtime() {
1483   int fd = ::open("/proc/self/ctl", O_WRONLY);
1484   if (fd == -1)
1485     return false;
1486 
1487   long cmd[] = { PCSET, PR_MSACCT };
1488   int res = ::write(fd, cmd, sizeof(long) * 2);
1489   ::close(fd);
1490   if (res != sizeof(long) * 2)
1491     return false;
1492 
1493   return true;
1494 }
1495 
1496 bool os::vtime_enabled() {
1497   int fd = ::open("/proc/self/status", O_RDONLY);
1498   if (fd == -1)
1499     return false;
1500 
1501   pstatus_t status;
1502   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1503   ::close(fd);
1504   if (res != sizeof(pstatus_t))
1505     return false;
1506 
1507   return status.pr_flags & PR_MSACCT;
1508 }
1509 
1510 double os::elapsedVTime() {
1511   return (double)gethrvtime() / (double)hrtime_hz;
1512 }
1513 
1514 // Used internally for comparisons only
1515 // getTimeMillis guaranteed to not move backwards on Solaris
1516 jlong getTimeMillis() {
1517   jlong nanotime = getTimeNanos();
1518   return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1519 }
1520 
1521 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1522 jlong os::javaTimeMillis() {
1523   timeval t;
1524   if (gettimeofday( &t, NULL) == -1)
1525     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1526   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1527 }
1528 
1529 jlong os::javaTimeNanos() {
1530   return (jlong)getTimeNanos();
1531 }
1532 
1533 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1534   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1535   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1536   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1537   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1538 }
1539 
1540 char * os::local_time_string(char *buf, size_t buflen) {
1541   struct tm t;
1542   time_t long_time;
1543   time(&long_time);
1544   localtime_r(&long_time, &t);
1545   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1546                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1547                t.tm_hour, t.tm_min, t.tm_sec);
1548   return buf;
1549 }
1550 
1551 // Note: os::shutdown() might be called very early during initialization, or
1552 // called from signal handler. Before adding something to os::shutdown(), make
1553 // sure it is async-safe and can handle partially initialized VM.
1554 void os::shutdown() {
1555 
1556   // allow PerfMemory to attempt cleanup of any persistent resources
1557   perfMemory_exit();
1558 
1559   // needs to remove object in file system
1560   AttachListener::abort();
1561 
1562   // flush buffered output, finish log files
1563   ostream_abort();
1564 
1565   // Check for abort hook
1566   abort_hook_t abort_hook = Arguments::abort_hook();
1567   if (abort_hook != NULL) {
1568     abort_hook();
1569   }
1570 }
1571 
1572 // Note: os::abort() might be called very early during initialization, or
1573 // called from signal handler. Before adding something to os::abort(), make
1574 // sure it is async-safe and can handle partially initialized VM.
1575 void os::abort(bool dump_core) {
1576   os::shutdown();
1577   if (dump_core) {
1578 #ifndef PRODUCT
1579     fdStream out(defaultStream::output_fd());
1580     out.print_raw("Current thread is ");
1581     char buf[16];
1582     jio_snprintf(buf, sizeof(buf), INTX_FORMAT, os::current_thread_id());
1583     out.print_raw_cr(buf);
1584     out.print_raw_cr("Dumping core ...");
1585 #endif
1586     ::abort(); // dump core (for debugging)
1587   }
1588 
1589   ::exit(1);
1590 }
1591 
1592 // Die immediately, no exit hook, no abort hook, no cleanup.
1593 void os::die() {
1594   ::abort(); // dump core (for debugging)
1595 }
1596 
1597 // DLL functions
1598 
1599 const char* os::dll_file_extension() { return ".so"; }
1600 
1601 // This must be hard coded because it's the system's temporary
1602 // directory not the java application's temp directory, ala java.io.tmpdir.
1603 const char* os::get_temp_directory() { return "/tmp"; }
1604 
1605 static bool file_exists(const char* filename) {
1606   struct stat statbuf;
1607   if (filename == NULL || strlen(filename) == 0) {
1608     return false;
1609   }
1610   return os::stat(filename, &statbuf) == 0;
1611 }
1612 
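     // Build a platform-dependent library file name into 'buffer', e.g.
     // ("/usr/lib", "foo") -> "/usr/lib/libfoo.so".  If 'pname' is a
     // path-separator-separated search path, the first element in which the
     // resulting file exists is used.  Returns false if the buffer is too
     // small or no candidate file is found on a search path.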
1613 bool os::dll_build_name(char* buffer, size_t buflen,
1614                         const char* pname, const char* fname) {
1615   bool retval = false;
1616   const size_t pnamelen = pname ? strlen(pname) : 0;
1617 
1618   // Return error on buffer overflow.
1619   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1620     return retval;
1621   }
1622 
1623   if (pnamelen == 0) {
1624     snprintf(buffer, buflen, "lib%s.so", fname);
1625     retval = true;
1626   } else if (strchr(pname, *os::path_separator()) != NULL) {
1627     int n;
1628     char** pelements = split_path(pname, &n);
1629     if (pelements == NULL) {
1630       return false;
1631     }
1632     for (int i = 0 ; i < n ; i++) {
1633       // really shouldn't be NULL but what the heck, check can't hurt
1634       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1635         continue; // skip the empty path values
1636       }
1637       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1638       if (file_exists(buffer)) {
1639         retval = true;
1640         break;
1641       }
1642     }
1643     // release the storage
1644     for (int i = 0 ; i < n ; i++) {
1645       if (pelements[i] != NULL) {
1646         FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1647       }
1648     }
1649     if (pelements != NULL) {
1650       FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1651     }
1652   } else {
1653     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1654     retval = true;
1655   }
1656   return retval;
1657 }
1658 
1659 // check if addr is inside libjvm.so
1660 bool os::address_is_in_vm(address addr) {
1661   static address libjvm_base_addr;
1662   Dl_info dlinfo;
1663 
1664   if (libjvm_base_addr == NULL) {
1665     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1666       libjvm_base_addr = (address)dlinfo.dli_fbase;
1667     }
1668     assert(libjvm_base_addr != NULL, "Cannot obtain base address for libjvm");
1669   }
1670 
1671   if (dladdr((void *)addr, &dlinfo) != 0) {
1672     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1673   }
1674 
1675   return false;
1676 }
1677 
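     // dladdr1() (see dladdr(3C)) additionally returns the ELF symbol table
     // entry for an address, which provides the symbol size and thus lets us
     // check that the address really lies within the symbol.  It is resolved
     // dynamically because it may be missing on older Solaris releases;
     // dladdr1_func is set up in os::init().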
1678 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
1679 static dladdr1_func_type dladdr1_func = NULL;
1680 
1681 bool os::dll_address_to_function_name(address addr, char *buf,
1682                                       int buflen, int * offset) {
1683   // buf is not optional, but offset is optional
1684   assert(buf != NULL, "sanity check");
1685 
1686   Dl_info dlinfo;
1687 
1688   // dladdr1_func was initialized in os::init()
1689   if (dladdr1_func != NULL) {
1690     // yes, we have dladdr1
1691 
1692     // Support for dladdr1 is checked at runtime; it may be
1693     // available even if the vm is built on a machine that does
1694     // not have dladdr1 support.  Make sure there is a value for
1695     // RTLD_DL_SYMENT.
1696     #ifndef RTLD_DL_SYMENT
1697     #define RTLD_DL_SYMENT 1
1698     #endif
1699 #ifdef _LP64
1700     Elf64_Sym * info;
1701 #else
1702     Elf32_Sym * info;
1703 #endif
1704     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1705                      RTLD_DL_SYMENT) != 0) {
1706       // see if we have a matching symbol that covers our address
1707       if (dlinfo.dli_saddr != NULL &&
1708           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1709         if (dlinfo.dli_sname != NULL) {
1710           if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1711             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1712           }
1713           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1714           return true;
1715         }
1716       }
1717       // no matching symbol so try for just file info
1718       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1719         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1720                             buf, buflen, offset, dlinfo.dli_fname)) {
1721           return true;
1722         }
1723       }
1724     }
1725     buf[0] = '\0';
1726     if (offset != NULL) *offset  = -1;
1727     return false;
1728   }
1729 
1730   // no, only dladdr is available
1731   if (dladdr((void *)addr, &dlinfo) != 0) {
1732     // see if we have a matching symbol
1733     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1734       if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1735         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1736       }
1737       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1738       return true;
1739     }
1740     // no matching symbol so try for just file info
1741     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1742       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1743                           buf, buflen, offset, dlinfo.dli_fname)) {
1744         return true;
1745       }
1746     }
1747   }
1748   buf[0] = '\0';
1749   if (offset != NULL) *offset  = -1;
1750   return false;
1751 }
1752 
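     // Look up the shared library containing 'addr'.  On success the library
     // name is copied into 'buf' and, if 'offset' is non-NULL, the offset of
     // 'addr' from the library's base address is returned as well.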
1753 bool os::dll_address_to_library_name(address addr, char* buf,
1754                                      int buflen, int* offset) {
1755   // buf is not optional, but offset is optional
1756   assert(buf != NULL, "sanity check");
1757 
1758   Dl_info dlinfo;
1759 
1760   if (dladdr((void*)addr, &dlinfo) != 0) {
1761     if (dlinfo.dli_fname != NULL) {
1762       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1763     }
1764     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1765       *offset = addr - (address)dlinfo.dli_fbase;
1766     }
1767     return true;
1768   }
1769 
1770   buf[0] = '\0';
1771   if (offset) *offset = -1;
1772   return false;
1773 }
1774 
1775 // Prints the names and full paths of all opened dynamic libraries
1776 // for current process
1777 void os::print_dll_info(outputStream * st) {
1778   Dl_info dli;
1779   void *handle;
1780   Link_map *map = NULL;
1781
1782 
1783   st->print_cr("Dynamic libraries:"); st->flush();
1784 
1785   if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
1786       dli.dli_fname == NULL) {
1787     st->print_cr("Error: Cannot print dynamic libraries.");
1788     return;
1789   }
1790   handle = dlopen(dli.dli_fname, RTLD_LAZY);
1791   if (handle == NULL) {
1792     st->print_cr("Error: Cannot print dynamic libraries.");
1793     return;
1794   }
1795   dlinfo(handle, RTLD_DI_LINKMAP, &map);
1796   if (map == NULL) {
1797     st->print_cr("Error: Cannot print dynamic libraries.");
1798     dlclose(handle);
         return;
1799   }
1800 
1801   while (map->l_prev != NULL)
1802     map = map->l_prev;
1803 
1804   while (map != NULL) {
1805     st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
1806     map = map->l_next;
1807   }
1808 
1809   dlclose(handle);
1810 }
1811 
1812 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1813   Dl_info dli;
1814   // Sanity check?
1815   if (dladdr(CAST_FROM_FN_PTR(void *, os::get_loaded_modules_info), &dli) == 0 ||
1816       dli.dli_fname == NULL) {
1817     return 1;
1818   }
1819 
1820   void * handle = dlopen(dli.dli_fname, RTLD_LAZY);
1821   if (handle == NULL) {
1822     return 1;
1823   }
1824 
1825   Link_map *map = NULL;
1826   dlinfo(handle, RTLD_DI_LINKMAP, &map);
1827   if (map == NULL) {
1828     dlclose(handle);
1829     return 1;
1830   }
1831 
1832   while (map->l_prev != NULL) {
1833     map = map->l_prev;
1834   }
1835 
1836   while (map != NULL) {
1837     // Iterate through all map entries and call callback with fields of interest
1838     if(callback(map->l_name, (address)map->l_addr, (address)0, param)) {
1839       dlclose(handle);
1840       return 1;
1841     }
1842     map = map->l_next;
1843   }
1844 
1845   dlclose(handle);
1846   return 0;
1847 }
1848 
1849 // Loads the .dll/.so and, in case of error, checks whether the
1850 // .dll/.so was built for the same architecture as Hotspot is
1851 // running on.
1852 
1853 void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
1854 {
1855   void * result= ::dlopen(filename, RTLD_LAZY);
1856   if (result != NULL) {
1857     // Successful loading
1858     return result;
1859   }
1860 
1861   Elf32_Ehdr elf_head;
1862 
1863   // Read system error message into ebuf
1864   // It may or may not be overwritten below
1865   const char* error_report = ::dlerror();
       if (error_report == NULL) error_report = "dlerror returned no error information";
       ::strncpy(ebuf, error_report, ebuflen-1);
1866   ebuf[ebuflen-1]='\0';
1867   int diag_msg_max_length=ebuflen-strlen(ebuf);
1868   char* diag_msg_buf=ebuf+strlen(ebuf);
1869 
1870   if (diag_msg_max_length==0) {
1871     // No more space in ebuf for additional diagnostics message
1872     return NULL;
1873   }
1874 
1875 
1876   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1877 
1878   if (file_descriptor < 0) {
1879     // Can't open library, report dlerror() message
1880     return NULL;
1881   }
1882 
1883   bool failed_to_read_elf_head =
1884     (sizeof(elf_head) !=
1885         (::read(file_descriptor, &elf_head, sizeof(elf_head))));
1886 
1887   ::close(file_descriptor);
1888   if (failed_to_read_elf_head) {
1889     // file i/o error - report dlerror() msg
1890     return NULL;
1891   }
1892 
1893   typedef struct {
1894     Elf32_Half  code;         // Actual value as defined in elf.h
1895     Elf32_Half  compat_class; // Compatibility class of the arch, from the VM's point of view
1896     char        elf_class;    // 32 or 64 bit
1897     char        endianess;    // MSB or LSB
1898     char*       name;         // String representation
1899   } arch_t;
1900 
1901   static const arch_t arch_array[]={
1902     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1903     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1904     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1905     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1906     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1907     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1908     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1909     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1910     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1911     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1912   };
1913 
1914   #if  (defined IA32)
1915     static  Elf32_Half running_arch_code=EM_386;
1916   #elif   (defined AMD64)
1917     static  Elf32_Half running_arch_code=EM_X86_64;
1918   #elif  (defined IA64)
1919     static  Elf32_Half running_arch_code=EM_IA_64;
1920   #elif  (defined __sparc) && (defined _LP64)
1921     static  Elf32_Half running_arch_code=EM_SPARCV9;
1922   #elif  (defined __sparc) && (!defined _LP64)
1923     static  Elf32_Half running_arch_code=EM_SPARC;
1924   #elif  (defined __powerpc64__)
1925     static  Elf32_Half running_arch_code=EM_PPC64;
1926   #elif  (defined __powerpc__)
1927     static  Elf32_Half running_arch_code=EM_PPC;
1928   #elif (defined ARM)
1929     static  Elf32_Half running_arch_code=EM_ARM;
1930   #else
1931     #error Method os::dll_load requires that one of the following is defined:\
1932          IA32, AMD64, IA64, __sparc, __powerpc__, ARM
1933   #endif
1934 
1935   // Identify compatibility class for VM's architecture and library's architecture
1936   // Obtain string descriptions for architectures
1937 
1938   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1939   int running_arch_index=-1;
1940 
1941   for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
1942     if (running_arch_code == arch_array[i].code) {
1943       running_arch_index    = i;
1944     }
1945     if (lib_arch.code == arch_array[i].code) {
1946       lib_arch.compat_class = arch_array[i].compat_class;
1947       lib_arch.name         = arch_array[i].name;
1948     }
1949   }
1950 
1951   assert(running_arch_index != -1,
1952     "Didn't find running architecture code (running_arch_code) in arch_array");
1953   if (running_arch_index == -1) {
1954     // Even though running architecture detection failed
1955     // we may still continue with reporting dlerror() message
1956     return NULL;
1957   }
1958 
1959   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1960     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1961     return NULL;
1962   }
1963 
1964   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1965     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1966     return NULL;
1967   }
1968 
1969   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1970     if ( lib_arch.name!=NULL ) {
1971       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1972         " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1973         lib_arch.name, arch_array[running_arch_index].name);
1974     } else {
1975       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1976       " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1977         lib_arch.code,
1978         arch_array[running_arch_index].name);
1979     }
1980   }
1981 
1982   return NULL;
1983 }
1984 
1985 void* os::dll_lookup(void* handle, const char* name) {
1986   return dlsym(handle, name);
1987 }
1988 
1989 void* os::get_default_process_handle() {
1990   return (void*)::dlopen(NULL, RTLD_LAZY);
1991 }
1992 
1993 int os::stat(const char *path, struct stat *sbuf) {
1994   char pathbuf[MAX_PATH];
1995   if (strlen(path) > MAX_PATH - 1) {
1996     errno = ENAMETOOLONG;
1997     return -1;
1998   }
1999   os::native_path(strcpy(pathbuf, path));
2000   return ::stat(pathbuf, sbuf);
2001 }
2002 
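     // Copy the contents of an ASCII file to the given output stream.
     // Returns false if the file cannot be opened.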
2003 static bool _print_ascii_file(const char* filename, outputStream* st) {
2004   int fd = ::open(filename, O_RDONLY);
2005   if (fd == -1) {
2006      return false;
2007   }
2008 
2009   char buf[32];
2010   int bytes;
2011   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
2012     st->print_raw(buf, bytes);
2013   }
2014 
2015   ::close(fd);
2016 
2017   return true;
2018 }
2019 
2020 void os::print_os_info_brief(outputStream* st) {
2021   os::Solaris::print_distro_info(st);
2022 
2023   os::Posix::print_uname_info(st);
2024 
2025   os::Solaris::print_libversion_info(st);
2026 }
2027 
2028 void os::print_os_info(outputStream* st) {
2029   st->print("OS:");
2030 
2031   os::Solaris::print_distro_info(st);
2032 
2033   os::Posix::print_uname_info(st);
2034 
2035   os::Solaris::print_libversion_info(st);
2036 
2037   os::Posix::print_rlimit_info(st);
2038 
2039   os::Posix::print_load_average(st);
2040 }
2041 
2042 void os::Solaris::print_distro_info(outputStream* st) {
2043   if (!_print_ascii_file("/etc/release", st)) {
2044     st->print("Solaris");
2045   }
2046   st->cr();
2047 }
2048 
2049 void os::Solaris::print_libversion_info(outputStream* st) {
2050   if (os::Solaris::T2_libthread()) {
2051     st->print("  (T2 libthread)");
2052   }
2053   else {
2054     st->print("  (T1 libthread)");
2055   }
2056   st->cr();
2057 }
2058 
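     // Look through /proc/self/map for a mapping at virtual address 0 and
     // print a warning describing it.  Returns true if such a mapping was
     // found; used from print_memory_info() during fatal error reporting.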
2059 static bool check_addr0(outputStream* st) {
2060   jboolean status = false;
2061   int fd = ::open("/proc/self/map",O_RDONLY);
2062   if (fd >= 0) {
2063     prmap_t p;
2064     while (::read(fd, &p, sizeof(p)) > 0) {
2065       if (p.pr_vaddr == 0x0) {
2066         st->print("Warning: Address: " PTR_FORMAT ", Size: " SIZE_FORMAT "K, ", p.pr_vaddr, p.pr_size/1024);
2067         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
2068         st->print("Access:");
2069         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
2070         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
2071         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
2072         st->cr();
2073         status = true;
2074       }
2075     }
2076     ::close(fd);
2077   }
2078   return status;
2079 }
2080 
2081 void os::pd_print_cpu_info(outputStream* st) {
2082   // Nothing to do for now.
2083 }
2084 
2085 void os::print_memory_info(outputStream* st) {
2086   st->print("Memory:");
2087   st->print(" %dk page", os::vm_page_size()>>10);
2088   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
2089   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
2090   st->cr();
2091   if (VMError::fatal_error_in_progress()) {
2092      (void) check_addr0(st);
2093   }
2094 }
2095 
2096 void os::print_siginfo(outputStream* st, void* siginfo) {
2097   const siginfo_t* si = (const siginfo_t*)siginfo;
2098 
2099   os::Posix::print_siginfo_brief(st, si);
2100 
2101   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2102       UseSharedSpaces) {
2103     FileMapInfo* mapinfo = FileMapInfo::current_info();
2104     if (mapinfo->is_in_shared_space(si->si_addr)) {
2105       st->print("\n\nError accessing class data sharing archive."   \
2106                 " Mapped file inaccessible during execution, "      \
2107                 " possible disk/network problem.");
2108     }
2109   }
2110   st->cr();
2111 }
2112 
2113 // Moved up from the signal handling code below, because we need them
2114 // here for diagnostic prints.
2115 #define OLDMAXSIGNUM 32
2116 static int Maxsignum = 0;
2117 static int *ourSigFlags = NULL;
2118 
2119 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2120 
2121 int os::Solaris::get_our_sigflags(int sig) {
2122   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2123   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2124   return ourSigFlags[sig];
2125 }
2126 
2127 void os::Solaris::set_our_sigflags(int sig, int flags) {
2128   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2129   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2130   ourSigFlags[sig] = flags;
2131 }
2132 
2133 
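     // Format a signal handler address for display: "<library>+0x<offset>"
     // when the containing library can be determined, otherwise the raw
     // handler address.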
2134 static const char* get_signal_handler_name(address handler,
2135                                            char* buf, int buflen) {
2136   int offset;
2137   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2138   if (found) {
2139     // skip directory names
2140     const char *p1, *p2;
2141     p1 = buf;
2142     size_t len = strlen(os::file_separator());
2143     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2144     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2145   } else {
2146     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2147   }
2148   return buf;
2149 }
2150 
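     // Print the current disposition of a single signal: the installed
     // handler, sa_mask and sa_flags, with a warning if the flags of our own
     // handler have been changed (e.g. by code not using the jsig library).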
2151 static void print_signal_handler(outputStream* st, int sig,
2152                                   char* buf, size_t buflen) {
2153   struct sigaction sa;
2154 
2155   sigaction(sig, NULL, &sa);
2156 
2157   st->print("%s: ", os::exception_name(sig, buf, buflen));
2158 
2159   address handler = (sa.sa_flags & SA_SIGINFO)
2160                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2161                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
2162 
2163   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2164     st->print("SIG_DFL");
2165   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2166     st->print("SIG_IGN");
2167   } else {
2168     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2169   }
2170 
2171   st->print(", sa_mask[0]=");
2172   os::Posix::print_signal_set_short(st, &sa.sa_mask);
2173 
2174   address rh = VMError::get_resetted_sighandler(sig);
2175   // Perhaps the handler was reset by VMError?
2176   if (rh != NULL) {
2177     handler = rh;
2178     sa.sa_flags = VMError::get_resetted_sigflags(sig);
2179   }
2180 
2181   st->print(", sa_flags=");
2182   os::Posix::print_sa_flags(st, sa.sa_flags);
2183 
2184   // Check: is it our handler?
2185   if (handler == CAST_FROM_FN_PTR(address, signalHandler) ||
2186       handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
2187     // It is our signal handler
2188     // check for flags
2189     if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
2190       st->print(
2191         ", flags were changed from " PTR32_FORMAT ", consider using jsig library",
2192         os::Solaris::get_our_sigflags(sig));
2193     }
2194   }
2195   st->cr();
2196 }
2197 
2198 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2199   st->print_cr("Signal Handlers:");
2200   print_signal_handler(st, SIGSEGV, buf, buflen);
2201   print_signal_handler(st, SIGBUS , buf, buflen);
2202   print_signal_handler(st, SIGFPE , buf, buflen);
2203   print_signal_handler(st, SIGPIPE, buf, buflen);
2204   print_signal_handler(st, SIGXFSZ, buf, buflen);
2205   print_signal_handler(st, SIGILL , buf, buflen);
2206   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2207   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2208   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2209   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2210   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2211   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2212   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2213   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2214 }
2215 
2216 static char saved_jvm_path[MAXPATHLEN] = { 0 };
2217 
2218 // Find the full path to the current module, libjvm.so
2219 void os::jvm_path(char *buf, jint buflen) {
2220   // Error checking.
2221   if (buflen < MAXPATHLEN) {
2222     assert(false, "must use a large-enough buffer");
2223     buf[0] = '\0';
2224     return;
2225   }
2226   // Lazy resolve the path to current module.
2227   if (saved_jvm_path[0] != 0) {
2228     strcpy(buf, saved_jvm_path);
2229     return;
2230   }
2231 
2232   Dl_info dlinfo;
2233   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2234   assert(ret != 0, "cannot locate libjvm");
2235   if (ret != 0 && dlinfo.dli_fname != NULL) {
2236     realpath((char *)dlinfo.dli_fname, buf);
2237   } else {
2238     buf[0] = '\0';
2239     return;
2240   }
2241 
2242   if (Arguments::created_by_gamma_launcher()) {
2243     // Support for the gamma launcher.  Typical value for buf is
2244     // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
2245     // the right place in the string, then assume we are installed in a JDK and
2246     // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
2247     // up the path so it looks like libjvm.so is installed there (append a
2248     // fake suffix hotspot/libjvm.so).
2249     const char *p = buf + strlen(buf) - 1;
2250     for (int count = 0; p > buf && count < 5; ++count) {
2251       for (--p; p > buf && *p != '/'; --p)
2252         /* empty */ ;
2253     }
2254 
2255     if (strncmp(p, "/jre/lib/", 9) != 0) {
2256       // Look for JAVA_HOME in the environment.
2257       char* java_home_var = ::getenv("JAVA_HOME");
2258       if (java_home_var != NULL && java_home_var[0] != 0) {
2259         char cpu_arch[12];
2260         char* jrelib_p;
2261         int   len;
2262         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2263 #ifdef _LP64
2264         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2265         if (strcmp(cpu_arch, "sparc") == 0) {
2266           strcat(cpu_arch, "v9");
2267         } else if (strcmp(cpu_arch, "i386") == 0) {
2268           strcpy(cpu_arch, "amd64");
2269         }
2270 #endif
2271         // Check the current module name "libjvm.so".
2272         p = strrchr(buf, '/');
2273         assert(strstr(p, "/libjvm") == p, "invalid library name");
2274 
2275         realpath(java_home_var, buf);
2276         // determine if this is a legacy image or modules image
2277         // modules image doesn't have "jre" subdirectory
2278         len = strlen(buf);
2279         assert(len < buflen, "Ran out of buffer space");
2280         jrelib_p = buf + len;
2281         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2282         if (0 != access(buf, F_OK)) {
2283           snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2284         }
2285 
2286         if (0 == access(buf, F_OK)) {
2287           // Use current module name "libjvm.so"
2288           len = strlen(buf);
2289           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2290         } else {
2291           // Go back to path of .so
2292           realpath((char *)dlinfo.dli_fname, buf);
2293         }
2294       }
2295     }
2296   }
2297 
2298   strncpy(saved_jvm_path, buf, MAXPATHLEN);
2299 }
2300 
2301 
2302 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2303   // no prefix required, not even "_"
2304 }
2305 
2306 
2307 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2308   // no suffix required
2309 }
2310 
2311 // This method is a copy of JDK's sysGetLastErrorString
2312 // from src/solaris/hpi/src/system_md.c
2313 
2314 size_t os::lasterror(char *buf, size_t len) {
2315 
2316   if (errno == 0)  return 0;
2317 
2318   const char *s = ::strerror(errno);
2319   size_t n = ::strlen(s);
2320   if (n >= len) {
2321     n = len - 1;
2322   }
2323   ::strncpy(buf, s, n);
2324   buf[n] = '\0';
2325   return n;
2326 }
2327 
2328 
2329 // sun.misc.Signal
2330 
2331 extern "C" {
2332   static void UserHandler(int sig, void *siginfo, void *context) {
2333     // If Ctrl-C is pressed during error reporting, it is likely because the
2334     // error handler failed to abort. Let the VM die immediately.
2335     if (sig == SIGINT && is_error_reported()) {
2336        os::die();
2337     }
2338 
2339     os::signal_notify(sig);
2340     // We do not need to reinstate the signal handler each time...
2341   }
2342 }
2343 
2344 void* os::user_handler() {
2345   return CAST_FROM_FN_PTR(void*, UserHandler);
2346 }
2347 
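     // Simple counting semaphore built on the Solaris sema_t primitives
     // (see sema_init(3C)).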
2348 class Semaphore : public StackObj {
2349   public:
2350     Semaphore();
2351     ~Semaphore();
2352     void signal();
2353     void wait();
2354     bool trywait();
2355     bool timedwait(unsigned int sec, int nsec);
2356   private:
2357     sema_t _semaphore;
2358 };
2359 
2360 
2361 Semaphore::Semaphore() {
2362   sema_init(&_semaphore, 0, NULL, NULL);
2363 }
2364 
2365 Semaphore::~Semaphore() {
2366   sema_destroy(&_semaphore);
2367 }
2368 
2369 void Semaphore::signal() {
2370   sema_post(&_semaphore);
2371 }
2372 
2373 void Semaphore::wait() {
2374   sema_wait(&_semaphore);
2375 }
2376 
2377 bool Semaphore::trywait() {
2378   return sema_trywait(&_semaphore) == 0;
2379 }
2380 
2381 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2382   struct timespec ts;
2383   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2384 
2385   while (1) {
2386     int result = sema_timedwait(&_semaphore, &ts);
2387     if (result == 0) {
2388       return true;
2389     } else if (errno == EINTR) {
2390       continue;
2391     } else if (errno == ETIME) {
2392       return false;
2393     } else {
2394       return false;
2395     }
2396   }
2397 }
2398 
2399 extern "C" {
2400   typedef void (*sa_handler_t)(int);
2401   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2402 }
2403 
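     // Install 'handler' as the disposition for 'signal_number' via
     // sigaction().  Returns the previous handler, or (void*)-1 if
     // registration fails.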
2404 void* os::signal(int signal_number, void* handler) {
2405   struct sigaction sigAct, oldSigAct;
2406   sigfillset(&(sigAct.sa_mask));
2407   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2408   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2409 
2410   if (sigaction(signal_number, &sigAct, &oldSigAct))
2411     // -1 means registration failed
2412     return (void *)-1;
2413 
2414   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2415 }
2416 
2417 void os::signal_raise(int signal_number) {
2418   raise(signal_number);
2419 }
2420 
2421 /*
2422  * The following code is moved from os.cpp for making this
2423  * code platform specific, which it is by its very nature.
2424  */
2425 
2426 // a counter for each possible signal value
2427 static int Sigexit = 0;
2428 static int Maxlibjsigsigs;
2429 static jint *pending_signals = NULL;
2430 static int *preinstalled_sigs = NULL;
2431 static struct sigaction *chainedsigactions = NULL;
2432 static sema_t sig_sem;
2433 typedef int (*version_getting_t)();
2434 version_getting_t os::Solaris::get_libjsig_version = NULL;
2435 static int libjsigversion = 0;
2436 
2437 int os::sigexitnum_pd() {
2438   assert(Sigexit > 0, "signal memory not yet initialized");
2439   return Sigexit;
2440 }
2441 
2442 void os::Solaris::init_signal_mem() {
2443   // Initialize signal structures
2444   Maxsignum = SIGRTMAX;
2445   Sigexit = Maxsignum+1;
2446   assert(Maxsignum >0, "Unable to obtain max signal number");
2447 
2448   Maxlibjsigsigs = Maxsignum;
2449 
2450   // pending_signals has one int per signal
2451   // The additional signal is for SIGEXIT - exit signal to signal_thread
2452   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2453   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2454 
2455   if (UseSignalChaining) {
2456      chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
2457        * (Maxsignum + 1), mtInternal);
2458      memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2459      preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2460      memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2461   }
2462   ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ), mtInternal);
2463   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2464 }
2465 
2466 void os::signal_init_pd() {
2467   int ret;
2468 
2469   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2470   assert(ret == 0, "sema_init() failed");
2471 }
2472 
2473 void os::signal_notify(int signal_number) {
2474   int ret;
2475 
2476   Atomic::inc(&pending_signals[signal_number]);
2477   ret = ::sema_post(&sig_sem);
2478   assert(ret == 0, "sema_post() failed");
2479 }
2480 
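     // Return the number of a pending signal and decrement its pending count.
     // If wait_for_signal is true, block on sig_sem until a signal is posted
     // (cooperating with external thread suspension); otherwise return -1
     // when nothing is pending.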
2481 static int check_pending_signals(bool wait_for_signal) {
2482   int ret;
2483   while (true) {
2484     for (int i = 0; i < Sigexit + 1; i++) {
2485       jint n = pending_signals[i];
2486       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2487         return i;
2488       }
2489     }
2490     if (!wait_for_signal) {
2491       return -1;
2492     }
2493     JavaThread *thread = JavaThread::current();
2494     ThreadBlockInVM tbivm(thread);
2495 
2496     bool threadIsSuspended;
2497     do {
2498       thread->set_suspend_equivalent();
2499       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2500       while((ret = ::sema_wait(&sig_sem)) == EINTR)
2501           ;
2502       assert(ret == 0, "sema_wait() failed");
2503 
2504       // were we externally suspended while we were waiting?
2505       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2506       if (threadIsSuspended) {
2507         //
2508         // The semaphore has been incremented, but while we were waiting
2509         // another thread suspended us. We don't want to continue running
2510         // while suspended because that would surprise the thread that
2511         // suspended us.
2512         //
2513         ret = ::sema_post(&sig_sem);
2514         assert(ret == 0, "sema_post() failed");
2515 
2516         thread->java_suspend_self();
2517       }
2518     } while (threadIsSuspended);
2519   }
2520 }
2521 
2522 int os::signal_lookup() {
2523   return check_pending_signals(false);
2524 }
2525 
2526 int os::signal_wait() {
2527   return check_pending_signals(true);
2528 }
2529 
2530 ////////////////////////////////////////////////////////////////////////////////
2531 // Virtual Memory
2532 
2533 static int page_size = -1;
2534 
2535 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2536 // clear this var if support is not available.
2537 static bool has_map_align = true;
2538 
2539 int os::vm_page_size() {
2540   assert(page_size != -1, "must call os::init");
2541   return page_size;
2542 }
2543 
2544 // Solaris allocates memory by pages.
2545 int os::vm_allocation_granularity() {
2546   assert(page_size != -1, "must call os::init");
2547   return page_size;
2548 }
2549 
2550 static bool recoverable_mmap_error(int err) {
2551   // See if the error is one we can let the caller handle. This
2552   // list of errno values comes from the Solaris mmap(2) man page.
2553   switch (err) {
2554   case EBADF:
2555   case EINVAL:
2556   case ENOTSUP:
2557     // let the caller deal with these errors
2558     return true;
2559 
2560   default:
2561     // Any remaining errors on this OS can cause our reserved mapping
2562     // to be lost. That can cause confusion where different data
2563     // structures think they have the same memory mapped. The worst
2564     // scenario is if both the VM and a library think they have the
2565     // same memory mapped.
2566     return false;
2567   }
2568 }
2569 
2570 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2571                                     int err) {
2572   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2573           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2574           strerror(err), err);
2575 }
2576 
2577 static void warn_fail_commit_memory(char* addr, size_t bytes,
2578                                     size_t alignment_hint, bool exec,
2579                                     int err) {
2580   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2581           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2582           alignment_hint, exec, strerror(err), err);
2583 }
2584 
2585 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2586   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2587   size_t size = bytes;
2588   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2589   if (res != NULL) {
2590     if (UseNUMAInterleaving) {
2591       numa_make_global(addr, bytes);
2592     }
2593     return 0;
2594   }
2595 
2596   int err = errno;  // save errno from mmap() call in mmap_chunk()
2597 
2598   if (!recoverable_mmap_error(err)) {
2599     warn_fail_commit_memory(addr, bytes, exec, err);
2600     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2601   }
2602 
2603   return err;
2604 }
2605 
2606 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2607   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2608 }
2609 
2610 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2611                                   const char* mesg) {
2612   assert(mesg != NULL, "mesg must be specified");
2613   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2614   if (err != 0) {
2615     // the caller wants all commit errors to exit with the specified mesg:
2616     warn_fail_commit_memory(addr, bytes, exec, err);
2617     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2618   }
2619 }
2620 
2621 size_t os::Solaris::page_size_for_alignment(size_t alignment) {
2622   assert(is_size_aligned(alignment, (size_t) vm_page_size()),
2623          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
2624                  alignment, (size_t) vm_page_size()));
2625 
2626   for (int i = 0; _page_sizes[i] != 0; i++) {
2627     if (is_size_aligned(alignment, _page_sizes[i])) {
2628       return _page_sizes[i];
2629     }
2630   }
2631 
2632   return (size_t) vm_page_size();
2633 }
2634 
2635 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2636                                     size_t alignment_hint, bool exec) {
2637   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2638   if (err == 0 && UseLargePages && alignment_hint > 0) {
2639     assert(is_size_aligned(bytes, alignment_hint),
2640            err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint));
2641 
2642     // The syscall memcntl requires an exact page size (see man memcntl for details).
2643     size_t page_size = page_size_for_alignment(alignment_hint);
2644     if (page_size > (size_t) vm_page_size()) {
2645       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2646     }
2647   }
2648   return err;
2649 }
2650 
2651 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2652                           bool exec) {
2653   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2654 }
2655 
2656 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2657                                   size_t alignment_hint, bool exec,
2658                                   const char* mesg) {
2659   assert(mesg != NULL, "mesg must be specified");
2660   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2661   if (err != 0) {
2662     // the caller wants all commit errors to exit with the specified mesg:
2663     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2664     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2665   }
2666 }
2667 
2668 // Uncommit the pages in a specified region.
2669 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2670   if (madvise(addr, bytes, MADV_FREE) < 0) {
2671     debug_only(warning("MADV_FREE failed."));
2672     return;
2673   }
2674 }
2675 
2676 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2677   return os::commit_memory(addr, size, !ExecMem);
2678 }
2679 
2680 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2681   return os::uncommit_memory(addr, size);
2682 }
2683 
2684 // Change the page size in a given range.
2685 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2686   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2687   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2688   if (UseLargePages) {
2689     Solaris::setup_large_pages(addr, bytes, alignment_hint);
2690   }
2691 }
2692 
2693 // Tell the OS to make the range local to the first-touching LWP
2694 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2695   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2696   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2697     debug_only(warning("MADV_ACCESS_LWP failed."));
2698   }
2699 }
2700 
2701 // Tell the OS that this range would be accessed from different LWPs.
2702 void os::numa_make_global(char *addr, size_t bytes) {
2703   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2704   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2705     debug_only(warning("MADV_ACCESS_MANY failed."));
2706   }
2707 }
2708 
2709 // Get the number of the locality groups.
2710 size_t os::numa_get_groups_num() {
2711   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2712   return n != -1 ? n : 1;
2713 }
2714 
2715 // Get a list of leaf locality groups. A leaf lgroup is a group that
2716 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
2717 // board. An LWP is assigned to one of these groups upon creation.
2718 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2719    if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2720      ids[0] = 0;
2721      return 1;
2722    }
2723    int result_size = 0, top = 1, bottom = 0, cur = 0;
2724    for (int k = 0; k < size; k++) {
2725      int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2726                                     (Solaris::lgrp_id_t*)&ids[top], size - top);
2727      if (r == -1) {
2728        ids[0] = 0;
2729        return 1;
2730      }
2731      if (!r) {
2732        // That's a leaf node.
2733        assert (bottom <= cur, "Sanity check");
2734        // Check if the node has memory
2735        if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2736                                    NULL, 0, LGRP_RSRC_MEM) > 0) {
2737          ids[bottom++] = ids[cur];
2738        }
2739      }
2740      top += r;
2741      cur++;
2742    }
2743    if (bottom == 0) {
2744      // Handle the situation when the OS reports no memory available.
2745      // Assume UMA architecture.
2746      ids[0] = 0;
2747      return 1;
2748    }
2749    return bottom;
2750 }
2751 
2752 // Detect the topology change. Typically happens during CPU plugging-unplugging.
2753 bool os::numa_topology_changed() {
2754   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2755   if (is_stale != -1 && is_stale) {
2756     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2757     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2758     assert(c != 0, "Failure to initialize LGRP API");
2759     Solaris::set_lgrp_cookie(c);
2760     return true;
2761   }
2762   return false;
2763 }
2764 
2765 // Get the group id of the current LWP.
2766 int os::numa_get_group_id() {
2767   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2768   if (lgrp_id == -1) {
2769     return 0;
2770   }
2771   const int size = os::numa_get_groups_num();
2772   int *ids = (int*)alloca(size * sizeof(int));
2773 
2774   // Get the ids of all lgroups with memory; r is the count.
2775   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2776                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2777   if (r <= 0) {
2778     return 0;
2779   }
2780   return ids[os::random() % r];
2781 }
2782 
2783 // Request information about the page.
2784 bool os::get_page_info(char *start, page_info* info) {
2785   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2786   uint64_t addr = (uintptr_t)start;
2787   uint64_t outdata[2];
2788   uint_t validity = 0;
2789 
2790   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2791     return false;
2792   }
2793 
2794   info->size = 0;
2795   info->lgrp_id = -1;
2796 
2797   if ((validity & 1) != 0) {
2798     if ((validity & 2) != 0) {
2799       info->lgrp_id = outdata[0];
2800     }
2801     if ((validity & 4) != 0) {
2802       info->size = outdata[1];
2803     }
2804     return true;
2805   }
2806   return false;
2807 }
2808 
2809 // Scan the pages from start to end until a page different than
2810 // the one described in the info parameter is encountered.
2811 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2812   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2813   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2814   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2815   uint_t validity[MAX_MEMINFO_CNT];
2816 
2817   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2818   uint64_t p = (uint64_t)start;
2819   while (p < (uint64_t)end) {
2820     addrs[0] = p;
2821     size_t addrs_count = 1;
2822     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2823       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2824       addrs_count++;
2825     }
2826 
2827     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2828       return NULL;
2829     }
2830 
2831     size_t i = 0;
2832     for (; i < addrs_count; i++) {
2833       if ((validity[i] & 1) != 0) {
2834         if ((validity[i] & 4) != 0) {
2835           if (outdata[types * i + 1] != page_expected->size) {
2836             break;
2837           }
2838         } else
2839           if (page_expected->size != 0) {
2840             break;
2841           }
2842 
2843         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2844           if (outdata[types * i] != page_expected->lgrp_id) {
2845             break;
2846           }
2847         }
2848       } else {
2849         return NULL;
2850       }
2851     }
2852 
2853     if (i < addrs_count) {
2854       if ((validity[i] & 2) != 0) {
2855         page_found->lgrp_id = outdata[types * i];
2856       } else {
2857         page_found->lgrp_id = -1;
2858       }
2859       if ((validity[i] & 4) != 0) {
2860         page_found->size = outdata[types * i + 1];
2861       } else {
2862         page_found->size = 0;
2863       }
2864       return (char*)addrs[i];
2865     }
2866 
2867     p = addrs[addrs_count - 1] + page_size;
2868   }
2869   return end;
2870 }
2871 
2872 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2873   size_t size = bytes;
2874   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2875   // uncommitted page. Otherwise, the read/write might succeed if we
2876   // have enough swap space to back the physical page.
2877   return
2878     NULL != Solaris::mmap_chunk(addr, size,
2879                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2880                                 PROT_NONE);
2881 }
2882 
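     // mmap a region of 'size' bytes at 'addr' with the given flags and
     // protection, backed by the os::Solaris::_dev_zero_fd file descriptor.
     // Returns NULL (rather than MAP_FAILED) on failure.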
2883 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2884   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2885 
2886   if (b == MAP_FAILED) {
2887     return NULL;
2888   }
2889   return b;
2890 }
2891 
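     // Reserve (but do not commit) an anonymous MAP_NORESERVE mapping.  With
     // 'fixed' the mapping must land exactly at requested_addr; otherwise
     // MAP_ALIGN is used as a hint when the kernel supports it and
     // alignment_hint exceeds the page size.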
2892 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
2893   char* addr = requested_addr;
2894   int flags = MAP_PRIVATE | MAP_NORESERVE;
2895 
2896   assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
2897 
2898   if (fixed) {
2899     flags |= MAP_FIXED;
2900   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
2901     flags |= MAP_ALIGN;
2902     addr = (char*) alignment_hint;
2903   }
2904 
2905   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2906   // uncommitted page. Otherwise, the read/write might succeed if we
2907   // have enough swap space to back the physical page.
2908   return mmap_chunk(addr, bytes, flags, PROT_NONE);
2909 }
2910 
2911 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2912   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
2913 
2914   guarantee(requested_addr == NULL || requested_addr == addr,
2915             "OS failed to return requested mmap address.");
2916   return addr;
2917 }
2918 
2919 // Reserve memory at an arbitrary address, only if that area is
2920 // available (and not reserved for something else).
2921 
2922 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2923   const int max_tries = 10;
2924   char* base[max_tries];
2925   size_t size[max_tries];
2926 
2927   // Solaris adds a gap between mmap'ed regions.  The size of the gap
2928   // is dependent on the requested size and the MMU.  Our initial gap
2929   // value here is just a guess and will be corrected later.
2930   bool had_top_overlap = false;
2931   bool have_adjusted_gap = false;
2932   size_t gap = 0x400000;
2933 
2934   // Assert only that the size is a multiple of the page size, since
2935   // that's all that mmap requires, and since that's all we really know
2936   // about at this low abstraction level.  If we need higher alignment,
2937   // we can either pass an alignment to this method or verify alignment
2938   // in one of the methods further up the call chain.  See bug 5044738.
2939   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2940 
2941   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2942   // Give it a try, if the kernel honors the hint we can return immediately.
2943   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2944 
2945   volatile int err = errno;
2946   if (addr == requested_addr) {
2947     return addr;
2948   } else if (addr != NULL) {
2949     pd_unmap_memory(addr, bytes);
2950   }
2951 
2952   if (PrintMiscellaneous && Verbose) {
2953     char buf[256];
2954     buf[0] = '\0';
2955     if (addr == NULL) {
2956       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
2957     }
2958     warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2959             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2960             "%s", bytes, requested_addr, addr, buf);
2961   }
2962 
2963   // Address hint method didn't work.  Fall back to the old method.
2964   // In theory, once SNV becomes our oldest supported platform, this
2965   // code will no longer be needed.
2966   //
2967   // Repeatedly allocate blocks until the block is allocated at the
2968   // right spot. Give up after max_tries.
2969   int i;
2970   for (i = 0; i < max_tries; ++i) {
2971     base[i] = reserve_memory(bytes);
2972 
2973     if (base[i] != NULL) {
2974       // Is this the block we wanted?
2975       if (base[i] == requested_addr) {
2976         size[i] = bytes;
2977         break;
2978       }
2979 
2980       // check that the gap value is right
2981       if (had_top_overlap && !have_adjusted_gap) {
2982         size_t actual_gap = base[i-1] - base[i] - bytes;
2983         if (gap != actual_gap) {
2984           // adjust the gap value and retry the last 2 allocations
2985           assert(i > 0, "gap adjustment code problem");
2986           have_adjusted_gap = true;  // adjust the gap only once, just in case
2987           gap = actual_gap;
2988           if (PrintMiscellaneous && Verbose) {
2989             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2990           }
2991           unmap_memory(base[i], bytes);
2992           unmap_memory(base[i-1], size[i-1]);
2993           i-=2;
2994           continue;
2995         }
2996       }
2997 
2998       // Does this overlap the block we wanted? Give back the overlapped
2999       // parts and try again.
3000       //
3001       // There is still a bug in this code: if top_overlap == bytes,
3002       // the overlap is offset from requested region by the value of gap.
3003       // In this case giving back the overlapped part will not work,
3004       // because we'll give back the entire block at base[i] and
3005       // therefore the subsequent allocation will not generate a new gap.
3006       // This could be fixed with a new algorithm that used larger
3007       // or variable size chunks to find the requested region -
3008       // but such a change would introduce additional complications.
3009       // It's rare enough that the planets align for this bug,
3010       // so we'll just wait for a fix for 6204603/5003415 which
3011       // will provide a mmap flag to allow us to avoid this business.
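           //
           // Reading of the trimming below: after unmapping top_overlap bytes,
           // base[i] starts at requested_addr + bytes + gap, so (assuming the
           // kernel keeps handing out blocks at decreasing addresses with the
           // same gap) the next reserve_memory(bytes) should land exactly at
           // requested_addr.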
3012 
3013       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
3014       if (top_overlap >= 0 && top_overlap < bytes) {
3015         had_top_overlap = true;
3016         unmap_memory(base[i], top_overlap);
3017         base[i] += top_overlap;
3018         size[i] = bytes - top_overlap;
3019       } else {
3020         size_t bottom_overlap = base[i] + bytes - requested_addr;
3021         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
3022           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
3023             warning("attempt_reserve_memory_at: possible alignment bug");
3024           }
3025           unmap_memory(requested_addr, bottom_overlap);
3026           size[i] = bytes - bottom_overlap;
3027         } else {
3028           size[i] = bytes;
3029         }
3030       }
3031     }
3032   }
3033 
3034   // Give back the unused reserved pieces.
3035 
3036   for (int j = 0; j < i; ++j) {
3037     if (base[j] != NULL) {
3038       unmap_memory(base[j], size[j]);
3039     }
3040   }
3041 
3042   return (i < max_tries) ? requested_addr : NULL;
3043 }
3044 
3045 bool os::pd_release_memory(char* addr, size_t bytes) {
3046   size_t size = bytes;
3047   return munmap(addr, size) == 0;
3048 }
3049 
3050 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
3051   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
3052          "addr must be page aligned");
3053   int retVal = mprotect(addr, bytes, prot);
3054   return retVal == 0;
3055 }
3056 
3057 // Protect memory (Used to pass readonly pages through
3058 // JNI GetArray<type>Elements with empty arrays.)
3059 // Also, used for serialization page and for compressed oops null pointer
3060 // checking.
3061 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3062                         bool is_committed) {
3063   unsigned int p = 0;
3064   switch (prot) {
3065   case MEM_PROT_NONE: p = PROT_NONE; break;
3066   case MEM_PROT_READ: p = PROT_READ; break;
3067   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
3068   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3069   default:
3070     ShouldNotReachHere();
3071   }
3072   // is_committed is unused.
3073   return solaris_mprotect(addr, bytes, p);
3074 }
3075 
3076 // guard_memory and unguard_memory only happen within stack guard pages.
3077 // Since ISM pertains only to the heap, guard and unguard memory should not
3078 // happen with an ISM region.
3079 bool os::guard_memory(char* addr, size_t bytes) {
3080   return solaris_mprotect(addr, bytes, PROT_NONE);
3081 }
3082 
3083 bool os::unguard_memory(char* addr, size_t bytes) {
3084   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
3085 }
3086 
3087 // Large page support
3088 static size_t _large_page_size = 0;
3089 
3090 // Insertion sort for small arrays (descending order).
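     // O(len^2), which is fine here since len is at most page_sizes_max.
     // E.g. {8K, 4M, 64K} sorts to {4M, 64K, 8K}.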
3091 static void insertion_sort_descending(size_t* array, int len) {
3092   for (int i = 0; i < len; i++) {
3093     size_t val = array[i];
3094     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
3095       size_t tmp = array[key];
3096       array[key] = array[key - 1];
3097       array[key - 1] = tmp;
3098     }
3099   }
3100 }
3101 
3102 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
3103   const unsigned int usable_count = VM_Version::page_size_count();
3104   if (usable_count == 1) {
3105     return false;
3106   }
3107 
3108   // Find the right getpagesizes interface.  When solaris 11 is the minimum
3109   // build platform, getpagesizes() (without the '2') can be called directly.
3110   typedef int (*gps_t)(size_t[], int);
3111   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3112   if (gps_func == NULL) {
3113     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3114     if (gps_func == NULL) {
3115       if (warn) {
3116         warning("MPSS is not supported by the operating system.");
3117       }
3118       return false;
3119     }
3120   }
3121 
3122   // Fill the array of page sizes.
3123   int n = (*gps_func)(_page_sizes, page_sizes_max);
3124   assert(n > 0, "Solaris bug?");
3125 
3126   if (n == page_sizes_max) {
3127     // Add a sentinel value (necessary only if the array was completely filled
3128     // since it is static (zeroed at initialization)).
3129     _page_sizes[--n] = 0;
3130     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3131   }
3132   assert(_page_sizes[n] == 0, "missing sentinel");
3133   trace_page_sizes("available page sizes", _page_sizes, n);
3134 
3135   if (n == 1) return false;     // Only one page size available.
3136 
3137   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3138   // select up to usable_count elements.  First sort the array, find the first
3139   // acceptable value, then copy the usable sizes to the top of the array and
3140   // trim the rest.  Make sure to include the default page size :-).
3141   //
3142   // A better policy could get rid of the 4M limit by taking the sizes of the
3143   // important VM memory regions (java heap and possibly the code cache) into
3144   // account.
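       //
       // Worked example (hypothetical sizes, assuming usable_count >= 3 and an
       // 8K default page): if getpagesizes() returned {256M, 4M, 64K}, the sort
       // leaves the order unchanged, the 4M limit skips 256M, and the array
       // becomes {4M, 64K, 8K, 0} with *page_size = 4M.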
3145   insertion_sort_descending(_page_sizes, n);
3146   const size_t size_limit =
3147     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3148   int beg;
3149   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
3150   const int end = MIN2((int)usable_count, n) - 1;
3151   for (int cur = 0; cur < end; ++cur, ++beg) {
3152     _page_sizes[cur] = _page_sizes[beg];
3153   }
3154   _page_sizes[end] = vm_page_size();
3155   _page_sizes[end + 1] = 0;
3156 
3157   if (_page_sizes[end] > _page_sizes[end - 1]) {
3158     // Default page size is not the smallest; sort again.
3159     insertion_sort_descending(_page_sizes, end + 1);
3160   }
3161   *page_size = _page_sizes[0];
3162 
3163   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3164   return true;
3165 }
3166 
3167 void os::large_page_init() {
3168   if (UseLargePages) {
3169     // print a warning if any large page related flag is specified on command line
3170     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
3171                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3172 
3173     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3174   }
3175 }
3176 
3177 bool os::Solaris::is_valid_page_size(size_t bytes) {
3178   for (int i = 0; _page_sizes[i] != 0; i++) {
3179     if (_page_sizes[i] == bytes) {
3180       return true;
3181     }
3182   }
3183   return false;
3184 }
3185 
3186 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3187   assert(is_valid_page_size(align), err_msg(SIZE_FORMAT " is not a valid page size", align));
3188   assert(is_ptr_aligned((void*) start, align),
3189          err_msg(PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align));
3190   assert(is_size_aligned(bytes, align),
3191          err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align));
3192 
3193   // Signal to OS that we want large pages for addresses
3194   // from addr, addr + bytes
3195   struct memcntl_mha mpss_struct;
3196   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3197   mpss_struct.mha_pagesize = align;
3198   mpss_struct.mha_flags = 0;
3199   // Upon successful completion, memcntl() returns 0
3200   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3201     debug_only(warning("Attempt to use MPSS failed."));
3202     return false;
3203   }
3204   return true;
3205 }
3206 
3207 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3208   fatal("os::reserve_memory_special should not be called on Solaris.");
3209   return NULL;
3210 }
3211 
3212 bool os::release_memory_special(char* base, size_t bytes) {
3213   fatal("os::release_memory_special should not be called on Solaris.");
3214   return false;
3215 }
3216 
3217 size_t os::large_page_size() {
3218   return _large_page_size;
3219 }
3220 
3221 // MPSS allows application to commit large page memory on demand; with ISM
3222 // the entire memory region must be allocated as shared memory.
3223 bool os::can_commit_large_page_memory() {
3224   return true;
3225 }
3226 
3227 bool os::can_execute_large_page_memory() {
3228   return true;
3229 }
3230 
3231 static int os_sleep(jlong millis, bool interruptible) {
3232   const jlong limit = INT_MAX;
3233   jlong prevtime;
3234   int res;
3235 
3236   while (millis > limit) {
3237     if ((res = os_sleep(limit, interruptible)) != OS_OK)
3238       return res;
3239     millis -= limit;
3240   }
3241 
3242   // Restart interrupted polls with new parameters until the proper delay
3243   // has been completed.
3244 
3245   prevtime = getTimeMillis();
3246 
3247   while (millis > 0) {
3248     jlong newtime;
3249 
3250     if (!interruptible) {
3251       // Following assert fails for os::yield_all:
3252       // assert(!thread->is_Java_thread(), "must not be java thread");
3253       res = poll(NULL, 0, millis);
3254     } else {
3255       JavaThread *jt = JavaThread::current();
3256 
3257       INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
3258         os::Solaris::clear_interrupted);
3259     }
3260 
3261     // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
3262     // thread.Interrupt.
3263 
3264     // See c/r 6751923. Poll can return 0 before the requested time
3265     // has elapsed if the clock is set via clock_settime (as NTP does).
3266     // res == 0 means poll timed out (see man poll RETURN VALUES);
3267     // the logic below checks that we really did sleep at least
3268     // "millis", and if not we sleep again.
3269     if( ( res == 0 ) || ((res == OS_ERR) && (errno == EINTR))) {
3270       newtime = getTimeMillis();
3271       assert(newtime >= prevtime, "time moving backwards");
3272     /* Doing prevtime and newtime in microseconds doesn't help precision,
3273        and trying to round up to avoid lost milliseconds can result in a
3274        too-short delay. */
3275       millis -= newtime - prevtime;
3276       if(millis <= 0)
3277         return OS_OK;
3278       prevtime = newtime;
3279     } else
3280       return res;
3281   }
3282 
3283   return OS_OK;
3284 }
3285 
3286 // Read calls from inside the vm need to perform state transitions
3287 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3288   INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
3289 }
3290 
3291 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
3292   size_t res;
3293   JavaThread* thread = (JavaThread*)Thread::current();
3294   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
3295   ThreadBlockInVM tbiv(thread);
3296   RESTARTABLE(::pread(fd, buf, (size_t) nBytes, offset), res);
3297   return res;
3298 }
3299 
3300 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3301   INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
3302 }
3303 
3304 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
3305   assert(thread == Thread::current(),  "thread consistency check");
3306 
3307   // TODO-FIXME: this should be removed.
3308   // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock
3309   // situation with a JavaThread being starved out of a lwp. The kernel doesn't seem to generate
3310   // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving
3311   // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
3312   // is fooled into believing that the system is making progress. In the code below we block
3313   // the watcher thread while a safepoint is in progress so that it would not appear as though the
3314   // system is making progress.
3315   if (!Solaris::T2_libthread() &&
3316       thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
3317     // We now try to acquire the threads lock. Since this lock is held by the VM thread during
3318     // the entire safepoint, the watcher thread will line up here during the safepoint.
3319     Threads_lock->lock_without_safepoint_check();
3320     Threads_lock->unlock();
3321   }
3322 
3323   if (thread->is_Java_thread()) {
3324     // This is a JavaThread so we honor the _thread_blocked protocol
3325     // even for sleeps of 0 milliseconds. This was originally done
3326     // as a workaround for bug 4338139. However, now we also do it
3327     // to honor the suspend-equivalent protocol.
3328 
3329     JavaThread *jt = (JavaThread *) thread;
3330     ThreadBlockInVM tbivm(jt);
3331 
3332     jt->set_suspend_equivalent();
3333     // cleared by handle_special_suspend_equivalent_condition() or
3334     // java_suspend_self() via check_and_wait_while_suspended()
3335 
3336     int ret_code;
3337     if (millis <= 0) {
3338       thr_yield();
3339       ret_code = 0;
3340     } else {
3341       // The original sleep() implementation did not create an
3342       // OSThreadWaitState helper for sleeps of 0 milliseconds.
3343       // I'm preserving that decision for now.
3344       OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
3345 
3346       ret_code = os_sleep(millis, interruptible);
3347     }
3348 
3349     // were we externally suspended while we were waiting?
3350     jt->check_and_wait_while_suspended();
3351 
3352     return ret_code;
3353   }
3354 
3355   // non-JavaThread from this point on:
3356 
3357   if (millis <= 0) {
3358     thr_yield();
3359     return 0;
3360   }
3361 
3362   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
3363 
3364   return os_sleep(millis, interruptible);
3365 }
3366 
3367 void os::naked_short_sleep(jlong ms) {
3368   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3369 
3370   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3371   // Solaris requires -lrt for this.
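       // usleep takes microseconds, hence ms * 1000; the assert above keeps the
       // argument below 1,000,000, the largest value usleep is required to accept.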
3372   usleep((ms * 1000));
3373 
3374   return;
3375 }
3376 
3377 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3378 void os::infinite_sleep() {
3379   while (true) {    // sleep forever ...
3380     ::sleep(100);   // ... 100 seconds at a time
3381   }
3382 }
3383 
3384 // Used to convert frequent JVM_Yield() to nops
3385 bool os::dont_yield() {
3386   if (DontYieldALot) {
3387     static hrtime_t last_time = 0;
3388     hrtime_t diff = getTimeNanos() - last_time;
3389 
3390     if (diff < DontYieldALotInterval * 1000000)
3391       return true;
3392 
3393     last_time += diff;
3394 
3395     return false;
3396   }
3397   else {
3398     return false;
3399   }
3400 }
3401 
3402 // Caveat: Solaris os::yield() causes a thread-state transition whereas
3403 // the linux and win32 implementations do not.  This should be checked.
3404 
3405 void os::yield() {
3406   // Yields to all threads with same or greater priority
3407   os::sleep(Thread::current(), 0, false);
3408 }
3409 
3410 // Note that yield semantics are defined by the scheduling class to which
3411 // the thread currently belongs.  Typically, yield will _not yield to
3412 // other equal or higher priority threads that reside on the dispatch queues
3413 // of other CPUs.
3414 
3415 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
3416 
3417 
3418 // On Solaris we found that yield_all doesn't always yield to all other threads.
3419 // There have been cases where there is a thread ready to execute but it doesn't
3420 // get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
3421 // The 1 millisecond wait doesn't seem long enough for the kernel to issue a
3422 // SIGWAITING signal which will cause a new lwp to be created. So we count the
3423 // number of times yield_all is called in the one loop and increase the sleep
3424 // time toward the end of each cycle of attempts. If this fails too, we increase
3425 // the concurrency level so that the starving thread would get an lwp.
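     //
     // Concretely, for attempts > 0 with iterations = attempts % 30: under T1
     // libthread, iteration 0 bumps thr_setconcurrency() when the lwp count
     // looks low; iterations below 25 sleep 1 ms; the rest sleep 10 ms.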
3426 
3427 void os::yield_all(int attempts) {
3428   // Yields to all threads, including threads with lower priorities
3429   if (attempts == 0) {
3430     os::sleep(Thread::current(), 1, false);
3431   } else {
3432     int iterations = attempts % 30;
3433     if (iterations == 0 && !os::Solaris::T2_libthread()) {
3434       // thr_setconcurrency and _getconcurrency make sense only under T1.
3435       int noofLWPS = thr_getconcurrency();
3436       if (noofLWPS < (Threads::number_of_threads() + 2)) {
3437         thr_setconcurrency(thr_getconcurrency() + 1);
3438       }
3439     } else if (iterations < 25) {
3440       os::sleep(Thread::current(), 1, false);
3441     } else {
3442       os::sleep(Thread::current(), 10, false);
3443     }
3444   }
3445 }
3446 
3447 // Called from the tight loops to possibly influence time-sharing heuristics
3448 void os::loop_breaker(int attempts) {
3449   os::yield_all(attempts);
3450 }
3451 
3452 
3453 // Interface for setting lwp priorities.  If we are using T2 libthread,
3454 // which forces the use of BoundThreads or we manually set UseBoundThreads,
3455 // all of our threads will be assigned to real lwp's.  Using the thr_setprio
3456 // function is meaningless in this mode, so we must adjust the real lwp's priority.
3457 // The routines below implement the getting and setting of lwp priorities.
3458 //
3459 // Note: There are three priority scales used on Solaris.  Java priorities,
3460 //       which range from 1 to 10; the libthread "thr_setprio" scale, which
3461 //       ranges from 0 to 127; and the scale of the current scheduling class
3462 //       of the process we are running in, which is typically from -60 to +60.
3463 //       The setting of the lwp priorities is done after a call to thr_setprio
3464 //       so Java priorities are mapped to libthread priorities and we map from
3465 //       the latter to lwp priorities.  We don't keep priorities stored in
3466 //       Java priorities since some of our worker threads want to set priorities
3467 //       higher than all Java threads.
3468 //
3469 // For related information:
3470 // (1)  man -s 2 priocntl
3471 // (2)  man -s 4 priocntl
3472 // (3)  man dispadmin
3473 // =    librt.so
3474 // =    libthread/common/rtsched.c - thrp_setlwpprio().
3475 // =    ps -cL <pid> ... to validate priority.
3476 // =    sched_get_priority_min and _max
3477 //              pthread_create
3478 //              sched_setparam
3479 //              pthread_setschedparam
3480 //
3481 // Assumptions:
3482 // +    We assume that all threads in the process belong to the same
3483 //              scheduling class, i.e., a homogeneous process.
3484 // +    Must be root or in IA group to change the "interactive" attribute.
3485 //              Priocntl() will fail silently.  The only indication of failure is when
3486 //              we read-back the value and notice that it hasn't changed.
3487 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3488 // +    For RT, change timeslice as well.  Invariant:
3489 //              constant "priority integral"
3490 //              Konst == TimeSlice * (60-Priority)
3491 //              Given a priority, compute appropriate timeslice.
3492 // +    Higher numerical values have higher priority.
3493 
3494 // sched class attributes
3495 typedef struct {
3496         int   schedPolicy;              // classID
3497         int   maxPrio;
3498         int   minPrio;
3499 } SchedInfo;
3500 
3501 
3502 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3503 
3504 #ifdef ASSERT
3505 static int  ReadBackValidate = 1;
3506 #endif
3507 static int  myClass     = 0;
3508 static int  myMin       = 0;
3509 static int  myMax       = 0;
3510 static int  myCur       = 0;
3511 static bool priocntl_enable = false;
3512 
3513 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
3514 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3515 
3516 
3517 // lwp_priocntl_init
3518 //
3519 // Try to determine the priority scale for our process.
3520 //
3521 // Return errno or 0 if OK.
3522 //
3523 static int lwp_priocntl_init () {
3524   int rslt;
3525   pcinfo_t ClassInfo;
3526   pcparms_t ParmInfo;
3527   int i;
3528 
3529   if (!UseThreadPriorities) return 0;
3530 
3531   // We are using Bound threads, we need to determine our priority ranges
3532   if (os::Solaris::T2_libthread() || UseBoundThreads) {
3533     // If ThreadPriorityPolicy is 1, switch tables
3534     if (ThreadPriorityPolicy == 1) {
3535       for (i = 0 ; i < CriticalPriority+1; i++)
3536         os::java_to_os_priority[i] = prio_policy1[i];
3537     }
3538     if (UseCriticalJavaThreadPriority) {
3539       // MaxPriority always maps to the FX scheduling class and criticalPrio.
3540       // See set_native_priority() and set_lwp_class_and_priority().
3541       // Save original MaxPriority mapping in case attempt to
3542       // use critical priority fails.
3543       java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3544       // Set negative to distinguish from other priorities
3545       os::java_to_os_priority[MaxPriority] = -criticalPrio;
3546     }
3547   }
3548   // Not using Bound Threads; use the ThreadPriorityPolicy=1 mapping
3549   else {
3550     for ( i = 0 ; i < CriticalPriority+1; i++ ) {
3551       os::java_to_os_priority[i] = prio_policy1[i];
3552     }
3553     return 0;
3554   }
3555 
3556   // Get IDs for a set of well-known scheduling classes.
3557   // TODO-FIXME: GETCLINFO returns the current # of classes in the
3558   // system.  We should have a loop that iterates over the
3559   // classID values, which are known to be "small" integers.
3560 
3561   strcpy(ClassInfo.pc_clname, "TS");
3562   ClassInfo.pc_cid = -1;
3563   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3564   if (rslt < 0) return errno;
3565   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3566   tsLimits.schedPolicy = ClassInfo.pc_cid;
3567   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3568   tsLimits.minPrio = -tsLimits.maxPrio;
3569 
3570   strcpy(ClassInfo.pc_clname, "IA");
3571   ClassInfo.pc_cid = -1;
3572   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3573   if (rslt < 0) return errno;
3574   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3575   iaLimits.schedPolicy = ClassInfo.pc_cid;
3576   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3577   iaLimits.minPrio = -iaLimits.maxPrio;
3578 
3579   strcpy(ClassInfo.pc_clname, "RT");
3580   ClassInfo.pc_cid = -1;
3581   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3582   if (rslt < 0) return errno;
3583   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3584   rtLimits.schedPolicy = ClassInfo.pc_cid;
3585   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3586   rtLimits.minPrio = 0;
3587 
3588   strcpy(ClassInfo.pc_clname, "FX");
3589   ClassInfo.pc_cid = -1;
3590   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3591   if (rslt < 0) return errno;
3592   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3593   fxLimits.schedPolicy = ClassInfo.pc_cid;
3594   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3595   fxLimits.minPrio = 0;
3596 
3597   // Query our "current" scheduling class.
3598   // This will normally be IA, TS or, rarely, FX or RT.
3599   memset(&ParmInfo, 0, sizeof(ParmInfo));
3600   ParmInfo.pc_cid = PC_CLNULL;
3601   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3602   if (rslt < 0) return errno;
3603   myClass = ParmInfo.pc_cid;
3604 
3605   // We now know our scheduling classId, get specific information
3606   // about the class.
3607   ClassInfo.pc_cid = myClass;
3608   ClassInfo.pc_clname[0] = 0;
3609   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3610   if (rslt < 0) return errno;
3611 
3612   if (ThreadPriorityVerbose) {
3613     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3614   }
3615 
3616   memset(&ParmInfo, 0, sizeof(pcparms_t));
3617   ParmInfo.pc_cid = PC_CLNULL;
3618   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3619   if (rslt < 0) return errno;
3620 
3621   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3622     myMin = rtLimits.minPrio;
3623     myMax = rtLimits.maxPrio;
3624   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3625     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3626     myMin = iaLimits.minPrio;
3627     myMax = iaLimits.maxPrio;
3628     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3629   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3630     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3631     myMin = tsLimits.minPrio;
3632     myMax = tsLimits.maxPrio;
3633     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3634   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3635     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3636     myMin = fxLimits.minPrio;
3637     myMax = fxLimits.maxPrio;
3638     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3639   } else {
3640     // No clue - punt
3641     if (ThreadPriorityVerbose)
3642       tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
3643     return EINVAL;      // no clue, punt
3644   }
3645 
3646   if (ThreadPriorityVerbose) {
3647     tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
3648   }
3649 
3650   priocntl_enable = true;  // Enable changing priorities
3651   return 0;
3652 }
3653 
3654 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3655 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3656 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3657 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3658 
3659 
3660 // scale_to_lwp_priority
3661 //
3662 // Convert from the libthread "thr_setprio" scale to our current
3663 // lwp scheduling class scale.
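     // E.g. with an lwp range of [-60..60], x == 64 maps to
     // ((64 * 120) / 128) + (-60) == 0, and x == 127 maps straight to rMax.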
3664 //
3665 static
3666 int     scale_to_lwp_priority (int rMin, int rMax, int x)
3667 {
3668   int v;
3669 
3670   if (x == 127) return rMax;            // avoid round-down
3671   v = (((x*(rMax-rMin)))/128)+rMin;
3672   return v;
3673 }
3674 
3675 
3676 // set_lwp_class_and_priority
3677 //
3678 // Set the class and priority of the lwp.  This call should only
3679 // be made when using bound threads (T2 threads are bound by default).
3680 //
3681 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3682                                int newPrio, int new_class, bool scale) {
3683   int rslt;
3684   int Actual, Expected, prv;
3685   pcparms_t ParmInfo;                   // for GET-SET
3686 #ifdef ASSERT
3687   pcparms_t ReadBack;                   // for readback
3688 #endif
3689 
3690   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3691   // Query current values.
3692   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3693   // Cache "pcparms_t" in global ParmCache.
3694   // TODO: elide set-to-same-value
3695 
3696   // If something went wrong on init, don't change priorities.
3697   if ( !priocntl_enable ) {
3698     if (ThreadPriorityVerbose)
3699       tty->print_cr("Trying to set priority but init failed, ignoring");
3700     return EINVAL;
3701   }
3702 
3703   // If lwp hasn't started yet, just return
3704   // the _start routine will call us again.
3705   if ( lwpid <= 0 ) {
3706     if (ThreadPriorityVerbose) {
3707       tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
3708                      INTPTR_FORMAT " to %d, lwpid not set",
3709                      ThreadID, newPrio);
3710     }
3711     return 0;
3712   }
3713 
3714   if (ThreadPriorityVerbose) {
3715     tty->print_cr ("set_lwp_class_and_priority("
3716                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3717                    ThreadID, lwpid, newPrio);
3718   }
3719 
3720   memset(&ParmInfo, 0, sizeof(pcparms_t));
3721   ParmInfo.pc_cid = PC_CLNULL;
3722   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3723   if (rslt < 0) return errno;
3724 
3725   int cur_class = ParmInfo.pc_cid;
3726   ParmInfo.pc_cid = (id_t)new_class;
3727 
3728   if (new_class == rtLimits.schedPolicy) {
3729     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3730     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3731                                                        rtLimits.maxPrio, newPrio)
3732                                : newPrio;
3733     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3734     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3735     if (ThreadPriorityVerbose) {
3736       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3737     }
3738   } else if (new_class == iaLimits.schedPolicy) {
3739     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3740     int maxClamped     = MIN2(iaLimits.maxPrio,
3741                               cur_class == new_class
3742                                 ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3743     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3744                                                        maxClamped, newPrio)
3745                                : newPrio;
3746     iaInfo->ia_uprilim = cur_class == new_class
3747                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3748     iaInfo->ia_mode    = IA_NOCHANGE;
3749     if (ThreadPriorityVerbose) {
3750       tty->print_cr("IA: [%d...%d] %d->%d\n",
3751                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3752     }
3753   } else if (new_class == tsLimits.schedPolicy) {
3754     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3755     int maxClamped     = MIN2(tsLimits.maxPrio,
3756                               cur_class == new_class
3757                                 ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3758     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3759                                                        maxClamped, newPrio)
3760                                : newPrio;
3761     tsInfo->ts_uprilim = cur_class == new_class
3762                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3763     if (ThreadPriorityVerbose) {
3764       tty->print_cr("TS: [%d...%d] %d->%d\n",
3765                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3766     }
3767   } else if (new_class == fxLimits.schedPolicy) {
3768     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3769     int maxClamped     = MIN2(fxLimits.maxPrio,
3770                               cur_class == new_class
3771                                 ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3772     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3773                                                        maxClamped, newPrio)
3774                                : newPrio;
3775     fxInfo->fx_uprilim = cur_class == new_class
3776                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3777     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3778     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3779     if (ThreadPriorityVerbose) {
3780       tty->print_cr("FX: [%d...%d] %d->%d\n",
3781                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3782     }
3783   } else {
3784     if (ThreadPriorityVerbose) {
3785       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3786     }
3787     return EINVAL;    // no clue, punt
3788   }
3789 
3790   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3791   if (ThreadPriorityVerbose && rslt) {
3792     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3793   }
3794   if (rslt < 0) return errno;
3795 
3796 #ifdef ASSERT
3797   // Sanity check: read back what we just attempted to set.
3798   // In theory it could have changed in the interim ...
3799   //
3800   // The priocntl system call is tricky.
3801   // Sometimes it'll validate the priority value argument and
3802   // return EINVAL if unhappy.  At other times it fails silently.
3803   // Readbacks are prudent.
3804 
3805   if (!ReadBackValidate) return 0;
3806 
3807   memset(&ReadBack, 0, sizeof(pcparms_t));
3808   ReadBack.pc_cid = PC_CLNULL;
3809   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3810   assert(rslt >= 0, "priocntl failed");
3811   Actual = Expected = 0xBAD;
3812   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3813   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3814     Actual   = RTPRI(ReadBack)->rt_pri;
3815     Expected = RTPRI(ParmInfo)->rt_pri;
3816   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3817     Actual   = IAPRI(ReadBack)->ia_upri;
3818     Expected = IAPRI(ParmInfo)->ia_upri;
3819   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3820     Actual   = TSPRI(ReadBack)->ts_upri;
3821     Expected = TSPRI(ParmInfo)->ts_upri;
3822   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3823     Actual   = FXPRI(ReadBack)->fx_upri;
3824     Expected = FXPRI(ParmInfo)->fx_upri;
3825   } else {
3826     if (ThreadPriorityVerbose) {
3827       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3828                     ParmInfo.pc_cid);
3829     }
3830   }
3831 
3832   if (Actual != Expected) {
3833     if (ThreadPriorityVerbose) {
3834       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3835                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3836     }
3837   }
3838 #endif
3839 
3840   return 0;
3841 }
3842 
3843 // Solaris only gives access to 128 real priorities at a time,
3844 // so we expand Java's ten to fill this range.  This would be better
3845 // if we dynamically adjusted relative priorities.
3846 //
3847 // The ThreadPriorityPolicy option allows us to select 2 different
3848 // priority scales.
3849 //
3850 // ThreadPriorityPolicy=0
3851 // Since Solaris' default priority is MaximumPriority, we do not
3852 // set a priority lower than Max unless a priority lower than
3853 // NormPriority is requested.
3854 //
3855 // ThreadPriorityPolicy=1
3856 // This mode causes the priority table to get filled with
3857 // linear values.  NormPriority gets mapped to 50% of the
3858 // Maximum priority and so on.  This will cause VM threads
3859 // to get unfair treatment against other Solaris processes
3860 // which do not explicitly alter their thread priorities.
3861 //
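     // Note: the table below is the default (ThreadPriorityPolicy=0) mapping;
     // the CriticalPriority entry is stored negative so that
     // set_native_priority() can recognize a critical-priority request.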
3862 
3863 int os::java_to_os_priority[CriticalPriority + 1] = {
3864   -99999,         // 0 Entry should never be used
3865 
3866   0,              // 1 MinPriority
3867   32,             // 2
3868   64,             // 3
3869 
3870   96,             // 4
3871   127,            // 5 NormPriority
3872   127,            // 6
3873 
3874   127,            // 7
3875   127,            // 8
3876   127,            // 9 NearMaxPriority
3877 
3878   127,            // 10 MaxPriority
3879 
3880   -criticalPrio   // 11 CriticalPriority
3881 };
3882 
3883 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3884   OSThread* osthread = thread->osthread();
3885 
3886   // Save requested priority in case the thread hasn't been started
3887   osthread->set_native_priority(newpri);
3888 
3889   // Check for critical priority request
3890   bool fxcritical = false;
3891   if (newpri == -criticalPrio) {
3892     fxcritical = true;
3893     newpri = criticalPrio;
3894   }
3895 
3896   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3897   if (!UseThreadPriorities) return OS_OK;
3898 
3899   int status = 0;
3900 
3901   if (!fxcritical) {
3902     // Use thr_setprio only if we have a priority that thr_setprio understands
3903     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3904   }
3905 
3906   if (os::Solaris::T2_libthread() ||
3907       (UseBoundThreads && osthread->is_vm_created())) {
3908     int lwp_status =
3909       set_lwp_class_and_priority(osthread->thread_id(),
3910                                  osthread->lwp_id(),
3911                                  newpri,
3912                                  fxcritical ? fxLimits.schedPolicy : myClass,
3913                                  !fxcritical);
3914     if (lwp_status != 0 && fxcritical) {
3915       // Try again, this time without changing the scheduling class
3916       newpri = java_MaxPriority_to_os_priority;
3917       lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3918                                               osthread->lwp_id(),
3919                                               newpri, myClass, false);
3920     }
3921     status |= lwp_status;
3922   }
3923   return (status == 0) ? OS_OK : OS_ERR;
3924 }
3925 
3926 
3927 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
3928   int p;
3929   if ( !UseThreadPriorities ) {
3930     *priority_ptr = NormalPriority;
3931     return OS_OK;
3932   }
3933   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3934   if (status != 0) {
3935     return OS_ERR;
3936   }
3937   *priority_ptr = p;
3938   return OS_OK;
3939 }
3940 
3941 
3942 // Hint to the underlying OS that a task switch would not be good.
3943 // Void return because it's a hint and can fail.
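     // schedctl_init(3C) returns the preemption-control block for the calling
     // lwp (creating it on first use); schedctl_start() asks the kernel to
     // briefly defer preemption, as a hint only.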
3944 void os::hint_no_preempt() {
3945   schedctl_start(schedctl_init());
3946 }
3947 
3948 static void resume_clear_context(OSThread *osthread) {
3949   osthread->set_ucontext(NULL);
3950 }
3951 
3952 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3953   osthread->set_ucontext(context);
3954 }
3955 
3956 static Semaphore sr_semaphore;
3957 
3958 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
3959   // Save and restore errno to avoid confusing native code with EINTR
3960   // after sigsuspend.
3961   int old_errno = errno;
3962 
3963   OSThread* osthread = thread->osthread();
3964   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3965 
3966   os::SuspendResume::State current = osthread->sr.state();
3967   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3968     suspend_save_context(osthread, uc);
3969 
3970     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3971     os::SuspendResume::State state = osthread->sr.suspended();
3972     if (state == os::SuspendResume::SR_SUSPENDED) {
3973       sigset_t suspend_set;  // signals for sigsuspend()
3974 
3975       // get current set of blocked signals and unblock resume signal
3976       thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
3977       sigdelset(&suspend_set, os::Solaris::SIGasync());
3978 
3979       sr_semaphore.signal();
3980       // wait here until we are resumed
3981       while (1) {
3982         sigsuspend(&suspend_set);
3983 
3984         os::SuspendResume::State result = osthread->sr.running();
3985         if (result == os::SuspendResume::SR_RUNNING) {
3986           sr_semaphore.signal();
3987           break;
3988         }
3989       }
3990 
3991     } else if (state == os::SuspendResume::SR_RUNNING) {
3992       // request was cancelled, continue
3993     } else {
3994       ShouldNotReachHere();
3995     }
3996 
3997     resume_clear_context(osthread);
3998   } else if (current == os::SuspendResume::SR_RUNNING) {
3999     // request was cancelled, continue
4000   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
4001     // ignore
4002   } else {
4003     // ignore
4004   }
4005 
4006   errno = old_errno;
4007 }
4008 
4009 
4010 void os::interrupt(Thread* thread) {
4011   assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
4012 
4013   OSThread* osthread = thread->osthread();
4014 
4015   int isInterrupted = osthread->interrupted();
4016   if (!isInterrupted) {
4017       osthread->set_interrupted(true);
4018       OrderAccess::fence();
4019       // os::sleep() is implemented with either poll (NULL,0,timeout) or
4020       // by parking on _SleepEvent.  If the former, thr_kill will unwedge
4021       // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper.
4022       ParkEvent * const slp = thread->_SleepEvent ;
4023       if (slp != NULL) slp->unpark() ;
4024   }
4025 
4026   // For JSR166:  unpark after setting status but before thr_kill -dl
4027   if (thread->is_Java_thread()) {
4028     ((JavaThread*)thread)->parker()->unpark();
4029   }
4030 
4031   // Handle interruptible wait() ...
4032   ParkEvent * const ev = thread->_ParkEvent ;
4033   if (ev != NULL) ev->unpark() ;
4034 
4035   // When events are used everywhere for os::sleep, then this thr_kill
4036   // will only be needed if UseVMInterruptibleIO is true.
4037 
4038   if (!isInterrupted) {
4039     int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
4040     assert_status(status == 0, status, "thr_kill");
4041 
4042     // Bump thread interruption counter
4043     RuntimeService::record_thread_interrupt_signaled_count();
4044   }
4045 }
4046 
4047 
4048 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
4049   assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
4050 
4051   OSThread* osthread = thread->osthread();
4052 
4053   bool res = osthread->interrupted();
4054 
4055   // NOTE that since there is no "lock" around these two operations,
4056   // there is the possibility that the interrupted flag will be
4057   // "false" but that the interrupt event will be set. This is
4058   // intentional. The effect of this is that Object.wait() will appear
4059   // to have a spurious wakeup, which is not harmful, and the
4060   // possibility is so rare that it is not worth the added complexity
4061   // to add yet another lock. It has also been recommended not to put
4062   // the interrupted flag into the os::Solaris::Event structure,
4063   // because it hides the issue.
4064   if (res && clear_interrupted) {
4065     osthread->set_interrupted(false);
4066   }
4067   return res;
4068 }
4069 
4070 
4071 void os::print_statistics() {
4072 }
4073 
4074 int os::message_box(const char* title, const char* message) {
4075   int i;
4076   fdStream err(defaultStream::error_fd());
4077   for (i = 0; i < 78; i++) err.print_raw("=");
4078   err.cr();
4079   err.print_raw_cr(title);
4080   for (i = 0; i < 78; i++) err.print_raw("-");
4081   err.cr();
4082   err.print_raw_cr(message);
4083   for (i = 0; i < 78; i++) err.print_raw("=");
4084   err.cr();
4085 
4086   char buf[16];
4087   // Prevent process from exiting upon "read error" without consuming all CPU
4088   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
4089 
4090   return buf[0] == 'y' || buf[0] == 'Y';
4091 }
4092 
4093 static int sr_notify(OSThread* osthread) {
4094   int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
4095   assert_status(status == 0, status, "thr_kill");
4096   return status;
4097 }
4098 
4099 // "Randomly" selected value for how long we want to spin
4100 // before bailing out on suspending a thread, also how often
4101 // we send a signal to a thread we want to resume
4102 static const int RANDOMLY_LARGE_INTEGER = 1000000;
4103 static const int RANDOMLY_LARGE_INTEGER2 = 100;
4104 
4105 static bool do_suspend(OSThread* osthread) {
4106   assert(osthread->sr.is_running(), "thread should be running");
4107   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
4108 
4109   // mark as suspended and send signal
4110   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
4111     // failed to switch, state wasn't running?
4112     ShouldNotReachHere();
4113     return false;
4114   }
4115 
4116   if (sr_notify(osthread) != 0) {
4117     ShouldNotReachHere();
4118   }
4119 
4120   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
4121   while (true) {
4122     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
4123       break;
4124     } else {
4125       // timeout
4126       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
4127       if (cancelled == os::SuspendResume::SR_RUNNING) {
4128         return false;
4129       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
4130         // make sure that we consume the signal on the semaphore as well
4131         sr_semaphore.wait();
4132         break;
4133       } else {
4134         ShouldNotReachHere();
4135         return false;
4136       }
4137     }
4138   }
4139 
4140   guarantee(osthread->sr.is_suspended(), "Must be suspended");
4141   return true;
4142 }
4143 
4144 static void do_resume(OSThread* osthread) {
4145   assert(osthread->sr.is_suspended(), "thread should be suspended");
4146   assert(!sr_semaphore.trywait(), "invalid semaphore state");
4147 
4148   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
4149     // failed to switch to WAKEUP_REQUEST
4150     ShouldNotReachHere();
4151     return;
4152   }
4153 
4154   while (true) {
4155     if (sr_notify(osthread) == 0) {
4156       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
4157         if (osthread->sr.is_running()) {
4158           return;
4159         }
4160       }
4161     } else {
4162       ShouldNotReachHere();
4163     }
4164   }
4165 
4166   guarantee(osthread->sr.is_running(), "Must be running!");
4167 }
4168 
4169 void os::SuspendedThreadTask::internal_do_task() {
4170   if (do_suspend(_thread->osthread())) {
4171     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
4172     do_task(context);
4173     do_resume(_thread->osthread());
4174   }
4175 }
4176 
4177 class PcFetcher : public os::SuspendedThreadTask {
4178 public:
4179   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
4180   ExtendedPC result();
4181 protected:
4182   void do_task(const os::SuspendedThreadTaskContext& context);
4183 private:
4184   ExtendedPC _epc;
4185 };
4186 
4187 ExtendedPC PcFetcher::result() {
4188   guarantee(is_done(), "task is not done yet.");
4189   return _epc;
4190 }
4191 
4192 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
4193   Thread* thread = context.thread();
4194   OSThread* osthread = thread->osthread();
4195   if (osthread->ucontext() != NULL) {
4196     _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
4197   } else {
4198     // NULL context is unexpected, double-check this is the VMThread
4199     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
4200   }
4201 }
4202 
4203 // A lightweight implementation that does not suspend the target thread and
4204 // thus returns only a hint. Used for profiling only!
4205 ExtendedPC os::get_thread_pc(Thread* thread) {
4206   // Make sure that it is called by the watcher and the Threads lock is owned.
4207   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
4208   // For now, is only used to profile the VM Thread
4209   assert(thread->is_VM_thread(), "Can only be called for VMThread");
4210   PcFetcher fetcher(thread);
4211   fetcher.run();
4212   return fetcher.result();
4213 }
4214 
4215 
4216 // This does not do anything on Solaris. This is basically a hook for being
4217 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
4218 void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
4219   f(value, method, args, thread);
4220 }
4221 
4222 // This routine may be used by user applications as a "hook" to catch signals.
4223 // The user-defined signal handler must pass unrecognized signals to this
4224 // routine, and if it returns true (non-zero), then the signal handler must
4225 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
4226 // routine will never return false (zero), but instead will execute a VM panic
4227 // routine to kill the process.
4228 //
4229 // If this routine returns false, it is OK to call it again.  This allows
4230 // the user-defined signal handler to perform checks either before or after
4231 // the VM performs its own checks.  Naturally, the user code would be making
4232 // a serious error if it tried to handle an exception (such as a null check
4233 // or breakpoint) that the VM was generating for its own correct operation.
4234 //
4235 // This routine may recognize any of the following kinds of signals:
4236 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
4237 // os::Solaris::SIGasync
4238 // It should be consulted by handlers for any of those signals.
4239 // It explicitly does not recognize os::Solaris::SIGinterrupt
4240 //
4241 // The caller of this routine must pass in the three arguments supplied
4242 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
4243 // field of the structure passed to sigaction().  This routine assumes that
4244 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
4245 //
4246 // Note that the VM will print warnings if it detects conflicting signal
4247 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
4248 //
4249 extern "C" JNIEXPORT int
4250 JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
4251                           int abort_if_unrecognized);
4252 
4253 
4254 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
4255   int orig_errno = errno;  // Preserve errno value over signal handler.
4256   JVM_handle_solaris_signal(sig, info, ucVoid, true);
4257   errno = orig_errno;
4258 }
4259 
4260 /* Do not delete - if guarantee is ever removed, a signal handler (even empty)
4261    is needed to provoke threads blocked on IO to return an EINTR.
4262    Note: this explicitly does NOT call JVM_handle_solaris_signal and
4263    does NOT participate in signal chaining due to requirement for
4264    NOT setting SA_RESTART to make EINTR work. */
4265 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
4266    if (UseSignalChaining) {
4267       struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
4268       if (actp && actp->sa_handler) {
4269         vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
4270       }
4271    }
4272 }
4273 
4274 // This boolean allows users to forward their own non-matching signals
4275 // to JVM_handle_solaris_signal, harmlessly.
4276 bool os::Solaris::signal_handlers_are_installed = false;
4277 
4278 // For signal-chaining
4279 bool os::Solaris::libjsig_is_loaded = false;
4280 typedef struct sigaction *(*get_signal_t)(int);
4281 get_signal_t os::Solaris::get_signal_action = NULL;
4282 
4283 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
4284   struct sigaction *actp = NULL;
4285 
4286   if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
4287     // Retrieve the old signal handler from libjsig
4288     actp = (*get_signal_action)(sig);
4289   }
4290   if (actp == NULL) {
4291     // Retrieve the preinstalled signal handler from jvm
4292     actp = get_preinstalled_handler(sig);
4293   }
4294 
4295   return actp;
4296 }
4297 
4298 static bool call_chained_handler(struct sigaction *actp, int sig,
4299                                  siginfo_t *siginfo, void *context) {
4300   // Call the old signal handler
4301   if (actp->sa_handler == SIG_DFL) {
4302     // It's more reasonable to let jvm treat it as an unexpected exception
4303     // instead of taking the default action.
4304     return false;
4305   } else if (actp->sa_handler != SIG_IGN) {
4306     if ((actp->sa_flags & SA_NODEFER) == 0) {
4307       // automatically block the signal
4308       sigaddset(&(actp->sa_mask), sig);
4309     }
4310 
4311     sa_handler_t hand;
4312     sa_sigaction_t sa;
4313     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
4314     // retrieve the chained handler
4315     if (siginfo_flag_set) {
4316       sa = actp->sa_sigaction;
4317     } else {
4318       hand = actp->sa_handler;
4319     }
4320 
4321     if ((actp->sa_flags & SA_RESETHAND) != 0) {
4322       actp->sa_handler = SIG_DFL;
4323     }
4324 
4325     // try to honor the signal mask
4326     sigset_t oset;
4327     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
4328 
4329     // call into the chained handler
4330     if (siginfo_flag_set) {
4331       (*sa)(sig, siginfo, context);
4332     } else {
4333       (*hand)(sig);
4334     }
4335 
4336     // restore the signal mask
4337     thr_sigsetmask(SIG_SETMASK, &oset, 0);
4338   }
4339   // Tell jvm's signal handler the signal is taken care of.
4340   return true;
4341 }
4342 
4343 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4344   bool chained = false;
4345   // signal-chaining
4346   if (UseSignalChaining) {
4347     struct sigaction *actp = get_chained_signal_action(sig);
4348     if (actp != NULL) {
4349       chained = call_chained_handler(actp, sig, siginfo, context);
4350     }
4351   }
4352   return chained;
4353 }
4354 
4355 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4356   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4357   if (preinstalled_sigs[sig] != 0) {
4358     return &chainedsigactions[sig];
4359   }
4360   return NULL;
4361 }
4362 
4363 void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
4364 
4365   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4366   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4367   chainedsigactions[sig] = oldAct;
4368   preinstalled_sigs[sig] = 1;
4369 }
4370 
4371 void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
4372   // Check for overwrite.
4373   struct sigaction oldAct;
4374   sigaction(sig, (struct sigaction*)NULL, &oldAct);
4375   void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
4376                                       : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
4377   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4378       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4379       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4380     if (AllowUserSignalHandlers || !set_installed) {
4381       // Do not overwrite; user takes responsibility to forward to us.
4382       return;
4383     } else if (UseSignalChaining) {
4384       if (oktochain) {
4385         // save the old handler in jvm
4386         save_preinstalled_handler(sig, oldAct);
4387       } else {
4388         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4389       }
4390       // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
4392     } else {
4393       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4394                     "%#lx for signal %d.", (long)oldhand, sig));
4395     }
4396   }
4397 
4398   struct sigaction sigAct;
4399   sigfillset(&(sigAct.sa_mask));
4400   sigAct.sa_handler = SIG_DFL;
4401 
4402   sigAct.sa_sigaction = signalHandler;
4403   // Handle SIGSEGV on alternate signal stack if
4404   // not using stack banging
4405   if (!UseStackBanging && sig == SIGSEGV) {
4406     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4407   // Interruptible i/o requires SA_RESTART cleared so EINTR
4408   // is returned instead of restarting system calls
4409   } else if (sig == os::Solaris::SIGinterrupt()) {
4410     sigemptyset(&sigAct.sa_mask);
4411     sigAct.sa_handler = NULL;
4412     sigAct.sa_flags = SA_SIGINFO;
4413     sigAct.sa_sigaction = sigINTRHandler;
4414   } else {
4415     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4416   }
4417   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4418 
4419   sigaction(sig, &sigAct, &oldAct);
4420 
4421   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4422                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4423   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4424 }
4425 
4426 
4427 #define DO_SIGNAL_CHECK(sig) \
4428   if (!sigismember(&check_signal_done, sig)) \
4429     os::Solaris::check_signal_handler(sig)
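
// For example, DO_SIGNAL_CHECK(SIGSEGV) expands (roughly) to:
//
//   if (!sigismember(&check_signal_done, SIGSEGV))
//     os::Solaris::check_signal_handler(SIGSEGV);
//
// so each signal is re-checked on every pass until a mismatch has been
// reported once and the signal is added to check_signal_done.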
4430 
// This method is run as a periodic task to check for misbehaving JNI
// applications under CheckJNI; any other periodic checks can be added here.
4433 
4434 void os::run_periodic_checks() {
  // A big source of grief is hijacking virtual address 0x0 on Solaris,
  // thereby preventing NULL checks.
4437   if(!check_addr0_done) check_addr0_done = check_addr0(tty);
4438 
4439   if (check_signals == false) return;
4440 
  // If the SEGV or BUS handlers have been overridden, that could potentially
  // prevent generation of the hs*.log in the event of a crash. Debugging
  // such a crash can be very challenging, so for good measure we check
  // the following:
4445   DO_SIGNAL_CHECK(SIGSEGV);
4446   DO_SIGNAL_CHECK(SIGILL);
4447   DO_SIGNAL_CHECK(SIGFPE);
4448   DO_SIGNAL_CHECK(SIGBUS);
4449   DO_SIGNAL_CHECK(SIGPIPE);
4450   DO_SIGNAL_CHECK(SIGXFSZ);
4451 
4452   // ReduceSignalUsage allows the user to override these handlers
4453   // see comments at the very top and jvm_solaris.h
4454   if (!ReduceSignalUsage) {
4455     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4456     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4457     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4458     DO_SIGNAL_CHECK(BREAK_SIGNAL);
4459   }
4460 
4461   // See comments above for using JVM1/JVM2 and UseAltSigs
4462   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4463   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4464 
4465 }
4466 
4467 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4468 
4469 static os_sigaction_t os_sigaction = NULL;
4470 
4471 void os::Solaris::check_signal_handler(int sig) {
4472   char buf[O_BUFLEN];
4473   address jvmHandler = NULL;
4474 
4475   struct sigaction act;
4476   if (os_sigaction == NULL) {
4477     // only trust the default sigaction, in case it has been interposed
4478     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4479     if (os_sigaction == NULL) return;
4480   }
4481 
4482   os_sigaction(sig, (struct sigaction*)NULL, &act);
4483 
4484   address thisHandler = (act.sa_flags & SA_SIGINFO)
4485     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4486     : CAST_FROM_FN_PTR(address, act.sa_handler) ;
4487 
4488 
4489   switch(sig) {
4490     case SIGSEGV:
4491     case SIGBUS:
4492     case SIGFPE:
4493     case SIGPIPE:
4494     case SIGXFSZ:
4495     case SIGILL:
4496       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4497       break;
4498 
4499     case SHUTDOWN1_SIGNAL:
4500     case SHUTDOWN2_SIGNAL:
4501     case SHUTDOWN3_SIGNAL:
4502     case BREAK_SIGNAL:
4503       jvmHandler = (address)user_handler();
4504       break;
4505 
4506     default:
4507       int intrsig = os::Solaris::SIGinterrupt();
4508       int asynsig = os::Solaris::SIGasync();
4509 
4510       if (sig == intrsig) {
4511         jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4512       } else if (sig == asynsig) {
4513         jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4514       } else {
4515         return;
4516       }
4517       break;
4518   }
4519 
4520 
4521   if (thisHandler != jvmHandler) {
4522     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4523     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4524     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4525     // No need to check this sig any longer
4526     sigaddset(&check_signal_done, sig);
    // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
4528     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
4529       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
4530                     exception_name(sig, buf, O_BUFLEN));
4531     }
4532   } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4533     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4534     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4535     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
4536     // No need to check this sig any longer
4537     sigaddset(&check_signal_done, sig);
4538   }
4539 
4540   // Print all the signal handler state
4541   if (sigismember(&check_signal_done, sig)) {
4542     print_signal_handlers(tty, buf, O_BUFLEN);
4543   }
4544 
4545 }
4546 
4547 void os::Solaris::install_signal_handlers() {
4548   bool libjsigdone = false;
4549   signal_handlers_are_installed = true;
4550 
4551   // signal-chaining
4552   typedef void (*signal_setting_t)();
4553   signal_setting_t begin_signal_setting = NULL;
4554   signal_setting_t end_signal_setting = NULL;
4555   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4556                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4557   if (begin_signal_setting != NULL) {
4558     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4559                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4560     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4561                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4562     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4563                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4564     libjsig_is_loaded = true;
4565     if (os::Solaris::get_libjsig_version != NULL) {
4566       libjsigversion =  (*os::Solaris::get_libjsig_version)();
4567     }
4568     assert(UseSignalChaining, "should enable signal-chaining");
4569   }
4570   if (libjsig_is_loaded) {
4571     // Tell libjsig jvm is setting signal handlers
4572     (*begin_signal_setting)();
4573   }
4574 
4575   set_signal_handler(SIGSEGV, true, true);
4576   set_signal_handler(SIGPIPE, true, true);
4577   set_signal_handler(SIGXFSZ, true, true);
4578   set_signal_handler(SIGBUS, true, true);
4579   set_signal_handler(SIGILL, true, true);
4580   set_signal_handler(SIGFPE, true, true);
4581 
4582 
4583   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4584 
    // Pre-1.4.1 libjsig limited signal chaining to signals <= 32, so it
    // cannot register overridable signals, which might be > 32
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
      // Tell libjsig jvm has finished setting signal handlers
4589       (*end_signal_setting)();
4590       libjsigdone = true;
4591     }
4592   }
4593 
4594   // Never ok to chain our SIGinterrupt
4595   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4596   set_signal_handler(os::Solaris::SIGasync(), true, true);
4597 
4598   if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig jvm has finished setting signal handlers
4600     (*end_signal_setting)();
4601   }
4602 
  // We don't activate the signal checker if libjsig is in place; we trust ourselves,
  // and if a user signal handler is installed (AllowUserSignalHandlers) all bets are off.
4605   // Log that signal checking is off only if -verbose:jni is specified.
4606   if (CheckJNICalls) {
4607     if (libjsig_is_loaded) {
4608       if (PrintJNIResolving) {
4609         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4610       }
4611       check_signals = false;
4612     }
4613     if (AllowUserSignalHandlers) {
4614       if (PrintJNIResolving) {
4615         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4616       }
4617       check_signals = false;
4618     }
4619   }
4620 }
4621 
4622 
4623 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
4624 
4625 const char * signames[] = {
4626   "SIG0",
4627   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4628   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4629   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4630   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4631   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4632   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4633   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4634   "SIGCANCEL", "SIGLOST"
4635 };
4636 
4637 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4638   if (0 < exception_code && exception_code <= SIGRTMAX) {
4639     // signal
4640     if (exception_code < sizeof(signames)/sizeof(const char*)) {
4641        jio_snprintf(buf, size, "%s", signames[exception_code]);
4642     } else {
4643        jio_snprintf(buf, size, "SIG%d", exception_code);
4644     }
4645     return buf;
4646   } else {
4647     return NULL;
4648   }
4649 }
4650 
4651 // (Static) wrappers for the new libthread API
4652 int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
4653 int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
4654 int_fnP_thread_t_i os::Solaris::_thr_setmutator;
4655 int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
4656 int_fnP_thread_t os::Solaris::_thr_continue_mutator;
4657 
4658 // (Static) wrapper for getisax(2) call.
4659 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4660 
4661 // (Static) wrappers for the liblgrp API
4662 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4663 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4664 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4665 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4666 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4667 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4668 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4669 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4670 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4671 
4672 // (Static) wrapper for meminfo() call.
4673 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4674 
4675 static address resolve_symbol_lazy(const char* name) {
4676   address addr = (address) dlsym(RTLD_DEFAULT, name);
4677   if(addr == NULL) {
4678     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4679     addr = (address) dlsym(RTLD_NEXT, name);
4680   }
4681   return addr;
4682 }
4683 
4684 static address resolve_symbol(const char* name) {
4685   address addr = resolve_symbol_lazy(name);
4686   if(addr == NULL) {
4687     fatal(dlerror());
4688   }
4689   return addr;
4690 }
4691 
4692 
4693 
4694 // isT2_libthread()
4695 //
4696 // Routine to determine if we are currently using the new T2 libthread.
4697 //
4698 // We determine if we are using T2 by reading /proc/self/lstatus and
4699 // looking for a thread with the ASLWP bit set.  If we find this status
4700 // bit set, we must assume that we are NOT using T2.  The T2 team
4701 // has approved this algorithm.
4702 //
4703 // We need to determine if we are running with the new T2 libthread
4704 // since setting native thread priorities is handled differently
4705 // when using this library.  All threads created using T2 are bound
4706 // threads. Calling thr_setprio is meaningless in this case.
4707 //
4708 bool isT2_libthread() {
4709   static prheader_t * lwpArray = NULL;
4710   static int lwpSize = 0;
4711   static int lwpFile = -1;
4712   lwpstatus_t * that;
4713   char lwpName [128];
4714   bool isT2 = false;
4715 
4716 #define ADR(x)  ((uintptr_t)(x))
4717 #define LWPINDEX(ary,ix)   ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))
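
// For example (values are illustrative): with pr_entsize == 0x200,
// LWPINDEX(lwpArray, 3) points 3 * 0x200 bytes past the prheader_t,
// i.e. at the fourth lwpstatus_t entry in the snapshot.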
4718 
4719   lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
4720   if (lwpFile < 0) {
4721       if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
4722       return false;
4723   }
4724   lwpSize = 16*1024;
4725   for (;;) {
4726     ::lseek64 (lwpFile, 0, SEEK_SET);
4727     lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
4728     if (::read(lwpFile, lwpArray, lwpSize) < 0) {
4729       if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
4730       break;
4731     }
4732     if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
4733        // We got a good snapshot - now iterate over the list.
4734       int aslwpcount = 0;
4735       for (int i = 0; i < lwpArray->pr_nent; i++ ) {
4736         that = LWPINDEX(lwpArray,i);
4737         if (that->pr_flags & PR_ASLWP) {
4738           aslwpcount++;
4739         }
4740       }
4741       if (aslwpcount == 0) isT2 = true;
4742       break;
4743     }
4744     lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
4745     FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);  // retry.
4746   }
4747 
4748   FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
4749   ::close (lwpFile);
4750   if (ThreadPriorityVerbose) {
4751     if (isT2) tty->print_cr("We are running with a T2 libthread\n");
4752     else tty->print_cr("We are not running with a T2 libthread\n");
4753   }
4754   return isT2;
4755 }
4756 
4757 
4758 void os::Solaris::libthread_init() {
4759   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4760 
4761   // Determine if we are running with the new T2 libthread
4762   os::Solaris::set_T2_libthread(isT2_libthread());
4763 
4764   lwp_priocntl_init();
4765 
4766   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4767   if(func == NULL) {
4768     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on a new enough OS (5.6 or
4770     // later) that it will have a new enough libthread.so.
4771     guarantee(func != NULL, "libthread.so is too old.");
4772   }
4773 
4774   // Initialize the new libthread getstate API wrappers
4775   func = resolve_symbol("thr_getstate");
4776   os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));
4777 
4778   func = resolve_symbol("thr_setstate");
4779   os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));
4780 
4781   func = resolve_symbol("thr_setmutator");
4782   os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));
4783 
4784   func = resolve_symbol("thr_suspend_mutator");
4785   os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
4786 
4787   func = resolve_symbol("thr_continue_mutator");
4788   os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
4789 
4790   int size;
4791   void (*handler_info_func)(address *, int *);
4792   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4793   handler_info_func(&handler_start, &size);
4794   handler_end = handler_start + size;
4795 }
4796 
4797 
4798 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4799 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4800 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4801 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4802 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4803 int os::Solaris::_mutex_scope = USYNC_THREAD;
4804 
4805 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4806 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4807 int_fnP_cond_tP os::Solaris::_cond_signal;
4808 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4809 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4810 int_fnP_cond_tP os::Solaris::_cond_destroy;
4811 int os::Solaris::_cond_scope = USYNC_THREAD;
4812 
4813 void os::Solaris::synchronization_init() {
4814   if(UseLWPSynchronization) {
4815     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4816     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4817     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4818     os::Solaris::set_mutex_init(lwp_mutex_init);
4819     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4820     os::Solaris::set_mutex_scope(USYNC_THREAD);
4821 
4822     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4823     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4824     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4825     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4826     os::Solaris::set_cond_init(lwp_cond_init);
4827     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4828     os::Solaris::set_cond_scope(USYNC_THREAD);
4829   }
4830   else {
4831     os::Solaris::set_mutex_scope(USYNC_THREAD);
4832     os::Solaris::set_cond_scope(USYNC_THREAD);
4833 
4834     if(UsePthreads) {
4835       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4836       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4837       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4838       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4839       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4840 
4841       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4842       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4843       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4844       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4845       os::Solaris::set_cond_init(pthread_cond_default_init);
4846       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4847     }
4848     else {
4849       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4850       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4851       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4852       os::Solaris::set_mutex_init(::mutex_init);
4853       os::Solaris::set_mutex_destroy(::mutex_destroy);
4854 
4855       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4856       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4857       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4858       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4859       os::Solaris::set_cond_init(::cond_init);
4860       os::Solaris::set_cond_destroy(::cond_destroy);
4861     }
4862   }
4863 }
4864 
4865 bool os::Solaris::liblgrp_init() {
4866   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4867   if (handle != NULL) {
4868     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4869     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4870     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4871     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4872     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4873     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4874     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4875     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4876                                        dlsym(handle, "lgrp_cookie_stale")));
4877 
4878     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4879     set_lgrp_cookie(c);
4880     return true;
4881   }
4882   return false;
4883 }
4884 
4885 void os::Solaris::misc_sym_init() {
4886   address func;
4887 
4888   // getisax
4889   func = resolve_symbol_lazy("getisax");
4890   if (func != NULL) {
4891     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4892   }
4893 
4894   // meminfo
4895   func = resolve_symbol_lazy("meminfo");
4896   if (func != NULL) {
4897     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4898   }
4899 }
4900 
4901 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4902   assert(_getisax != NULL, "_getisax not set");
4903   return _getisax(array, n);
4904 }
4905 
4906 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4907 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4908 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4909 
4910 void init_pset_getloadavg_ptr(void) {
4911   pset_getloadavg_ptr =
4912     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4913   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4914     warning("pset_getloadavg function not found");
4915   }
4916 }
4917 
4918 int os::Solaris::_dev_zero_fd = -1;
4919 
4920 // this is called _before_ the global arguments have been parsed
4921 void os::init(void) {
4922   _initial_pid = getpid();
4923 
4924   max_hrtime = first_hrtime = gethrtime();
4925 
4926   init_random(1234567);
4927 
4928   page_size = sysconf(_SC_PAGESIZE);
4929   if (page_size == -1)
4930     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4931                   strerror(errno)));
4932   init_page_sizes((size_t) page_size);
4933 
4934   Solaris::initialize_system_info();
4935 
4936   // Initialize misc. symbols as soon as possible, so we can use them
4937   // if we need them.
4938   Solaris::misc_sym_init();
4939 
4940   int fd = ::open("/dev/zero", O_RDWR);
4941   if (fd < 0) {
4942     fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4943   } else {
4944     Solaris::set_dev_zero_fd(fd);
4945 
4946     // Close on exec, child won't inherit.
4947     fcntl(fd, F_SETFD, FD_CLOEXEC);
4948   }
4949 
4950   clock_tics_per_sec = CLK_TCK;
4951 
4952   // check if dladdr1() exists; dladdr1 can provide more information than
4953   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4954   // and is available on linker patches for 5.7 and 5.8.
  // libdl.so must already have been loaded; this call is just an entry lookup
4956   void * hdl = dlopen("libdl.so", RTLD_NOW);
4957   if (hdl)
4958     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4959 
4960   // (Solaris only) this switches to calls that actually do locking.
4961   ThreadCritical::initialize();
4962 
4963   // main_thread points to the thread that created/loaded the JVM.
4964   main_thread = thr_self();
4965 
4966   // Constant minimum stack size allowed. It must be at least
4967   // the minimum of what the OS supports (thr_min_stack()), and
4968   // enough to allow the thread to get to user bytecode execution.
4969   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4970   // If the pagesize of the VM is greater than 8K determine the appropriate
4971   // number of initial guard pages.  The user can change this with the
4972   // command line arguments, if needed.
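  // For example (hypothetical values): with a 64K page size and a
  // StackShadowPages default of 3 (sized for 8K pages), the code below
  // computes round_to(3*8K, 64K)/64K == 1, i.e. a single 64K shadow page.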
4973   if (vm_page_size() > 8*K) {
4974     StackYellowPages = 1;
4975     StackRedPages = 1;
4976     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4977   }
4978 }
4979 
4980 // To install functions for atexit system call
4981 extern "C" {
4982   static void perfMemory_exit_helper() {
4983     perfMemory_exit();
4984   }
4985 }
4986 
4987 // this is called _after_ the global arguments have been parsed
4988 jint os::init_2(void) {
4989   // try to enable extended file IO ASAP, see 6431278
4990   os::Solaris::try_enable_extended_io();
4991 
4992   // Allocate a single page and mark it as readable for safepoint polling.  Also
4993   // use this first mmap call to check support for MAP_ALIGN.
4994   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4995                                                       page_size,
4996                                                       MAP_PRIVATE | MAP_ALIGN,
4997                                                       PROT_READ);
4998   if (polling_page == NULL) {
4999     has_map_align = false;
5000     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
5001                                                 PROT_READ);
5002   }
5003 
5004   os::set_polling_page(polling_page);
5005 
5006 #ifndef PRODUCT
5007   if( Verbose && PrintMiscellaneous )
5008     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
5009 #endif
5010 
5011   if (!UseMembar) {
5012     address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
5013     guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
5014     os::set_memory_serialize_page( mem_serialize_page );
5015 
5016 #ifndef PRODUCT
5017     if(Verbose && PrintMiscellaneous)
5018       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
5019 #endif
5020   }
5021 
5022   // Check minimum allowable stack size for thread creation and to initialize
5023   // the java system classes, including StackOverflowError - depends on page
5024   // size.  Add a page for compiler2 recursion in main thread.
5025   // Add in 2*BytesPerWord times page size to account for VM stack during
5026   // class initialization depending on 32 or 64 bit VM.
5027   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
5028             (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
5029                     2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
5030 
5031   size_t threadStackSizeInBytes = ThreadStackSize * K;
5032   if (threadStackSizeInBytes != 0 &&
5033     threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
5034     tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
5035                   os::Solaris::min_stack_allowed/K);
5036     return JNI_ERR;
5037   }
5038 
  // For 64kb pages there will be a 64kb page size, which makes
  // the usable default stack size quite a bit less.  Increase the
  // stack for 64kb (or any > 8kb) pages; this increases
  // virtual memory fragmentation (since we're not creating the
  // stack on a power of 2 boundary).  The real fix for this
  // should be to fix the guard page mechanism.
5045 
5046   if (vm_page_size() > 8*K) {
5047       threadStackSizeInBytes = (threadStackSizeInBytes != 0)
5048          ? threadStackSizeInBytes +
5049            ((StackYellowPages + StackRedPages) * vm_page_size())
5050          : 0;
5051       ThreadStackSize = threadStackSizeInBytes/K;
5052   }
5053 
5054   // Make the stack size a multiple of the page size so that
5055   // the yellow/red zones can be guarded.
5056   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
5057         vm_page_size()));
5058 
5059   Solaris::libthread_init();
5060 
5061   if (UseNUMA) {
5062     if (!Solaris::liblgrp_init()) {
5063       UseNUMA = false;
5064     } else {
5065       size_t lgrp_limit = os::numa_get_groups_num();
5066       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
5067       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
5068       FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
5069       if (lgrp_num < 2) {
5070         // There's only one locality group, disable NUMA.
5071         UseNUMA = false;
5072       }
5073     }
5074     if (!UseNUMA && ForceNUMA) {
5075       UseNUMA = true;
5076     }
5077   }
5078 
5079   Solaris::signal_sets_init();
5080   Solaris::init_signal_mem();
5081   Solaris::install_signal_handlers();
5082 
5083   if (libjsigversion < JSIG_VERSION_1_4_1) {
5084     Maxlibjsigsigs = OLDMAXSIGNUM;
5085   }
5086 
5087   // initialize synchronization primitives to use either thread or
5088   // lwp synchronization (controlled by UseLWPSynchronization)
5089   Solaris::synchronization_init();
5090 
5091   if (MaxFDLimit) {
    // Set the number of file descriptors to the maximum.  Print an error
    // if getrlimit/setrlimit fails, but continue regardless.
5094     struct rlimit nbr_files;
5095     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
5096     if (status != 0) {
5097       if (PrintMiscellaneous && (Verbose || WizardMode))
5098         perror("os::init_2 getrlimit failed");
5099     } else {
5100       nbr_files.rlim_cur = nbr_files.rlim_max;
5101       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
5102       if (status != 0) {
5103         if (PrintMiscellaneous && (Verbose || WizardMode))
5104           perror("os::init_2 setrlimit failed");
5105       }
5106     }
5107   }
5108 
  // Calculate theoretical max. size of Threads to guard against
  // artificial out-of-memory situations, where all available address-
5111   // space has been reserved by thread stacks. Default stack size is 1Mb.
5112   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
5113     JavaThread::stack_size_at_create() : (1*K*K);
5114   assert(pre_thread_stack_size != 0, "Must have a stack");
  // A 32-bit process on Solaris has at most 4Gb of user address space. Calculate
  // the thread limit at which we should start doing Virtual Memory banging;
  // currently that is when the threads will have used all but 200Mb of space.
5118   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
5119   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
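  // For example, with the 1Mb default stack size this works out to roughly
  // (4096Mb - 200Mb) / 1Mb == 3896 threads before banging would start.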
5120 
5121   // at-exit methods are called in the reverse order of their registration.
5122   // In Solaris 7 and earlier, atexit functions are called on return from
5123   // main or as a result of a call to exit(3C). There can be only 32 of
5124   // these functions registered and atexit() does not set errno. In Solaris
5125   // 8 and later, there is no limit to the number of functions registered
5126   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
5127   // functions are called upon dlclose(3DL) in addition to return from main
5128   // and exit(3C).
5129 
5130   if (PerfAllowAtExitRegistration) {
5131     // only register atexit functions if PerfAllowAtExitRegistration is set.
5132     // atexit functions can be delayed until process exit time, which
5133     // can be problematic for embedded VM situations. Embedded VMs should
5134     // call DestroyJavaVM() to assure that VM resources are released.
5135 
5136     // note: perfMemory_exit_helper atexit function may be removed in
5137     // the future if the appropriate cleanup code can be added to the
5138     // VM_Exit VMOperation's doit method.
5139     if (atexit(perfMemory_exit_helper) != 0) {
5140       warning("os::init2 atexit(perfMemory_exit_helper) failed");
5141     }
5142   }
5143 
5144   // Init pset_loadavg function pointer
5145   init_pset_getloadavg_ptr();
5146 
5147   return JNI_OK;
5148 }
5149 
5150 // Mark the polling page as unreadable
5151 void os::make_polling_page_unreadable(void) {
5152   if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
5153     fatal("Could not disable polling page");
5154 };
5155 
5156 // Mark the polling page as readable
5157 void os::make_polling_page_readable(void) {
5158   if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
5159     fatal("Could not enable polling page");
5160 };
5161 
5162 // OS interface.
5163 
5164 bool os::check_heap(bool force) { return true; }
5165 
5166 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
5167 static vsnprintf_t sol_vsnprintf = NULL;
5168 
5169 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
5170   if (!sol_vsnprintf) {
    // search for the named symbol in the objects that were loaded after libjvm
5172     void* where = RTLD_NEXT;
5173     if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
5174         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
5175     if (!sol_vsnprintf){
      // search for the named symbol in the objects that were loaded before libjvm
5177       where = RTLD_DEFAULT;
5178       if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
5179         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
5180       assert(sol_vsnprintf != NULL, "vsnprintf not found");
5181     }
5182   }
5183   return (*sol_vsnprintf)(buf, count, fmt, argptr);
5184 }
5185 
5186 
5187 // Is a (classpath) directory empty?
5188 bool os::dir_is_empty(const char* path) {
5189   DIR *dir = NULL;
5190   struct dirent *ptr;
5191 
5192   dir = opendir(path);
5193   if (dir == NULL) return true;
5194 
5195   /* Scan the directory */
5196   bool result = true;
  while (result && (ptr = readdir(dir)) != NULL) {
5198     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
5199       result = false;
5200     }
5201   }
5202   closedir(dir);
5203   return result;
5204 }
5205 
5206 // This code originates from JDK's sysOpen and open64_w
5207 // from src/solaris/hpi/src/system_md.c
5208 
5209 #ifndef O_DELETE
5210 #define O_DELETE 0x10000
5211 #endif
5212 
5213 // Open a file. Unlink the file immediately after open returns
5214 // if the specified oflag has the O_DELETE flag set.
5215 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
5216 
5217 int os::open(const char *path, int oflag, int mode) {
5218   if (strlen(path) > MAX_PATH - 1) {
5219     errno = ENAMETOOLONG;
5220     return -1;
5221   }
5222   int fd;
5223   int o_delete = (oflag & O_DELETE);
5224   oflag = oflag & ~O_DELETE;
5225 
5226   fd = ::open64(path, oflag, mode);
5227   if (fd == -1) return -1;
5228 
  // If the open succeeded, the file might still be a directory
5230   {
5231     struct stat64 buf64;
5232     int ret = ::fstat64(fd, &buf64);
5233     int st_mode = buf64.st_mode;
5234 
5235     if (ret != -1) {
5236       if ((st_mode & S_IFMT) == S_IFDIR) {
5237         errno = EISDIR;
5238         ::close(fd);
5239         return -1;
5240       }
5241     } else {
5242       ::close(fd);
5243       return -1;
5244     }
5245   }
5246     /*
5247      * 32-bit Solaris systems suffer from:
5248      *
5249      * - an historical default soft limit of 256 per-process file
5250      *   descriptors that is too low for many Java programs.
5251      *
5252      * - a design flaw where file descriptors created using stdio
5253      *   fopen must be less than 256, _even_ when the first limit above
5254      *   has been raised.  This can cause calls to fopen (but not calls to
5255      *   open, for example) to fail mysteriously, perhaps in 3rd party
5256      *   native code (although the JDK itself uses fopen).  One can hardly
5257      *   criticize them for using this most standard of all functions.
5258      *
     * We attempt to make everything work anyway by:
5260      *
5261      * - raising the soft limit on per-process file descriptors beyond
5262      *   256
5263      *
5264      * - As of Solaris 10u4, we can request that Solaris raise the 256
5265      *   stdio fopen limit by calling function enable_extended_FILE_stdio.
5266      *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
5267      *
5268      * - If we are stuck on an old (pre 10u4) Solaris system, we can
5269      *   workaround the bug by remapping non-stdio file descriptors below
5270      *   256 to ones beyond 256, which is done below.
5271      *
5272      * See:
5273      * 1085341: 32-bit stdio routines should support file descriptors >255
5274      * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
5275      * 6431278: Netbeans crash on 32 bit Solaris: need to call
5276      *          enable_extended_FILE_stdio() in VM initialisation
5277      * Giri Mandalika's blog
5278      * http://technopark02.blogspot.com/2005_05_01_archive.html
5279      */
5280 #ifndef  _LP64
5281      if ((!enabled_extended_FILE_stdio) && fd < 256) {
5282          int newfd = ::fcntl(fd, F_DUPFD, 256);
5283          if (newfd != -1) {
5284              ::close(fd);
5285              fd = newfd;
5286          }
5287      }
5288 #endif // 32-bit Solaris
5289     /*
5290      * All file descriptors that are opened in the JVM and not
5291      * specifically destined for a subprocess should have the
5292      * close-on-exec flag set.  If we don't set it, then careless 3rd
5293      * party native code might fork and exec without closing all
5294      * appropriate file descriptors (e.g. as we do in closeDescriptors in
5295      * UNIXProcess.c), and this in turn might:
5296      *
5297      * - cause end-of-file to fail to be detected on some file
5298      *   descriptors, resulting in mysterious hangs, or
5299      *
5300      * - might cause an fopen in the subprocess to fail on a system
5301      *   suffering from bug 1085341.
5302      *
5303      * (Yes, the default setting of the close-on-exec flag is a Unix
5304      * design flaw)
5305      *
5306      * See:
5307      * 1085341: 32-bit stdio routines should support file descriptors >255
5308      * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
5309      * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
5310      */
5311 #ifdef FD_CLOEXEC
5312     {
5313         int flags = ::fcntl(fd, F_GETFD);
5314         if (flags != -1)
5315             ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
5316     }
5317 #endif
5318 
5319   if (o_delete != 0) {
5320     ::unlink(path);
5321   }
5322   return fd;
5323 }
5324 
5325 // create binary file, rewriting existing file if required
5326 int os::create_binary_file(const char* path, bool rewrite_existing) {
5327   int oflags = O_WRONLY | O_CREAT;
5328   if (!rewrite_existing) {
5329     oflags |= O_EXCL;
5330   }
5331   return ::open64(path, oflags, S_IREAD | S_IWRITE);
5332 }
5333 
5334 // return current position of file pointer
5335 jlong os::current_file_offset(int fd) {
5336   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
5337 }
5338 
5339 // move file pointer to the specified offset
5340 jlong os::seek_to_file_offset(int fd, jlong offset) {
5341   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
5342 }
5343 
5344 jlong os::lseek(int fd, jlong offset, int whence) {
5345   return (jlong) ::lseek64(fd, offset, whence);
5346 }
5347 
5348 char * os::native_path(char *path) {
5349   return path;
5350 }
5351 
5352 int os::ftruncate(int fd, jlong length) {
5353   return ::ftruncate64(fd, length);
5354 }
5355 
5356 int os::fsync(int fd)  {
5357   RESTARTABLE_RETURN_INT(::fsync(fd));
5358 }
5359 
5360 int os::available(int fd, jlong *bytes) {
5361   jlong cur, end;
5362   int mode;
5363   struct stat64 buf64;
5364 
5365   if (::fstat64(fd, &buf64) >= 0) {
5366     mode = buf64.st_mode;
5367     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
5368       /*
5369       * XXX: is the following call interruptible? If so, this might
5370       * need to go through the INTERRUPT_IO() wrapper as for other
5371       * blocking, interruptible calls in this file.
5372       */
5373       int n,ioctl_return;
5374 
5375       INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n),ioctl_return,os::Solaris::clear_interrupted);
      if (ioctl_return >= 0) {
        *bytes = n;
        return 1;
5379       }
5380     }
5381   }
5382   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
5383     return 0;
5384   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
5385     return 0;
5386   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
5387     return 0;
5388   }
5389   *bytes = end - cur;
5390   return 1;
5391 }
5392 
5393 // Map a block of memory.
5394 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
5395                      char *addr, size_t bytes, bool read_only,
5396                      bool allow_exec) {
5397   int prot;
5398   int flags;
5399 
5400   if (read_only) {
5401     prot = PROT_READ;
5402     flags = MAP_SHARED;
5403   } else {
5404     prot = PROT_READ | PROT_WRITE;
5405     flags = MAP_PRIVATE;
5406   }
5407 
5408   if (allow_exec) {
5409     prot |= PROT_EXEC;
5410   }
5411 
5412   if (addr != NULL) {
5413     flags |= MAP_FIXED;
5414   }
5415 
5416   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5417                                      fd, file_offset);
5418   if (mapped_address == MAP_FAILED) {
5419     return NULL;
5420   }
5421   return mapped_address;
5422 }
5423 
5424 
5425 // Remap a block of memory.
5426 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5427                        char *addr, size_t bytes, bool read_only,
5428                        bool allow_exec) {
5429   // same as map_memory() on this OS
5430   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5431                         allow_exec);
5432 }
5433 
5434 
5435 // Unmap a block of memory.
5436 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5437   return munmap(addr, bytes) == 0;
5438 }
5439 
5440 void os::pause() {
5441   char filename[MAX_PATH];
5442   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5443     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
5444   } else {
5445     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5446   }
5447 
5448   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5449   if (fd != -1) {
5450     struct stat buf;
5451     ::close(fd);
5452     while (::stat(filename, &buf) == 0) {
5453       (void)::poll(NULL, 0, 100);
5454     }
5455   } else {
5456     jio_fprintf(stderr,
5457       "Could not open pause file '%s', continuing immediately.\n", filename);
5458   }
5459 }
5460 
5461 #ifndef PRODUCT
5462 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5463 // Turn this on if you need to trace synch operations.
5464 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5465 // and call record_synch_enable and record_synch_disable
5466 // around the computation of interest.
5467 
5468 void record_synch(char* name, bool returning);  // defined below
5469 
5470 class RecordSynch {
5471   char* _name;
5472  public:
5473   RecordSynch(char* name) :_name(name)
5474                  { record_synch(_name, false); }
5475   ~RecordSynch() { record_synch(_name,   true);  }
5476 };
5477 
5478 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
5479 extern "C" ret name params {                                    \
5480   typedef ret name##_t params;                                  \
5481   static name##_t* implem = NULL;                               \
5482   static int callcount = 0;                                     \
5483   if (implem == NULL) {                                         \
5484     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
5485     if (implem == NULL)  fatal(dlerror());                      \
5486   }                                                             \
5487   ++callcount;                                                  \
5488   RecordSynch _rs(#name);                                       \
5489   inner;                                                        \
5490   return implem args;                                           \
5491 }
5492 // in dbx, examine callcounts this way:
5493 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
5494 
5495 #define CHECK_POINTER_OK(p) \
5496   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
5497 #define CHECK_MU \
5498   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5499 #define CHECK_CV \
5500   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5501 #define CHECK_P(p) \
5502   if (!CHECK_POINTER_OK(p))  fatal(false,  "Pointer must be in C heap only.");
5503 
5504 #define CHECK_MUTEX(mutex_op) \
5505 CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5506 
5507 CHECK_MUTEX(   mutex_lock)
5508 CHECK_MUTEX(  _mutex_lock)
5509 CHECK_MUTEX( mutex_unlock)
5510 CHECK_MUTEX(_mutex_unlock)
5511 CHECK_MUTEX( mutex_trylock)
5512 CHECK_MUTEX(_mutex_trylock)
5513 
5514 #define CHECK_COND(cond_op) \
5515 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);
5516 
5517 CHECK_COND( cond_wait);
5518 CHECK_COND(_cond_wait);
5519 CHECK_COND(_cond_wait_cancel);
5520 
5521 #define CHECK_COND2(cond_op) \
5522 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);
5523 
5524 CHECK_COND2( cond_timedwait);
5525 CHECK_COND2(_cond_timedwait);
5526 CHECK_COND2(_cond_timedwait_cancel);
5527 
5528 // do the _lwp_* versions too
5529 #define mutex_t lwp_mutex_t
5530 #define cond_t  lwp_cond_t
5531 CHECK_MUTEX(  _lwp_mutex_lock)
5532 CHECK_MUTEX(  _lwp_mutex_unlock)
5533 CHECK_MUTEX(  _lwp_mutex_trylock)
5534 CHECK_MUTEX( __lwp_mutex_lock)
5535 CHECK_MUTEX( __lwp_mutex_unlock)
5536 CHECK_MUTEX( __lwp_mutex_trylock)
5537 CHECK_MUTEX(___lwp_mutex_lock)
5538 CHECK_MUTEX(___lwp_mutex_unlock)
5539 
5540 CHECK_COND(  _lwp_cond_wait);
5541 CHECK_COND( __lwp_cond_wait);
5542 CHECK_COND(___lwp_cond_wait);
5543 
5544 CHECK_COND2(  _lwp_cond_timedwait);
5545 CHECK_COND2( __lwp_cond_timedwait);
5546 #undef mutex_t
5547 #undef cond_t
5548 
5549 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5550 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5551 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5552 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5553 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5554 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5555 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5556 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5557 
5558 
5559 // recording machinery:
5560 
5561 enum { RECORD_SYNCH_LIMIT = 200 };
5562 char* record_synch_name[RECORD_SYNCH_LIMIT];
5563 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5564 bool record_synch_returning[RECORD_SYNCH_LIMIT];
5565 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5566 int record_synch_count = 0;
5567 bool record_synch_enabled = false;
5568 
5569 // in dbx, examine recorded data this way:
5570 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5571 
5572 void record_synch(char* name, bool returning) {
5573   if (record_synch_enabled) {
5574     if (record_synch_count < RECORD_SYNCH_LIMIT) {
5575       record_synch_name[record_synch_count] = name;
5576       record_synch_returning[record_synch_count] = returning;
5577       record_synch_thread[record_synch_count] = thr_self();
5578       record_synch_arg0ptr[record_synch_count] = &name;
5579       record_synch_count++;
5580     }
5581     // put more checking code here:
5582     // ...
5583   }
5584 }
5585 
5586 void record_synch_enable() {
5587   // start collecting trace data, if not already doing so
5588   if (!record_synch_enabled)  record_synch_count = 0;
5589   record_synch_enabled = true;
5590 }
5591 
5592 void record_synch_disable() {
5593   // stop collecting trace data
5594   record_synch_enabled = false;
5595 }
5596 
5597 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5598 #endif // PRODUCT
5599 
5600 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5601 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5602                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
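
// thr_time_off is the byte offset of pr_utime within prusage_t, and
// thr_time_size spans the fields from pr_utime up to (but not including)
// pr_ttime; thread_cpu_time() below pread()s exactly that window so that
// both pr_utime and pr_stime are filled in.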
5603 
5604 
5605 // JVMTI & JVM monitoring and management support
5606 // The thread_cpu_time() and current_thread_cpu_time() are only
5607 // supported if is_thread_cpu_time_supported() returns true.
5608 // They are not supported on Solaris T1.
5609 
5610 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5611 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5612 // of a thread.
5613 //
5614 // current_thread_cpu_time() and thread_cpu_time(Thread *)
// return the fast estimate available on the platform.
5616 
5617 // hrtime_t gethrvtime() return value includes
5618 // user time but does not include system time
5619 jlong os::current_thread_cpu_time() {
5620   return (jlong) gethrvtime();
5621 }
5622 
5623 jlong os::thread_cpu_time(Thread *thread) {
5624   // return user level CPU time only to be consistent with
5625   // what current_thread_cpu_time returns.
5626   // thread_cpu_time_info() must be changed if this changes
5627   return os::thread_cpu_time(thread, false /* user time only */);
5628 }
5629 
5630 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5631   if (user_sys_cpu_time) {
5632     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5633   } else {
5634     return os::current_thread_cpu_time();
5635   }
5636 }
5637 
5638 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5639   char proc_name[64];
5640   int count;
5641   prusage_t prusage;
5642   jlong lwp_time;
5643   int fd;
5644 
5645   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5646                      getpid(),
5647                      thread->osthread()->lwp_id());
5648   fd = ::open(proc_name, O_RDONLY);
5649   if ( fd == -1 ) return -1;
5650 
5651   do {
5652     count = ::pread(fd,
5653                   (void *)&prusage.pr_utime,
5654                   thr_time_size,
5655                   thr_time_off);
5656   } while (count < 0 && errno == EINTR);
5657   ::close(fd);
5658   if ( count < 0 ) return -1;
5659 
5660   if (user_sys_cpu_time) {
5661     // user + system CPU time
5662     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5663                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5664                  (jlong)prusage.pr_stime.tv_nsec +
5665                  (jlong)prusage.pr_utime.tv_nsec;
5666   } else {
5667     // user level CPU time only
5668     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5669                 (jlong)prusage.pr_utime.tv_nsec;
5670   }
5671 
5672   return(lwp_time);
5673 }
5674 
5675 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5676   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5677   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5678   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5679   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5680 }
5681 
5682 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5683   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5684   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5685   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5686   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5687 }
5688 
5689 bool os::is_thread_cpu_time_supported() {
5690   if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
5691     return true;
5692   } else {
5693     return false;
5694   }
5695 }
5696 
5697 // System loadavg support.  Returns -1 if load average cannot be obtained.
5698 // Return the load average for our processor set if the primitive exists
5699 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
5700 int os::loadavg(double loadavg[], int nelem) {
5701   if (pset_getloadavg_ptr != NULL) {
5702     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5703   } else {
5704     return ::getloadavg(loadavg, nelem);
5705   }
5706 }
5707 
5708 //---------------------------------------------------------------------------------
5709 
5710 bool os::find(address addr, outputStream* st) {
5711   Dl_info dlinfo;
5712   memset(&dlinfo, 0, sizeof(dlinfo));
5713   if (dladdr(addr, &dlinfo) != 0) {
5714     st->print(PTR_FORMAT ": ", addr);
5715     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5716       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5717     } else if (dlinfo.dli_fbase != NULL)
5718       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5719     else
5720       st->print("<absolute address>");
5721     if (dlinfo.dli_fname != NULL) {
5722       st->print(" in %s", dlinfo.dli_fname);
5723     }
5724     if (dlinfo.dli_fbase != NULL) {
5725       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5726     }
5727     st->cr();
5728 
5729     if (Verbose) {
5730       // decode some bytes around the PC
5731       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5732       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5733       address       lowest = (address) dlinfo.dli_sname;
5734       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
5735       if (begin < lowest)  begin = lowest;
5736       Dl_info dlinfo2;
5737       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5738           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
5739         end = (address) dlinfo2.dli_saddr;
5740       Disassembler::decode(begin, end, st);
5741     }
5742     return true;
5743   }
5744   return false;
5745 }
5746 
5747 // Following function has been added to support HotSparc's libjvm.so running
5748 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
5749 // src/solaris/hpi/native_threads in the EVM codebase.
5750 //
5751 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5752 // libraries and should thus be removed. We will leave it behind for a while
5753 // until we no longer need to be able to run on top of 1.3.0 Solaris production
5754 // JDK. See 4341971.
5755 
5756 #define STACK_SLACK 0x800
5757 
5758 extern "C" {
5759   intptr_t sysThreadAvailableStackWithSlack() {
5760     stack_t st;
5761     intptr_t retval, stack_top;
5762     retval = thr_stksegment(&st);
5763     assert(retval == 0, "incorrect return value from thr_stksegment");
5764     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5765     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
5766     stack_top=(intptr_t)st.ss_sp-st.ss_size;
5767     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5768   }
5769 }
5770 
5771 // ObjectMonitor park-unpark infrastructure ...
5772 //
5773 // We implement Solaris and Linux PlatformEvents with the
5774 // obvious condvar-mutex-flag triple.
5775 // Another alternative that works quite well is pipes:
5776 // Each PlatformEvent consists of a pipe-pair.
5777 // The thread associated with the PlatformEvent
5778 // calls park(), which reads from the input end of the pipe.
5779 // Unpark() writes into the other end of the pipe.
5780 // The write-side of the pipe must be set NDELAY.
5781 // Unfortunately pipes consume a large # of handles.
5782 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5783 // Using pipes for the 1st few threads might be workable, however.
5784 //
5785 // park() is permitted to return spuriously.
5786 // Callers of park() should wrap the call to park() in
5787 // an appropriate loop.  A litmus test for the correct
5788 // usage of park is the following: if park() were modified
5789 // to immediately return 0 your code should still work,
5790 // albeit degenerating to a spin loop.
5791 //
5792 // An interesting optimization for park() is to use a trylock()
5793 // to attempt to acquire the mutex.  If the trylock() fails
5794 // then we know that a concurrent unpark() operation is in-progress.
5795 // in that case the park() code could simply set _count to 0
5796 // and return immediately.  The subsequent park() operation *might*
5797 // return immediately.  That's harmless as the caller of park() is
5798 // expected to loop.  By using trylock() we will have avoided a
5799 // context switch caused by contention on the per-thread mutex.
5800 //
5801 // TODO-FIXME:
5802 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
5803 //     objectmonitor implementation.
5804 // 2.  Collapse the JSR166 parker event, and the
5805 //     objectmonitor ParkEvent into a single "Event" construct.
5806 // 3.  In park() and unpark() add:
5807 //     assert (Thread::current() == AssociatedWith).
5808 // 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
5809 //     1-out-of-N park() operations will return immediately.
5810 //
5811 // _Event transitions in park()
5812 //   -1 => -1 : illegal
5813 //    1 =>  0 : pass - return immediately
5814 //    0 => -1 : block
5815 //
5816 // _Event serves as a restricted-range semaphore.
5817 //
5818 // Another possible encoding of _Event would be with
5819 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5820 //
5821 // TODO-FIXME: add DTRACE probes for:
5822 // 1.   Tx parks
5823 // 2.   Ty unparks Tx
5824 // 3.   Tx resumes from park
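
// Illustrative sketch of the caller-side contract described above (the
// 'ready' flag and 'ev' pointer are hypothetical, not VM code): because
// park() may return spuriously, correct callers re-check their own condition
// in a loop:
//
//   while (!ready) {
//     ev->park();     // may wake early; the loop re-checks 'ready'
//   }
//
// If park() were changed to return immediately the loop above still works,
// merely degenerating into a spin; this is the litmus test mentioned above.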
5825 
5826 
5827 // value determined through experimentation
5828 #define ROUNDINGFIX 11
5829 
5830 // utility to compute the abstime argument to timedwait.
5831 // TODO-FIXME: switch from compute_abstime() to unpackTime().
5832 
5833 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5834   // millis is the relative timeout time
5835   // abstime will be the absolute timeout time
5836   if (millis < 0)  millis = 0;
5837   struct timeval now;
5838   int status = gettimeofday(&now, NULL);
5839   assert(status == 0, "gettimeofday");
5840   jlong seconds = millis / 1000;
5841   jlong max_wait_period;
5842 
5843   if (UseLWPSynchronization) {
5844     // forward port of fix for 4275818 (not sleeping long enough)
5845     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5846     // _lwp_cond_timedwait() used a round_down algorithm rather
5847     // than a round_up. For millis less than our roundfactor
5848     // it rounded down to 0 which doesn't meet the spec.
5849     // For millis > roundfactor we may return a bit sooner, but
5850     // since we cannot accurately identify the patch level and
5851     // this has already been fixed in Solaris 8 and 9, we will
5852     // leave it alone rather than always rounding down.
5853 
5854     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
5855     // It appears that when we go directly through Solaris _lwp_cond_timedwait()
5856     // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6.
5857     max_wait_period = 21000000;
5858   } else {
5859     max_wait_period = 50000000;
5860   }
5861   millis %= 1000;
5862   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5863      seconds = max_wait_period;
5864   }
5865   abstime->tv_sec = now.tv_sec  + seconds;
5866   long       usec = now.tv_usec + millis * 1000;
5867   if (usec >= 1000000) {
5868     abstime->tv_sec += 1;
5869     usec -= 1000000;
5870   }
5871   abstime->tv_nsec = usec * 1000;
5872   return abstime;
5873 }
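
// Worked example of the conversion above (illustrative values):
//   now = { tv_sec = 100, tv_usec = 600000 }, millis = 1500
//   seconds = 1500 / 1000 = 1;  millis %= 1000  -> 500
//   usec = 600000 + 500 * 1000 = 1100000 >= 1000000, so carry one second:
//   abstime = { tv_sec = 100 + 1 + 1 = 102, tv_nsec = 100000 * 1000 = 100000000 }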
5874 
5875 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
5876 // Conceptually TryPark() should be equivalent to park(0).
5877 
5878 int os::PlatformEvent::TryPark() {
5879   for (;;) {
5880     const int v = _Event ;
5881     guarantee ((v == 0) || (v == 1), "invariant") ;
5882     if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
5883   }
5884 }
5885 
5886 void os::PlatformEvent::park() {           // AKA: down()
5887   // Invariant: Only the thread associated with the Event/PlatformEvent
5888   // may call park().
5889   int v ;
5890   for (;;) {
5891       v = _Event ;
5892       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5893   }
5894   guarantee (v >= 0, "invariant") ;
5895   if (v == 0) {
5896      // Do this the hard way by blocking ...
5897      // See http://monaco.sfbay/detail.jsf?cr=5094058.
5898      // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5899      // Only for SPARC >= V8PlusA
5900 #if defined(__sparc) && defined(COMPILER2)
5901      if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5902 #endif
5903      int status = os::Solaris::mutex_lock(_mutex);
5904      assert_status(status == 0, status,  "mutex_lock");
5905      guarantee (_nParked == 0, "invariant") ;
5906      ++ _nParked ;
5907      while (_Event < 0) {
5908         // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5909         // Treat this the same as if the wait was interrupted
5910         // With usr/lib/lwp going to kernel, always handle ETIME
5911         status = os::Solaris::cond_wait(_cond, _mutex);
5912         if (status == ETIME) status = EINTR ;
5913         assert_status(status == 0 || status == EINTR, status, "cond_wait");
5914      }
5915      -- _nParked ;
5916      _Event = 0 ;
5917      status = os::Solaris::mutex_unlock(_mutex);
5918      assert_status(status == 0, status, "mutex_unlock");
5919      // Paranoia to ensure our locked and lock-free paths interact
5920      // correctly with each other.
5921      OrderAccess::fence();
5922   }
5923 }
5924 
5925 int os::PlatformEvent::park(jlong millis) {
5926   guarantee (_nParked == 0, "invariant") ;
5927   int v ;
5928   for (;;) {
5929       v = _Event ;
5930       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5931   }
5932   guarantee (v >= 0, "invariant") ;
5933   if (v != 0) return OS_OK ;
5934 
5935   int ret = OS_TIMEOUT;
5936   timestruc_t abst;
5937   compute_abstime (&abst, millis);
5938 
5939   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5940   // For Solaris SPARC set fprs.FEF=0 prior to parking.
5941   // Only for SPARC >= V8PlusA
5942 #if defined(__sparc) && defined(COMPILER2)
5943  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5944 #endif
5945   int status = os::Solaris::mutex_lock(_mutex);
5946   assert_status(status == 0, status, "mutex_lock");
5947   guarantee (_nParked == 0, "invariant") ;
5948   ++ _nParked ;
5949   while (_Event < 0) {
5950      int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5951      assert_status(status == 0 || status == EINTR ||
5952                    status == ETIME || status == ETIMEDOUT,
5953                    status, "cond_timedwait");
5954      if (!FilterSpuriousWakeups) break ;                // previous semantics
5955      if (status == ETIME || status == ETIMEDOUT) break ;
5956      // We consume and ignore EINTR and spurious wakeups.
5957   }
5958   -- _nParked ;
5959   if (_Event >= 0) ret = OS_OK ;
5960   _Event = 0 ;
5961   status = os::Solaris::mutex_unlock(_mutex);
5962   assert_status(status == 0, status, "mutex_unlock");
5963   // Paranoia to ensure our locked and lock-free paths interact
5964   // correctly with each other.
5965   OrderAccess::fence();
5966   return ret;
5967 }
5968 
5969 void os::PlatformEvent::unpark() {
5970   // Transitions for _Event:
5971   //    0 :=> 1
5972   //    1 :=> 1
5973   //   -1 :=> either 0 or 1; must signal target thread
5974   //          That is, we can safely transition _Event from -1 to either
5975   //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
5976   //          unpark() calls.
5977   // See also: "Semaphores in Plan 9" by Mullender & Cox
5978   //
5979   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5980   // that it will take two back-to-back park() calls for the owning
5981   // thread to block. This has the benefit of forcing a spurious return
5982   // from the first park() call after an unpark() call which will help
5983   // shake out uses of park() and unpark() without condition variables.
5984 
5985   if (Atomic::xchg(1, &_Event) >= 0) return;
5986 
5987   // If the thread associated with the event was parked, wake it.
5988   // Wait for the thread assoc with the PlatformEvent to vacate.
5989   int status = os::Solaris::mutex_lock(_mutex);
5990   assert_status(status == 0, status, "mutex_lock");
5991   int AnyWaiters = _nParked;
5992   status = os::Solaris::mutex_unlock(_mutex);
5993   assert_status(status == 0, status, "mutex_unlock");
5994   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5995   if (AnyWaiters != 0) {
5996     // We intentionally signal *after* dropping the lock
5997     // to avoid a common class of futile wakeups.
5998     status = os::Solaris::cond_signal(_cond);
5999     assert_status(status == 0, status, "cond_signal");
6000   }
6001 }
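
// Illustrative event sequence (hypothetical threads T1/T2 and event 'ev',
// not VM code) showing the restricted-range semaphore behaviour of _Event
// described above:
//
//   T2: ev->unpark();    // _Event: 0 -> 1, permit stored, no signal needed
//   T1: ev->park();      // _Event: 1 -> 0, returns immediately
//   T1: ev->park();      // _Event: 0 -> -1, blocks until the next unpark()
//   T2: ev->unpark();    // _Event: -1 -> 1, signals T1, which resets _Event to 0 on wakeup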
6002 
6003 // JSR166
6004 // -------------------------------------------------------
6005 
6006 /*
6007  * The solaris and linux implementations of park/unpark are fairly
6008  * conservative for now, but can be improved. They currently use a
6009  * mutex/condvar pair, plus _counter.
6010  * Park decrements _counter if > 0, else does a condvar wait.  Unpark
6011  * sets _counter to 1 and signals the condvar.  Only one thread ever waits
6012  * on the condvar. Contention seen when trying to park implies that someone
6013  * is unparking you, so don't wait. And spurious returns are fine, so there
6014  * is no need to track notifications.
6015  */
6016 
6017 #define MAX_SECS 100000000
6018 /*
6019  * This code is common to linux and solaris and will be moved to a
6020  * common place in dolphin.
6021  *
6022  * The passed in time value is either a relative time in nanoseconds
6023  * or an absolute time in milliseconds. Either way it has to be unpacked
6024  * into suitable seconds and nanoseconds components and stored in the
6025  * given timespec structure.
6026  * Given time is a 64-bit value and the time_t used in the timespec is only
6027  * a signed-32-bit value (except on 64-bit Linux) we have to watch for
6028  * overflow if times far in the future are given. Further, on Solaris versions
6029  * prior to 10 there is a restriction (see cond_timedwait) that the specified
6030  * number of seconds, in abstime, is less than current_time + 100,000,000.
6031  * As it will be 28 years before "now + 100000000" overflows, we can
6032  * ignore overflow and just impose a hard-limit on seconds using the value
6033  * of "now + 100,000,000". This places a limit on the timeout of about 3.17
6034  * years from "now".
6035  */
6036 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
6037   assert (time > 0, "convertTime");
6038 
6039   struct timeval now;
6040   int status = gettimeofday(&now, NULL);
6041   assert(status == 0, "gettimeofday");
6042 
6043   time_t max_secs = now.tv_sec + MAX_SECS;
6044 
6045   if (isAbsolute) {
6046     jlong secs = time / 1000;
6047     if (secs > max_secs) {
6048       absTime->tv_sec = max_secs;
6049     }
6050     else {
6051       absTime->tv_sec = secs;
6052     }
6053     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
6054   }
6055   else {
6056     jlong secs = time / NANOSECS_PER_SEC;
6057     if (secs >= MAX_SECS) {
6058       absTime->tv_sec = max_secs;
6059       absTime->tv_nsec = 0;
6060     }
6061     else {
6062       absTime->tv_sec = now.tv_sec + secs;
6063       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
6064       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
6065         absTime->tv_nsec -= NANOSECS_PER_SEC;
6066         ++absTime->tv_sec; // note: this must be <= max_secs
6067       }
6068     }
6069   }
6070   assert(absTime->tv_sec >= 0, "tv_sec < 0");
6071   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
6072   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
6073   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
6074 }
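
// Worked example of the relative-time path above (illustrative values):
//   time = 2500000 ns (2.5 ms), now = { tv_sec = 100, tv_usec = 999900 }
//   secs = 0, so: tv_nsec = 2500000 + 999900 * 1000 = 1002400000 >= NANOSECS_PER_SEC
//   after the carry: absTime = { tv_sec = 101, tv_nsec = 2400000 }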
6075 
6076 void Parker::park(bool isAbsolute, jlong time) {
6077   // Ideally we'd do something useful while spinning, such
6078   // as calling unpackTime().
6079 
6080   // Optional fast-path check:
6081   // Return immediately if a permit is available.
6082   // We depend on Atomic::xchg() having full barrier semantics
6083   // since we are doing a lock-free update to _counter.
6084   if (Atomic::xchg(0, &_counter) > 0) return;
6085 
6086   // Optional fast-exit: Check interrupt before trying to wait
6087   Thread* thread = Thread::current();
6088   assert(thread->is_Java_thread(), "Must be JavaThread");
6089   JavaThread *jt = (JavaThread *)thread;
6090   if (Thread::is_interrupted(thread, false)) {
6091     return;
6092   }
6093 
6094   // First, demultiplex/decode time arguments
6095   timespec absTime;
6096   if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
6097     return;
6098   }
6099   if (time > 0) {
6100     // Warning: this code might be exposed to the old Solaris time
6101     // round-down bugs.  Grep "ROUNDINGFIX" above for details.
6102     unpackTime(&absTime, isAbsolute, time);
6103   }
6104 
6105   // Enter safepoint region
6106   // Beware of deadlocks such as 6317397.
6107   // The per-thread Parker:: _mutex is a classic leaf-lock.
6108   // In particular a thread must never block on the Threads_lock while
6109   // holding the Parker:: mutex.  If safepoints are pending both the
6110   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
6111   ThreadBlockInVM tbivm(jt);
6112 
6113   // Don't wait if we cannot get the lock, since interference arises from
6114   // unblocking.  Also, check the interrupt state before trying to wait.
6115   if (Thread::is_interrupted(thread, false) ||
6116       os::Solaris::mutex_trylock(_mutex) != 0) {
6117     return;
6118   }
6119 
6120   int status ;
6121 
6122   if (_counter > 0)  { // no wait needed
6123     _counter = 0;
6124     status = os::Solaris::mutex_unlock(_mutex);
6125     assert (status == 0, "invariant") ;
6126     // Paranoia to ensure our locked and lock-free paths interact
6127     // correctly with each other and Java-level accesses.
6128     OrderAccess::fence();
6129     return;
6130   }
6131 
6132 #ifdef ASSERT
6133   // Don't catch signals while blocked; let the running threads have the signals.
6134   // (This allows a debugger to break into the running thread.)
6135   sigset_t oldsigs;
6136   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
6137   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
6138 #endif
6139 
6140   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
6141   jt->set_suspend_equivalent();
6142   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
6143 
6144   // Do this the hard way by blocking ...
6145   // See http://monaco.sfbay/detail.jsf?cr=5094058.
6146   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
6147   // Only for SPARC >= V8PlusA
6148 #if defined(__sparc) && defined(COMPILER2)
6149   if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
6150 #endif
6151 
6152   if (time == 0) {
6153     status = os::Solaris::cond_wait (_cond, _mutex) ;
6154   } else {
6155     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
6156   }
6157   // Note that an untimed cond_wait() can sometimes return ETIME on older
6158   // versions of Solaris.
6159   assert_status(status == 0 || status == EINTR ||
6160                 status == ETIME || status == ETIMEDOUT,
6161                 status, "cond_timedwait");
6162 
6163 #ifdef ASSERT
6164   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
6165 #endif
6166   _counter = 0 ;
6167   status = os::Solaris::mutex_unlock(_mutex);
6168   assert_status(status == 0, status, "mutex_unlock") ;
6169   // Paranoia to ensure our locked and lock-free paths interact
6170   // correctly with each other and Java-level accesses.
6171   OrderAccess::fence();
6172 
6173   // If externally suspended while waiting, re-suspend
6174   if (jt->handle_special_suspend_equivalent_condition()) {
6175     jt->java_suspend_self();
6176   }
6177 }
6178 
6179 void Parker::unpark() {
6180   int s, status ;
6181   status = os::Solaris::mutex_lock (_mutex) ;
6182   assert (status == 0, "invariant") ;
6183   s = _counter;
6184   _counter = 1;
6185   status = os::Solaris::mutex_unlock (_mutex) ;
6186   assert (status == 0, "invariant") ;
6187 
6188   if (s < 1) {
6189     status = os::Solaris::cond_signal (_cond) ;
6190     assert (status == 0, "invariant") ;
6191   }
6192 }
6193 
6194 extern char** environ;
6195 
6196 // Run the specified command in a separate process. Return its exit value,
6197 // or -1 on failure (e.g. can't fork a new process).
6198 // Unlike system(), this function can be called from a signal handler. It
6199 // doesn't block SIGINT et al.
6200 int os::fork_and_exec(char* cmd) {
6201   char * argv[4];
6202   argv[0] = (char *)"sh";
6203   argv[1] = (char *)"-c";
6204   argv[2] = cmd;
6205   argv[3] = NULL;
6206 
6207   // fork() is async-signal-safe; fork1() is not, so it cannot be used in a signal handler
6208   pid_t pid;
6209   Thread* t = ThreadLocalStorage::get_thread_slow();
6210   if (t != NULL && t->is_inside_signal_handler()) {
6211     pid = fork();
6212   } else {
6213     pid = fork1();
6214   }
6215 
6216   if (pid < 0) {
6217     // fork failed
6218     warning("fork failed: %s", strerror(errno));
6219     return -1;
6220 
6221   } else if (pid == 0) {
6222     // child process
6223 
6224     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
6225     execve("/usr/bin/sh", argv, environ);
6226 
6227     // execve failed
6228     _exit(-1);
6229 
6230   } else  {
6231     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
6232     // care about the actual exit code, for now.
6233 
6234     int status;
6235 
6236     // Wait for the child process to exit.  This returns immediately if
6237     // the child has already exited.
6238     while (waitpid(pid, &status, 0) < 0) {
6239         switch (errno) {
6240         case ECHILD: return 0;
6241         case EINTR: break;
6242         default: return -1;
6243         }
6244     }
6245 
6246     if (WIFEXITED(status)) {
6247        // The child exited normally; get its exit code.
6248        return WEXITSTATUS(status);
6249     } else if (WIFSIGNALED(status)) {
6250        // The child exited because of a signal
6251        // The best value to return is 0x80 + signal number,
6252        // because that is what all Unix shells do, and because
6253        // it allows callers to distinguish between process exit and
6254        // process death by signal.
6255        return 0x80 + WTERMSIG(status);
6256     } else {
6257        // Unknown exit code; pass it through
6258        return status;
6259     }
6260   }
6261 }
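
// Minimal usage sketch (the command string is hypothetical, not VM code):
//
//   char cmd[] = "uname -a";              // command handed to /usr/bin/sh -c
//   int rc = os::fork_and_exec(cmd);      // shell exit status, 0x80 + signal, or -1 if fork failed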
6262 
6263 // is_headless_jre()
6264 //
6265 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
6266 // in order to report if we are running in a headless jre
6267 //
6268 // Since JDK 8, xawt/libmawt.so has been moved into the same directory
6269 // as libawt.so and renamed libawt_xawt.so.
6270 //
6271 bool os::is_headless_jre() {
6272     struct stat statbuf;
6273     char buf[MAXPATHLEN];
6274     char libmawtpath[MAXPATHLEN];
6275     const char *xawtstr  = "/xawt/libmawt.so";
6276     const char *new_xawtstr = "/libawt_xawt.so";
6277     char *p;
6278 
6279     // Get path to libjvm.so
6280     os::jvm_path(buf, sizeof(buf));
6281 
6282     // Get rid of libjvm.so
6283     p = strrchr(buf, '/');
6284     if (p == NULL) return false;
6285     else *p = '\0';
6286 
6287     // Get rid of client or server
6288     p = strrchr(buf, '/');
6289     if (p == NULL) return false;
6290     else *p = '\0';
6291 
6292     // check xawt/libmawt.so
6293     strcpy(libmawtpath, buf);
6294     strcat(libmawtpath, xawtstr);
6295     if (::stat(libmawtpath, &statbuf) == 0) return false;
6296 
6297     // check libawt_xawt.so
6298     strcpy(libmawtpath, buf);
6299     strcat(libmawtpath, new_xawtstr);
6300     if (::stat(libmawtpath, &statbuf) == 0) return false;
6301 
6302     return true;
6303 }
6304 
6305 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
6306   INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted);
6307 }
6308 
6309 int os::close(int fd) {
6310   return ::close(fd);
6311 }
6312 
6313 int os::socket_close(int fd) {
6314   return ::close(fd);
6315 }
6316 
6317 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
6318   INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
6319 }
6320 
6321 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
6322   INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
6323 }
6324 
6325 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
6326   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
6327 }
6328 
6329 // As both poll and select can be interrupted by signals, we have to be
6330 // prepared to restart the system call after updating the timeout, unless
6331 // a poll() is done with timeout == -1, in which case we repeat with this
6332 // "wait forever" value.
6333 
6334 int os::timeout(int fd, long timeout) {
6335   int res;
6336   struct timeval t;
6337   julong prevtime, newtime;
6338   static const char* aNull = 0;
6339   struct pollfd pfd;
6340   pfd.fd = fd;
6341   pfd.events = POLLIN;
6342 
6343   gettimeofday(&t, &aNull);
6344   prevtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;
6345 
6346   for(;;) {
6347     INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
6348     if(res == OS_ERR && errno == EINTR) {
6349         if(timeout != -1) {
6350           gettimeofday(&t, &aNull);
6351           newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec /1000;
6352           timeout -= newtime - prevtime;
6353           if(timeout <= 0)
6354             return OS_OK;
6355           prevtime = newtime;
6356         }
6357     } else return res;
6358   }
6359 }
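
// Worked example of the restart logic above (illustrative values): with
// timeout = 5000 ms, if poll() is interrupted (EINTR) after roughly 1200 ms
// have elapsed, the remaining timeout becomes 5000 - 1200 = 3800 ms and the
// poll() is retried; once the remainder reaches <= 0 the call returns OS_OK.
// A timeout of -1 is passed through unchanged, so the wait is simply restarted.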
6360 
6361 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
6362   int _result;
6363   INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\
6364                           os::Solaris::clear_interrupted);
6365 
6366   // Depending on when thread interruption is reset, _result could be
6367   // one of two values when errno == EINTR
6368 
6369   if (((_result == OS_INTRPT) || (_result == OS_ERR))
6370       && (errno == EINTR)) {
6371      /* restarting a connect() changes its errno semantics */
6372      INTERRUPTIBLE(::connect(fd, him, len), _result,\
6373                    os::Solaris::clear_interrupted);
6374      /* undo these changes */
6375      if (_result == OS_ERR) {
6376        if (errno == EALREADY) {
6377          errno = EINPROGRESS; /* fall through */
6378        } else if (errno == EISCONN) {
6379          errno = 0;
6380          return OS_OK;
6381        }
6382      }
6383    }
6384    return _result;
6385  }
6386 
6387 int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
6388   if (fd < 0) {
6389     return OS_ERR;
6390   }
6391   INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\
6392                            os::Solaris::clear_interrupted);
6393 }
6394 
6395 int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
6396                  sockaddr* from, socklen_t* fromlen) {
6397   INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\
6398                            os::Solaris::clear_interrupted);
6399 }
6400 
6401 int os::sendto(int fd, char* buf, size_t len, uint flags,
6402                struct sockaddr* to, socklen_t tolen) {
6403   INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\
6404                            os::Solaris::clear_interrupted);
6405 }
6406 
6407 int os::socket_available(int fd, jint *pbytes) {
6408   if (fd < 0) {
6409     return OS_OK;
6410   }
6411   int ret;
6412   RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
6413   // Note: ioctl can return 0 when successful; JVM_SocketAvailable
6414   // is expected to return 0 on failure and 1 on success to the JDK.
6415   return (ret == OS_ERR) ? 0 : 1;
6416 }
6417 
6418 int os::bind(int fd, struct sockaddr* him, socklen_t len) {
6419    INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
6420                                       os::Solaris::clear_interrupted);
6421 }
6422 
6423 // Get the default path to the core file
6424 // Returns the length of the string
6425 int os::get_core_path(char* buffer, size_t bufferSize) {
6426   const char* p = get_current_directory(buffer, bufferSize);
6427 
6428   if (p == NULL) {
6429     assert(p != NULL, "failed to get current directory");
6430     return 0;
6431   }
6432 
6433   return strlen(buffer);
6434 }
6435 
6436 #ifndef PRODUCT
6437 void TestReserveMemorySpecial_test() {
6438   // No tests available for this platform
6439 }
6440 #endif