1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "jvm_solaris.h"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/filemap.hpp"
  37 #include "mutex_solaris.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "os_share_solaris.hpp"
  40 #include "prims/jniFastGetField.hpp"
  41 #include "prims/jvm.h"
  42 #include "prims/jvm_misc.hpp"
  43 #include "runtime/arguments.hpp"
  44 #include "runtime/extendedPC.hpp"
  45 #include "runtime/globals.hpp"
  46 #include "runtime/interfaceSupport.hpp"
  47 #include "runtime/java.hpp"
  48 #include "runtime/javaCalls.hpp"
  49 #include "runtime/mutexLocker.hpp"
  50 #include "runtime/objectMonitor.hpp"
  51 #include "runtime/osThread.hpp"
  52 #include "runtime/perfMemory.hpp"
  53 #include "runtime/sharedRuntime.hpp"
  54 #include "runtime/statSampler.hpp"
  55 #include "runtime/stubRoutines.hpp"
  56 #include "runtime/thread.inline.hpp"
  57 #include "runtime/threadCritical.hpp"
  58 #include "runtime/timer.hpp"
  59 #include "services/attachListener.hpp"
  60 #include "services/memTracker.hpp"
  61 #include "services/runtimeService.hpp"
  62 #include "utilities/decoder.hpp"
  63 #include "utilities/defaultStream.hpp"
  64 #include "utilities/events.hpp"
  65 #include "utilities/growableArray.hpp"
  66 #include "utilities/vmError.hpp"
  67 
  68 // put OS-includes here
  69 # include <dlfcn.h>
  70 # include <errno.h>
  71 # include <exception>
  72 # include <link.h>
  73 # include <poll.h>
  74 # include <pthread.h>
  75 # include <pwd.h>
  76 # include <schedctl.h>
  77 # include <setjmp.h>
  78 # include <signal.h>
  79 # include <stdio.h>
  80 # include <alloca.h>
  81 # include <sys/filio.h>
  82 # include <sys/ipc.h>
  83 # include <sys/lwp.h>
  84 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
  85 # include <sys/mman.h>
  86 # include <sys/processor.h>
  87 # include <sys/procset.h>
  88 # include <sys/pset.h>
  89 # include <sys/resource.h>
  90 # include <sys/shm.h>
  91 # include <sys/socket.h>
  92 # include <sys/stat.h>
  93 # include <sys/systeminfo.h>
  94 # include <sys/time.h>
  95 # include <sys/times.h>
  96 # include <sys/types.h>
  97 # include <sys/wait.h>
  98 # include <sys/utsname.h>
  99 # include <thread.h>
 100 # include <unistd.h>
 101 # include <sys/priocntl.h>
 102 # include <sys/rtpriocntl.h>
 103 # include <sys/tspriocntl.h>
 104 # include <sys/iapriocntl.h>
 105 # include <sys/fxpriocntl.h>
 106 # include <sys/loadavg.h>
 107 # include <string.h>
 108 # include <stdio.h>
 109 
 110 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
 111 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
 112 
 113 #define MAX_PATH (2 * K)
 114 
 115 // for timer info max values which include all bits
 116 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 117 
 118 
 119 // Here are some liblgrp types from sys/lgrp_user.h to be able to
 120 // compile on older systems without this header file.
 121 
 122 #ifndef MADV_ACCESS_LWP
 123 # define  MADV_ACCESS_LWP         7       /* next LWP to access heavily */
 124 #endif
 125 #ifndef MADV_ACCESS_MANY
 126 # define  MADV_ACCESS_MANY        8       /* many processes to access heavily */
 127 #endif
 128 
 129 #ifndef LGRP_RSRC_CPU
 130 # define LGRP_RSRC_CPU           0       /* CPU resources */
 131 #endif
 132 #ifndef LGRP_RSRC_MEM
 133 # define LGRP_RSRC_MEM           1       /* memory resources */
 134 #endif
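
// Illustrative sketch (not part of the VM): the MADV_ACCESS_* values above are
// advice codes for madvise(3C) on Solaris (int madvise(caddr_t, size_t, int)),
// used to hint NUMA placement for an already-mapped range. The helper name
// below is hypothetical.
//
//   static void hint_numa_access(char* addr, size_t bytes, bool many_lwps) {
//     int advice = many_lwps ? MADV_ACCESS_MANY   // many LWPs/processes will touch the range
//                            : MADV_ACCESS_LWP;   // mainly the next LWP to touch it
//     (void) madvise((caddr_t)addr, bytes, advice);  // advisory only; failure is harmless
//   }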
 135 
 136 // see thr_setprio(3T) for the basis of these numbers
 137 #define MinimumPriority 0
 138 #define NormalPriority  64
 139 #define MaximumPriority 127
 140 
 141 // Values for ThreadPriorityPolicy == 1
 142 int prio_policy1[CriticalPriority+1] = {
 143   -99999,  0, 16,  32,  48,  64,
 144           80, 96, 112, 124, 127, 127 };
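
// Worked example: with ThreadPriorityPolicy == 1 the table above is indexed by
// the Java thread priority, so NormPriority (5) yields prio_policy1[5] == 64
// (NormalPriority per thr_setprio(3T)), MaxPriority (10) yields 127
// (MaximumPriority), and CriticalPriority (11) also yields 127. Index 0 is
// unused, since Java priorities start at 1, hence the -99999 sentinel.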
 145 
 146 // System parameters used internally
 147 static clock_t clock_tics_per_sec = 100;
 148 
 149 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
 150 static bool enabled_extended_FILE_stdio = false;
 151 
 152 // For diagnostics to print a message once. see run_periodic_checks
 153 static bool check_addr0_done = false;
 154 static sigset_t check_signal_done;
 155 static bool check_signals = true;
 156 
 157 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
 158 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
 159 
 160 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
 161 
 162 
 163 // "default" initializers for missing libc APIs
 164 extern "C" {
 165   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 166   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
 167 
 168   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 169   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
 170 }
 171 
 172 // "default" initializers for pthread-based synchronization
 173 extern "C" {
 174   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 175   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 176 }
 177 
 178 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 179 
 180 // Thread Local Storage
 181 // This is common to all Solaris platforms so it is defined here,
 182 // in this common file.
 183 // The declarations are in the os_cpu threadLS*.hpp files.
 184 //
 185 // Static member initialization for TLS
 186 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
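
// Illustrative lookup flow (the actual fast path lives in the os_cpu
// threadLS_*.hpp headers): a raw lwp/thread id is hashed by pd_cache_index()
// and used to probe _get_thread_cache[]; on a miss the caller falls back to
// get_thread_via_cache_slowly() below, which repopulates the cache slot.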
 187 
 188 #ifndef PRODUCT
 189 #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
 190 
 191 int ThreadLocalStorage::_tcacheHit = 0;
 192 int ThreadLocalStorage::_tcacheMiss = 0;
 193 
 194 void ThreadLocalStorage::print_statistics() {
 195   int total = _tcacheMiss+_tcacheHit;
 196   tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
 197                 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
 198 }
 199 #undef _PCT
 200 #endif // PRODUCT
 201 
 202 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
 203                                                         int index) {
 204   Thread *thread = get_thread_slow();
 205   if (thread != NULL) {
 206     address sp = os::current_stack_pointer();
 207     guarantee(thread->_stack_base == NULL ||
 208               (sp <= thread->_stack_base &&
 209                  sp >= thread->_stack_base - thread->_stack_size) ||
 210                is_error_reported(),
 211               "sp must be inside of selected thread stack");
 212 
 213     thread->set_self_raw_id(raw_id);  // mark for quick retrieval
 214     _get_thread_cache[ index ] = thread;
 215   }
 216   return thread;
 217 }
 218 
 219 
 220 static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
 221 #define NO_CACHED_THREAD ((Thread*)all_zero)
 222 
 223 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
 224 
 225   // Store the new value before updating the cache to prevent a race
 226   // between get_thread_via_cache_slowly() and this store operation.
 227   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
 228 
 229   // Update thread cache with new thread if setting on thread create,
 230   // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
 231   uintptr_t raw = pd_raw_thread_id();
 232   int ix = pd_cache_index(raw);
 233   _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
 234 }
 235 
 236 void ThreadLocalStorage::pd_init() {
 237   for (int i = 0; i < _pd_cache_size; i++) {
 238     _get_thread_cache[i] = NO_CACHED_THREAD;
 239   }
 240 }
 241 
 242 // Invalidate all the caches (happens to be the same as pd_init).
 243 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
 244 
 245 #undef NO_CACHED_THREAD
 246 
 247 // END Thread Local Storage
 248 
 249 static inline size_t adjust_stack_size(address base, size_t size) {
 250   if ((ssize_t)size < 0) {
 251     // 4759953: Compensate for ridiculous stack size.
 252     size = max_intx;
 253   }
 254   if (size > (size_t)base) {
 255     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
 256     size = (size_t)base;
 257   }
 258   return size;
 259 }
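
// Example of the clamping above (illustrative numbers): if thr_stksegment()
// reported a base of 0x00080000 together with a bogus size larger than that
// address, the size is clamped to 0x00080000 so that (base - size) cannot wrap
// below address 0; a size that is negative when viewed as ssize_t is first
// replaced with max_intx.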
 260 
 261 static inline stack_t get_stack_info() {
 262   stack_t st;
 263   int retval = thr_stksegment(&st);
 264   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
 265   assert(retval == 0, "incorrect return value from thr_stksegment");
 266   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
 267   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
 268   return st;
 269 }
 270 
 271 address os::current_stack_base() {
 272   int r = thr_main() ;
 273   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
 274   bool is_primordial_thread = r;
 275 
 276   // Workaround 4352906, avoid calls to thr_stksegment by
 277   // thr_main after the first one (it looks like we trash
 278   // some data, causing the value for ss_sp to be incorrect).
 279   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
 280     stack_t st = get_stack_info();
 281     if (is_primordial_thread) {
 282       // cache initial value of stack base
 283       os::Solaris::_main_stack_base = (address)st.ss_sp;
 284     }
 285     return (address)st.ss_sp;
 286   } else {
 287     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
 288     return os::Solaris::_main_stack_base;
 289   }
 290 }
 291 
 292 size_t os::current_stack_size() {
 293   size_t size;
 294 
 295   int r = thr_main() ;
 296   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
 297   if(!r) {
 298     size = get_stack_info().ss_size;
 299   } else {
 300     struct rlimit limits;
 301     getrlimit(RLIMIT_STACK, &limits);
 302     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
 303   }
 304   // base may not be page aligned
 305   address base = current_stack_base();
 306   address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
 307   return (size_t)(base - bottom);
 308 }
 309 
 310 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
 311   return localtime_r(clock, res);
 312 }
 313 
 314 // interruptible infrastructure
 315 
 316 // setup_interruptible saves the thread state before going into an
 317 // interruptible system call.
 318 // The saved state is used to restore the thread to
 319 // its former state whether or not an interrupt is received.
 320 // Used by the class loader via os::read.
 321 // os::restartable_read calls skip this layer and stay in _thread_in_native.
 322 
 323 void os::Solaris::setup_interruptible(JavaThread* thread) {
 324 
 325   JavaThreadState thread_state = thread->thread_state();
 326 
 327   assert(thread_state != _thread_blocked, "Coming from the wrong thread");
 328   assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
 329   OSThread* osthread = thread->osthread();
 330   osthread->set_saved_interrupt_thread_state(thread_state);
 331   thread->frame_anchor()->make_walkable(thread);
 332   ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
 333 }
 334 
 335 JavaThread* os::Solaris::setup_interruptible() {
 336   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
 337   setup_interruptible(thread);
 338   return thread;
 339 }
 340 
 341 void os::Solaris::try_enable_extended_io() {
 342   typedef int (*enable_extended_FILE_stdio_t)(int, int);
 343 
 344   if (!UseExtendedFileIO) {
 345     return;
 346   }
 347 
 348   enable_extended_FILE_stdio_t enabler =
 349     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
 350                                          "enable_extended_FILE_stdio");
 351   if (enabler) {
 352     enabler(-1, -1);
 353   }
 354 }
 355 
 356 
 357 #ifdef ASSERT
 358 
 359 JavaThread* os::Solaris::setup_interruptible_native() {
 360   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
 361   JavaThreadState thread_state = thread->thread_state();
 362   assert(thread_state == _thread_in_native, "Assumed thread_in_native");
 363   return thread;
 364 }
 365 
 366 void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
 367   JavaThreadState thread_state = thread->thread_state();
 368   assert(thread_state == _thread_in_native, "Assumed thread_in_native");
 369 }
 370 #endif
 371 
 372 // cleanup_interruptible reverses the effects of setup_interruptible
 373 // setup_interruptible_already_blocked() does not need any cleanup.
 374 
 375 void os::Solaris::cleanup_interruptible(JavaThread* thread) {
 376   OSThread* osthread = thread->osthread();
 377 
 378   ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
 379 }
 380 
 381 // I/O interruption related counters called in _INTERRUPTIBLE
 382 
 383 void os::Solaris::bump_interrupted_before_count() {
 384   RuntimeService::record_interrupted_before_count();
 385 }
 386 
 387 void os::Solaris::bump_interrupted_during_count() {
 388   RuntimeService::record_interrupted_during_count();
 389 }
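
// Illustrative sketch (not the actual _INTERRUPTIBLE macro, which lives in the
// platform inline header): the helpers above are meant to bracket a blocking
// libc call roughly as follows, restoring the saved thread state whether or
// not the call was interrupted. The function name is hypothetical;
// bump_interrupted_before_count() is used analogously when an interrupt was
// already pending before the call.
//
//   static ssize_t interruptible_read_sketch(JavaThread* jt, int fd, void* buf, size_t n) {
//     os::Solaris::setup_interruptible(jt);            // save state, transition to _thread_blocked
//     ssize_t res = ::read(fd, buf, n);
//     if (res < 0 && errno == EINTR) {
//       os::Solaris::bump_interrupted_during_count();  // interrupted while blocked in the call
//     }
//     os::Solaris::cleanup_interruptible(jt);          // transition back to the saved state
//     return res;
//   }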
 390 
 391 static int _processors_online = 0;
 392 
 393          jint os::Solaris::_os_thread_limit = 0;
 394 volatile jint os::Solaris::_os_thread_count = 0;
 395 
 396 julong os::available_memory() {
 397   return Solaris::available_memory();
 398 }
 399 
 400 julong os::Solaris::available_memory() {
 401   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
 402 }
 403 
 404 julong os::Solaris::_physical_memory = 0;
 405 
 406 julong os::physical_memory() {
 407    return Solaris::physical_memory();
 408 }
 409 
 410 static hrtime_t first_hrtime = 0;
 411 static const hrtime_t hrtime_hz = 1000*1000*1000;
 412 const int LOCK_BUSY = 1;
 413 const int LOCK_FREE = 0;
 414 const int LOCK_INVALID = -1;
 415 static volatile hrtime_t max_hrtime = 0;
 416 static volatile int max_hrtime_lock = LOCK_FREE;     // Update counter with LSB as lock-in-progress
 417 
 418 
 419 void os::Solaris::initialize_system_info() {
 420   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
 421   _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
 422   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
 423 }
 424 
 425 int os::active_processor_count() {
 426   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 427   pid_t pid = getpid();
 428   psetid_t pset = PS_NONE;
 429   // Are we running in a processor set or is there any processor set around?
 430   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
 431     uint_t pset_cpus;
 432     // Query the number of cpus available to us.
 433     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
 434       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
 435       _processors_online = pset_cpus;
 436       return pset_cpus;
 437     }
 438   }
 439   // Otherwise return number of online cpus
 440   return online_cpus;
 441 }
 442 
 443 static bool find_processors_in_pset(psetid_t        pset,
 444                                     processorid_t** id_array,
 445                                     uint_t*         id_length) {
 446   bool result = false;
 447   // Find the number of processors in the processor set.
 448   if (pset_info(pset, NULL, id_length, NULL) == 0) {
 449     // Make up an array to hold their ids.
 450     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 451     // Fill in the array with their processor ids.
 452     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
 453       result = true;
 454     }
 455   }
 456   return result;
 457 }
 458 
 459 // Callers of find_processors_online() must tolerate imprecise results --
 460 // the system configuration can change asynchronously because of DR
 461 // or explicit psradm operations.
 462 //
 463 // We also need to take care that the loop (below) terminates as the
 464 // number of processors online can change between the _SC_NPROCESSORS_ONLN
 465 // request and the loop that builds the list of processor ids.   Unfortunately
 466 // there's no reliable way to determine the maximum valid processor id,
 467 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
 468 // man pages, which claim the processor id set is "sparse, but
 469 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
 470 // exit the loop.
 471 //
 472 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
 473 // not available on S8.0.
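
// For example (illustrative), on a 4-way system where CPU 2 has been taken
// offline with psradm, the online ids might be {0, 1, 3}; the loop below must
// therefore probe candidate ids rather than assume they run 0..N-1.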
 474 
 475 static bool find_processors_online(processorid_t** id_array,
 476                                    uint*           id_length) {
 477   const processorid_t MAX_PROCESSOR_ID = 100000 ;
 478   // Find the number of processors online.
 479   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
 480   // Make up an array to hold their ids.
 481   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 482   // Processors need not be numbered consecutively.
 483   long found = 0;
 484   processorid_t next = 0;
 485   while (found < *id_length && next < MAX_PROCESSOR_ID) {
 486     processor_info_t info;
 487     if (processor_info(next, &info) == 0) {
 488       // NB, PI_NOINTR processors are effectively online ...
 489       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
 490         (*id_array)[found] = next;
 491         found += 1;
 492       }
 493     }
 494     next += 1;
 495   }
 496   if (found < *id_length) {
 497       // The loop above didn't identify the expected number of processors.
 498       // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
 499       // and re-running the loop, above, but there's no guarantee of progress
 500       // if the system configuration is in flux.  Instead, we just return what
 501       // we've got.  Note that in the worst case find_processors_online() could
 502       // return an empty set.  (As a fall-back in the case of the empty set we
 503       // could just return the ID of the current processor).
 504       *id_length = found ;
 505   }
 506 
 507   return true;
 508 }
 509 
 510 static bool assign_distribution(processorid_t* id_array,
 511                                 uint           id_length,
 512                                 uint*          distribution,
 513                                 uint           distribution_length) {
 514   // We assume we can assign processorid_t's to uint's.
 515   assert(sizeof(processorid_t) == sizeof(uint),
 516          "can't convert processorid_t to uint");
 517   // Quick check to see if we won't succeed.
 518   if (id_length < distribution_length) {
 519     return false;
 520   }
 521   // Assign processor ids to the distribution.
 522   // Try to shuffle processors to distribute work across boards,
 523   // assuming 4 processors per board.
 524   const uint processors_per_board = ProcessDistributionStride;
 525   // Find the maximum processor id.
 526   processorid_t max_id = 0;
 527   for (uint m = 0; m < id_length; m += 1) {
 528     max_id = MAX2(max_id, id_array[m]);
 529   }
 530   // The next id, to limit loops.
 531   const processorid_t limit_id = max_id + 1;
 532   // Make up markers for available processors.
 533   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
 534   for (uint c = 0; c < limit_id; c += 1) {
 535     available_id[c] = false;
 536   }
 537   for (uint a = 0; a < id_length; a += 1) {
 538     available_id[id_array[a]] = true;
 539   }
 540   // Step by "boards", then by "slot", copying to "assigned".
 541   // NEEDS_CLEANUP: The assignment of processors should be stateful,
 542   //                remembering which processors have been assigned by
 543   //                previous calls, etc., so as to distribute several
 544   //                independent calls of this method.  It would be nice
 545   //                to have an API that let us ask how many processes
 546   //                are bound to a processor, but we don't have that,
 547   //                either.
 548   //                In the short term, "board" is static so that
 549   //                subsequent distributions don't all start at board 0.
 550   static uint board = 0;
 551   uint assigned = 0;
 552   // Until we've found enough processors ....
 553   while (assigned < distribution_length) {
 554     // ... find the next available processor in the board.
 555     for (uint slot = 0; slot < processors_per_board; slot += 1) {
 556       uint try_id = board * processors_per_board + slot;
 557       if ((try_id < limit_id) && (available_id[try_id] == true)) {
 558         distribution[assigned] = try_id;
 559         available_id[try_id] = false;
 560         assigned += 1;
 561         break;
 562       }
 563     }
 564     board += 1;
 565     if (board * processors_per_board + 0 >= limit_id) {
 566       board = 0;
 567     }
 568   }
 569   if (available_id != NULL) {
 570     FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
 571   }
 572   return true;
 573 }
 574 
 575 void os::set_native_thread_name(const char *name) {
 576   // Not yet implemented.
 577   return;
 578 }
 579 
 580 bool os::distribute_processes(uint length, uint* distribution) {
 581   bool result = false;
 582   // Find the processor id's of all the available CPUs.
 583   processorid_t* id_array  = NULL;
 584   uint           id_length = 0;
 585   // There are some races between querying information and using it,
 586   // since processor sets can change dynamically.
 587   psetid_t pset = PS_NONE;
 588   // Are we running in a processor set?
 589   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
 590     result = find_processors_in_pset(pset, &id_array, &id_length);
 591   } else {
 592     result = find_processors_online(&id_array, &id_length);
 593   }
 594   if (result == true) {
 595     if (id_length >= length) {
 596       result = assign_distribution(id_array, id_length, distribution, length);
 597     } else {
 598       result = false;
 599     }
 600   }
 601   if (id_array != NULL) {
 602     FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
 603   }
 604   return result;
 605 }
 606 
 607 bool os::bind_to_processor(uint processor_id) {
 608   // We assume that a processorid_t can be stored in a uint.
 609   assert(sizeof(uint) == sizeof(processorid_t),
 610          "can't convert uint to processorid_t");
 611   int bind_result =
 612     processor_bind(P_LWPID,                       // bind LWP.
 613                    P_MYID,                        // bind current LWP.
 614                    (processorid_t) processor_id,  // id.
 615                    NULL);                         // don't return old binding.
 616   return (bind_result == 0);
 617 }
 618 
 619 bool os::getenv(const char* name, char* buffer, int len) {
 620   char* val = ::getenv( name );
 621   if ( val == NULL
 622   ||   strlen(val) + 1  >  len ) {
 623     if (len > 0)  buffer[0] = 0; // return a null string
 624     return false;
 625   }
 626   strcpy( buffer, val );
 627   return true;
 628 }
 629 
 630 
 631 // Return true if the process has special privileges, i.e. its real and effective user (or group) ids differ.
 632 
 633 bool os::have_special_privileges() {
 634   static bool init = false;
 635   static bool privileges = false;
 636   if (!init) {
 637     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 638     init = true;
 639   }
 640   return privileges;
 641 }
 642 
 643 
 644 void os::init_system_properties_values() {
 645   // The next steps are taken in the product version:
 646   //
 647   // Obtain the JAVA_HOME value from the location of libjvm.so.
 648   // This library should be located at:
 649   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
 650   //
 651   // If "/jre/lib/" appears at the right place in the path, then we
 652   // assume libjvm.so is installed in a JDK and we use this path.
 653   //
 654   // Otherwise exit with message: "Could not create the Java virtual machine."
 655   //
 656   // The following extra steps are taken in the debugging version:
 657   //
 658   // If "/jre/lib/" does NOT appear at the right place in the path,
 659   // then instead of exiting we check for the $JAVA_HOME environment variable.
 660   //
 661   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
 662   // then we append a fake suffix "hotspot/libjvm.so" to this path so
 663   // it looks like libjvm.so is installed there
 664   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
 665   //
 666   // Otherwise exit.
 667   //
 668   // Important note: if the location of libjvm.so changes this
 669   // code needs to be changed accordingly.
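  //
  // Worked example (hypothetical install path): if os::jvm_path() returns
  //   /usr/jdk/instances/jdk1.8.0/jre/lib/sparcv9/server/libjvm.so
  // then stripping "/libjvm.so" and "/server" below yields the dll_dir
  //   /usr/jdk/instances/jdk1.8.0/jre/lib/sparcv9
  // and further stripping "/sparcv9" and "/lib" yields the java_home
  //   /usr/jdk/instances/jdk1.8.0/jre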
 670 
 671 // Base path of extensions installed on the system.
 672 #define SYS_EXT_DIR     "/usr/jdk/packages"
 673 #define EXTENSIONS_DIR  "/lib/ext"
 674 #define ENDORSED_DIR    "/lib/endorsed"
 675 
 676   char cpu_arch[12];
 677   // Buffer that fits several sprintfs.
 678   // Note that the space for the colon and the trailing null are provided
 679   // by the nulls included by the sizeof operator.
 680   const size_t bufsize =
 681     MAX4((size_t)MAXPATHLEN,  // For dll_dir & friends.
 682          sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(cpu_arch), // invariant ld_library_path (cpu_arch is filled in later, so use its full size)
 683          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
 684          (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
 685   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 686 
 687   // sysclasspath, java_home, dll_dir
 688   {
 689     char *pslash;
 690     os::jvm_path(buf, bufsize);
 691 
 692     // Found the full path to libjvm.so.
 693     // Now cut the path to <java_home>/jre if we can.
 694     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 695     pslash = strrchr(buf, '/');
 696     if (pslash != NULL) {
 697       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 698     }
 699     Arguments::set_dll_dir(buf);
 700 
 701     if (pslash != NULL) {
 702       pslash = strrchr(buf, '/');
 703       if (pslash != NULL) {
 704         *pslash = '\0';          // Get rid of /<arch>.
 705         pslash = strrchr(buf, '/');
 706         if (pslash != NULL) {
 707           *pslash = '\0';        // Get rid of /lib.
 708         }
 709       }
 710     }
 711     Arguments::set_java_home(buf);
 712 
 713     if (!set_boot_path('/', ':')) {
 714       return;
 715     }
 716   }
 717 
 718   // Where to look for native libraries.
 719   {
 720     // Use dlinfo() to determine the correct java.library.path.
 721     //
 722     // If we're launched by the Java launcher, and the user
 723     // does not set java.library.path explicitly on the commandline,
 724     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
 725     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
 726     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
 727     // /usr/lib), which is exactly what we want.
 728     //
 729     // If the user does set java.library.path, it completely
 730     // overwrites this setting, and always has.
 731     //
 732     // If we're not launched by the Java launcher, we may
 733     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
 734     // settings.  Again, dlinfo does exactly what we want.
 735 
 736     Dl_serinfo     info_sz, *info = &info_sz;
 737     Dl_serpath     *path;
 738     char           *library_path;
 739     char           *common_path = buf;
 740 
 741     // Determine search path count and required buffer size.
 742     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
 743       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
 744     }
 745 
 746     // Allocate new buffer and initialize.
 747     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
 748     info->dls_size = info_sz.dls_size;
 749     info->dls_cnt = info_sz.dls_cnt;
 750 
 751     // Obtain search path information.
 752     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
 753       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
 754       FREE_C_HEAP_ARRAY(char, info, mtInternal);
 755       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
 756     }
 757 
 758     path = &info->dls_serpath[0];
 759 
 760     // Note: Due to a legacy implementation, most of the library path
 761     // is set in the launcher. This was to accommodate linking restrictions
 762     // on legacy Solaris implementations (which are no longer supported).
 763     // Eventually, all the library path setting will be done here.
 764     //
 765     // However, to prevent the proliferation of improperly built native
 766     // libraries, the new path component /usr/jdk/packages is added here.
 767 
 768     // Determine the actual CPU architecture.
 769     sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
 770 #ifdef _LP64
 771     // If we are a 64-bit vm, perform the following translations:
 772     //   sparc   -> sparcv9
 773     //   i386    -> amd64
 774     if (strcmp(cpu_arch, "sparc") == 0) {
 775       strcat(cpu_arch, "v9");
 776     } else if (strcmp(cpu_arch, "i386") == 0) {
 777       strcpy(cpu_arch, "amd64");
 778     }
 779 #endif
 780 
 781     // Construct the invariant part of ld_library_path.
 782     sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
 783 
 784     // Struct size is more than sufficient for the path components obtained
 785     // through the dlinfo() call, so only add additional space for the path
 786     // components explicitly added here.
 787     size_t library_path_size = info->dls_size + strlen(common_path);
 788     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
 789     library_path[0] = '\0';
 790 
 791     // Construct the desired Java library path from the linker's library
 792     // search path.
 793     //
 794     // For compatibility, it is optimal that we insert the additional path
 795     // components specific to the Java VM after those components specified
 796     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
 797     // infrastructure.
 798     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
 799       strcpy(library_path, common_path);
 800     } else {
 801       int inserted = 0;
 802       int i;
 803       for (i = 0; i < info->dls_cnt; i++, path++) {
 804         uint_t flags = path->dls_flags & LA_SER_MASK;
 805         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
 806           strcat(library_path, common_path);
 807           strcat(library_path, os::path_separator());
 808           inserted = 1;
 809         }
 810         strcat(library_path, path->dls_name);
 811         strcat(library_path, os::path_separator());
 812       }
 813       // Eliminate trailing path separator.
 814       library_path[strlen(library_path)-1] = '\0';
 815     }
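
    // Illustrative result (hypothetical values): with LD_LIBRARY_PATH=/opt/app/lib
    // on a 64-bit SPARC VM, the loop above would typically produce
    //   /opt/app/lib:/usr/jdk/packages/lib/sparcv9:<linker default/crle paths>
    // i.e. the SYS_EXT_DIR component is inserted after the LD_LIBRARY_PATH
    // entries (flagged LA_SER_LIBPATH) but before the linker-supplied defaults.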
 816 
 817     // happens before argument parsing - can't use a trace flag
 818     // tty->print_raw("init_system_properties_values: native lib path: ");
 819     // tty->print_raw_cr(library_path);
 820 
 821     // Callee copies into its own buffer.
 822     Arguments::set_library_path(library_path);
 823 
 824     FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
 825     FREE_C_HEAP_ARRAY(char, info, mtInternal);
 826   }
 827 
 828   // Extensions directories.
 829   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
 830   Arguments::set_ext_dirs(buf);
 831 
 832   // Endorsed standards default directory.
 833   sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
 834   Arguments::set_endorsed_dirs(buf);
 835 
 836   FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 837 
 838 #undef SYS_EXT_DIR
 839 #undef EXTENSIONS_DIR
 840 #undef ENDORSED_DIR
 841 }
 842 
 843 void os::breakpoint() {
 844   BREAKPOINT;
 845 }
 846 
 847 bool os::obsolete_option(const JavaVMOption *option)
 848 {
 849   if (!strncmp(option->optionString, "-Xt", 3)) {
 850     return true;
 851   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
 852     return true;
 853   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
 854     return true;
 855   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
 856     return true;
 857   }
 858   return false;
 859 }
 860 
 861 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
 862   address  stackStart  = (address)thread->stack_base();
 863   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
 864   if (sp < stackStart && sp >= stackEnd ) return true;
 865   return false;
 866 }
 867 
 868 extern "C" void breakpoint() {
 869   // use debugger to set breakpoint here
 870 }
 871 
 872 static thread_t main_thread;
 873 
 874 // Thread start routine for all new Java threads
 875 extern "C" void* java_start(void* thread_addr) {
 876   // Try to randomize the cache line index of hot stack frames.
 877   // This helps when threads with the same stack traces evict each other's
 878   // cache lines. The threads can be either from the same JVM instance, or
 879   // from different JVM instances. The benefit is especially significant for
 880   // processors with hyperthreading technology.
 881   static int counter = 0;
 882   int pid = os::current_process_id();
 883   alloca(((pid ^ counter++) & 7) * 128);
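  // The expression above yields an offset of 0..896 bytes in 128-byte steps:
  // (pid ^ counter) is masked to 0-7 and multiplied by 128, shifting each new
  // thread's initial frame onto a different set of cache lines.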
 884 
 885   int prio;
 886   Thread* thread = (Thread*)thread_addr;
 887   OSThread* osthr = thread->osthread();
 888 
 889   osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
 890   thread->_schedctl = (void *) schedctl_init () ;
 891 
 892   if (UseNUMA) {
 893     int lgrp_id = os::numa_get_group_id();
 894     if (lgrp_id != -1) {
 895       thread->set_lgrp_id(lgrp_id);
 896     }
 897   }
 898 
 899   // If the creator called set priority before we started,
 900   // we need to call set_native_priority now that we have an lwp.
 901   // We used to get the priority from thr_getprio (we called
 902   // thr_setprio way back in create_thread) and pass it to
 903   // set_native_priority, but Solaris scales the priority
 904   // in java_to_os_priority, so when we read it back here,
 905   // we pass trash to set_native_priority instead of what's
 906   // in java_to_os_priority. So we save the native priority
 907   // in the osThread and recall it here.
 908 
 909   if ( osthr->thread_id() != -1 ) {
 910     if ( UseThreadPriorities ) {
 911       int prio = osthr->native_priority();
 912       if (ThreadPriorityVerbose) {
 913         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
 914                       INTPTR_FORMAT ", setting priority: %d\n",
 915                       osthr->thread_id(), osthr->lwp_id(), prio);
 916       }
 917       os::set_native_priority(thread, prio);
 918     }
 919   } else if (ThreadPriorityVerbose) {
 920     warning("Can't set priority in _start routine, thread id hasn't been set\n");
 921   }
 922 
 923   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 924 
 925   // initialize signal mask for this thread
 926   os::Solaris::hotspot_sigmask(thread);
 927 
 928   thread->run();
 929 
 930   // One less thread is executing
 931   // When the VMThread gets here, the main thread may have already exited
 932   // which frees the CodeHeap containing the Atomic::dec code
 933   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 934     Atomic::dec(&os::Solaris::_os_thread_count);
 935   }
 936 
 937   if (UseDetachedThreads) {
 938     thr_exit(NULL);
 939     ShouldNotReachHere();
 940   }
 941   return NULL;
 942 }
 943 
 944 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
 945   // Allocate the OSThread object
 946   OSThread* osthread = new OSThread(NULL, NULL);
 947   if (osthread == NULL) return NULL;
 948 
 949   // Store info on the Solaris thread into the OSThread
 950   osthread->set_thread_id(thread_id);
 951   osthread->set_lwp_id(_lwp_self());
 952   thread->_schedctl = (void *) schedctl_init () ;
 953 
 954   if (UseNUMA) {
 955     int lgrp_id = os::numa_get_group_id();
 956     if (lgrp_id != -1) {
 957       thread->set_lgrp_id(lgrp_id);
 958     }
 959   }
 960 
 961   if ( ThreadPriorityVerbose ) {
 962     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
 963                   osthread->thread_id(), osthread->lwp_id() );
 964   }
 965 
 966   // Initial thread state is INITIALIZED, not SUSPENDED
 967   osthread->set_state(INITIALIZED);
 968 
 969   return osthread;
 970 }
 971 
 972 void os::Solaris::hotspot_sigmask(Thread* thread) {
 973 
 974   //Save caller's signal mask
 975   sigset_t sigmask;
 976   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
 977   OSThread *osthread = thread->osthread();
 978   osthread->set_caller_sigmask(sigmask);
 979 
 980   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
 981   if (!ReduceSignalUsage) {
 982     if (thread->is_VM_thread()) {
 983       // Only the VM thread handles BREAK_SIGNAL ...
 984       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
 985     } else {
 986       // ... all other threads block BREAK_SIGNAL
 987       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
 988       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
 989     }
 990   }
 991 }
 992 
 993 bool os::create_attached_thread(JavaThread* thread) {
 994 #ifdef ASSERT
 995   thread->verify_not_published();
 996 #endif
 997   OSThread* osthread = create_os_thread(thread, thr_self());
 998   if (osthread == NULL) {
 999      return false;
1000   }
1001 
1002   // Initial thread state is RUNNABLE
1003   osthread->set_state(RUNNABLE);
1004   thread->set_osthread(osthread);
1005 
1006   // initialize signal mask for this thread
1007   // and save the caller's signal mask
1008   os::Solaris::hotspot_sigmask(thread);
1009 
1010   return true;
1011 }
1012 
1013 bool os::create_main_thread(JavaThread* thread) {
1014 #ifdef ASSERT
1015   thread->verify_not_published();
1016 #endif
1017   if (_starting_thread == NULL) {
1018     _starting_thread = create_os_thread(thread, main_thread);
1019      if (_starting_thread == NULL) {
1020         return false;
1021      }
1022   }
1023 
1024   // The primordial thread is runnable from the start
1025   _starting_thread->set_state(RUNNABLE);
1026 
1027   thread->set_osthread(_starting_thread);
1028 
1029   // initialize signal mask for this thread
1030   // and save the caller's signal mask
1031   os::Solaris::hotspot_sigmask(thread);
1032 
1033   return true;
1034 }
1035 
1036 // _T2_libthread is true if we believe we are running with the newer
1037 // SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
1038 bool os::Solaris::_T2_libthread = false;
1039 
1040 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
1041   // Allocate the OSThread object
1042   OSThread* osthread = new OSThread(NULL, NULL);
1043   if (osthread == NULL) {
1044     return false;
1045   }
1046 
1047   if ( ThreadPriorityVerbose ) {
1048     char *thrtyp;
1049     switch ( thr_type ) {
1050       case vm_thread:
1051         thrtyp = (char *)"vm";
1052         break;
1053       case cgc_thread:
1054         thrtyp = (char *)"cgc";
1055         break;
1056       case pgc_thread:
1057         thrtyp = (char *)"pgc";
1058         break;
1059       case java_thread:
1060         thrtyp = (char *)"java";
1061         break;
1062       case compiler_thread:
1063         thrtyp = (char *)"compiler";
1064         break;
1065       case watcher_thread:
1066         thrtyp = (char *)"watcher";
1067         break;
1068       default:
1069         thrtyp = (char *)"unknown";
1070         break;
1071     }
1072     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
1073   }
1074 
1075   // Calculate stack size if it's not specified by caller.
1076   if (stack_size == 0) {
1077     // The default stack size is 1M (2M for LP64).
1078     stack_size = (BytesPerWord >> 2) * K * K;
1079 
1080     switch (thr_type) {
1081     case os::java_thread:
1082       // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
1083       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
1084       break;
1085     case os::compiler_thread:
1086       if (CompilerThreadStackSize > 0) {
1087         stack_size = (size_t)(CompilerThreadStackSize * K);
1088         break;
1089       } // else fall through:
1090         // use VMThreadStackSize if CompilerThreadStackSize is not defined
1091     case os::vm_thread:
1092     case os::pgc_thread:
1093     case os::cgc_thread:
1094     case os::watcher_thread:
1095       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1096       break;
1097     }
1098   }
1099   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1100 
1101   // Initial state is ALLOCATED but not INITIALIZED
1102   osthread->set_state(ALLOCATED);
1103 
1104   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
1105     // We got lots of threads. Check if we still have some address space left.
1106     // We need at least 20MB of unreserved address space (VirtualMemoryBangSize),
1107     // which we check for by trying to reserve that much.
1108     const size_t VirtualMemoryBangSize = 20*K*K;
1109     char* mem = os::reserve_memory(VirtualMemoryBangSize);
1110     if (mem == NULL) {
1111       delete osthread;
1112       return false;
1113     } else {
1114       // Release the memory again
1115       os::release_memory(mem, VirtualMemoryBangSize);
1116     }
1117   }
1118 
1119   // Setup osthread because the child thread may need it.
1120   thread->set_osthread(osthread);
1121 
1122   // Create the Solaris thread
1123   // Use explicit THR_BOUND for the T2_libthread case in case that
1124   // assumption is not accurate; our alternate signal stack handling
1125   // relies on it and requires bound threads.
1126   thread_t tid = 0;
1127   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
1128                    | ((UseBoundThreads || os::Solaris::T2_libthread() ||
1129                        (thr_type == vm_thread) ||
1130                        (thr_type == cgc_thread) ||
1131                        (thr_type == pgc_thread) ||
1132                        (thr_type == compiler_thread && BackgroundCompilation)) ?
1133                       THR_BOUND : 0);
1134   int      status;
1135 
1136   // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
1137   //
1138   // On multiprocessor systems, libthread sometimes under-provisions our
1139   // process with LWPs.  On a 30-way system, for instance, we could have
1140   // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
1141   // to our process.  This can result in underutilization of PEs.
1142   // I suspect the problem is related to libthread's LWP
1143   // pool management and to the kernel's SIGBLOCKING "last LWP parked"
1144   // upcall policy.
1145   //
1146   // The following code is palliative -- it attempts to ensure that our
1147   // process has sufficient LWPs to take advantage of multiple PEs.
1148   // Proper long-term cures include using user-level threads bound to LWPs
1149   // (THR_BOUND) or using LWP-based synchronization.  Note that there is a
1150   // slight timing window with respect to sampling _os_thread_count, but
1151   // the race is benign.  Also, we should periodically recompute
1152   // _processors_online as the min of SC_NPROCESSORS_ONLN and the
1153   // number of PEs in our partition.  You might be tempted to use
1154   // THR_NEW_LWP here, but I'd recommend against it as that could
1155   // result in undesirable growth of the libthread's LWP pool.
1156   // The fix below isn't sufficient; for instance, it doesn't take into account
1157   // LWPs parked on IO.  It does, however, help certain CPU-bound benchmarks.
1158   //
1159   // Some pathologies this scheme doesn't handle:
1160   // *  Threads can block, releasing the LWPs.  The LWPs can age out.
1161   //    When a large number of threads become ready again there aren't
1162   //    enough LWPs available to service them.  This can occur when the
1163   //    number of ready threads oscillates.
1164   // *  LWPs/Threads park on IO, thus taking the LWP out of circulation.
1165   //
1166   // Finally, we should call thr_setconcurrency() periodically to refresh
1167   // the LWP pool and thwart the LWP age-out mechanism.
1168   // The "+3" term provides a little slop -- we want to slightly overprovision.
1169 
1170   if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
1171     if (!(flags & THR_BOUND)) {
1172       thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
1173     }
1174   }
1175   // Although this doesn't hurt, we should warn of undefined behavior
1176   // when using unbound T1 threads with schedctl().  This should never
1177   // happen, as the compiler and VM threads are always created bound
1178   DEBUG_ONLY(
1179       if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
1180           (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
1181           ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
1182            (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
1183          warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
1184       }
1185   );
1186 
1187 
1188   // Mark that we don't have an lwp or thread id yet.
1189   // In case we attempt to set the priority before the thread starts.
1190   osthread->set_lwp_id(-1);
1191   osthread->set_thread_id(-1);
1192 
1193   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1194   if (status != 0) {
1195     if (PrintMiscellaneous && (Verbose || WizardMode)) {
1196       perror("os::create_thread");
1197     }
1198     thread->set_osthread(NULL);
1199     // Need to clean up stuff we've allocated so far
1200     delete osthread;
1201     return false;
1202   }
1203 
1204   Atomic::inc(&os::Solaris::_os_thread_count);
1205 
1206   // Store info on the Solaris thread into the OSThread
1207   osthread->set_thread_id(tid);
1208 
1209   // Remember that we created this thread so we can set priority on it
1210   osthread->set_vm_created();
1211 
1212   // Set the default thread priority.  If using bound threads, setting
1213   // lwp priority will be delayed until thread start.
1214   set_native_priority(thread,
1215                       DefaultThreadPriority == -1 ?
1216                         java_to_os_priority[NormPriority] :
1217                         DefaultThreadPriority);
1218 
1219   // Initial thread state is INITIALIZED, not SUSPENDED
1220   osthread->set_state(INITIALIZED);
1221 
1222   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1223   return true;
1224 }
1225 
1226 /* SIGJVM1 and SIGJVM2 are defined for >= Solaris 10. Defining them here allows
1227  * builds on earlier versions of Solaris to take advantage of the newly reserved
1228  * Solaris JVM signals. With SIGJVM1 and SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1,
1229  * ASYNC_SIGNAL is SIGJVM2, and -XX:+UseAltSigs does nothing since these should have no conflict.
1230  */
1231 #if !defined(SIGJVM1)
1232 #define SIGJVM1 39
1233 #define SIGJVM2 40
1234 #endif
1235 
1236 debug_only(static bool signal_sets_initialized = false);
1237 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1238 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1239 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1240 
1241 bool os::Solaris::is_sig_ignored(int sig) {
1242   struct sigaction oact;
1243   sigaction(sig, (struct sigaction*)NULL, &oact);
1244   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
1245                                  : CAST_FROM_FN_PTR(void*, oact.sa_handler);
1246   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
1247     return true;
1248   }
1249   return false;
1250 }
1251 
1252 // Note: SIGRTMIN is a macro that calls sysconf() so it will
1253 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime
1254 static bool isJVM1available() {
1255   return SIGJVM1 < SIGRTMIN;
1256 }
1257 
1258 void os::Solaris::signal_sets_init() {
1259   // Should also have an assertion stating we are still single-threaded.
1260   assert(!signal_sets_initialized, "Already initialized");
1261   // Fill in signals that are necessarily unblocked for all threads in
1262   // the VM. Currently, we unblock the following signals:
1263   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
1264   //                         by -Xrs (=ReduceSignalUsage));
1265   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1266   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1267   // the dispositions or masks wrt these signals.
1268   // Programs embedding the VM that want to use the above signals for their
1269   // own purposes must, at this time, use the "-Xrs" option to prevent
1270   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1271   // (See bug 4345157, and other related bugs).
1272   // In reality, though, unblocking these signals is really a nop, since
1273   // these signals are not blocked by default.
1274   sigemptyset(&unblocked_sigs);
1275   sigemptyset(&allowdebug_blocked_sigs);
1276   sigaddset(&unblocked_sigs, SIGILL);
1277   sigaddset(&unblocked_sigs, SIGSEGV);
1278   sigaddset(&unblocked_sigs, SIGBUS);
1279   sigaddset(&unblocked_sigs, SIGFPE);
1280 
1281   if (isJVM1available()) {
1282     os::Solaris::set_SIGinterrupt(SIGJVM1);
1283     os::Solaris::set_SIGasync(SIGJVM2);
1284   } else if (UseAltSigs) {
1285     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1286     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1287   } else {
1288     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1289     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1290   }
1291 
1292   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1293   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1294 
1295   if (!ReduceSignalUsage) {
1296    if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1297       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1298       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1299    }
1300    if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1301       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1302       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1303    }
1304    if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1305       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1306       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1307    }
1308   }
1309   // Fill in signals that are blocked by all but the VM thread.
1310   sigemptyset(&vm_sigs);
1311   if (!ReduceSignalUsage)
1312     sigaddset(&vm_sigs, BREAK_SIGNAL);
1313   debug_only(signal_sets_initialized = true);
1314 
1315   // For diagnostics only used in run_periodic_checks
1316   sigemptyset(&check_signal_done);
1317 }
1318 
1319 // These are signals that are unblocked while a thread is running Java.
1320 // (For some reason, they get blocked by default.)
1321 sigset_t* os::Solaris::unblocked_signals() {
1322   assert(signal_sets_initialized, "Not initialized");
1323   return &unblocked_sigs;
1324 }
1325 
1326 // These are the signals that are blocked while a (non-VM) thread is
1327 // running Java. Only the VM thread handles these signals.
1328 sigset_t* os::Solaris::vm_signals() {
1329   assert(signal_sets_initialized, "Not initialized");
1330   return &vm_sigs;
1331 }
1332 
1333 // These are signals that are blocked during cond_wait to allow debugger in
1334 sigset_t* os::Solaris::allowdebug_blocked_signals() {
1335   assert(signal_sets_initialized, "Not initialized");
1336   return &allowdebug_blocked_sigs;
1337 }
1338 
1339 
1340 void _handle_uncaught_cxx_exception() {
1341   VMError err("An uncaught C++ exception");
1342   err.report_and_die();
1343 }
1344 
1345 
1346 // First crack at OS-specific initialization, from inside the new thread.
1347 void os::initialize_thread(Thread* thr) {
1348   int r = thr_main() ;
1349   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
1350   if (r) {
1351     JavaThread* jt = (JavaThread *)thr;
1352     assert(jt != NULL,"Sanity check");
1353     size_t stack_size;
1354     address base = jt->stack_base();
1355     if (Arguments::created_by_java_launcher()) {
1356       // Use 2MB to allow for Solaris 7 64 bit mode.
1357       stack_size = JavaThread::stack_size_at_create() == 0
1358         ? 2048*K : JavaThread::stack_size_at_create();
1359 
1360       // There are rare cases when we may have already used more than
1361       // the basic stack size allotment before this method is invoked.
1362       // Attempt to allow for a normally sized java_stack.
1363       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1364       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1365     } else {
1366       // 6269555: If we were not created by a Java launcher, i.e. if we are
1367       // running embedded in a native application, treat the primordial thread
1368       // as much like a native attached thread as possible.  This means using
1369       // the current stack size from thr_stksegment(), unless it is too large
1370       // to reliably setup guard pages.  A reasonable max size is 8MB.
1371       size_t current_size = current_stack_size();
1372       // This should never happen, but just in case....
1373       if (current_size == 0) current_size = 2 * K * K;
1374       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1375     }
1376     address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
1377     stack_size = (size_t)(base - bottom);
1378 
1379     assert(stack_size > 0, "Stack size calculation problem");
1380 
1381     if (stack_size > jt->stack_size()) {
1382       NOT_PRODUCT(
1383         struct rlimit limits;
1384         getrlimit(RLIMIT_STACK, &limits);
1385         size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1386         assert(size >= jt->stack_size(), "Stack size problem in main thread");
1387       )
1388       tty->print_cr(
1389         "Stack size of " SIZE_FORMAT " Kb exceeds current limit of " SIZE_FORMAT " Kb.\n"
1390         "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1391         "See limit(1) to increase the stack size limit.",
1392         stack_size / K, jt->stack_size() / K);
1393       vm_exit(1);
1394     }
1395     assert(jt->stack_size() >= stack_size,
1396           "Attempt to map more stack than was allocated");
1397     jt->set_stack_size(stack_size);
1398   }
1399 
1400    // 5/22/01: Right now alternate signal stacks do not handle
1401    // throwing stack overflow exceptions, see bug 4463178
1402    // Until a fix is found for this, T2 will NOT imply alternate signal
1403    // stacks.
1404    // If using T2 libthread threads, install an alternate signal stack.
1405    // Because alternate stacks are associated with LWPs on Solaris
1406    // (see sigaltstack(2)), if using UNBOUND threads, or if UseBoundThreads,
1407    // we prefer to stack bang explicitly.
1408    // If not using T2 libthread, but using UseBoundThreads, any threads
1409    // we did not create (the primordial thread, jni_attachCurrentThread)
1410    // are probably not bound and therefore cannot have an alternate
1411    // signal stack. Since our stack banging code is generated and
1412    // is shared across threads, all threads must be bound to allow
1413    // using alternate signal stacks.  The alternative is to interpose
1414    // on _lwp_create to associate an alt sig stack with each LWP,
1415    // and this could be a problem when the JVM is embedded.
1416    // We would prefer to use alternate signal stacks with T2.
1417    // Since there is currently no accurate way to detect T2,
1418    // we do not. Assuming T2 when running T1 causes sig 11s or assertions
1419    // when installing alternate signal stacks.
1420 
1421 
1422    // 05/09/03: removed alternate signal stack support for Solaris
1423    // The alternate signal stack mechanism is no longer needed to
1424    // handle stack overflow. This is now handled by allocating
1425    // guard pages (red zone) and stackbanging.
1426    // Initially the alternate signal stack mechanism was removed because
1427    // it did not work with T1 libthread. Alternate
1428    // signal stacks MUST have all threads bound to lwps. Applications
1429    // can create their own threads and attach them without their being
1430    // bound under T1. This is frequently the case for the primordial thread.
1431    // If we were ever to reenable this mechanism we would need to
1432    // use the dynamic check for T2 libthread.
1433 
1434   os::Solaris::init_thread_fpu_state();
1435   std::set_terminate(_handle_uncaught_cxx_exception);
1436 }
1437 
1438 
1439 
1440 // Free Solaris resources related to the OSThread
1441 void os::free_thread(OSThread* osthread) {
1442   assert(osthread != NULL, "os::free_thread but osthread not set");
1443 
1444 
1445   // We are told to free resources of the argument thread,
1446   // but we can only really operate on the current thread.
1447   // The main thread must take the VMThread down synchronously
1448   // before the main thread exits and frees up CodeHeap
1449   guarantee((Thread::current()->osthread() == osthread
1450      || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1451   if (Thread::current()->osthread() == osthread) {
1452     // Restore caller's signal mask
1453     sigset_t sigmask = osthread->caller_sigmask();
1454     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1455   }
1456   delete osthread;
1457 }
1458 
1459 void os::pd_start_thread(Thread* thread) {
1460   int status = thr_continue(thread->osthread()->thread_id());
1461   assert_status(status == 0, status, "thr_continue failed");
1462 }
1463 
1464 
1465 intx os::current_thread_id() {
1466   return (intx)thr_self();
1467 }
1468 
1469 static pid_t _initial_pid = 0;
1470 
1471 int os::current_process_id() {
1472   return (int)(_initial_pid ? _initial_pid : getpid());
1473 }
1474 
1475 int os::allocate_thread_local_storage() {
1476   // %%%       in Win32 this allocates a memory segment pointed to by a
1477   //           register.  Dan Stein can implement a similar feature in
1478   //           Solaris.  Alternatively, the VM can do the same thing
1479   //           explicitly: malloc some storage and keep the pointer in a
1480   //           register (which is part of the thread's context) (or keep it
1481   //           in TLS).
1482   // %%%       In current versions of Solaris, thr_self and TSD can
1483   //           be accessed via short sequences of displaced indirections.
1484   //           The value of thr_self is available as %g7(36).
1485   //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
1486   //           assuming that the current thread already has a value bound to k.
1487   //           It may be worth experimenting with such access patterns,
1488   //           and later having the parameters formally exported from a Solaris
1489   //           interface.  I think, however, that it will be faster to
1490   //           maintain the invariant that %g2 always contains the
1491   //           JavaThread in Java code, and have stubs simply
1492   //           treat %g2 as a caller-save register, preserving it in a %lN.
1493   thread_key_t tk;
1494   if (thr_keycreate( &tk, NULL ) )
1495     fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
1496                   "(%s)", strerror(errno)));
1497   return int(tk);
1498 }
1499 
1500 void os::free_thread_local_storage(int index) {
1501   // %%% don't think we need anything here
1502   // if ( pthread_key_delete((pthread_key_t) tk) )
1503   //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
1504 }
1505 
1506 #define SMALLINT 32   // libthread's allocation for tsd_common is a version-specific
1507                       // small number - the point is that NO swap space is available
1508 void os::thread_local_storage_at_put(int index, void* value) {
1509   // %%% this is used only in threadLocalStorage.cpp
1510   if (thr_setspecific((thread_key_t)index, value)) {
1511     if (errno == ENOMEM) {
1512        vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
1513                              "thr_setspecific: out of swap space");
1514     } else {
1515       fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1516                     "(%s)", strerror(errno)));
1517     }
1518   } else {
1519       ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
1520   }
1521 }
1522 
1523 // This function could be called before TLS is initialized, for example, when
1524 // VM receives an async signal or when VM causes a fatal error during
1525 // initialization. Return NULL if thr_getspecific() fails.
1526 void* os::thread_local_storage_at(int index) {
1527   // %%% this is used only in threadLocalStorage.cpp
1528   void* r = NULL;
1529   return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1530 }
1531 
1532 
1533 // gethrtime() can move backwards if read on one cpu and then on a different cpu.
1534 // getTimeNanos() is guaranteed not to move backwards on Solaris.
1535 // A local spinlock is used because a CAS on an int is faster than a CAS on a
1536 // 64-bit jlong, and Atomic::cmpxchg for jlong is not supported on sparc v8 or
1537 // on pre-supports_cx8 intel boxes.
1538 // oldgetTimeNanos() is for systems which do not support CAS on a 64-bit jlong,
1539 // i.e. sparc v8 and pre-supports_cx8 (i486) intel boxes.
1540 inline hrtime_t oldgetTimeNanos() {
1541   int gotlock = LOCK_INVALID;
1542   hrtime_t newtime = gethrtime();
1543 
1544   for (;;) {
1545     // grab the lock protecting max_hrtime
1546     int curlock = max_hrtime_lock;
1547     if (curlock & LOCK_BUSY)  continue;
1548     if ((gotlock = Atomic::cmpxchg(LOCK_BUSY, &max_hrtime_lock, LOCK_FREE)) != LOCK_FREE) continue;
1549     if (newtime > max_hrtime) {
1550       max_hrtime = newtime;
1551     } else {
1552       newtime = max_hrtime;
1553     }
1554     // release lock
1555     max_hrtime_lock = LOCK_FREE;
1556     return newtime;
1557   }
1558 }
1559 // gethrtime() can move backwards if read on one cpu and then on a different cpu.
1560 // getTimeNanos() is guaranteed not to move backwards on Solaris.
1561 inline hrtime_t getTimeNanos() {
1562   if (VM_Version::supports_cx8()) {
1563     const hrtime_t now = gethrtime();
1564     // Use atomic long load since 32-bit x86 uses 2 registers to keep long.
1565     const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime);
1566     if (now <= prev)  return prev;   // same or retrograde time;
1567     const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1568     assert(obsv >= prev, "invariant");   // Monotonicity
1569     // If the CAS succeeded then we're done and return "now".
1570     // If the CAS failed and the observed value "obsv" is >= now then
1571     // we should return "obsv".  If the CAS failed and now > obsv > prev then
1572     // some other thread raced this thread and installed a new value, in which case
1573     // we could either (a) retry the entire operation, (b) retry trying to install now
1574     // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1575     // we might discard a higher "now" value in deference to a slightly lower but freshly
1576     // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1577     // to (a) or (b) -- and greatly reduces coherence traffic.
1578     // We might also condition (c) on the magnitude of the delta between obs and now.
1579     // Avoiding excessive CAS operations to hot RW locations is critical.
1580     // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate
1581     return (prev == obsv) ? now : obsv ;
1582   } else {
1583     return oldgetTimeNanos();
1584   }
1585 }
1586 
1587 // Time since start-up in seconds to a fine granularity.
1588 // Used by VMSelfDestructTimer and the MemProfiler.
1589 double os::elapsedTime() {
1590   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1591 }
1592 
1593 jlong os::elapsed_counter() {
1594   return (jlong)(getTimeNanos() - first_hrtime);
1595 }
1596 
1597 jlong os::elapsed_frequency() {
1598    return hrtime_hz;
1599 }
1600 
1601 // Return the real, user, and system times in seconds from an
1602 // arbitrary fixed point in the past.
1603 bool os::getTimesSecs(double* process_real_time,
1604                   double* process_user_time,
1605                   double* process_system_time) {
1606   struct tms ticks;
1607   clock_t real_ticks = times(&ticks);
1608 
1609   if (real_ticks == (clock_t) (-1)) {
1610     return false;
1611   } else {
1612     double ticks_per_second = (double) clock_tics_per_sec;
1613     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1614     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1615     // For consistency return the real time from getTimeNanos()
1616     // converted to seconds.
1617     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1618 
1619     return true;
1620   }
1621 }
1622 
1623 bool os::supports_vtime() { return true; }
1624 
1625 bool os::enable_vtime() {
1626   int fd = ::open("/proc/self/ctl", O_WRONLY);
1627   if (fd == -1)
1628     return false;
1629 
1630   long cmd[] = { PCSET, PR_MSACCT };
1631   int res = ::write(fd, cmd, sizeof(long) * 2);
1632   ::close(fd);
1633   if (res != sizeof(long) * 2)
1634     return false;
1635 
1636   return true;
1637 }
1638 
1639 bool os::vtime_enabled() {
1640   int fd = ::open("/proc/self/status", O_RDONLY);
1641   if (fd == -1)
1642     return false;
1643 
1644   pstatus_t status;
1645   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1646   ::close(fd);
1647   if (res != sizeof(pstatus_t))
1648     return false;
1649 
1650   return status.pr_flags & PR_MSACCT;
1651 }
1652 
1653 double os::elapsedVTime() {
1654   return (double)gethrvtime() / (double)hrtime_hz;
1655 }
1656 
1657 // Used internally for comparisons only
1658 // getTimeMillis guaranteed to not move backwards on Solaris
1659 jlong getTimeMillis() {
1660   jlong nanotime = getTimeNanos();
1661   return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1662 }
1663 
1664 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1665 jlong os::javaTimeMillis() {
1666   timeval t;
1667   if (gettimeofday( &t, NULL) == -1)
1668     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1669   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1670 }
1671 
1672 jlong os::javaTimeNanos() {
1673   return (jlong)getTimeNanos();
1674 }
1675 
1676 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1677   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1678   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1679   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1680   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1681 }
1682 
1683 char * os::local_time_string(char *buf, size_t buflen) {
1684   struct tm t;
1685   time_t long_time;
1686   time(&long_time);
1687   localtime_r(&long_time, &t);
1688   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1689                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1690                t.tm_hour, t.tm_min, t.tm_sec);
1691   return buf;
1692 }
1693 
1694 // Note: os::shutdown() might be called very early during initialization, or
1695 // called from signal handler. Before adding something to os::shutdown(), make
1696 // sure it is async-safe and can handle partially initialized VM.
1697 void os::shutdown() {
1698 
1699   // allow PerfMemory to attempt cleanup of any persistent resources
1700   perfMemory_exit();
1701 
1702   // needs to remove object in file system
1703   AttachListener::abort();
1704 
1705   // flush buffered output, finish log files
1706   ostream_abort();
1707 
1708   // Check for abort hook
1709   abort_hook_t abort_hook = Arguments::abort_hook();
1710   if (abort_hook != NULL) {
1711     abort_hook();
1712   }
1713 }
1714 
1715 // Note: os::abort() might be called very early during initialization, or
1716 // called from signal handler. Before adding something to os::abort(), make
1717 // sure it is async-safe and can handle partially initialized VM.
1718 void os::abort(bool dump_core) {
1719   os::shutdown();
1720   if (dump_core) {
1721 #ifndef PRODUCT
1722     fdStream out(defaultStream::output_fd());
1723     out.print_raw("Current thread is ");
1724     char buf[16];
1725     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1726     out.print_raw_cr(buf);
1727     out.print_raw_cr("Dumping core ...");
1728 #endif
1729     ::abort(); // dump core (for debugging)
1730   }
1731 
1732   ::exit(1);
1733 }
1734 
1735 // Die immediately, no exit hook, no abort hook, no cleanup.
1736 void os::die() {
1737   ::abort(); // dump core (for debugging)
1738 }
1739 
1740 // unused
1741 void os::set_error_file(const char *logfile) {}
1742 
1743 // DLL functions
1744 
1745 const char* os::dll_file_extension() { return ".so"; }
1746 
1747 // This must be hard coded because it's the system's temporary
1748 // directory, not the java application's temp directory (java.io.tmpdir).
1749 const char* os::get_temp_directory() { return "/tmp"; }
1750 
1751 static bool file_exists(const char* filename) {
1752   struct stat statbuf;
1753   if (filename == NULL || strlen(filename) == 0) {
1754     return false;
1755   }
1756   return os::stat(filename, &statbuf) == 0;
1757 }
1758 
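     // Build a platform-specific library name into 'buffer': "lib<fname>.so" when
     // no path prefix is given, "<pname>/lib<fname>.so" otherwise. If 'pname' is a
     // path list, probe each element and use the first one that contains the file.
     // Returns false on buffer overflow, or when a path list is given and none of
     // its elements contains the file.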
1759 bool os::dll_build_name(char* buffer, size_t buflen,
1760                         const char* pname, const char* fname) {
1761   bool retval = false;
1762   const size_t pnamelen = pname ? strlen(pname) : 0;
1763 
1764   // Return error on buffer overflow.
1765   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1766     return retval;
1767   }
1768 
1769   if (pnamelen == 0) {
1770     snprintf(buffer, buflen, "lib%s.so", fname);
1771     retval = true;
1772   } else if (strchr(pname, *os::path_separator()) != NULL) {
1773     int n;
1774     char** pelements = split_path(pname, &n);
1775     if (pelements == NULL) {
1776       return false;
1777     }
1778     for (int i = 0 ; i < n ; i++) {
1779       // really shouldn't be NULL but what the heck, check can't hurt
1780       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1781         continue; // skip the empty path values
1782       }
1783       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1784       if (file_exists(buffer)) {
1785         retval = true;
1786         break;
1787       }
1788     }
1789     // release the storage
1790     for (int i = 0 ; i < n ; i++) {
1791       if (pelements[i] != NULL) {
1792         FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1793       }
1794     }
1795     if (pelements != NULL) {
1796       FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1797     }
1798   } else {
1799     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1800     retval = true;
1801   }
1802   return retval;
1803 }
1804 
1805 // check if addr is inside libjvm.so
1806 bool os::address_is_in_vm(address addr) {
1807   static address libjvm_base_addr;
1808   Dl_info dlinfo;
1809 
1810   if (libjvm_base_addr == NULL) {
1811     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1812       libjvm_base_addr = (address)dlinfo.dli_fbase;
1813     }
1814     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1815   }
1816 
1817   if (dladdr((void *)addr, &dlinfo) != 0) {
1818     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1819   }
1820 
1821   return false;
1822 }
1823 
1824 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
1825 static dladdr1_func_type dladdr1_func = NULL;
1826 
1827 bool os::dll_address_to_function_name(address addr, char *buf,
1828                                       int buflen, int * offset) {
1829   // buf is not optional, but offset is optional
1830   assert(buf != NULL, "sanity check");
1831 
1832   Dl_info dlinfo;
1833 
1834   // dladdr1_func was initialized in os::init()
1835   if (dladdr1_func != NULL) {
1836     // yes, we have dladdr1
1837 
1838     // Support for dladdr1 is checked at runtime; it may be
1839     // available even if the vm is built on a machine that does
1840     // not have dladdr1 support.  Make sure there is a value for
1841     // RTLD_DL_SYMENT.
1842     #ifndef RTLD_DL_SYMENT
1843     #define RTLD_DL_SYMENT 1
1844     #endif
1845 #ifdef _LP64
1846     Elf64_Sym * info;
1847 #else
1848     Elf32_Sym * info;
1849 #endif
1850     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1851                      RTLD_DL_SYMENT) != 0) {
1852       // see if we have a matching symbol that covers our address
1853       if (dlinfo.dli_saddr != NULL &&
1854           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1855         if (dlinfo.dli_sname != NULL) {
1856           if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1857             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1858           }
1859           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1860           return true;
1861         }
1862       }
1863       // no matching symbol so try for just file info
1864       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1865         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1866                             buf, buflen, offset, dlinfo.dli_fname)) {
1867           return true;
1868         }
1869       }
1870     }
1871     buf[0] = '\0';
1872     if (offset != NULL) *offset  = -1;
1873     return false;
1874   }
1875 
1876   // no, only dladdr is available
1877   if (dladdr((void *)addr, &dlinfo) != 0) {
1878     // see if we have a matching symbol
1879     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1880       if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1881         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1882       }
1883       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1884       return true;
1885     }
1886     // no matching symbol so try for just file info
1887     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1888       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1889                           buf, buflen, offset, dlinfo.dli_fname)) {
1890         return true;
1891       }
1892     }
1893   }
1894   buf[0] = '\0';
1895   if (offset != NULL) *offset  = -1;
1896   return false;
1897 }
1898 
1899 bool os::dll_address_to_library_name(address addr, char* buf,
1900                                      int buflen, int* offset) {
1901   // buf is not optional, but offset is optional
1902   assert(buf != NULL, "sanity check");
1903 
1904   Dl_info dlinfo;
1905 
1906   if (dladdr((void*)addr, &dlinfo) != 0) {
1907     if (dlinfo.dli_fname != NULL) {
1908       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1909     }
1910     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1911       *offset = addr - (address)dlinfo.dli_fbase;
1912     }
1913     return true;
1914   }
1915 
1916   buf[0] = '\0';
1917   if (offset) *offset = -1;
1918   return false;
1919 }
1920 
1921 // Prints the names and full paths of all opened dynamic libraries
1922 // for current process
1923 void os::print_dll_info(outputStream * st) {
1924   Dl_info dli;
1925   void *handle;
1926   Link_map *map;
1927   Link_map *p;
1928 
1929   st->print_cr("Dynamic libraries:"); st->flush();
1930 
1931   if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
1932       dli.dli_fname == NULL) {
1933     st->print_cr("Error: Cannot print dynamic libraries.");
1934     return;
1935   }
1936   handle = dlopen(dli.dli_fname, RTLD_LAZY);
1937   if (handle == NULL) {
1938     st->print_cr("Error: Cannot print dynamic libraries.");
1939     return;
1940   }
1941   dlinfo(handle, RTLD_DI_LINKMAP, &map);
1942   if (map == NULL) {
1943     st->print_cr("Error: Cannot print dynamic libraries.");
1944     return;
1945   }
1946 
1947   while (map->l_prev != NULL)
1948     map = map->l_prev;
1949 
1950   while (map != NULL) {
1951     st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
1952     map = map->l_next;
1953   }
1954 
1955   dlclose(handle);
1956 }
1957 
1958 // Loads the .dll/.so and, in case of error, checks
1959 // whether the .dll/.so was built for the same
1960 // architecture as HotSpot is running on.
1961 
1962 void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
1963 {
1964   void * result= ::dlopen(filename, RTLD_LAZY);
1965   if (result != NULL) {
1966     // Successful loading
1967     return result;
1968   }
1969 
1970   Elf32_Ehdr elf_head;
1971 
1972   // Read system error message into ebuf
1973   // It may or may not be overwritten below
1974   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1975   ebuf[ebuflen-1]='\0';
1976   int diag_msg_max_length=ebuflen-strlen(ebuf);
1977   char* diag_msg_buf=ebuf+strlen(ebuf);
1978 
1979   if (diag_msg_max_length==0) {
1980     // No more space in ebuf for additional diagnostics message
1981     return NULL;
1982   }
1983 
1984 
1985   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1986 
1987   if (file_descriptor < 0) {
1988     // Can't open library, report dlerror() message
1989     return NULL;
1990   }
1991 
1992   bool failed_to_read_elf_head=
1993     (sizeof(elf_head)!=
1994         (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;
1995 
1996   ::close(file_descriptor);
1997   if (failed_to_read_elf_head) {
1998     // file i/o error - report dlerror() msg
1999     return NULL;
2000   }
2001 
2002   typedef struct {
2003     Elf32_Half  code;         // Actual value as defined in elf.h
2004     Elf32_Half  compat_class; // Compatibility class of the arch, as the VM sees it
2005     char        elf_class;    // 32 or 64 bit
2006     char        endianess;    // MSB or LSB
2007     char*       name;         // String representation
2008   } arch_t;
2009 
2010   static const arch_t arch_array[]={
2011     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
2012     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
2013     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
2014     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
2015     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
2016     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
2017     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
2018     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
2019     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
2020     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
2021   };
2022 
2023   #if  (defined IA32)
2024     static  Elf32_Half running_arch_code=EM_386;
2025   #elif   (defined AMD64)
2026     static  Elf32_Half running_arch_code=EM_X86_64;
2027   #elif  (defined IA64)
2028     static  Elf32_Half running_arch_code=EM_IA_64;
2029   #elif  (defined __sparc) && (defined _LP64)
2030     static  Elf32_Half running_arch_code=EM_SPARCV9;
2031   #elif  (defined __sparc) && (!defined _LP64)
2032     static  Elf32_Half running_arch_code=EM_SPARC;
2033   #elif  (defined __powerpc64__)
2034     static  Elf32_Half running_arch_code=EM_PPC64;
2035   #elif  (defined __powerpc__)
2036     static  Elf32_Half running_arch_code=EM_PPC;
2037   #elif (defined ARM)
2038     static  Elf32_Half running_arch_code=EM_ARM;
2039   #else
2040     #error Method os::dll_load requires that one of the following is defined:\
2041          IA32, AMD64, IA64, __sparc, __powerpc__, ARM
2042   #endif
2043 
2044   // Identify compatibility class for VM's architecture and library's architecture
2045   // Obtain string descriptions for architectures
2046 
2047   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
2048   int running_arch_index=-1;
2049 
2050   for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
2051     if (running_arch_code == arch_array[i].code) {
2052       running_arch_index    = i;
2053     }
2054     if (lib_arch.code == arch_array[i].code) {
2055       lib_arch.compat_class = arch_array[i].compat_class;
2056       lib_arch.name         = arch_array[i].name;
2057     }
2058   }
2059 
2060   assert(running_arch_index != -1,
2061     "Didn't find running architecture code (running_arch_code) in arch_array");
2062   if (running_arch_index == -1) {
2063     // Even though running architecture detection failed
2064     // we may still continue with reporting dlerror() message
2065     return NULL;
2066   }
2067 
2068   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
2069     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
2070     return NULL;
2071   }
2072 
2073   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
2074     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
2075     return NULL;
2076   }
2077 
2078   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
2079     if ( lib_arch.name!=NULL ) {
2080       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
2081         " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
2082         lib_arch.name, arch_array[running_arch_index].name);
2083     } else {
2084       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
2085       " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
2086         lib_arch.code,
2087         arch_array[running_arch_index].name);
2088     }
2089   }
2090 
2091   return NULL;
2092 }
2093 
2094 void* os::dll_lookup(void* handle, const char* name) {
2095   return dlsym(handle, name);
2096 }
2097 
2098 void* os::get_default_process_handle() {
2099   return (void*)::dlopen(NULL, RTLD_LAZY);
2100 }
2101 
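     // stat() wrapper that rejects over-long paths and converts 'path' to native
     // form before calling ::stat().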
2102 int os::stat(const char *path, struct stat *sbuf) {
2103   char pathbuf[MAX_PATH];
2104   if (strlen(path) > MAX_PATH - 1) {
2105     errno = ENAMETOOLONG;
2106     return -1;
2107   }
2108   os::native_path(strcpy(pathbuf, path));
2109   return ::stat(pathbuf, sbuf);
2110 }
2111 
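     // Copy the contents of an ASCII file onto the given stream.
     // Returns false if the file cannot be opened.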
2112 static bool _print_ascii_file(const char* filename, outputStream* st) {
2113   int fd = ::open(filename, O_RDONLY);
2114   if (fd == -1) {
2115      return false;
2116   }
2117 
2118   char buf[32];
2119   int bytes;
2120   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
2121     st->print_raw(buf, bytes);
2122   }
2123 
2124   ::close(fd);
2125 
2126   return true;
2127 }
2128 
2129 void os::print_os_info_brief(outputStream* st) {
2130   os::Solaris::print_distro_info(st);
2131 
2132   os::Posix::print_uname_info(st);
2133 
2134   os::Solaris::print_libversion_info(st);
2135 }
2136 
2137 void os::print_os_info(outputStream* st) {
2138   st->print("OS:");
2139 
2140   os::Solaris::print_distro_info(st);
2141 
2142   os::Posix::print_uname_info(st);
2143 
2144   os::Solaris::print_libversion_info(st);
2145 
2146   os::Posix::print_rlimit_info(st);
2147 
2148   os::Posix::print_load_average(st);
2149 }
2150 
2151 void os::Solaris::print_distro_info(outputStream* st) {
2152   if (!_print_ascii_file("/etc/release", st)) {
2153     st->print("Solaris");
2154   }
2155   st->cr();
2156 }
2157 
2158 void os::Solaris::print_libversion_info(outputStream* st) {
2159   if (os::Solaris::T2_libthread()) {
2160     st->print("  (T2 libthread)");
2161   }
2162   else {
2163     st->print("  (T1 libthread)");
2164   }
2165   st->cr();
2166 }
2167 
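     // Scan /proc/self/map and report any mapping that starts at virtual address 0,
     // printing its size, mapped file and access flags. Returns true if one was found.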
2168 static bool check_addr0(outputStream* st) {
2169   jboolean status = false;
2170   int fd = ::open("/proc/self/map",O_RDONLY);
2171   if (fd >= 0) {
2172     prmap_t p;
2173     while(::read(fd, &p, sizeof(p)) > 0) {
2174       if (p.pr_vaddr == 0x0) {
2175         st->print("Warning: Address: 0x%x, Size: %dK, ", p.pr_vaddr, p.pr_size/1024);
2176         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
2177         st->print("Access:");
2178         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
2179         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
2180         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
2181         st->cr();
2182         status = true;
2183       }
2184     }
2185     ::close(fd);
2186   }
2187   return status;
2188 }
2189 
2190 void os::pd_print_cpu_info(outputStream* st) {
2191   // Nothing to do for now.
2192 }
2193 
2194 void os::print_memory_info(outputStream* st) {
2195   st->print("Memory:");
2196   st->print(" %dk page", os::vm_page_size()>>10);
2197   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
2198   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
2199   st->cr();
2200   (void) check_addr0(st);
2201 }
2202 
2203 void os::print_siginfo(outputStream* st, void* siginfo) {
2204   const siginfo_t* si = (const siginfo_t*)siginfo;
2205 
2206   os::Posix::print_siginfo_brief(st, si);
2207 
2208   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2209       UseSharedSpaces) {
2210     FileMapInfo* mapinfo = FileMapInfo::current_info();
2211     if (mapinfo->is_in_shared_space(si->si_addr)) {
2212       st->print("\n\nError accessing class data sharing archive."   \
2213                 " Mapped file inaccessible during execution, "      \
2214                 "possible disk/network problem.");
2215     }
2216   }
2217   st->cr();
2218 }
2219 
2220 // Moved out of the signal-handling group below because we need these here for
2221 // diagnostic prints.
2222 #define OLDMAXSIGNUM 32
2223 static int Maxsignum = 0;
2224 static int *ourSigFlags = NULL;
2225 
2226 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2227 
2228 int os::Solaris::get_our_sigflags(int sig) {
2229   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2230   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2231   return ourSigFlags[sig];
2232 }
2233 
2234 void os::Solaris::set_our_sigflags(int sig, int flags) {
2235   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2236   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2237   ourSigFlags[sig] = flags;
2238 }
2239 
2240 
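     // Format a signal handler address as the containing library's basename plus
     // an offset when it can be resolved, otherwise as a raw pointer.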
2241 static const char* get_signal_handler_name(address handler,
2242                                            char* buf, int buflen) {
2243   int offset;
2244   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2245   if (found) {
2246     // skip directory names
2247     const char *p1, *p2;
2248     p1 = buf;
2249     size_t len = strlen(os::file_separator());
2250     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2251     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2252   } else {
2253     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2254   }
2255   return buf;
2256 }
2257 
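     // Print the installed handler for 'sig' along with its sa_mask and sa_flags,
     // and note when the flags of one of our own handlers have been changed.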
2258 static void print_signal_handler(outputStream* st, int sig,
2259                                   char* buf, size_t buflen) {
2260   struct sigaction sa;
2261 
2262   sigaction(sig, NULL, &sa);
2263 
2264   st->print("%s: ", os::exception_name(sig, buf, buflen));
2265 
2266   address handler = (sa.sa_flags & SA_SIGINFO)
2267                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2268                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
2269 
2270   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2271     st->print("SIG_DFL");
2272   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2273     st->print("SIG_IGN");
2274   } else {
2275     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2276   }
2277 
2278   st->print(", sa_mask[0]=");
2279   os::Posix::print_signal_set_short(st, &sa.sa_mask);
2280 
2281   address rh = VMError::get_resetted_sighandler(sig);
2282   // Maybe the handler was reset by VMError?
2283   if(rh != NULL) {
2284     handler = rh;
2285     sa.sa_flags = VMError::get_resetted_sigflags(sig);
2286   }
2287 
2288   st->print(", sa_flags=");
2289   os::Posix::print_sa_flags(st, sa.sa_flags);
2290 
2291   // Check: is it our handler?
2292   if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
2293      handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
2294     // It is our signal handler
2295     // check for flags
2296     if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
2297       st->print(
2298         ", flags were changed from " PTR32_FORMAT ", consider using jsig library",
2299         os::Solaris::get_our_sigflags(sig));
2300     }
2301   }
2302   st->cr();
2303 }
2304 
2305 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2306   st->print_cr("Signal Handlers:");
2307   print_signal_handler(st, SIGSEGV, buf, buflen);
2308   print_signal_handler(st, SIGBUS , buf, buflen);
2309   print_signal_handler(st, SIGFPE , buf, buflen);
2310   print_signal_handler(st, SIGPIPE, buf, buflen);
2311   print_signal_handler(st, SIGXFSZ, buf, buflen);
2312   print_signal_handler(st, SIGILL , buf, buflen);
2313   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2314   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2315   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2316   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2317   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2318   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2319   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2320   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2321 }
2322 
2323 static char saved_jvm_path[MAXPATHLEN] = { 0 };
2324 
2325 // Find the full path to the current module, libjvm.so
2326 void os::jvm_path(char *buf, jint buflen) {
2327   // Error checking.
2328   if (buflen < MAXPATHLEN) {
2329     assert(false, "must use a large-enough buffer");
2330     buf[0] = '\0';
2331     return;
2332   }
2333   // Lazy resolve the path to current module.
2334   if (saved_jvm_path[0] != 0) {
2335     strcpy(buf, saved_jvm_path);
2336     return;
2337   }
2338 
2339   Dl_info dlinfo;
2340   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2341   assert(ret != 0, "cannot locate libjvm");
2342   if (ret != 0 && dlinfo.dli_fname != NULL) {
2343     realpath((char *)dlinfo.dli_fname, buf);
2344   } else {
2345     buf[0] = '\0';
2346     return;
2347   }
2348 
2349   if (Arguments::sun_java_launcher_is_altjvm()) {
2350     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
2351     // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
2352     // If "/jre/lib/" appears at the right place in the string, then
2353     // assume we are installed in a JDK and we're done.  Otherwise, check
2354     // for a JAVA_HOME environment variable and fix up the path so it
2355     // looks like libjvm.so is installed there (append a fake suffix
2356     // hotspot/libjvm.so).
2357     const char *p = buf + strlen(buf) - 1;
2358     for (int count = 0; p > buf && count < 5; ++count) {
2359       for (--p; p > buf && *p != '/'; --p)
2360         /* empty */ ;
2361     }
2362 
2363     if (strncmp(p, "/jre/lib/", 9) != 0) {
2364       // Look for JAVA_HOME in the environment.
2365       char* java_home_var = ::getenv("JAVA_HOME");
2366       if (java_home_var != NULL && java_home_var[0] != 0) {
2367         char cpu_arch[12];
2368         char* jrelib_p;
2369         int   len;
2370         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2371 #ifdef _LP64
2372         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2373         if (strcmp(cpu_arch, "sparc") == 0) {
2374           strcat(cpu_arch, "v9");
2375         } else if (strcmp(cpu_arch, "i386") == 0) {
2376           strcpy(cpu_arch, "amd64");
2377         }
2378 #endif
2379         // Check the current module name "libjvm.so".
2380         p = strrchr(buf, '/');
2381         assert(strstr(p, "/libjvm") == p, "invalid library name");
2382 
2383         realpath(java_home_var, buf);
2384         // determine if this is a legacy image or modules image
2385         // modules image doesn't have "jre" subdirectory
2386         len = strlen(buf);
2387         jrelib_p = buf + len;
2388         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2389         if (0 != access(buf, F_OK)) {
2390           snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2391         }
2392 
2393         if (0 == access(buf, F_OK)) {
2394           // Use current module name "libjvm.so"
2395           len = strlen(buf);
2396           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2397         } else {
2398           // Go back to path of .so
2399           realpath((char *)dlinfo.dli_fname, buf);
2400         }
2401       }
2402     }
2403   }
2404 
2405   strcpy(saved_jvm_path, buf);
2406 }
2407 
2408 
2409 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2410   // no prefix required, not even "_"
2411 }
2412 
2413 
2414 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2415   // no suffix required
2416 }
2417 
2418 // This method is a copy of JDK's sysGetLastErrorString
2419 // from src/solaris/hpi/src/system_md.c
2420 
2421 size_t os::lasterror(char *buf, size_t len) {
2422 
2423   if (errno == 0)  return 0;
2424 
2425   const char *s = ::strerror(errno);
2426   size_t n = ::strlen(s);
2427   if (n >= len) {
2428     n = len - 1;
2429   }
2430   ::strncpy(buf, s, n);
2431   buf[n] = '\0';
2432   return n;
2433 }
2434 
2435 
2436 // sun.misc.Signal
2437 
2438 extern "C" {
2439   static void UserHandler(int sig, void *siginfo, void *context) {
2440     // Ctrl-C is pressed during error reporting, likely because the error
2441     // handler fails to abort. Let VM die immediately.
2442     if (sig == SIGINT && is_error_reported()) {
2443        os::die();
2444     }
2445 
2446     os::signal_notify(sig);
2447     // We do not need to reinstate the signal handler each time...
2448   }
2449 }
2450 
2451 void* os::user_handler() {
2452   return CAST_FROM_FN_PTR(void*, UserHandler);
2453 }
2454 
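     // Simple counting semaphore built on the Solaris sema_t primitives.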
2455 class Semaphore : public StackObj {
2456   public:
2457     Semaphore();
2458     ~Semaphore();
2459     void signal();
2460     void wait();
2461     bool trywait();
2462     bool timedwait(unsigned int sec, int nsec);
2463   private:
2464     sema_t _semaphore;
2465 };
2466 
2467 
2468 Semaphore::Semaphore() {
2469   sema_init(&_semaphore, 0, NULL, NULL);
2470 }
2471 
2472 Semaphore::~Semaphore() {
2473   sema_destroy(&_semaphore);
2474 }
2475 
2476 void Semaphore::signal() {
2477   sema_post(&_semaphore);
2478 }
2479 
2480 void Semaphore::wait() {
2481   sema_wait(&_semaphore);
2482 }
2483 
2484 bool Semaphore::trywait() {
2485   return sema_trywait(&_semaphore) == 0;
2486 }
2487 
2488 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2489   struct timespec ts;
2490   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2491 
2492   while (1) {
2493     int result = sema_timedwait(&_semaphore, &ts);
2494     if (result == 0) {
2495       return true;
2496     } else if (errno == EINTR) {
2497       continue;
2498     } else if (errno == ETIME) {
2499       return false;
2500     } else {
2501       return false;
2502     }
2503   }
2504 }
2505 
2506 extern "C" {
2507   typedef void (*sa_handler_t)(int);
2508   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2509 }
2510 
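     // Install 'handler' for 'signal_number' (with SA_RESTART semantics) and return
     // the previously installed handler, or (void *)-1 if sigaction() fails.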
2511 void* os::signal(int signal_number, void* handler) {
2512   struct sigaction sigAct, oldSigAct;
2513   sigfillset(&(sigAct.sa_mask));
2514   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2515   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2516 
2517   if (sigaction(signal_number, &sigAct, &oldSigAct))
2518     // -1 means registration failed
2519     return (void *)-1;
2520 
2521   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2522 }
2523 
2524 void os::signal_raise(int signal_number) {
2525   raise(signal_number);
2526 }
2527 
2528 /*
2529  * The following code is moved from os.cpp for making this
2530  * code platform specific, which it is by its very nature.
2531  */
2532 
2533 // a counter for each possible signal value
2534 static int Sigexit = 0;
2535 static int Maxlibjsigsigs;
2536 static jint *pending_signals = NULL;
2537 static int *preinstalled_sigs = NULL;
2538 static struct sigaction *chainedsigactions = NULL;
2539 static sema_t sig_sem;
2540 typedef int (*version_getting_t)();
2541 version_getting_t os::Solaris::get_libjsig_version = NULL;
2542 static int libjsigversion = 0;
2543 
2544 int os::sigexitnum_pd() {
2545   assert(Sigexit > 0, "signal memory not yet initialized");
2546   return Sigexit;
2547 }
2548 
2549 void os::Solaris::init_signal_mem() {
2550   // Initialize signal structures
2551   Maxsignum = SIGRTMAX;
2552   Sigexit = Maxsignum+1;
2553   assert(Maxsignum >0, "Unable to obtain max signal number");
2554 
2555   Maxlibjsigsigs = Maxsignum;
2556 
2557   // pending_signals has one int per signal
2558   // The additional signal is for SIGEXIT - exit signal to signal_thread
2559   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2560   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2561 
2562   if (UseSignalChaining) {
2563      chainedsigactions = (struct sigaction *)os::malloc(sizeof(struct sigaction)
2564        * (Maxsignum + 1), mtInternal);
2565      memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2566      preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2567      memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2568   }
2569   ourSigFlags = (int*)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2570   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2571 }
2572 
2573 void os::signal_init_pd() {
2574   int ret;
2575 
2576   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2577   assert(ret == 0, "sema_init() failed");
2578 }
2579 
2580 void os::signal_notify(int signal_number) {
2581   int ret;
2582 
2583   Atomic::inc(&pending_signals[signal_number]);
2584   ret = ::sema_post(&sig_sem);
2585   assert(ret == 0, "sema_post() failed");
2586 }
2587 
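     // Atomically claim one pending signal, decrementing its counter and returning
     // its number. With wait_for_signal, block on sig_sem (suspend-equivalent) until
     // a signal is posted; otherwise return -1 when nothing is pending.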
2588 static int check_pending_signals(bool wait_for_signal) {
2589   int ret;
2590   while (true) {
2591     for (int i = 0; i < Sigexit + 1; i++) {
2592       jint n = pending_signals[i];
2593       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2594         return i;
2595       }
2596     }
2597     if (!wait_for_signal) {
2598       return -1;
2599     }
2600     JavaThread *thread = JavaThread::current();
2601     ThreadBlockInVM tbivm(thread);
2602 
2603     bool threadIsSuspended;
2604     do {
2605       thread->set_suspend_equivalent();
2606       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2607       while((ret = ::sema_wait(&sig_sem)) == EINTR)
2608           ;
2609       assert(ret == 0, "sema_wait() failed");
2610 
2611       // were we externally suspended while we were waiting?
2612       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2613       if (threadIsSuspended) {
2614         //
2615         // The semaphore has been incremented, but while we were waiting
2616         // another thread suspended us. We don't want to continue running
2617         // while suspended because that would surprise the thread that
2618         // suspended us.
2619         //
2620         ret = ::sema_post(&sig_sem);
2621         assert(ret == 0, "sema_post() failed");
2622 
2623         thread->java_suspend_self();
2624       }
2625     } while (threadIsSuspended);
2626   }
2627 }
2628 
2629 int os::signal_lookup() {
2630   return check_pending_signals(false);
2631 }
2632 
2633 int os::signal_wait() {
2634   return check_pending_signals(true);
2635 }
2636 
2637 ////////////////////////////////////////////////////////////////////////////////
2638 // Virtual Memory
2639 
2640 static int page_size = -1;
2641 
2642 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2643 // clear this var if support is not available.
2644 static bool has_map_align = true;
2645 
2646 int os::vm_page_size() {
2647   assert(page_size != -1, "must call os::init");
2648   return page_size;
2649 }
2650 
2651 // Solaris allocates memory by pages.
2652 int os::vm_allocation_granularity() {
2653   assert(page_size != -1, "must call os::init");
2654   return page_size;
2655 }
2656 
2657 static bool recoverable_mmap_error(int err) {
2658   // See if the error is one we can let the caller handle. This
2659   // list of errno values comes from the Solaris mmap(2) man page.
2660   switch (err) {
2661   case EBADF:
2662   case EINVAL:
2663   case ENOTSUP:
2664     // let the caller deal with these errors
2665     return true;
2666 
2667   default:
2668     // Any remaining errors on this OS can cause our reserved mapping
2669     // to be lost. That can cause confusion where different data
2670     // structures think they have the same memory mapped. The worst
2671     // scenario is if both the VM and a library think they have the
2672     // same memory mapped.
2673     return false;
2674   }
2675 }
2676 
2677 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2678                                     int err) {
2679   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2680           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2681           strerror(err), err);
2682 }
2683 
2684 static void warn_fail_commit_memory(char* addr, size_t bytes,
2685                                     size_t alignment_hint, bool exec,
2686                                     int err) {
2687   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2688           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2689           alignment_hint, exec, strerror(err), err);
2690 }
2691 
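     // Commit memory by mapping /dev/zero over the already-reserved range with
     // MAP_FIXED. Returns 0 on success or a recoverable errno; unrecoverable mmap
     // errors terminate the VM.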
2692 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2693   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2694   size_t size = bytes;
2695   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2696   if (res != NULL) {
2697     if (UseNUMAInterleaving) {
2698       numa_make_global(addr, bytes);
2699     }
2700     return 0;
2701   }
2702 
2703   int err = errno;  // save errno from mmap() call in mmap_chunk()
2704 
2705   if (!recoverable_mmap_error(err)) {
2706     warn_fail_commit_memory(addr, bytes, exec, err);
2707     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2708   }
2709 
2710   return err;
2711 }
2712 
2713 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2714   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2715 }
2716 
2717 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2718                                   const char* mesg) {
2719   assert(mesg != NULL, "mesg must be specified");
2720   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2721   if (err != 0) {
2722     // the caller wants all commit errors to exit with the specified mesg:
2723     warn_fail_commit_memory(addr, bytes, exec, err);
2724     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2725   }
2726 }
2727 
2728 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2729                                     size_t alignment_hint, bool exec) {
2730   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2731   if (err == 0) {
2732     if (UseLargePages && (alignment_hint > (size_t)vm_page_size())) {
2733       // If the large page size has been set and the VM
2734       // is using large pages, use the large page size
2735       // if it is smaller than the alignment hint. This is
2736       // a case where the VM wants to use a larger alignment size
2737       // for its own reasons but still wants to use large pages
2738       // (which is what matters for setting the mpss range).
2739       size_t page_size = 0;
2740       if (large_page_size() < alignment_hint) {
2741         assert(UseLargePages, "Expected to be here for large page use only");
2742         page_size = large_page_size();
2743       } else {
2744         // If the alignment hint is less than the large page
2745         // size, the VM wants a particular alignment (thus the hint)
2746         // for internal reasons.  Try to set the mpss range using
2747         // the alignment_hint.
2748         page_size = alignment_hint;
2749       }
2750       // Since this is a hint, ignore any failures.
2751       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2752     }
2753   }
2754   return err;
2755 }
2756 
2757 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2758                           bool exec) {
2759   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2760 }
2761 
2762 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2763                                   size_t alignment_hint, bool exec,
2764                                   const char* mesg) {
2765   assert(mesg != NULL, "mesg must be specified");
2766   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2767   if (err != 0) {
2768     // the caller wants all commit errors to exit with the specified mesg:
2769     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2770     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2771   }
2772 }
2773 
2774 // Uncommit the pages in a specified region.
2775 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2776   if (madvise(addr, bytes, MADV_FREE) < 0) {
2777     debug_only(warning("MADV_FREE failed."));
2778     return;
2779   }
2780 }
2781 
2782 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2783   return os::commit_memory(addr, size, !ExecMem);
2784 }
2785 
2786 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2787   return os::uncommit_memory(addr, size);
2788 }
2789 
2790 // Change the page size in a given range.
2791 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2792   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2793   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2794   if (UseLargePages) {
2795     Solaris::setup_large_pages(addr, bytes, alignment_hint);
2796   }
2797 }
2798 
2799 // Tell the OS to make the range local to the first-touching LWP
2800 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2801   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2802   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2803     debug_only(warning("MADV_ACCESS_LWP failed."));
2804   }
2805 }
2806 
2807 // Tell the OS that this range would be accessed from different LWPs.
2808 void os::numa_make_global(char *addr, size_t bytes) {
2809   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2810   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2811     debug_only(warning("MADV_ACCESS_MANY failed."));
2812   }
2813 }
2814 
2815 // Get the number of the locality groups.
2816 size_t os::numa_get_groups_num() {
2817   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2818   return n != -1 ? n : 1;
2819 }
2820 
2821 // Get a list of leaf locality groups. A leaf lgroup is a group that
2822 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
2823 // board. An LWP is assigned to one of these groups upon creation.
2824 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2825    if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2826      ids[0] = 0;
2827      return 1;
2828    }
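        // ids[] doubles as the work list for a breadth-first walk of the lgroup
        // hierarchy: 'cur' is the next node to expand, children are appended at
        // 'top', and leaf groups that have memory are compacted into ids[0..bottom).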
2829    int result_size = 0, top = 1, bottom = 0, cur = 0;
2830    for (int k = 0; k < size; k++) {
2831      int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2832                                     (Solaris::lgrp_id_t*)&ids[top], size - top);
2833      if (r == -1) {
2834        ids[0] = 0;
2835        return 1;
2836      }
2837      if (!r) {
2838        // That's a leaf node.
2839        assert (bottom <= cur, "Sanity check");
2840        // Check if the node has memory
2841        if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2842                                    NULL, 0, LGRP_RSRC_MEM) > 0) {
2843          ids[bottom++] = ids[cur];
2844        }
2845      }
2846      top += r;
2847      cur++;
2848    }
2849    if (bottom == 0) {
2850      // Handle the situation when the OS reports no memory available.
2851      // Assume a UMA architecture.
2852      ids[0] = 0;
2853      return 1;
2854    }
2855    return bottom;
2856 }
2857 
2858 // Detect a topology change. This typically happens during CPU plugging/unplugging.
2859 bool os::numa_topology_changed() {
2860   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2861   if (is_stale != -1 && is_stale) {
2862     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2863     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2864     assert(c != 0, "Failure to initialize LGRP API");
2865     Solaris::set_lgrp_cookie(c);
2866     return true;
2867   }
2868   return false;
2869 }
2870 
2871 // Get the group id of the current LWP.
2872 int os::numa_get_group_id() {
2873   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2874   if (lgrp_id == -1) {
2875     return 0;
2876   }
2877   const int size = os::numa_get_groups_num();
2878   int *ids = (int*)alloca(size * sizeof(int));
2879 
2880   // Get the ids of all lgroups with memory; r is the count.
2881   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2882                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2883   if (r <= 0) {
2884     return 0;
2885   }
2886   return ids[os::random() % r];
2887 }
2888 
2889 // Request information about the page.
2890 bool os::get_page_info(char *start, page_info* info) {
2891   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2892   uint64_t addr = (uintptr_t)start;
2893   uint64_t outdata[2];
2894   uint_t validity = 0;
2895 
2896   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2897     return false;
2898   }
2899 
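       // meminfo() validity bits as used here: bit 0 - the address was valid,
       // bit 1 - lgroup id returned in outdata[0], bit 2 - page size in outdata[1].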
2900   info->size = 0;
2901   info->lgrp_id = -1;
2902 
2903   if ((validity & 1) != 0) {
2904     if ((validity & 2) != 0) {
2905       info->lgrp_id = outdata[0];
2906     }
2907     if ((validity & 4) != 0) {
2908       info->size = outdata[1];
2909     }
2910     return true;
2911   }
2912   return false;
2913 }
2914 
// Scan the pages from start to end until a page different from
// the one described in the info parameter is encountered.
2917 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2918   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2919   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2920   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2921   uint_t validity[MAX_MEMINFO_CNT];
2922 
2923   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2924   uint64_t p = (uint64_t)start;
2925   while (p < (uint64_t)end) {
2926     addrs[0] = p;
2927     size_t addrs_count = 1;
2928     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2929       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2930       addrs_count++;
2931     }
2932 
2933     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2934       return NULL;
2935     }
2936 
2937     size_t i = 0;
2938     for (; i < addrs_count; i++) {
2939       if ((validity[i] & 1) != 0) {
2940         if ((validity[i] & 4) != 0) {
2941           if (outdata[types * i + 1] != page_expected->size) {
2942             break;
2943           }
        } else if (page_expected->size != 0) {
          break;
        }
2948 
2949         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2950           if (outdata[types * i] != page_expected->lgrp_id) {
2951             break;
2952           }
2953         }
2954       } else {
2955         return NULL;
2956       }
2957     }
2958 
2959     if (i < addrs_count) {
2960       if ((validity[i] & 2) != 0) {
2961         page_found->lgrp_id = outdata[types * i];
2962       } else {
2963         page_found->lgrp_id = -1;
2964       }
2965       if ((validity[i] & 4) != 0) {
2966         page_found->size = outdata[types * i + 1];
2967       } else {
2968         page_found->size = 0;
2969       }
2970       return (char*)addrs[i];
2971     }
2972 
2973     p = addrs[addrs_count - 1] + page_size;
2974   }
2975   return end;
2976 }
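
// Illustrative sketch of how get_page_info() and scan_pages() compose
// (comment only, not compiled; 'region_start' and 'region_end' are assumed
// names, not variables from this file):
//
//   page_info expected, found;
//   if (os::get_page_info(region_start, &expected)) {
//     char* q = os::scan_pages(region_start, region_end, &expected, &found);
//     // [region_start, q) is backed by pages matching 'expected'; if
//     // q != region_end, 'found' describes the first page that differs.
//   }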
2977 
2978 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2979   size_t size = bytes;
2980   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2981   // uncommitted page. Otherwise, the read/write might succeed if we
2982   // have enough swap space to back the physical page.
2983   return
2984     NULL != Solaris::mmap_chunk(addr, size,
2985                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2986                                 PROT_NONE);
2987 }
2988 
2989 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2990   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2991 
2992   if (b == MAP_FAILED) {
2993     return NULL;
2994   }
2995   return b;
2996 }
2997 
2998 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
2999   char* addr = requested_addr;
3000   int flags = MAP_PRIVATE | MAP_NORESERVE;
3001 
3002   assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
3003 
3004   if (fixed) {
3005     flags |= MAP_FIXED;
3006   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
3007     flags |= MAP_ALIGN;
3008     addr = (char*) alignment_hint;
3009   }
3010 
3011   // Map uncommitted pages PROT_NONE so we fail early if we touch an
3012   // uncommitted page. Otherwise, the read/write might succeed if we
3013   // have enough swap space to back the physical page.
3014   return mmap_chunk(addr, bytes, flags, PROT_NONE);
3015 }
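
// Note on MAP_ALIGN above: when MAP_ALIGN is set, Solaris mmap() interprets
// the address argument as the required alignment rather than as a placement
// hint, which is why anon_mmap() passes alignment_hint through 'addr' when the
// hint exceeds the page size.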
3016 
3017 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
3018   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
3019 
3020   guarantee(requested_addr == NULL || requested_addr == addr,
3021             "OS failed to return requested mmap address.");
3022   return addr;
3023 }
3024 
3025 // Reserve memory at an arbitrary address, only if that area is
3026 // available (and not reserved for something else).
3027 
3028 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3029   const int max_tries = 10;
3030   char* base[max_tries];
3031   size_t size[max_tries];
3032 
3033   // Solaris adds a gap between mmap'ed regions.  The size of the gap
3034   // is dependent on the requested size and the MMU.  Our initial gap
3035   // value here is just a guess and will be corrected later.
3036   bool had_top_overlap = false;
3037   bool have_adjusted_gap = false;
3038   size_t gap = 0x400000;
3039 
3040   // Assert only that the size is a multiple of the page size, since
3041   // that's all that mmap requires, and since that's all we really know
3042   // about at this low abstraction level.  If we need higher alignment,
3043   // we can either pass an alignment to this method or verify alignment
3044   // in one of the methods further up the call chain.  See bug 5044738.
3045   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
3046 
3047   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
3048   // Give it a try, if the kernel honors the hint we can return immediately.
3049   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
3050 
3051   volatile int err = errno;
3052   if (addr == requested_addr) {
3053     return addr;
3054   } else if (addr != NULL) {
3055     pd_unmap_memory(addr, bytes);
3056   }
3057 
3058   if (PrintMiscellaneous && Verbose) {
3059     char buf[256];
3060     buf[0] = '\0';
3061     if (addr == NULL) {
3062       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
3063     }
3064     warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
3065             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
3066             "%s", bytes, requested_addr, addr, buf);
3067   }
3068 
3069   // Address hint method didn't work.  Fall back to the old method.
3070   // In theory, once SNV becomes our oldest supported platform, this
3071   // code will no longer be needed.
3072   //
3073   // Repeatedly allocate blocks until the block is allocated at the
3074   // right spot. Give up after max_tries.
3075   int i;
3076   for (i = 0; i < max_tries; ++i) {
3077     base[i] = reserve_memory(bytes);
3078 
3079     if (base[i] != NULL) {
3080       // Is this the block we wanted?
3081       if (base[i] == requested_addr) {
3082         size[i] = bytes;
3083         break;
3084       }
3085 
3086       // check that the gap value is right
3087       if (had_top_overlap && !have_adjusted_gap) {
3088         size_t actual_gap = base[i-1] - base[i] - bytes;
3089         if (gap != actual_gap) {
3090           // adjust the gap value and retry the last 2 allocations
3091           assert(i > 0, "gap adjustment code problem");
3092           have_adjusted_gap = true;  // adjust the gap only once, just in case
3093           gap = actual_gap;
3094           if (PrintMiscellaneous && Verbose) {
3095             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
3096           }
3097           unmap_memory(base[i], bytes);
3098           unmap_memory(base[i-1], size[i-1]);
          i -= 2;
3100           continue;
3101         }
3102       }
3103 
3104       // Does this overlap the block we wanted? Give back the overlapped
3105       // parts and try again.
3106       //
3107       // There is still a bug in this code: if top_overlap == bytes,
3108       // the overlap is offset from requested region by the value of gap.
3109       // In this case giving back the overlapped part will not work,
3110       // because we'll give back the entire block at base[i] and
3111       // therefore the subsequent allocation will not generate a new gap.
3112       // This could be fixed with a new algorithm that used larger
3113       // or variable size chunks to find the requested region -
3114       // but such a change would introduce additional complications.
3115       // It's rare enough that the planets align for this bug,
3116       // so we'll just wait for a fix for 6204603/5003415 which
3117       // will provide a mmap flag to allow us to avoid this business.
3118 
3119       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
3120       if (top_overlap >= 0 && top_overlap < bytes) {
3121         had_top_overlap = true;
3122         unmap_memory(base[i], top_overlap);
3123         base[i] += top_overlap;
3124         size[i] = bytes - top_overlap;
3125       } else {
3126         size_t bottom_overlap = base[i] + bytes - requested_addr;
3127         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
3128           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
3129             warning("attempt_reserve_memory_at: possible alignment bug");
3130           }
3131           unmap_memory(requested_addr, bottom_overlap);
3132           size[i] = bytes - bottom_overlap;
3133         } else {
3134           size[i] = bytes;
3135         }
3136       }
3137     }
3138   }
3139 
3140   // Give back the unused reserved pieces.
3141 
3142   for (int j = 0; j < i; ++j) {
3143     if (base[j] != NULL) {
3144       unmap_memory(base[j], size[j]);
3145     }
3146   }
3147 
3148   return (i < max_tries) ? requested_addr : NULL;
3149 }
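
// Worked example of the top_overlap arithmetic above (illustrative numbers
// only): with requested_addr == 0x40000000, bytes == 0x200000 and the guessed
// gap == 0x400000, an attempt that lands at base[i] == 0x40500000 gives
//   top_overlap = 0x40000000 + (0x200000 + 0x400000) - 0x40500000 = 0x100000,
// which is less than bytes, so the low 0x100000 bytes at base[i] are handed
// back and the next attempt has a chance of landing at requested_addr itself.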
3150 
3151 bool os::pd_release_memory(char* addr, size_t bytes) {
3152   size_t size = bytes;
3153   return munmap(addr, size) == 0;
3154 }
3155 
3156 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
3157   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
3158          "addr must be page aligned");
3159   int retVal = mprotect(addr, bytes, prot);
3160   return retVal == 0;
3161 }
3162 
3163 // Protect memory (Used to pass readonly pages through
3164 // JNI GetArray<type>Elements with empty arrays.)
3165 // Also, used for serialization page and for compressed oops null pointer
3166 // checking.
3167 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3168                         bool is_committed) {
3169   unsigned int p = 0;
3170   switch (prot) {
3171   case MEM_PROT_NONE: p = PROT_NONE; break;
3172   case MEM_PROT_READ: p = PROT_READ; break;
3173   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
3174   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3175   default:
3176     ShouldNotReachHere();
3177   }
3178   // is_committed is unused.
3179   return solaris_mprotect(addr, bytes, p);
3180 }
3181 
// guard_memory and unguard_memory only happen within stack guard pages.
// Since ISM pertains only to the heap, guard and unguard memory should not
// happen within an ISM region.
3185 bool os::guard_memory(char* addr, size_t bytes) {
3186   return solaris_mprotect(addr, bytes, PROT_NONE);
3187 }
3188 
3189 bool os::unguard_memory(char* addr, size_t bytes) {
3190   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
3191 }
3192 
3193 // Large page support
3194 static size_t _large_page_size = 0;
3195 
3196 // Insertion sort for small arrays (descending order).
3197 static void insertion_sort_descending(size_t* array, int len) {
3198   for (int i = 0; i < len; i++) {
3199     size_t val = array[i];
3200     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
3201       size_t tmp = array[key];
3202       array[key] = array[key - 1];
3203       array[key - 1] = tmp;
3204     }
3205   }
3206 }
3207 
3208 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
3209   const unsigned int usable_count = VM_Version::page_size_count();
3210   if (usable_count == 1) {
3211     return false;
3212   }
3213 
  // Find the right getpagesizes interface.  When Solaris 11 is the minimum
3215   // build platform, getpagesizes() (without the '2') can be called directly.
3216   typedef int (*gps_t)(size_t[], int);
3217   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3218   if (gps_func == NULL) {
3219     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3220     if (gps_func == NULL) {
3221       if (warn) {
3222         warning("MPSS is not supported by the operating system.");
3223       }
3224       return false;
3225     }
3226   }
3227 
3228   // Fill the array of page sizes.
3229   int n = (*gps_func)(_page_sizes, page_sizes_max);
3230   assert(n > 0, "Solaris bug?");
3231 
3232   if (n == page_sizes_max) {
    // Add a sentinel value.  The array is static (and therefore zero-filled at
    // initialization), so a sentinel is needed only if getpagesizes filled the
    // array completely.
3235     _page_sizes[--n] = 0;
3236     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3237   }
3238   assert(_page_sizes[n] == 0, "missing sentinel");
3239   trace_page_sizes("available page sizes", _page_sizes, n);
3240 
3241   if (n == 1) return false;     // Only one page size available.
3242 
3243   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3244   // select up to usable_count elements.  First sort the array, find the first
3245   // acceptable value, then copy the usable sizes to the top of the array and
3246   // trim the rest.  Make sure to include the default page size :-).
3247   //
3248   // A better policy could get rid of the 4M limit by taking the sizes of the
3249   // important VM memory regions (java heap and possibly the code cache) into
3250   // account.
3251   insertion_sort_descending(_page_sizes, n);
3252   const size_t size_limit =
3253     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3254   int beg;
3255   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
3256   const int end = MIN2((int)usable_count, n) - 1;
3257   for (int cur = 0; cur < end; ++cur, ++beg) {
3258     _page_sizes[cur] = _page_sizes[beg];
3259   }
3260   _page_sizes[end] = vm_page_size();
3261   _page_sizes[end + 1] = 0;
3262 
3263   if (_page_sizes[end] > _page_sizes[end - 1]) {
3264     // Default page size is not the smallest; sort again.
3265     insertion_sort_descending(_page_sizes, end + 1);
3266   }
3267   *page_size = _page_sizes[0];
3268 
3269   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3270   return true;
3271 }
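
// Worked example of the trimming above (illustrative sizes; assume
// usable_count == 4, vm_page_size() == 8K and the default 4M size limit):
//   getpagesizes() returns           {8K, 64K, 512K, 4M, 32M, 256M}
//   after insertion_sort_descending: {256M, 32M, 4M, 512K, 64K, 8K}
//   sizes above the limit are skipped, the default page size is appended, and
//   _page_sizes becomes {4M, 512K, 64K, 8K, 0} with *page_size == 4M.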
3272 
3273 void os::large_page_init() {
3274   if (UseLargePages) {
    // Print a warning if any large-page-related flag is specified on the command line.
3276     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
3277                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3278 
3279     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3280   }
3281 }
3282 
3283 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
  // Signal to the OS that we want large pages for the address range
  // [start, start + bytes).
3286   struct memcntl_mha mpss_struct;
3287   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3288   mpss_struct.mha_pagesize = align;
3289   mpss_struct.mha_flags = 0;
3290   // Upon successful completion, memcntl() returns 0
3291   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3292     debug_only(warning("Attempt to use MPSS failed."));
3293     return false;
3294   }
3295   return true;
3296 }
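
// Example usage (illustrative comment only; 'heap_base' and 'heap_bytes' are
// assumed names, not variables from this file):
//
//   if (UseLargePages && os::large_page_size() > 0) {
//     os::Solaris::setup_large_pages((caddr_t)heap_base, heap_bytes,
//                                    os::large_page_size());
//   }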
3297 
3298 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3299   fatal("os::reserve_memory_special should not be called on Solaris.");
3300   return NULL;
3301 }
3302 
3303 bool os::release_memory_special(char* base, size_t bytes) {
3304   fatal("os::release_memory_special should not be called on Solaris.");
3305   return false;
3306 }
3307 
3308 size_t os::large_page_size() {
3309   return _large_page_size;
3310 }
3311 
3312 // MPSS allows application to commit large page memory on demand; with ISM
3313 // the entire memory region must be allocated as shared memory.
3314 bool os::can_commit_large_page_memory() {
3315   return true;
3316 }
3317 
3318 bool os::can_execute_large_page_memory() {
3319   return true;
3320 }
3321 
// Read calls from inside the VM need to perform state transitions
3323 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3324   INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
3325 }
3326 
3327 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3328   INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
3329 }
3330 
3331 void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Uninterruptible sleep, short time use only");
3333 
3334   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3335   // Solaris requires -lrt for this.
3336   usleep((ms * 1000));
3337 
3338   return;
3339 }
3340 
3341 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3342 void os::infinite_sleep() {
3343   while (true) {    // sleep forever ...
3344     ::sleep(100);   // ... 100 seconds at a time
3345   }
3346 }
3347 
3348 // Used to convert frequent JVM_Yield() to nops
3349 bool os::dont_yield() {
3350   if (DontYieldALot) {
3351     static hrtime_t last_time = 0;
3352     hrtime_t diff = getTimeNanos() - last_time;
3353 
    if (diff < DontYieldALotInterval * 1000000) {
      return true;
    }

    last_time += diff;

    return false;
  } else {
    return false;
  }
3364 }
3365 
3366 // Caveat: Solaris os::yield() causes a thread-state transition whereas
// the Linux and Win32 implementations do not.  This should be checked.
3368 
3369 void os::yield() {
3370   // Yields to all threads with same or greater priority
3371   os::sleep(Thread::current(), 0, false);
3372 }
3373 
3374 // Note that yield semantics are defined by the scheduling class to which
// the thread currently belongs.  Typically, yield will _not_ yield to
3376 // other equal or higher priority threads that reside on the dispatch queues
3377 // of other CPUs.
3378 
3379 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
3380 
3381 
// On Solaris we found that yield_all doesn't always yield to all other threads.
// There have been cases where there is a thread ready to execute but it doesn't
// get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
// The 1 millisecond wait doesn't seem long enough for the kernel to issue a
// SIGWAITING signal, which would cause a new lwp to be created. So we count the
// number of times yield_all is called in one loop and increase the sleep
// time after 8 attempts. If this fails too, we increase the concurrency level
// so that the starving thread will get an lwp.
3390 
3391 void os::yield_all(int attempts) {
3392   // Yields to all threads, including threads with lower priorities
3393   if (attempts == 0) {
3394     os::sleep(Thread::current(), 1, false);
3395   } else {
3396     int iterations = attempts % 30;
3397     if (iterations == 0 && !os::Solaris::T2_libthread()) {
3398       // thr_setconcurrency and _getconcurrency make sense only under T1.
3399       int noofLWPS = thr_getconcurrency();
3400       if (noofLWPS < (Threads::number_of_threads() + 2)) {
3401         thr_setconcurrency(thr_getconcurrency() + 1);
3402       }
3403     } else if (iterations < 25) {
3404       os::sleep(Thread::current(), 1, false);
3405     } else {
3406       os::sleep(Thread::current(), 10, false);
3407     }
3408   }
3409 }
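
// Summary of the schedule implemented above: the very first attempt sleeps
// 1 ms; after that, every 30th attempt bumps the T1 concurrency level (when
// not running on the T2 libthread), attempts 1-24 within each group of 30
// sleep 1 ms, and attempts 25-29 sleep 10 ms.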
3410 
3411 // Called from the tight loops to possibly influence time-sharing heuristics
3412 void os::loop_breaker(int attempts) {
3413   os::yield_all(attempts);
3414 }
3415 
3416 
// Interface for setting lwp priorities.  If we are using T2 libthread,
// which forces the use of bound threads, or we manually set UseBoundThreads,
// all of our threads will be assigned to real lwp's.  Using the thr_setprio
// function is meaningless in this mode, so we must adjust the real lwp's
// priority.  The routines below implement the getting and setting of lwp
// priorities.
//
// Note: There are three priority scales used on Solaris.  Java priorities,
//       which range from 1 to 10; the libthread "thr_setprio" scale, which
//       ranges from 0 to 127; and the current scheduling class of the process
//       we are running in, which typically ranges from -60 to +60.
//       The setting of the lwp priorities is done after a call to thr_setprio,
//       so Java priorities are mapped to libthread priorities and we map from
//       the latter to lwp priorities.  We don't keep priorities stored in
//       Java priorities since some of our worker threads want to set priorities
//       higher than all Java threads.
3432 //
3433 // For related information:
3434 // (1)  man -s 2 priocntl
3435 // (2)  man -s 4 priocntl
3436 // (3)  man dispadmin
3437 // =    librt.so
3438 // =    libthread/common/rtsched.c - thrp_setlwpprio().
3439 // =    ps -cL <pid> ... to validate priority.
3440 // =    sched_get_priority_min and _max
3441 //              pthread_create
3442 //              sched_setparam
3443 //              pthread_setschedparam
3444 //
3445 // Assumptions:
3446 // +    We assume that all threads in the process belong to the same
// +    We assume that all threads in the process belong to the same
//              scheduling class.  I.e., a homogeneous process.
// +    Must be root or in the IA group to change the "interactive" attribute.
3450 //              we read-back the value and notice that it hasn't changed.
3451 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3452 // +    For RT, change timeslice as well.  Invariant:
3453 //              constant "priority integral"
3454 //              Konst == TimeSlice * (60-Priority)
3455 //              Given a priority, compute appropriate timeslice.
3456 // +    Higher numerical values have higher priority.
3457 
3458 // sched class attributes
typedef struct {
  int   schedPolicy;              // classID
  int   maxPrio;
  int   minPrio;
} SchedInfo;
3464 
3465 
3466 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3467 
3468 #ifdef ASSERT
3469 static int  ReadBackValidate = 1;
3470 #endif
3471 static int  myClass     = 0;
3472 static int  myMin       = 0;
3473 static int  myMax       = 0;
3474 static int  myCur       = 0;
3475 static bool priocntl_enable = false;
3476 
3477 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
3478 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3479 
3480 
3481 // lwp_priocntl_init
3482 //
3483 // Try to determine the priority scale for our process.
3484 //
3485 // Return errno or 0 if OK.
3486 //
3487 static int lwp_priocntl_init () {
3488   int rslt;
3489   pcinfo_t ClassInfo;
3490   pcparms_t ParmInfo;
3491   int i;
3492 
3493   if (!UseThreadPriorities) return 0;
3494 
  // If we are using bound threads, we need to determine our priority ranges
3496   if (os::Solaris::T2_libthread() || UseBoundThreads) {
3497     // If ThreadPriorityPolicy is 1, switch tables
3498     if (ThreadPriorityPolicy == 1) {
      for (i = 0; i < CriticalPriority+1; i++) {
        os::java_to_os_priority[i] = prio_policy1[i];
      }
3501     }
3502     if (UseCriticalJavaThreadPriority) {
3503       // MaxPriority always maps to the FX scheduling class and criticalPrio.
3504       // See set_native_priority() and set_lwp_class_and_priority().
3505       // Save original MaxPriority mapping in case attempt to
3506       // use critical priority fails.
3507       java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3508       // Set negative to distinguish from other priorities
3509       os::java_to_os_priority[MaxPriority] = -criticalPrio;
3510     }
3511   }
  // Not using bound threads; fall back to the ThreadPriorityPolicy==1 mapping.
  else {
    for (i = 0; i < CriticalPriority+1; i++) {
3515       os::java_to_os_priority[i] = prio_policy1[i];
3516     }
3517     return 0;
3518   }
3519 
3520   // Get IDs for a set of well-known scheduling classes.
3521   // TODO-FIXME: GETCLINFO returns the current # of classes in the
  // TODO-FIXME: GETCLINFO returns the current # of classes in the
  // system.  We should have a loop that iterates over the
3524 
3525   strcpy(ClassInfo.pc_clname, "TS");
3526   ClassInfo.pc_cid = -1;
3527   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3528   if (rslt < 0) return errno;
3529   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3530   tsLimits.schedPolicy = ClassInfo.pc_cid;
3531   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3532   tsLimits.minPrio = -tsLimits.maxPrio;
3533 
3534   strcpy(ClassInfo.pc_clname, "IA");
3535   ClassInfo.pc_cid = -1;
3536   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3537   if (rslt < 0) return errno;
3538   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3539   iaLimits.schedPolicy = ClassInfo.pc_cid;
3540   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3541   iaLimits.minPrio = -iaLimits.maxPrio;
3542 
3543   strcpy(ClassInfo.pc_clname, "RT");
3544   ClassInfo.pc_cid = -1;
3545   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3546   if (rslt < 0) return errno;
3547   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3548   rtLimits.schedPolicy = ClassInfo.pc_cid;
3549   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3550   rtLimits.minPrio = 0;
3551 
3552   strcpy(ClassInfo.pc_clname, "FX");
3553   ClassInfo.pc_cid = -1;
3554   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3555   if (rslt < 0) return errno;
3556   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3557   fxLimits.schedPolicy = ClassInfo.pc_cid;
3558   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3559   fxLimits.minPrio = 0;
3560 
3561   // Query our "current" scheduling class.
3562   // This will normally be IA, TS or, rarely, FX or RT.
3563   memset(&ParmInfo, 0, sizeof(ParmInfo));
3564   ParmInfo.pc_cid = PC_CLNULL;
3565   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3566   if (rslt < 0) return errno;
3567   myClass = ParmInfo.pc_cid;
3568 
3569   // We now know our scheduling classId, get specific information
3570   // about the class.
3571   ClassInfo.pc_cid = myClass;
3572   ClassInfo.pc_clname[0] = 0;
3573   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3574   if (rslt < 0) return errno;
3575 
3576   if (ThreadPriorityVerbose) {
3577     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3578   }
3579 
3580   memset(&ParmInfo, 0, sizeof(pcparms_t));
3581   ParmInfo.pc_cid = PC_CLNULL;
3582   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3583   if (rslt < 0) return errno;
3584 
3585   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3586     myMin = rtLimits.minPrio;
3587     myMax = rtLimits.maxPrio;
3588   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3589     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3590     myMin = iaLimits.minPrio;
3591     myMax = iaLimits.maxPrio;
3592     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3593   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3594     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3595     myMin = tsLimits.minPrio;
3596     myMax = tsLimits.maxPrio;
3597     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3598   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3599     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3600     myMin = fxLimits.minPrio;
3601     myMax = fxLimits.maxPrio;
3602     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3603   } else {
3604     // No clue - punt
3605     if (ThreadPriorityVerbose)
3606       tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
3607     return EINVAL;      // no clue, punt
3608   }
3609 
3610   if (ThreadPriorityVerbose) {
3611     tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
3612   }
3613 
3614   priocntl_enable = true;  // Enable changing priorities
3615   return 0;
3616 }
3617 
3618 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3619 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3620 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3621 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3622 
3623 
3624 // scale_to_lwp_priority
3625 //
3626 // Convert from the libthread "thr_setprio" scale to our current
3627 // lwp scheduling class scale.
3628 //
static int scale_to_lwp_priority(int rMin, int rMax, int x) {
  if (x == 127) return rMax;            // avoid round-down
  return (((x * (rMax - rMin))) / 128) + rMin;
}
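
// Worked example of the scaling above: mapping the libthread priority 64 into
// a class range of [-60, 60] gives
//   (64 * (60 - (-60))) / 128 + (-60) = (64 * 120) / 128 - 60 = 60 - 60 = 0,
// i.e. the middle of the libthread scale lands in the middle of the class range.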
3638 
3639 
3640 // set_lwp_class_and_priority
3641 //
3642 // Set the class and priority of the lwp.  This call should only
3643 // be made when using bound threads (T2 threads are bound by default).
3644 //
3645 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3646                                int newPrio, int new_class, bool scale) {
3647   int rslt;
3648   int Actual, Expected, prv;
3649   pcparms_t ParmInfo;                   // for GET-SET
3650 #ifdef ASSERT
3651   pcparms_t ReadBack;                   // for readback
3652 #endif
3653 
3654   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3655   // Query current values.
3656   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3657   // Cache "pcparms_t" in global ParmCache.
3658   // TODO: elide set-to-same-value
3659 
3660   // If something went wrong on init, don't change priorities.
  if (!priocntl_enable) {
    if (ThreadPriorityVerbose) {
      tty->print_cr("Trying to set priority but init failed, ignoring");
    }
    return EINVAL;
  }
3666 
  // If the lwp hasn't started yet, just return;
  // the _start routine will call us again.
  if (lwpid <= 0) {
3670     if (ThreadPriorityVerbose) {
3671       tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
3672                      INTPTR_FORMAT " to %d, lwpid not set",
3673                      ThreadID, newPrio);
3674     }
3675     return 0;
3676   }
3677 
3678   if (ThreadPriorityVerbose) {
3679     tty->print_cr ("set_lwp_class_and_priority("
3680                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3681                    ThreadID, lwpid, newPrio);
3682   }
3683 
3684   memset(&ParmInfo, 0, sizeof(pcparms_t));
3685   ParmInfo.pc_cid = PC_CLNULL;
3686   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3687   if (rslt < 0) return errno;
3688 
3689   int cur_class = ParmInfo.pc_cid;
3690   ParmInfo.pc_cid = (id_t)new_class;
3691 
3692   if (new_class == rtLimits.schedPolicy) {
3693     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3694     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3695                                                        rtLimits.maxPrio, newPrio)
3696                                : newPrio;
3697     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3698     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3699     if (ThreadPriorityVerbose) {
3700       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3701     }
3702   } else if (new_class == iaLimits.schedPolicy) {
3703     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3704     int maxClamped     = MIN2(iaLimits.maxPrio,
3705                               cur_class == new_class
3706                                 ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3707     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3708                                                        maxClamped, newPrio)
3709                                : newPrio;
3710     iaInfo->ia_uprilim = cur_class == new_class
3711                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3712     iaInfo->ia_mode    = IA_NOCHANGE;
3713     if (ThreadPriorityVerbose) {
3714       tty->print_cr("IA: [%d...%d] %d->%d\n",
3715                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3716     }
3717   } else if (new_class == tsLimits.schedPolicy) {
3718     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3719     int maxClamped     = MIN2(tsLimits.maxPrio,
3720                               cur_class == new_class
3721                                 ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3722     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3723                                                        maxClamped, newPrio)
3724                                : newPrio;
3725     tsInfo->ts_uprilim = cur_class == new_class
3726                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3727     if (ThreadPriorityVerbose) {
3728       tty->print_cr("TS: [%d...%d] %d->%d\n",
3729                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3730     }
3731   } else if (new_class == fxLimits.schedPolicy) {
3732     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3733     int maxClamped     = MIN2(fxLimits.maxPrio,
3734                               cur_class == new_class
3735                                 ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3736     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3737                                                        maxClamped, newPrio)
3738                                : newPrio;
3739     fxInfo->fx_uprilim = cur_class == new_class
3740                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3741     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3742     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3743     if (ThreadPriorityVerbose) {
3744       tty->print_cr("FX: [%d...%d] %d->%d\n",
3745                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3746     }
3747   } else {
3748     if (ThreadPriorityVerbose) {
3749       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3750     }
3751     return EINVAL;    // no clue, punt
3752   }
3753 
3754   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3755   if (ThreadPriorityVerbose && rslt) {
3756     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3757   }
3758   if (rslt < 0) return errno;
3759 
3760 #ifdef ASSERT
3761   // Sanity check: read back what we just attempted to set.
3762   // In theory it could have changed in the interim ...
3763   //
3764   // The priocntl system call is tricky.
3765   // Sometimes it'll validate the priority value argument and
3766   // return EINVAL if unhappy.  At other times it fails silently.
3767   // Readbacks are prudent.
3768 
3769   if (!ReadBackValidate) return 0;
3770 
3771   memset(&ReadBack, 0, sizeof(pcparms_t));
3772   ReadBack.pc_cid = PC_CLNULL;
3773   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3774   assert(rslt >= 0, "priocntl failed");
3775   Actual = Expected = 0xBAD;
3776   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3777   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3778     Actual   = RTPRI(ReadBack)->rt_pri;
3779     Expected = RTPRI(ParmInfo)->rt_pri;
3780   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3781     Actual   = IAPRI(ReadBack)->ia_upri;
3782     Expected = IAPRI(ParmInfo)->ia_upri;
3783   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3784     Actual   = TSPRI(ReadBack)->ts_upri;
3785     Expected = TSPRI(ParmInfo)->ts_upri;
3786   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3787     Actual   = FXPRI(ReadBack)->fx_upri;
3788     Expected = FXPRI(ParmInfo)->fx_upri;
3789   } else {
3790     if (ThreadPriorityVerbose) {
3791       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3792                     ParmInfo.pc_cid);
3793     }
3794   }
3795 
3796   if (Actual != Expected) {
3797     if (ThreadPriorityVerbose) {
3798       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3799                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3800     }
3801   }
3802 #endif
3803 
3804   return 0;
3805 }
3806 
3807 // Solaris only gives access to 128 real priorities at a time,
3808 // so we expand Java's ten to fill this range.  This would be better
3809 // if we dynamically adjusted relative priorities.
3810 //
3811 // The ThreadPriorityPolicy option allows us to select 2 different
3812 // priority scales.
3813 //
3814 // ThreadPriorityPolicy=0
// Since Solaris' default priority is MaximumPriority, we do not
// set a priority lower than Max unless a priority lower than
// NormPriority is requested.
//
// ThreadPriorityPolicy=1
// This mode causes the priority table to get filled with
// linear values.  NormPriority gets mapped to 50% of the
// maximum priority and so on.  This will cause VM threads
// to be treated unfairly relative to other Solaris processes
// which do not explicitly alter their thread priorities.
3825 //
3826 
3827 int os::java_to_os_priority[CriticalPriority + 1] = {
3828   -99999,         // 0 Entry should never be used
3829 
3830   0,              // 1 MinPriority
3831   32,             // 2
3832   64,             // 3
3833 
3834   96,             // 4
3835   127,            // 5 NormPriority
3836   127,            // 6
3837 
3838   127,            // 7
3839   127,            // 8
3840   127,            // 9 NearMaxPriority
3841 
3842   127,            // 10 MaxPriority
3843 
3844   -criticalPrio   // 11 CriticalPriority
3845 };
3846 
3847 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3848   OSThread* osthread = thread->osthread();
3849 
3850   // Save requested priority in case the thread hasn't been started
3851   osthread->set_native_priority(newpri);
3852 
3853   // Check for critical priority request
3854   bool fxcritical = false;
3855   if (newpri == -criticalPrio) {
3856     fxcritical = true;
3857     newpri = criticalPrio;
3858   }
3859 
3860   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3861   if (!UseThreadPriorities) return OS_OK;
3862 
3863   int status = 0;
3864 
3865   if (!fxcritical) {
3866     // Use thr_setprio only if we have a priority that thr_setprio understands
3867     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3868   }
3869 
3870   if (os::Solaris::T2_libthread() ||
3871       (UseBoundThreads && osthread->is_vm_created())) {
3872     int lwp_status =
3873       set_lwp_class_and_priority(osthread->thread_id(),
3874                                  osthread->lwp_id(),
3875                                  newpri,
3876                                  fxcritical ? fxLimits.schedPolicy : myClass,
3877                                  !fxcritical);
3878     if (lwp_status != 0 && fxcritical) {
3879       // Try again, this time without changing the scheduling class
3880       newpri = java_MaxPriority_to_os_priority;
3881       lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3882                                               osthread->lwp_id(),
3883                                               newpri, myClass, false);
3884     }
3885     status |= lwp_status;
3886   }
3887   return (status == 0) ? OS_OK : OS_ERR;
3888 }
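
// Flow of a critical-priority request through the code above: lwp_priocntl_init()
// stores -criticalPrio in java_to_os_priority[MaxPriority]; set_native_priority()
// detects the negative value, skips thr_setprio(), and asks
// set_lwp_class_and_priority() for the FX class at criticalPrio (unscaled).
// If that fails, it retries with the saved java_MaxPriority_to_os_priority
// mapping in the current scheduling class.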
3889 
3890 
3891 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
3892   int p;
3893   if ( !UseThreadPriorities ) {
3894     *priority_ptr = NormalPriority;
3895     return OS_OK;
3896   }
3897   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3898   if (status != 0) {
3899     return OS_ERR;
3900   }
3901   *priority_ptr = p;
3902   return OS_OK;
3903 }
3904 
3905 
3906 // Hint to the underlying OS that a task switch would not be good.
3907 // Void return because it's a hint and can fail.
3908 void os::hint_no_preempt() {
3909   schedctl_start(schedctl_init());
3910 }
3911 
3912 static void resume_clear_context(OSThread *osthread) {
3913   osthread->set_ucontext(NULL);
3914 }
3915 
3916 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3917   osthread->set_ucontext(context);
3918 }
3919 
3920 static Semaphore sr_semaphore;
3921 
3922 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
3923   // Save and restore errno to avoid confusing native code with EINTR
3924   // after sigsuspend.
3925   int old_errno = errno;
3926 
3927   OSThread* osthread = thread->osthread();
3928   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3929 
3930   os::SuspendResume::State current = osthread->sr.state();
3931   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3932     suspend_save_context(osthread, uc);
3933 
    // attempt to switch the state; we assume we had a SUSPEND_REQUEST
3935     os::SuspendResume::State state = osthread->sr.suspended();
3936     if (state == os::SuspendResume::SR_SUSPENDED) {
3937       sigset_t suspend_set;  // signals for sigsuspend()
3938 
3939       // get current set of blocked signals and unblock resume signal
3940       thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
3941       sigdelset(&suspend_set, os::Solaris::SIGasync());
3942 
3943       sr_semaphore.signal();
3944       // wait here until we are resumed
3945       while (1) {
3946         sigsuspend(&suspend_set);
3947 
3948         os::SuspendResume::State result = osthread->sr.running();
3949         if (result == os::SuspendResume::SR_RUNNING) {
3950           sr_semaphore.signal();
3951           break;
3952         }
3953       }
3954 
3955     } else if (state == os::SuspendResume::SR_RUNNING) {
3956       // request was cancelled, continue
3957     } else {
3958       ShouldNotReachHere();
3959     }
3960 
3961     resume_clear_context(osthread);
3962   } else if (current == os::SuspendResume::SR_RUNNING) {
3963     // request was cancelled, continue
3964   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3965     // ignore
3966   } else {
3967     // ignore
3968   }
3969 
3970   errno = old_errno;
3971 }
3972 
3973 void os::print_statistics() {
3974 }
3975 
3976 int os::message_box(const char* title, const char* message) {
3977   int i;
3978   fdStream err(defaultStream::error_fd());
3979   for (i = 0; i < 78; i++) err.print_raw("=");
3980   err.cr();
3981   err.print_raw_cr(title);
3982   for (i = 0; i < 78; i++) err.print_raw("-");
3983   err.cr();
3984   err.print_raw_cr(message);
3985   for (i = 0; i < 78; i++) err.print_raw("=");
3986   err.cr();
3987 
3988   char buf[16];
3989   // Prevent process from exiting upon "read error" without consuming all CPU
3990   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3991 
3992   return buf[0] == 'y' || buf[0] == 'Y';
3993 }
3994 
3995 static int sr_notify(OSThread* osthread) {
3996   int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
3997   assert_status(status == 0, status, "thr_kill");
3998   return status;
3999 }
4000 
4001 // "Randomly" selected value for how long we want to spin
4002 // before bailing out on suspending a thread, also how often
4003 // we send a signal to a thread we want to resume
4004 static const int RANDOMLY_LARGE_INTEGER = 1000000;
4005 static const int RANDOMLY_LARGE_INTEGER2 = 100;
4006 
4007 static bool do_suspend(OSThread* osthread) {
4008   assert(osthread->sr.is_running(), "thread should be running");
4009   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
4010 
4011   // mark as suspended and send signal
4012   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
4013     // failed to switch, state wasn't running?
4014     ShouldNotReachHere();
4015     return false;
4016   }
4017 
4018   if (sr_notify(osthread) != 0) {
4019     ShouldNotReachHere();
4020   }
4021 
4022   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
4023   while (true) {
4024     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
4025       break;
4026     } else {
4027       // timeout
4028       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
4029       if (cancelled == os::SuspendResume::SR_RUNNING) {
4030         return false;
4031       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
4032         // make sure that we consume the signal on the semaphore as well
4033         sr_semaphore.wait();
4034         break;
4035       } else {
4036         ShouldNotReachHere();
4037         return false;
4038       }
4039     }
4040   }
4041 
4042   guarantee(osthread->sr.is_suspended(), "Must be suspended");
4043   return true;
4044 }
4045 
4046 static void do_resume(OSThread* osthread) {
4047   assert(osthread->sr.is_suspended(), "thread should be suspended");
4048   assert(!sr_semaphore.trywait(), "invalid semaphore state");
4049 
4050   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
4051     // failed to switch to WAKEUP_REQUEST
4052     ShouldNotReachHere();
4053     return;
4054   }
4055 
4056   while (true) {
4057     if (sr_notify(osthread) == 0) {
4058       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
4059         if (osthread->sr.is_running()) {
4060           return;
4061         }
4062       }
4063     } else {
4064       ShouldNotReachHere();
4065     }
4066   }
4067 
4068   guarantee(osthread->sr.is_running(), "Must be running!");
4069 }
4070 
4071 void os::SuspendedThreadTask::internal_do_task() {
4072   if (do_suspend(_thread->osthread())) {
4073     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
4074     do_task(context);
4075     do_resume(_thread->osthread());
4076   }
4077 }
4078 
4079 class PcFetcher : public os::SuspendedThreadTask {
4080 public:
4081   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
4082   ExtendedPC result();
4083 protected:
4084   void do_task(const os::SuspendedThreadTaskContext& context);
4085 private:
4086   ExtendedPC _epc;
4087 };
4088 
4089 ExtendedPC PcFetcher::result() {
4090   guarantee(is_done(), "task is not done yet.");
4091   return _epc;
4092 }
4093 
4094 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
4095   Thread* thread = context.thread();
4096   OSThread* osthread = thread->osthread();
4097   if (osthread->ucontext() != NULL) {
4098     _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
4099   } else {
4100     // NULL context is unexpected, double-check this is the VMThread
4101     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
4102   }
4103 }
4104 
4105 // A lightweight implementation that does not suspend the target thread and
4106 // thus returns only a hint. Used for profiling only!
4107 ExtendedPC os::get_thread_pc(Thread* thread) {
4108   // Make sure that it is called by the watcher and the Threads lock is owned.
4109   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
4110   // For now, is only used to profile the VM Thread
4111   assert(thread->is_VM_thread(), "Can only be called for VMThread");
4112   PcFetcher fetcher(thread);
4113   fetcher.run();
4114   return fetcher.result();
4115 }
4116 
4117 
4118 // This does not do anything on Solaris. This is basically a hook for being
4119 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
4120 void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
4121   f(value, method, args, thread);
4122 }
4123 
4124 // This routine may be used by user applications as a "hook" to catch signals.
4125 // The user-defined signal handler must pass unrecognized signals to this
4126 // routine, and if it returns true (non-zero), then the signal handler must
4127 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine that kills the process.
4130 //
4131 // If this routine returns false, it is OK to call it again.  This allows
4132 // the user-defined signal handler to perform checks either before or after
4133 // the VM performs its own checks.  Naturally, the user code would be making
4134 // a serious error if it tried to handle an exception (such as a null check
4135 // or breakpoint) that the VM was generating for its own correct operation.
4136 //
4137 // This routine may recognize any of the following kinds of signals:
4138 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
4139 // os::Solaris::SIGasync
4140 // It should be consulted by handlers for any of those signals.
4141 // It explicitly does not recognize os::Solaris::SIGinterrupt
4142 //
4143 // The caller of this routine must pass in the three arguments supplied
4144 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
4145 // field of the structure passed to sigaction().  This routine assumes that
4146 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
4147 //
4148 // Note that the VM will print warnings if it detects conflicting signal
4149 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
4150 //
4151 extern "C" JNIEXPORT int
4152 JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
4153                           int abort_if_unrecognized);
4154 
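// Example of the user "hook" described above (illustrative comment only; the
// handler name and its installation are the application's responsibility):
//
//   extern "C" void my_app_handler(int sig, siginfo_t* info, void* uc) {
//     if (JVM_handle_solaris_signal(sig, info, uc, 0 /* !abort_if_unrecognized */)) {
//       return;   // the VM recognized and handled the signal
//     }
//     // ... application-specific handling of the unrecognized signal ...
//   }
//
// As noted above, such a handler must be installed via sa_sigaction with
// SA_SIGINFO and SA_RESTART set in sa_flags.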
4155 
4156 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
4157   int orig_errno = errno;  // Preserve errno value over signal handler.
4158   JVM_handle_solaris_signal(sig, info, ucVoid, true);
4159   errno = orig_errno;
4160 }
4161 
/* Do not delete - if the guarantee is ever removed, a signal handler (even an
   empty one) is needed to provoke threads blocked on IO to return with EINTR.
   Note: this explicitly does NOT call JVM_handle_solaris_signal and
   does NOT participate in signal chaining, due to the requirement of
   NOT setting SA_RESTART to make EINTR work. */
4167 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
4168    if (UseSignalChaining) {
4169       struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
4170       if (actp && actp->sa_handler) {
4171         vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
4172       }
4173    }
4174 }
4175 
4176 // This boolean allows users to forward their own non-matching signals
4177 // to JVM_handle_solaris_signal, harmlessly.
4178 bool os::Solaris::signal_handlers_are_installed = false;
4179 
4180 // For signal-chaining
4181 bool os::Solaris::libjsig_is_loaded = false;
4182 typedef struct sigaction *(*get_signal_t)(int);
4183 get_signal_t os::Solaris::get_signal_action = NULL;
4184 
4185 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
4186   struct sigaction *actp = NULL;
4187 
4188   if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
4189     // Retrieve the old signal handler from libjsig
4190     actp = (*get_signal_action)(sig);
4191   }
4192   if (actp == NULL) {
4193     // Retrieve the preinstalled signal handler from jvm
4194     actp = get_preinstalled_handler(sig);
4195   }
4196 
4197   return actp;
4198 }
4199 
4200 static bool call_chained_handler(struct sigaction *actp, int sig,
4201                                  siginfo_t *siginfo, void *context) {
4202   // Call the old signal handler
4203   if (actp->sa_handler == SIG_DFL) {
4204     // It's more reasonable to let jvm treat it as an unexpected exception
4205     // instead of taking the default action.
4206     return false;
4207   } else if (actp->sa_handler != SIG_IGN) {
4208     if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
4210       sigaddset(&(actp->sa_mask), sig);
4211     }
4212 
4213     sa_handler_t hand;
4214     sa_sigaction_t sa;
4215     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
4216     // retrieve the chained handler
4217     if (siginfo_flag_set) {
4218       sa = actp->sa_sigaction;
4219     } else {
4220       hand = actp->sa_handler;
4221     }
4222 
4223     if ((actp->sa_flags & SA_RESETHAND) != 0) {
4224       actp->sa_handler = SIG_DFL;
4225     }
4226 
4227     // try to honor the signal mask
4228     sigset_t oset;
4229     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
4230 
4231     // call into the chained handler
4232     if (siginfo_flag_set) {
4233       (*sa)(sig, siginfo, context);
4234     } else {
4235       (*hand)(sig);
4236     }
4237 
4238     // restore the signal mask
4239     thr_sigsetmask(SIG_SETMASK, &oset, 0);
4240   }
4241   // Tell jvm's signal handler the signal is taken care of.
4242   return true;
4243 }
4244 
4245 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4246   bool chained = false;
4247   // signal-chaining
4248   if (UseSignalChaining) {
4249     struct sigaction *actp = get_chained_signal_action(sig);
4250     if (actp != NULL) {
4251       chained = call_chained_handler(actp, sig, siginfo, context);
4252     }
4253   }
4254   return chained;
4255 }
4256 
4257 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4258   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4259   if (preinstalled_sigs[sig] != 0) {
4260     return &chainedsigactions[sig];
4261   }
4262   return NULL;
4263 }
4264 
4265 void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
4266 
4267   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4268   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4269   chainedsigactions[sig] = oldAct;
4270   preinstalled_sigs[sig] = 1;
4271 }
4272 
4273 void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
4274   // Check for overwrite.
4275   struct sigaction oldAct;
4276   sigaction(sig, (struct sigaction*)NULL, &oldAct);
4277   void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
4278                                       : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
4279   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4280       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4281       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4282     if (AllowUserSignalHandlers || !set_installed) {
4283       // Do not overwrite; user takes responsibility to forward to us.
4284       return;
4285     } else if (UseSignalChaining) {
4286       if (oktochain) {
4287         // save the old handler in jvm
4288         save_preinstalled_handler(sig, oldAct);
4289       } else {
4290         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4291       }
4292       // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
4294     } else {
4295       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4296                     "%#lx for signal %d.", (long)oldhand, sig));
4297     }
4298   }
4299 
4300   struct sigaction sigAct;
4301   sigfillset(&(sigAct.sa_mask));
4302   sigAct.sa_handler = SIG_DFL;
4303 
4304   sigAct.sa_sigaction = signalHandler;
4305   // Handle SIGSEGV on alternate signal stack if
4306   // not using stack banging
4307   if (!UseStackBanging && sig == SIGSEGV) {
4308     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4309   // Interruptible i/o requires SA_RESTART cleared so EINTR
4310   // is returned instead of restarting system calls
4311   } else if (sig == os::Solaris::SIGinterrupt()) {
4312     sigemptyset(&sigAct.sa_mask);
4313     sigAct.sa_handler = NULL;
4314     sigAct.sa_flags = SA_SIGINFO;
4315     sigAct.sa_sigaction = sigINTRHandler;
4316   } else {
4317     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4318   }
4319   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4320 
4321   sigaction(sig, &sigAct, &oldAct);
4322 
4323   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4324                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4325   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4326 }
4327 
4328 
4329 #define DO_SIGNAL_CHECK(sig) \
4330   if (!sigismember(&check_signal_done, sig)) \
4331     os::Solaris::check_signal_handler(sig)
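// (DO_SIGNAL_CHECK re-checks a signal's handler unless it is already marked done
// in check_signal_done; check_signal_handler() adds a signal there once a
// mismatch has been reported, so each problem is reported only once.)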
4332 
// This method is a periodic task used to check for misbehaving JNI applications
// under CheckJNI; we can add any other periodic checks here.
4335 
4336 void os::run_periodic_checks() {
  // A big source of grief is hijacking virtual address 0x0 on Solaris,
  // thereby preventing NULL checks.
4339   if(!check_addr0_done) check_addr0_done = check_addr0(tty);
4340 
4341   if (check_signals == false) return;
4342 
  // If the SEGV or BUS handlers are overridden, that could potentially prevent
  // generation of the hs*.log in the event of a crash; debugging
  // such a case can be very challenging, so we absolutely
  // check the following for good measure:
4347   DO_SIGNAL_CHECK(SIGSEGV);
4348   DO_SIGNAL_CHECK(SIGILL);
4349   DO_SIGNAL_CHECK(SIGFPE);
4350   DO_SIGNAL_CHECK(SIGBUS);
4351   DO_SIGNAL_CHECK(SIGPIPE);
4352   DO_SIGNAL_CHECK(SIGXFSZ);
4353 
4354   // ReduceSignalUsage allows the user to override these handlers
4355   // see comments at the very top and jvm_solaris.h
4356   if (!ReduceSignalUsage) {
4357     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4358     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4359     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4360     DO_SIGNAL_CHECK(BREAK_SIGNAL);
4361   }
4362 
4363   // See comments above for using JVM1/JVM2 and UseAltSigs
4364   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4365   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4366 
4367 }
4368 
4369 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4370 
4371 static os_sigaction_t os_sigaction = NULL;
4372 
4373 void os::Solaris::check_signal_handler(int sig) {
4374   char buf[O_BUFLEN];
4375   address jvmHandler = NULL;
4376 
4377   struct sigaction act;
4378   if (os_sigaction == NULL) {
4379     // only trust the default sigaction, in case it has been interposed
4380     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4381     if (os_sigaction == NULL) return;
4382   }
4383 
4384   os_sigaction(sig, (struct sigaction*)NULL, &act);
4385 
4386   address thisHandler = (act.sa_flags & SA_SIGINFO)
4387     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4388     : CAST_FROM_FN_PTR(address, act.sa_handler) ;
4389 
4390 
4391   switch(sig) {
4392     case SIGSEGV:
4393     case SIGBUS:
4394     case SIGFPE:
4395     case SIGPIPE:
4396     case SIGXFSZ:
4397     case SIGILL:
4398       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4399       break;
4400 
4401     case SHUTDOWN1_SIGNAL:
4402     case SHUTDOWN2_SIGNAL:
4403     case SHUTDOWN3_SIGNAL:
4404     case BREAK_SIGNAL:
4405       jvmHandler = (address)user_handler();
4406       break;
4407 
4408     default:
4409       int intrsig = os::Solaris::SIGinterrupt();
4410       int asynsig = os::Solaris::SIGasync();
4411 
4412       if (sig == intrsig) {
4413         jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4414       } else if (sig == asynsig) {
4415         jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4416       } else {
4417         return;
4418       }
4419       break;
4420   }
4421 
4422 
4423   if (thisHandler != jvmHandler) {
4424     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4425     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4426     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4427     // No need to check this sig any longer
4428     sigaddset(&check_signal_done, sig);
4429   } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4430     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4431     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4432     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
4433     // No need to check this sig any longer
4434     sigaddset(&check_signal_done, sig);
4435   }
4436 
4437   // Print all the signal handler state
4438   if (sigismember(&check_signal_done, sig)) {
4439     print_signal_handlers(tty, buf, O_BUFLEN);
4440   }
4441 
4442 }
4443 
4444 void os::Solaris::install_signal_handlers() {
4445   bool libjsigdone = false;
4446   signal_handlers_are_installed = true;
4447 
4448   // signal-chaining
4449   typedef void (*signal_setting_t)();
4450   signal_setting_t begin_signal_setting = NULL;
4451   signal_setting_t end_signal_setting = NULL;
4452   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4453                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4454   if (begin_signal_setting != NULL) {
4455     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4456                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4457     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4458                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4459     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4460                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4461     libjsig_is_loaded = true;
4462     if (os::Solaris::get_libjsig_version != NULL) {
4463       libjsigversion =  (*os::Solaris::get_libjsig_version)();
4464     }
4465     assert(UseSignalChaining, "should enable signal-chaining");
4466   }
4467   if (libjsig_is_loaded) {
    // Tell libjsig that the jvm is setting signal handlers
4469     (*begin_signal_setting)();
4470   }
4471 
4472   set_signal_handler(SIGSEGV, true, true);
4473   set_signal_handler(SIGPIPE, true, true);
4474   set_signal_handler(SIGXFSZ, true, true);
4475   set_signal_handler(SIGBUS, true, true);
4476   set_signal_handler(SIGILL, true, true);
4477   set_signal_handler(SIGFPE, true, true);
4478 
4479 
4480   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4481 
    // Pre-1.4.1 libjsig is limited to chaining signals <= 32, so it
    // cannot register overridable signals, which might be > 32.
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
      // Tell libjsig that the jvm has finished setting signal handlers
4486       (*end_signal_setting)();
4487       libjsigdone = true;
4488     }
4489   }
4490 
4491   // Never ok to chain our SIGinterrupt
4492   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4493   set_signal_handler(os::Solaris::SIGasync(), true, true);
4494 
4495   if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig that the jvm has finished setting signal handlers
4497     (*end_signal_setting)();
4498   }
4499 
  // We don't activate the signal checker if libjsig is in place, since we trust
  // ourselves, and if a user signal handler is installed all bets are off.
4502   // Log that signal checking is off only if -verbose:jni is specified.
4503   if (CheckJNICalls) {
4504     if (libjsig_is_loaded) {
4505       if (PrintJNIResolving) {
4506         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4507       }
4508       check_signals = false;
4509     }
4510     if (AllowUserSignalHandlers) {
4511       if (PrintJNIResolving) {
4512         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4513       }
4514       check_signals = false;
4515     }
4516   }
4517 }
4518 
4519 
4520 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
4521 
4522 const char * signames[] = {
4523   "SIG0",
4524   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4525   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4526   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4527   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4528   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4529   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4530   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4531   "SIGCANCEL", "SIGLOST"
4532 };
4533 
4534 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4535   if (0 < exception_code && exception_code <= SIGRTMAX) {
4536     // signal
4537     if (exception_code < sizeof(signames)/sizeof(const char*)) {
4538        jio_snprintf(buf, size, "%s", signames[exception_code]);
4539     } else {
4540        jio_snprintf(buf, size, "SIG%d", exception_code);
4541     }
4542     return buf;
4543   } else {
4544     return NULL;
4545   }
4546 }
4547 
4548 // (Static) wrappers for the new libthread API
4549 int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
4550 int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
4551 int_fnP_thread_t_i os::Solaris::_thr_setmutator;
4552 int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
4553 int_fnP_thread_t os::Solaris::_thr_continue_mutator;
4554 
4555 // (Static) wrapper for getisax(2) call.
4556 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4557 
4558 // (Static) wrappers for the liblgrp API
4559 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4560 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4561 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4562 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4563 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4564 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4565 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4566 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4567 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4568 
4569 // (Static) wrapper for meminfo() call.
4570 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4571 
4572 static address resolve_symbol_lazy(const char* name) {
4573   address addr = (address) dlsym(RTLD_DEFAULT, name);
4574   if(addr == NULL) {
4575     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4576     addr = (address) dlsym(RTLD_NEXT, name);
4577   }
4578   return addr;
4579 }
4580 
4581 static address resolve_symbol(const char* name) {
4582   address addr = resolve_symbol_lazy(name);
4583   if(addr == NULL) {
4584     fatal(dlerror());
4585   }
4586   return addr;
4587 }
4588 
4589 
4590 
4591 // isT2_libthread()
4592 //
4593 // Routine to determine if we are currently using the new T2 libthread.
4594 //
4595 // We determine if we are using T2 by reading /proc/self/lstatus and
4596 // looking for a thread with the ASLWP bit set.  If we find this status
4597 // bit set, we must assume that we are NOT using T2.  The T2 team
4598 // has approved this algorithm.
4599 //
4600 // We need to determine if we are running with the new T2 libthread
4601 // since setting native thread priorities is handled differently
4602 // when using this library.  All threads created using T2 are bound
4603 // threads. Calling thr_setprio is meaningless in this case.
4604 //
4605 bool isT2_libthread() {
4606   static prheader_t * lwpArray = NULL;
4607   static int lwpSize = 0;
4608   static int lwpFile = -1;
4609   lwpstatus_t * that;
4610   char lwpName [128];
4611   bool isT2 = false;
4612 
4613 #define ADR(x)  ((uintptr_t)(x))
4614 #define LWPINDEX(ary,ix)   ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))
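// LWPINDEX yields the address of the ix-th lwpstatus_t entry; the entries
// follow the prheader_t at pr_entsize-byte strides in /proc/self/lstatus.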
4615 
4616   lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
4617   if (lwpFile < 0) {
4618       if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
4619       return false;
4620   }
4621   lwpSize = 16*1024;
4622   for (;;) {
4623     ::lseek64 (lwpFile, 0, SEEK_SET);
4624     lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
4625     if (::read(lwpFile, lwpArray, lwpSize) < 0) {
4626       if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
4627       break;
4628     }
4629     if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
      // We got a good snapshot - now iterate over the list.
4631       int aslwpcount = 0;
4632       for (int i = 0; i < lwpArray->pr_nent; i++ ) {
4633         that = LWPINDEX(lwpArray,i);
4634         if (that->pr_flags & PR_ASLWP) {
4635           aslwpcount++;
4636         }
4637       }
4638       if (aslwpcount == 0) isT2 = true;
4639       break;
4640     }
4641     lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
4642     FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);  // retry.
4643   }
4644 
4645   FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
4646   ::close (lwpFile);
4647   if (ThreadPriorityVerbose) {
4648     if (isT2) tty->print_cr("We are running with a T2 libthread\n");
4649     else tty->print_cr("We are not running with a T2 libthread\n");
4650   }
4651   return isT2;
4652 }
4653 
4654 
4655 void os::Solaris::libthread_init() {
4656   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4657 
4658   // Determine if we are running with the new T2 libthread
4659   os::Solaris::set_T2_libthread(isT2_libthread());
4660 
4661   lwp_priocntl_init();
4662 
4663   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4664   if(func == NULL) {
4665     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on a new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
4668     guarantee(func != NULL, "libthread.so is too old.");
4669   }
4670 
4671   // Initialize the new libthread getstate API wrappers
4672   func = resolve_symbol("thr_getstate");
4673   os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));
4674 
4675   func = resolve_symbol("thr_setstate");
4676   os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));
4677 
4678   func = resolve_symbol("thr_setmutator");
4679   os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));
4680 
4681   func = resolve_symbol("thr_suspend_mutator");
4682   os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
4683 
4684   func = resolve_symbol("thr_continue_mutator");
4685   os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
4686 
4687   int size;
4688   void (*handler_info_func)(address *, int *);
4689   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4690   handler_info_func(&handler_start, &size);
4691   handler_end = handler_start + size;
4692 }
4693 
4694 
4695 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4696 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4697 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4698 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4699 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4700 int os::Solaris::_mutex_scope = USYNC_THREAD;
4701 
4702 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4703 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4704 int_fnP_cond_tP os::Solaris::_cond_signal;
4705 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4706 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4707 int_fnP_cond_tP os::Solaris::_cond_destroy;
4708 int os::Solaris::_cond_scope = USYNC_THREAD;
4709 
4710 void os::Solaris::synchronization_init() {
4711   if(UseLWPSynchronization) {
4712     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4713     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4714     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4715     os::Solaris::set_mutex_init(lwp_mutex_init);
4716     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4717     os::Solaris::set_mutex_scope(USYNC_THREAD);
4718 
4719     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4720     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4721     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4722     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4723     os::Solaris::set_cond_init(lwp_cond_init);
4724     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4725     os::Solaris::set_cond_scope(USYNC_THREAD);
4726   }
4727   else {
4728     os::Solaris::set_mutex_scope(USYNC_THREAD);
4729     os::Solaris::set_cond_scope(USYNC_THREAD);
4730 
4731     if(UsePthreads) {
4732       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4733       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4734       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4735       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4736       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4737 
4738       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4739       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4740       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4741       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4742       os::Solaris::set_cond_init(pthread_cond_default_init);
4743       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4744     }
4745     else {
4746       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4747       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4748       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4749       os::Solaris::set_mutex_init(::mutex_init);
4750       os::Solaris::set_mutex_destroy(::mutex_destroy);
4751 
4752       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4753       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4754       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4755       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4756       os::Solaris::set_cond_init(::cond_init);
4757       os::Solaris::set_cond_destroy(::cond_destroy);
4758     }
4759   }
4760 }
4761 
4762 bool os::Solaris::liblgrp_init() {
4763   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4764   if (handle != NULL) {
4765     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4766     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4767     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4768     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4769     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4770     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4771     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4772     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4773                                        dlsym(handle, "lgrp_cookie_stale")));
4774 
4775     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4776     set_lgrp_cookie(c);
4777     return true;
4778   }
4779   return false;
4780 }
4781 
4782 void os::Solaris::misc_sym_init() {
4783   address func;
4784 
4785   // getisax
4786   func = resolve_symbol_lazy("getisax");
4787   if (func != NULL) {
4788     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4789   }
4790 
4791   // meminfo
4792   func = resolve_symbol_lazy("meminfo");
4793   if (func != NULL) {
4794     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4795   }
4796 }
4797 
4798 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4799   assert(_getisax != NULL, "_getisax not set");
4800   return _getisax(array, n);
4801 }
4802 
4803 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4804 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4805 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
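// Resolved in init_pset_getloadavg_ptr() during os::init_2(); os::loadavg()
// falls back to ::getloadavg() when the pset primitive is unavailable
// (pre-Solaris 9, see the comment above os::loadavg()).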
4806 
4807 void init_pset_getloadavg_ptr(void) {
4808   pset_getloadavg_ptr =
4809     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4810   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4811     warning("pset_getloadavg function not found");
4812   }
4813 }
4814 
4815 int os::Solaris::_dev_zero_fd = -1;
4816 
4817 // this is called _before_ the global arguments have been parsed
4818 void os::init(void) {
4819   _initial_pid = getpid();
4820 
4821   max_hrtime = first_hrtime = gethrtime();
4822 
4823   init_random(1234567);
4824 
4825   page_size = sysconf(_SC_PAGESIZE);
4826   if (page_size == -1)
4827     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4828                   strerror(errno)));
4829   init_page_sizes((size_t) page_size);
4830 
4831   Solaris::initialize_system_info();
4832 
4833   // Initialize misc. symbols as soon as possible, so we can use them
4834   // if we need them.
4835   Solaris::misc_sym_init();
4836 
4837   int fd = ::open("/dev/zero", O_RDWR);
4838   if (fd < 0) {
4839     fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4840   } else {
4841     Solaris::set_dev_zero_fd(fd);
4842 
4843     // Close on exec, child won't inherit.
4844     fcntl(fd, F_SETFD, FD_CLOEXEC);
4845   }
4846 
4847   clock_tics_per_sec = CLK_TCK;
4848 
4849   // check if dladdr1() exists; dladdr1 can provide more information than
4850   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4851   // and is available on linker patches for 5.7 and 5.8.
4852   // libdl.so must have been loaded, this call is just an entry lookup
4853   void * hdl = dlopen("libdl.so", RTLD_NOW);
4854   if (hdl)
4855     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4856 
4857   // (Solaris only) this switches to calls that actually do locking.
4858   ThreadCritical::initialize();
4859 
4860   main_thread = thr_self();
4861 
4862   // Constant minimum stack size allowed. It must be at least
4863   // the minimum of what the OS supports (thr_min_stack()), and
4864   // enough to allow the thread to get to user bytecode execution.
4865   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4866   // If the pagesize of the VM is greater than 8K determine the appropriate
4867   // number of initial guard pages.  The user can change this with the
4868   // command line arguments, if needed.
4869   if (vm_page_size() > 8*K) {
4870     StackYellowPages = 1;
4871     StackRedPages = 1;
4872     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4873   }
4874 }
4875 
4876 // To install functions for atexit system call
4877 extern "C" {
4878   static void perfMemory_exit_helper() {
4879     perfMemory_exit();
4880   }
4881 }
4882 
4883 // this is called _after_ the global arguments have been parsed
4884 jint os::init_2(void) {
4885   // try to enable extended file IO ASAP, see 6431278
4886   os::Solaris::try_enable_extended_io();
4887 
4888   // Allocate a single page and mark it as readable for safepoint polling.  Also
4889   // use this first mmap call to check support for MAP_ALIGN.
4890   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4891                                                       page_size,
4892                                                       MAP_PRIVATE | MAP_ALIGN,
4893                                                       PROT_READ);
4894   if (polling_page == NULL) {
4895     has_map_align = false;
4896     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
4897                                                 PROT_READ);
4898   }
4899 
4900   os::set_polling_page(polling_page);
4901 
4902 #ifndef PRODUCT
4903   if( Verbose && PrintMiscellaneous )
4904     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
4905 #endif
4906 
4907   if (!UseMembar) {
4908     address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
4909     guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
4910     os::set_memory_serialize_page( mem_serialize_page );
4911 
4912 #ifndef PRODUCT
4913     if(Verbose && PrintMiscellaneous)
4914       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
4915 #endif
4916   }
4917 
4918   // Check minimum allowable stack size for thread creation and to initialize
4919   // the java system classes, including StackOverflowError - depends on page
4920   // size.  Add a page for compiler2 recursion in main thread.
4921   // Add in 2*BytesPerWord times page size to account for VM stack during
4922   // class initialization depending on 32 or 64 bit VM.
4923   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
4924             (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
4925                     2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
4926 
4927   size_t threadStackSizeInBytes = ThreadStackSize * K;
4928   if (threadStackSizeInBytes != 0 &&
4929     threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small; specify at least %dk",
4931                   os::Solaris::min_stack_allowed/K);
4932     return JNI_ERR;
4933   }
4934 
  // With a 64kb page size the usable default stack size is quite a bit
  // less.  Increase the stack for 64kb (or any larger than 8kb) pages;
  // this increases virtual memory fragmentation (since we're not creating
  // the stack on a power of 2 boundary).  The real fix for this
  // should be to fix the guard page mechanism.
4941 
4942   if (vm_page_size() > 8*K) {
4943       threadStackSizeInBytes = (threadStackSizeInBytes != 0)
4944          ? threadStackSizeInBytes +
4945            ((StackYellowPages + StackRedPages) * vm_page_size())
4946          : 0;
4947       ThreadStackSize = threadStackSizeInBytes/K;
4948   }
4949 
4950   // Make the stack size a multiple of the page size so that
4951   // the yellow/red zones can be guarded.
4952   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
4953         vm_page_size()));
4954 
4955   Solaris::libthread_init();
4956 
4957   if (UseNUMA) {
4958     if (!Solaris::liblgrp_init()) {
4959       UseNUMA = false;
4960     } else {
4961       size_t lgrp_limit = os::numa_get_groups_num();
4962       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
4963       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
4964       FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
4965       if (lgrp_num < 2) {
4966         // There's only one locality group, disable NUMA.
4967         UseNUMA = false;
4968       }
4969     }
4970     if (!UseNUMA && ForceNUMA) {
4971       UseNUMA = true;
4972     }
4973   }
4974 
4975   Solaris::signal_sets_init();
4976   Solaris::init_signal_mem();
4977   Solaris::install_signal_handlers();
4978 
4979   if (libjsigversion < JSIG_VERSION_1_4_1) {
4980     Maxlibjsigsigs = OLDMAXSIGNUM;
4981   }
4982 
4983   // initialize synchronization primitives to use either thread or
4984   // lwp synchronization (controlled by UseLWPSynchronization)
4985   Solaris::synchronization_init();
4986 
4987   if (MaxFDLimit) {
4988     // set the number of file descriptors to max. print out error
4989     // if getrlimit/setrlimit fails but continue regardless.
4990     struct rlimit nbr_files;
4991     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4992     if (status != 0) {
4993       if (PrintMiscellaneous && (Verbose || WizardMode))
4994         perror("os::init_2 getrlimit failed");
4995     } else {
4996       nbr_files.rlim_cur = nbr_files.rlim_max;
4997       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4998       if (status != 0) {
4999         if (PrintMiscellaneous && (Verbose || WizardMode))
5000           perror("os::init_2 setrlimit failed");
5001       }
5002     }
5003   }
5004 
  // Calculate theoretical max. size of Threads to guard against
  // artificial out-of-memory situations, where all available address
  // space has been reserved by thread stacks. Default stack size is 1Mb.
5008   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
5009     JavaThread::stack_size_at_create() : (1*K*K);
5010   assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris has a maximum of 4Gb of user address space. Calculate the thread limit
  // at which we should start doing Virtual Memory banging; currently that is when
  // the threads have used all but 200Mb of space.
5014   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
5015   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
5016 
5017   // at-exit methods are called in the reverse order of their registration.
5018   // In Solaris 7 and earlier, atexit functions are called on return from
5019   // main or as a result of a call to exit(3C). There can be only 32 of
5020   // these functions registered and atexit() does not set errno. In Solaris
5021   // 8 and later, there is no limit to the number of functions registered
5022   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
5023   // functions are called upon dlclose(3DL) in addition to return from main
5024   // and exit(3C).
5025 
5026   if (PerfAllowAtExitRegistration) {
5027     // only register atexit functions if PerfAllowAtExitRegistration is set.
5028     // atexit functions can be delayed until process exit time, which
5029     // can be problematic for embedded VM situations. Embedded VMs should
5030     // call DestroyJavaVM() to assure that VM resources are released.
5031 
5032     // note: perfMemory_exit_helper atexit function may be removed in
5033     // the future if the appropriate cleanup code can be added to the
5034     // VM_Exit VMOperation's doit method.
5035     if (atexit(perfMemory_exit_helper) != 0) {
5036       warning("os::init2 atexit(perfMemory_exit_helper) failed");
5037     }
5038   }
5039 
5040   // Init pset_loadavg function pointer
5041   init_pset_getloadavg_ptr();
5042 
5043   return JNI_OK;
5044 }
5045 
5046 void os::init_3(void) {
5047   return;
5048 }
5049 
5050 // Mark the polling page as unreadable
5051 void os::make_polling_page_unreadable(void) {
5052   if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
5053     fatal("Could not disable polling page");
5054 };
5055 
5056 // Mark the polling page as readable
5057 void os::make_polling_page_readable(void) {
5058   if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
5059     fatal("Could not enable polling page");
5060 };
5061 
5062 // OS interface.
5063 
5064 bool os::check_heap(bool force) { return true; }
5065 
5066 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
5067 static vsnprintf_t sol_vsnprintf = NULL;
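// Resolved lazily by local_vsnprintf() below: it prefers __vsnprintf and falls
// back to vsnprintf, searching objects loaded after libjvm before those loaded
// before it.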
5068 
5069 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
5070   if (!sol_vsnprintf) {
    // search for the named symbol in the objects that were loaded after libjvm
5072     void* where = RTLD_NEXT;
5073     if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
5074         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
5075     if (!sol_vsnprintf){
      // search for the named symbol in the objects that were loaded before libjvm
5077       where = RTLD_DEFAULT;
5078       if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
5079         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
5080       assert(sol_vsnprintf != NULL, "vsnprintf not found");
5081     }
5082   }
5083   return (*sol_vsnprintf)(buf, count, fmt, argptr);
5084 }
5085 
5086 
5087 // Is a (classpath) directory empty?
5088 bool os::dir_is_empty(const char* path) {
5089   DIR *dir = NULL;
5090   struct dirent *ptr;
5091 
5092   dir = opendir(path);
5093   if (dir == NULL) return true;
5094 
5095   /* Scan the directory */
5096   bool result = true;
5097   char buf[sizeof(struct dirent) + MAX_PATH];
5098   struct dirent *dbuf = (struct dirent *) buf;
5099   while (result && (ptr = readdir(dir, dbuf)) != NULL) {
5100     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
5101       result = false;
5102     }
5103   }
5104   closedir(dir);
5105   return result;
5106 }
5107 
5108 // This code originates from JDK's sysOpen and open64_w
5109 // from src/solaris/hpi/src/system_md.c
5110 
5111 #ifndef O_DELETE
5112 #define O_DELETE 0x10000
5113 #endif
5114 
5115 // Open a file. Unlink the file immediately after open returns
5116 // if the specified oflag has the O_DELETE flag set.
5117 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
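//
// A hypothetical caller, for illustration only:
//   int fd = os::open("/tmp/scratch.tmp", O_RDWR | O_CREAT | O_DELETE, 0666);
//   // the path is unlinked as soon as open returns; the data disappears
//   // once fd is closed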
5118 
5119 int os::open(const char *path, int oflag, int mode) {
5120   if (strlen(path) > MAX_PATH - 1) {
5121     errno = ENAMETOOLONG;
5122     return -1;
5123   }
5124   int fd;
5125   int o_delete = (oflag & O_DELETE);
5126   oflag = oflag & ~O_DELETE;
5127 
5128   fd = ::open64(path, oflag, mode);
5129   if (fd == -1) return -1;
5130 
5131   //If the open succeeded, the file might still be a directory
5132   {
5133     struct stat64 buf64;
5134     int ret = ::fstat64(fd, &buf64);
5135     int st_mode = buf64.st_mode;
5136 
5137     if (ret != -1) {
5138       if ((st_mode & S_IFMT) == S_IFDIR) {
5139         errno = EISDIR;
5140         ::close(fd);
5141         return -1;
5142       }
5143     } else {
5144       ::close(fd);
5145       return -1;
5146     }
5147   }
5148     /*
5149      * 32-bit Solaris systems suffer from:
5150      *
5151      * - an historical default soft limit of 256 per-process file
5152      *   descriptors that is too low for many Java programs.
5153      *
5154      * - a design flaw where file descriptors created using stdio
5155      *   fopen must be less than 256, _even_ when the first limit above
5156      *   has been raised.  This can cause calls to fopen (but not calls to
5157      *   open, for example) to fail mysteriously, perhaps in 3rd party
5158      *   native code (although the JDK itself uses fopen).  One can hardly
5159      *   criticize them for using this most standard of all functions.
5160      *
5161      * We attempt to make everything work anyways by:
5162      *
5163      * - raising the soft limit on per-process file descriptors beyond
5164      *   256
5165      *
5166      * - As of Solaris 10u4, we can request that Solaris raise the 256
5167      *   stdio fopen limit by calling function enable_extended_FILE_stdio.
5168      *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
5169      *
5170      * - If we are stuck on an old (pre 10u4) Solaris system, we can
5171      *   workaround the bug by remapping non-stdio file descriptors below
5172      *   256 to ones beyond 256, which is done below.
5173      *
5174      * See:
5175      * 1085341: 32-bit stdio routines should support file descriptors >255
5176      * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
5177      * 6431278: Netbeans crash on 32 bit Solaris: need to call
5178      *          enable_extended_FILE_stdio() in VM initialisation
5179      * Giri Mandalika's blog
5180      * http://technopark02.blogspot.com/2005_05_01_archive.html
5181      */
5182 #ifndef  _LP64
5183      if ((!enabled_extended_FILE_stdio) && fd < 256) {
5184          int newfd = ::fcntl(fd, F_DUPFD, 256);
5185          if (newfd != -1) {
5186              ::close(fd);
5187              fd = newfd;
5188          }
5189      }
5190 #endif // 32-bit Solaris
5191     /*
5192      * All file descriptors that are opened in the JVM and not
5193      * specifically destined for a subprocess should have the
5194      * close-on-exec flag set.  If we don't set it, then careless 3rd
5195      * party native code might fork and exec without closing all
5196      * appropriate file descriptors (e.g. as we do in closeDescriptors in
5197      * UNIXProcess.c), and this in turn might:
5198      *
5199      * - cause end-of-file to fail to be detected on some file
5200      *   descriptors, resulting in mysterious hangs, or
5201      *
5202      * - might cause an fopen in the subprocess to fail on a system
5203      *   suffering from bug 1085341.
5204      *
5205      * (Yes, the default setting of the close-on-exec flag is a Unix
5206      * design flaw)
5207      *
5208      * See:
5209      * 1085341: 32-bit stdio routines should support file descriptors >255
5210      * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
5211      * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
5212      */
5213 #ifdef FD_CLOEXEC
5214     {
5215         int flags = ::fcntl(fd, F_GETFD);
5216         if (flags != -1)
5217             ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
5218     }
5219 #endif
5220 
5221   if (o_delete != 0) {
5222     ::unlink(path);
5223   }
5224   return fd;
5225 }
5226 
5227 // create binary file, rewriting existing file if required
5228 int os::create_binary_file(const char* path, bool rewrite_existing) {
5229   int oflags = O_WRONLY | O_CREAT;
5230   if (!rewrite_existing) {
5231     oflags |= O_EXCL;
5232   }
5233   return ::open64(path, oflags, S_IREAD | S_IWRITE);
5234 }
5235 
5236 // return current position of file pointer
5237 jlong os::current_file_offset(int fd) {
5238   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
5239 }
5240 
5241 // move file pointer to the specified offset
5242 jlong os::seek_to_file_offset(int fd, jlong offset) {
5243   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
5244 }
5245 
5246 jlong os::lseek(int fd, jlong offset, int whence) {
5247   return (jlong) ::lseek64(fd, offset, whence);
5248 }
5249 
5250 char * os::native_path(char *path) {
5251   return path;
5252 }
5253 
5254 int os::ftruncate(int fd, jlong length) {
5255   return ::ftruncate64(fd, length);
5256 }
5257 
5258 int os::fsync(int fd)  {
5259   RESTARTABLE_RETURN_INT(::fsync(fd));
5260 }
5261 
5262 int os::available(int fd, jlong *bytes) {
5263   jlong cur, end;
5264   int mode;
5265   struct stat64 buf64;
5266 
5267   if (::fstat64(fd, &buf64) >= 0) {
5268     mode = buf64.st_mode;
5269     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
5270       /*
5271       * XXX: is the following call interruptible? If so, this might
5272       * need to go through the INTERRUPT_IO() wrapper as for other
5273       * blocking, interruptible calls in this file.
5274       */
5275       int n,ioctl_return;
5276 
5277       INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n),ioctl_return,os::Solaris::clear_interrupted);
5278       if (ioctl_return>= 0) {
5279           *bytes = n;
5280         return 1;
5281       }
5282     }
5283   }
5284   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
5285     return 0;
5286   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
5287     return 0;
5288   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
5289     return 0;
5290   }
5291   *bytes = end - cur;
5292   return 1;
5293 }
5294 
5295 // Map a block of memory.
5296 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
5297                      char *addr, size_t bytes, bool read_only,
5298                      bool allow_exec) {
5299   int prot;
5300   int flags;
5301 
5302   if (read_only) {
5303     prot = PROT_READ;
5304     flags = MAP_SHARED;
5305   } else {
5306     prot = PROT_READ | PROT_WRITE;
5307     flags = MAP_PRIVATE;
5308   }
5309 
5310   if (allow_exec) {
5311     prot |= PROT_EXEC;
5312   }
5313 
5314   if (addr != NULL) {
5315     flags |= MAP_FIXED;
5316   }
5317 
5318   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5319                                      fd, file_offset);
5320   if (mapped_address == MAP_FAILED) {
5321     return NULL;
5322   }
5323   return mapped_address;
5324 }
5325 
5326 
5327 // Remap a block of memory.
5328 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5329                        char *addr, size_t bytes, bool read_only,
5330                        bool allow_exec) {
5331   // same as map_memory() on this OS
5332   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5333                         allow_exec);
5334 }
5335 
5336 
5337 // Unmap a block of memory.
5338 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5339   return munmap(addr, bytes) == 0;
5340 }
5341 
5342 void os::pause() {
5343   char filename[MAX_PATH];
5344   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5345     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
5346   } else {
5347     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5348   }
5349 
5350   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5351   if (fd != -1) {
5352     struct stat buf;
5353     ::close(fd);
5354     while (::stat(filename, &buf) == 0) {
5355       (void)::poll(NULL, 0, 100);
5356     }
5357   } else {
5358     jio_fprintf(stderr,
5359       "Could not open pause file '%s', continuing immediately.\n", filename);
5360   }
5361 }
5362 
5363 #ifndef PRODUCT
5364 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5365 // Turn this on if you need to trace synch operations.
5366 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5367 // and call record_synch_enable and record_synch_disable
5368 // around the computation of interest.
5369 
5370 void record_synch(char* name, bool returning);  // defined below
5371 
5372 class RecordSynch {
5373   char* _name;
5374  public:
5375   RecordSynch(char* name) :_name(name)
5376                  { record_synch(_name, false); }
5377   ~RecordSynch() { record_synch(_name,   true);  }
5378 };
5379 
5380 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
5381 extern "C" ret name params {                                    \
5382   typedef ret name##_t params;                                  \
5383   static name##_t* implem = NULL;                               \
5384   static int callcount = 0;                                     \
5385   if (implem == NULL) {                                         \
5386     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
5387     if (implem == NULL)  fatal(dlerror());                      \
5388   }                                                             \
5389   ++callcount;                                                  \
5390   RecordSynch _rs(#name);                                       \
5391   inner;                                                        \
5392   return implem args;                                           \
5393 }
5394 // in dbx, examine callcounts this way:
5395 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
5396 
5397 #define CHECK_POINTER_OK(p) \
5398   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
5399 #define CHECK_MU \
5400   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5401 #define CHECK_CV \
5402   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5403 #define CHECK_P(p) \
5404   if (!CHECK_POINTER_OK(p))  fatal(false,  "Pointer must be in C heap only.");
5405 
5406 #define CHECK_MUTEX(mutex_op) \
5407 CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5408 
5409 CHECK_MUTEX(   mutex_lock)
5410 CHECK_MUTEX(  _mutex_lock)
5411 CHECK_MUTEX( mutex_unlock)
5412 CHECK_MUTEX(_mutex_unlock)
5413 CHECK_MUTEX( mutex_trylock)
5414 CHECK_MUTEX(_mutex_trylock)
5415 
5416 #define CHECK_COND(cond_op) \
5417 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);
5418 
5419 CHECK_COND( cond_wait);
5420 CHECK_COND(_cond_wait);
5421 CHECK_COND(_cond_wait_cancel);
5422 
5423 #define CHECK_COND2(cond_op) \
5424 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);
5425 
5426 CHECK_COND2( cond_timedwait);
5427 CHECK_COND2(_cond_timedwait);
5428 CHECK_COND2(_cond_timedwait_cancel);
5429 
5430 // do the _lwp_* versions too
5431 #define mutex_t lwp_mutex_t
5432 #define cond_t  lwp_cond_t
5433 CHECK_MUTEX(  _lwp_mutex_lock)
5434 CHECK_MUTEX(  _lwp_mutex_unlock)
5435 CHECK_MUTEX(  _lwp_mutex_trylock)
5436 CHECK_MUTEX( __lwp_mutex_lock)
5437 CHECK_MUTEX( __lwp_mutex_unlock)
5438 CHECK_MUTEX( __lwp_mutex_trylock)
5439 CHECK_MUTEX(___lwp_mutex_lock)
5440 CHECK_MUTEX(___lwp_mutex_unlock)
5441 
5442 CHECK_COND(  _lwp_cond_wait);
5443 CHECK_COND( __lwp_cond_wait);
5444 CHECK_COND(___lwp_cond_wait);
5445 
5446 CHECK_COND2(  _lwp_cond_timedwait);
5447 CHECK_COND2( __lwp_cond_timedwait);
5448 #undef mutex_t
5449 #undef cond_t
5450 
5451 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5452 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5453 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5454 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5455 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5456 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5457 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5458 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5459 
5460 
5461 // recording machinery:
5462 
5463 enum { RECORD_SYNCH_LIMIT = 200 };
5464 char* record_synch_name[RECORD_SYNCH_LIMIT];
5465 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5466 bool record_synch_returning[RECORD_SYNCH_LIMIT];
5467 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5468 int record_synch_count = 0;
5469 bool record_synch_enabled = false;
5470 
5471 // in dbx, examine recorded data this way:
5472 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5473 
5474 void record_synch(char* name, bool returning) {
5475   if (record_synch_enabled) {
5476     if (record_synch_count < RECORD_SYNCH_LIMIT) {
5477       record_synch_name[record_synch_count] = name;
5478       record_synch_returning[record_synch_count] = returning;
5479       record_synch_thread[record_synch_count] = thr_self();
5480       record_synch_arg0ptr[record_synch_count] = &name;
5481       record_synch_count++;
5482     }
5483     // put more checking code here:
5484     // ...
5485   }
5486 }
5487 
5488 void record_synch_enable() {
5489   // start collecting trace data, if not already doing so
5490   if (!record_synch_enabled)  record_synch_count = 0;
5491   record_synch_enabled = true;
5492 }
5493 
5494 void record_synch_disable() {
5495   // stop collecting trace data
5496   record_synch_enabled = false;
5497 }
5498 
5499 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5500 #endif // PRODUCT
5501 
5502 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5503 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5504                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
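// (thr_time_off is the byte offset of pr_utime within prusage_t; thr_time_size
// spans pr_utime up to pr_ttime, the range that os::thread_cpu_time() below
// pread()s to obtain user and system CPU time.)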
5505 
5506 
5507 // JVMTI & JVM monitoring and management support
5508 // The thread_cpu_time() and current_thread_cpu_time() are only
5509 // supported if is_thread_cpu_time_supported() returns true.
5510 // They are not supported on Solaris T1.
5511 
5512 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5513 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5514 // of a thread.
5515 //
5516 // current_thread_cpu_time() and thread_cpu_time(Thread *)
// return the fast estimate available on the platform.
5518 
5519 // hrtime_t gethrvtime() return value includes
5520 // user time but does not include system time
5521 jlong os::current_thread_cpu_time() {
5522   return (jlong) gethrvtime();
5523 }
5524 
5525 jlong os::thread_cpu_time(Thread *thread) {
5526   // return user level CPU time only to be consistent with
5527   // what current_thread_cpu_time returns.
5528   // thread_cpu_time_info() must be changed if this changes
5529   return os::thread_cpu_time(thread, false /* user time only */);
5530 }
5531 
5532 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5533   if (user_sys_cpu_time) {
5534     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5535   } else {
5536     return os::current_thread_cpu_time();
5537   }
5538 }
5539 
5540 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5541   char proc_name[64];
5542   int count;
5543   prusage_t prusage;
5544   jlong lwp_time;
5545   int fd;
5546 
5547   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5548                      getpid(),
5549                      thread->osthread()->lwp_id());
5550   fd = ::open(proc_name, O_RDONLY);
5551   if ( fd == -1 ) return -1;
5552 
5553   do {
5554     count = ::pread(fd,
5555                   (void *)&prusage.pr_utime,
5556                   thr_time_size,
5557                   thr_time_off);
5558   } while (count < 0 && errno == EINTR);
5559   ::close(fd);
5560   if ( count < 0 ) return -1;
5561 
5562   if (user_sys_cpu_time) {
5563     // user + system CPU time
5564     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5565                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5566                  (jlong)prusage.pr_stime.tv_nsec +
5567                  (jlong)prusage.pr_utime.tv_nsec;
5568   } else {
5569     // user level CPU time only
5570     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5571                 (jlong)prusage.pr_utime.tv_nsec;
5572   }
5573 
5574   return(lwp_time);
5575 }
5576 
5577 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5578   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5579   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5580   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5581   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5582 }
5583 
5584 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5585   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5586   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5587   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5588   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5589 }
5590 
5591 bool os::is_thread_cpu_time_supported() {
5592   if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
5593     return true;
5594   } else {
5595     return false;
5596   }
5597 }
5598 
5599 // System loadavg support.  Returns -1 if load average cannot be obtained.
5600 // Return the load average for our processor set if the primitive exists
5601 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
5602 int os::loadavg(double loadavg[], int nelem) {
5603   if (pset_getloadavg_ptr != NULL) {
5604     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5605   } else {
5606     return ::getloadavg(loadavg, nelem);
5607   }
5608 }
5609 
5610 //---------------------------------------------------------------------------------
5611 
5612 bool os::find(address addr, outputStream* st) {
5613   Dl_info dlinfo;
5614   memset(&dlinfo, 0, sizeof(dlinfo));
5615   if (dladdr(addr, &dlinfo) != 0) {
5616     st->print(PTR_FORMAT ": ", addr);
5617     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5618       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5619     } else if (dlinfo.dli_fbase != NULL)
5620       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5621     else
5622       st->print("<absolute address>");
5623     if (dlinfo.dli_fname != NULL) {
5624       st->print(" in %s", dlinfo.dli_fname);
5625     }
5626     if (dlinfo.dli_fbase != NULL) {
5627       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5628     }
5629     st->cr();
5630 
5631     if (Verbose) {
5632       // decode some bytes around the PC
5633       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5634       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5635       address       lowest = (address) dlinfo.dli_sname;
5636       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
5637       if (begin < lowest)  begin = lowest;
5638       Dl_info dlinfo2;
5639       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5640           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
5641         end = (address) dlinfo2.dli_saddr;
5642       Disassembler::decode(begin, end, st);
5643     }
5644     return true;
5645   }
5646   return false;
5647 }
5648 
5649 // Following function has been added to support HotSparc's libjvm.so running
5650 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
5651 // src/solaris/hpi/native_threads in the EVM codebase.
5652 //
5653 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5654 // libraries and should thus be removed. We will leave it behind for a while
// until we no longer want to be able to run on top of 1.3.0 Solaris production
5656 // JDK. See 4341971.
5657 
5658 #define STACK_SLACK 0x800
5659 
5660 extern "C" {
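  // Returns the number of stack bytes remaining between the current frame and
  // the low end of the current thread's stack segment, less STACK_SLACK bytes
  // of headroom.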
5661   intptr_t sysThreadAvailableStackWithSlack() {
5662     stack_t st;
5663     intptr_t retval, stack_top;
5664     retval = thr_stksegment(&st);
5665     assert(retval == 0, "incorrect return value from thr_stksegment");
5666     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5667     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
5668     stack_top=(intptr_t)st.ss_sp-st.ss_size;
5669     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5670   }
5671 }
5672 
5673 // ObjectMonitor park-unpark infrastructure ...
5674 //
5675 // We implement Solaris and Linux PlatformEvents with the
5676 // obvious condvar-mutex-flag triple.
5677 // Another alternative that works quite well is pipes:
5678 // Each PlatformEvent consists of a pipe-pair.
5679 // The thread associated with the PlatformEvent
5680 // calls park(), which reads from the input end of the pipe.
5681 // Unpark() writes into the other end of the pipe.
5682 // The write-side of the pipe must be set NDELAY.
5683 // Unfortunately pipes consume a large # of handles.
5684 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5685 // Using pipes for the 1st few threads might be workable, however.
5686 //
5687 // park() is permitted to return spuriously.
5688 // Callers of park() should wrap the call to park() in
5689 // an appropriate loop.  A litmus test for the correct
5690 // usage of park is the following: if park() were modified
5691 // to immediately return 0 your code should still work,
5692 // albeit degenerating to a spin loop.
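//
// A hypothetical caller, for illustration only:
//   while (!condition_of_interest()) {
//     ev->park();   // may return spuriously; re-check the condition and loop
//   }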
5693 //
5694 // An interesting optimization for park() is to use a trylock()
5695 // to attempt to acquire the mutex.  If the trylock() fails
5696 // then we know that a concurrent unpark() operation is in-progress.
5697 // in that case the park() code could simply set _count to 0
5698 // and return immediately.  The subsequent park() operation *might*
5699 // return immediately.  That's harmless as the caller of park() is
// expected to loop.  By using trylock() we will have avoided a
// context switch caused by contention on the per-thread mutex.
5702 //
5703 // TODO-FIXME:
5704 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
5705 //     objectmonitor implementation.
5706 // 2.  Collapse the JSR166 parker event, and the
5707 //     objectmonitor ParkEvent into a single "Event" construct.
5708 // 3.  In park() and unpark() add:
5709 //     assert (Thread::current() == AssociatedWith).
5710 // 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
5711 //     1-out-of-N park() operations will return immediately.
5712 //
5713 // _Event transitions in park()
5714 //   -1 => -1 : illegal
5715 //    1 =>  0 : pass - return immediately
5716 //    0 => -1 : block
5717 //
5718 // _Event serves as a restricted-range semaphore.
5719 //
5720 // Another possible encoding of _Event would be with
5721 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5722 //
5723 // TODO-FIXME: add DTRACE probes for:
5724 // 1.   Tx parks
5725 // 2.   Ty unparks Tx
5726 // 3.   Tx resumes from park
5727 
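// Illustrative caller-side sketch (not used in this file): because park()
// may return spuriously, callers are expected to re-check their own
// condition in a loop.  Assuming a hypothetical flag _resource_ready that
// is set by the thread which later calls unpark(), the pattern is:
//
//   while (!_resource_ready) {
//     _ParkEvent->park();   // may return spuriously; the loop re-checks
//   }
//
// This satisfies the litmus test above: if park() always returned
// immediately, the loop would degrade into a spin but remain correct.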
5728 
5729 // value determined through experimentation
5730 #define ROUNDINGFIX 11
5731 
5732 // utility to compute the abstime argument to timedwait.
5733 // TODO-FIXME: switch from compute_abstime() to unpackTime().
5734 
5735 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5736   // millis is the relative timeout time
5737   // abstime will be the absolute timeout time
5738   if (millis < 0)  millis = 0;
5739   struct timeval now;
5740   int status = gettimeofday(&now, NULL);
5741   assert(status == 0, "gettimeofday");
5742   jlong seconds = millis / 1000;
5743   jlong max_wait_period;
5744 
5745   if (UseLWPSynchronization) {
5746     // forward port of fix for 4275818 (not sleeping long enough)
5747     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5748     // _lwp_cond_timedwait() used a round_down algorithm rather
5749     // than a round_up. For millis less than our roundfactor
5750     // it rounded down to 0 which doesn't meet the spec.
5751     // For millis > roundfactor we may return a bit sooner, but
    // since we cannot accurately identify the patch level and
    // this has already been fixed in Solaris 9 and 8 we will
    // leave it alone rather than always rounding down.

    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6.
    max_wait_period = 21000000;
5760   } else {
5761     max_wait_period = 50000000;
5762   }
5763   millis %= 1000;
5764   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5765      seconds = max_wait_period;
5766   }
5767   abstime->tv_sec = now.tv_sec  + seconds;
5768   long       usec = now.tv_usec + millis * 1000;
5769   if (usec >= 1000000) {
5770     abstime->tv_sec += 1;
5771     usec -= 1000000;
5772   }
5773   abstime->tv_nsec = usec * 1000;
5774   return abstime;
5775 }
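
// Worked example for compute_abstime() (illustrative only, with made-up
// gettimeofday() values): with UseLWPSynchronization off and millis == 2500,
// seconds becomes 2 and the remaining 500 ms contribute 500000 us.
// Assuming now.tv_sec == 1000 and now.tv_usec == 700000:
//
//   usec             = 700000 + 500 * 1000 = 1200000   (>= 1000000, so carry)
//   abstime->tv_sec  = 1000 + 2 + 1        = 1003
//   abstime->tv_nsec = 200000 * 1000       = 200000000
//
// i.e. the absolute deadline lies 2.5 s after "now", as intended.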
5776 
5777 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
5778 // Conceptually TryPark() should be equivalent to park(0).
5779 
5780 int os::PlatformEvent::TryPark() {
5781   for (;;) {
5782     const int v = _Event ;
5783     guarantee ((v == 0) || (v == 1), "invariant") ;
5784     if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
5785   }
5786 }
5787 
5788 void os::PlatformEvent::park() {           // AKA: down()
5789   // Invariant: Only the thread associated with the Event/PlatformEvent
5790   // may call park().
5791   int v ;
5792   for (;;) {
5793       v = _Event ;
5794       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5795   }
5796   guarantee (v >= 0, "invariant") ;
5797   if (v == 0) {
5798      // Do this the hard way by blocking ...
5799      // See http://monaco.sfbay/detail.jsf?cr=5094058.
5800      // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5801      // Only for SPARC >= V8PlusA
5802 #if defined(__sparc) && defined(COMPILER2)
5803      if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5804 #endif
5805      int status = os::Solaris::mutex_lock(_mutex);
5806      assert_status(status == 0, status,  "mutex_lock");
5807      guarantee (_nParked == 0, "invariant") ;
5808      ++ _nParked ;
5809      while (_Event < 0) {
5810         // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5811         // Treat this the same as if the wait was interrupted
5812         // With usr/lib/lwp going to kernel, always handle ETIME
5813         status = os::Solaris::cond_wait(_cond, _mutex);
5814         if (status == ETIME) status = EINTR ;
5815         assert_status(status == 0 || status == EINTR, status, "cond_wait");
5816      }
5817      -- _nParked ;
5818      _Event = 0 ;
5819      status = os::Solaris::mutex_unlock(_mutex);
5820      assert_status(status == 0, status, "mutex_unlock");
5821     // Paranoia to ensure our locked and lock-free paths interact
5822     // correctly with each other.
5823     OrderAccess::fence();
5824   }
5825 }
5826 
5827 int os::PlatformEvent::park(jlong millis) {
5828   guarantee (_nParked == 0, "invariant") ;
5829   int v ;
5830   for (;;) {
5831       v = _Event ;
5832       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5833   }
5834   guarantee (v >= 0, "invariant") ;
5835   if (v != 0) return OS_OK ;
5836 
5837   int ret = OS_TIMEOUT;
5838   timestruc_t abst;
5839   compute_abstime (&abst, millis);
5840 
5841   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5842   // For Solaris SPARC set fprs.FEF=0 prior to parking.
5843   // Only for SPARC >= V8PlusA
5844 #if defined(__sparc) && defined(COMPILER2)
5845  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5846 #endif
5847   int status = os::Solaris::mutex_lock(_mutex);
5848   assert_status(status == 0, status, "mutex_lock");
5849   guarantee (_nParked == 0, "invariant") ;
5850   ++ _nParked ;
5851   while (_Event < 0) {
5852      int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5853      assert_status(status == 0 || status == EINTR ||
5854                    status == ETIME || status == ETIMEDOUT,
5855                    status, "cond_timedwait");
5856      if (!FilterSpuriousWakeups) break ;                // previous semantics
5857      if (status == ETIME || status == ETIMEDOUT) break ;
5858      // We consume and ignore EINTR and spurious wakeups.
5859   }
5860   -- _nParked ;
5861   if (_Event >= 0) ret = OS_OK ;
5862   _Event = 0 ;
5863   status = os::Solaris::mutex_unlock(_mutex);
5864   assert_status(status == 0, status, "mutex_unlock");
5865   // Paranoia to ensure our locked and lock-free paths interact
5866   // correctly with each other.
5867   OrderAccess::fence();
5868   return ret;
5869 }
5870 
5871 void os::PlatformEvent::unpark() {
5872   // Transitions for _Event:
5873   //    0 :=> 1
5874   //    1 :=> 1
5875   //   -1 :=> either 0 or 1; must signal target thread
5876   //          That is, we can safely transition _Event from -1 to either
5877   //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
5878   //          unpark() calls.
5879   // See also: "Semaphores in Plan 9" by Mullender & Cox
5880   //
5881   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5882   // that it will take two back-to-back park() calls for the owning
5883   // thread to block. This has the benefit of forcing a spurious return
5884   // from the first park() call after an unpark() call which will help
5885   // shake out uses of park() and unpark() without condition variables.
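  //
  // Concrete trace of those transitions (illustrative sketch; 'ev' is a
  // hypothetical os::PlatformEvent*):
  //
  //   ev->unpark();   // _Event: 0 -> 1, no waiter to signal
  //   ev->park();     // 1 -> 0, consumes the permit, returns immediately
  //   ev->park();     // 0 -> -1, no permit, blocks until the next unpark()
  //
  // The first park() after an unpark() therefore never blocks.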
5886 
5887   if (Atomic::xchg(1, &_Event) >= 0) return;
5888 
5889   // If the thread associated with the event was parked, wake it.
  // Wait for the thread associated with the PlatformEvent to vacate.
5891   int status = os::Solaris::mutex_lock(_mutex);
5892   assert_status(status == 0, status, "mutex_lock");
5893   int AnyWaiters = _nParked;
5894   status = os::Solaris::mutex_unlock(_mutex);
5895   assert_status(status == 0, status, "mutex_unlock");
5896   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5897   if (AnyWaiters != 0) {
    // We intentionally signal *after* dropping the lock
5899     // to avoid a common class of futile wakeups.
5900     status = os::Solaris::cond_signal(_cond);
5901     assert_status(status == 0, status, "cond_signal");
5902   }
5903 }
5904 
5905 // JSR166
5906 // -------------------------------------------------------
5907 
/*
 * The Solaris and Linux implementations of park/unpark are fairly
 * conservative for now, but can be improved. They currently use a
 * mutex/condvar pair, plus _counter.
 * Park decrements _counter if > 0, else does a condvar wait.  Unpark
 * sets _counter to 1 and signals the condvar.  Only one thread ever waits
 * on the condvar. Contention seen when trying to park implies that someone
 * is unparking you, so don't wait. And spurious returns are fine, so there
 * is no need to track notifications.
 */
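
// A minimal, self-contained sketch of that mutex/condvar + _counter scheme
// (illustrative only; it uses plain POSIX primitives rather than the
// os::Solaris wrappers below, and the SimpleParker name is hypothetical):
//
//   #include <pthread.h>
//
//   class SimpleParker {
//     int             _counter;   // 0 = no permit, 1 = permit available
//     pthread_mutex_t _mutex;
//     pthread_cond_t  _cond;
//    public:
//     SimpleParker() : _counter(0) {
//       pthread_mutex_init(&_mutex, NULL);
//       pthread_cond_init(&_cond, NULL);
//     }
//     void park() {
//       pthread_mutex_lock(&_mutex);
//       while (_counter == 0) pthread_cond_wait(&_cond, &_mutex);
//       _counter = 0;             // consume the single permit
//       pthread_mutex_unlock(&_mutex);
//     }
//     void unpark() {
//       pthread_mutex_lock(&_mutex);
//       _counter = 1;             // at most one permit is retained
//       pthread_cond_signal(&_cond);
//       pthread_mutex_unlock(&_mutex);
//     }
//   };
//
// The real Parker below adds a lock-free fast path on _counter, interrupt
// and safepoint interaction, and the timed wait variants.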
5918 
5919 #define MAX_SECS 100000000
/*
 * This code is common to Linux and Solaris and will be moved to a
 * common place in Dolphin.
 *
 * The passed-in time value is either a relative time in nanoseconds
 * or an absolute time in milliseconds. Either way it has to be unpacked
 * into suitable seconds and nanoseconds components and stored in the
 * given timespec structure.
 * Given that time is a 64-bit value and the time_t used in the timespec is
 * only a signed 32-bit value (except on 64-bit Linux), we have to watch for
 * overflow if times far in the future are given. Furthermore, on Solaris
 * versions prior to 10 there is a restriction (see cond_timedwait) that the
 * specified number of seconds, in abstime, be less than current_time +
 * 100,000,000. As it will be 28 years before "now + 100000000" overflows,
 * we can ignore that overflow and simply impose a hard limit on seconds
 * using the value of "now + 100,000,000". This places a limit on the
 * timeout of about 3.17 years from "now".
 */
5938 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5939   assert (time > 0, "convertTime");
5940 
5941   struct timeval now;
5942   int status = gettimeofday(&now, NULL);
5943   assert(status == 0, "gettimeofday");
5944 
5945   time_t max_secs = now.tv_sec + MAX_SECS;
5946 
5947   if (isAbsolute) {
5948     jlong secs = time / 1000;
5949     if (secs > max_secs) {
5950       absTime->tv_sec = max_secs;
5951     }
5952     else {
5953       absTime->tv_sec = secs;
5954     }
5955     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5956   }
5957   else {
5958     jlong secs = time / NANOSECS_PER_SEC;
5959     if (secs >= MAX_SECS) {
5960       absTime->tv_sec = max_secs;
5961       absTime->tv_nsec = 0;
5962     }
5963     else {
5964       absTime->tv_sec = now.tv_sec + secs;
5965       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5966       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5967         absTime->tv_nsec -= NANOSECS_PER_SEC;
5968         ++absTime->tv_sec; // note: this must be <= max_secs
5969       }
5970     }
5971   }
5972   assert(absTime->tv_sec >= 0, "tv_sec < 0");
5973   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5974   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5975   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5976 }
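
// Worked example for unpackTime() (illustrative only, with made-up
// gettimeofday() values): a relative timeout of 1500000000 ns with
// now.tv_sec == 2000 and now.tv_usec == 900000 gives
//
//   secs             = 1
//   absTime->tv_sec  = 2000 + 1 = 2001
//   absTime->tv_nsec = 500000000 + 900000 * 1000 = 1400000000
//                      -> carry: tv_nsec = 400000000, tv_sec = 2002
//
// i.e. the deadline lands exactly 1.5 s after "now", and the final values
// satisfy the asserts above.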
5977 
5978 void Parker::park(bool isAbsolute, jlong time) {
5979   // Ideally we'd do something useful while spinning, such
5980   // as calling unpackTime().
5981 
5982   // Optional fast-path check:
5983   // Return immediately if a permit is available.
5984   // We depend on Atomic::xchg() having full barrier semantics
5985   // since we are doing a lock-free update to _counter.
5986   if (Atomic::xchg(0, &_counter) > 0) return;
5987 
5988   // Optional fast-exit: Check interrupt before trying to wait
5989   Thread* thread = Thread::current();
5990   assert(thread->is_Java_thread(), "Must be JavaThread");
5991   JavaThread *jt = (JavaThread *)thread;
5992   if (Thread::is_interrupted(thread, false)) {
5993     return;
5994   }
5995 
5996   // First, demultiplex/decode time arguments
5997   timespec absTime;
5998   if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
5999     return;
6000   }
6001   if (time > 0) {
6002     // Warning: this code might be exposed to the old Solaris time
    // round-down bugs.  Grep "ROUNDINGFIX" for details.
6004     unpackTime(&absTime, isAbsolute, time);
6005   }
6006 
6007   // Enter safepoint region
6008   // Beware of deadlocks such as 6317397.
6009   // The per-thread Parker:: _mutex is a classic leaf-lock.
6010   // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex.  If safepoints are pending, both the
  // ThreadBlockInVM() CTOR and DTOR may grab the Threads_lock.
6013   ThreadBlockInVM tbivm(jt);
6014 
  // Don't wait if we cannot get the lock, since interference arises from
  // unblocking.  Also, check the interrupt state before trying to wait.
6017   if (Thread::is_interrupted(thread, false) ||
6018       os::Solaris::mutex_trylock(_mutex) != 0) {
6019     return;
6020   }
6021 
6022   int status ;
6023 
6024   if (_counter > 0)  { // no wait needed
6025     _counter = 0;
6026     status = os::Solaris::mutex_unlock(_mutex);
6027     assert (status == 0, "invariant") ;
6028     // Paranoia to ensure our locked and lock-free paths interact
6029     // correctly with each other and Java-level accesses.
6030     OrderAccess::fence();
6031     return;
6032   }
6033 
6034 #ifdef ASSERT
6035   // Don't catch signals while blocked; let the running threads have the signals.
6036   // (This allows a debugger to break into the running thread.)
6037   sigset_t oldsigs;
6038   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
6039   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
6040 #endif
6041 
6042   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
6043   jt->set_suspend_equivalent();
6044   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
6045 
6046   // Do this the hard way by blocking ...
6047   // See http://monaco.sfbay/detail.jsf?cr=5094058.
6048   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
6049   // Only for SPARC >= V8PlusA
6050 #if defined(__sparc) && defined(COMPILER2)
6051   if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
6052 #endif
6053 
6054   if (time == 0) {
6055     status = os::Solaris::cond_wait (_cond, _mutex) ;
6056   } else {
6057     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
6058   }
6059   // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of Solaris.
6061   assert_status(status == 0 || status == EINTR ||
6062                 status == ETIME || status == ETIMEDOUT,
6063                 status, "cond_timedwait");
6064 
6065 #ifdef ASSERT
6066   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
6067 #endif
6068   _counter = 0 ;
6069   status = os::Solaris::mutex_unlock(_mutex);
6070   assert_status(status == 0, status, "mutex_unlock") ;
6071   // Paranoia to ensure our locked and lock-free paths interact
6072   // correctly with each other and Java-level accesses.
6073   OrderAccess::fence();
6074 
6075   // If externally suspended while waiting, re-suspend
6076   if (jt->handle_special_suspend_equivalent_condition()) {
6077     jt->java_suspend_self();
6078   }
6079 }
6080 
6081 void Parker::unpark() {
6082   int s, status ;
6083   status = os::Solaris::mutex_lock (_mutex) ;
6084   assert (status == 0, "invariant") ;
6085   s = _counter;
6086   _counter = 1;
6087   status = os::Solaris::mutex_unlock (_mutex) ;
6088   assert (status == 0, "invariant") ;
6089 
6090   if (s < 1) {
6091     status = os::Solaris::cond_signal (_cond) ;
6092     assert (status == 0, "invariant") ;
6093   }
6094 }
6095 
6096 extern char** environ;
6097 
6098 // Run the specified command in a separate process. Return its exit value,
6099 // or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from a signal handler. It
6101 // doesn't block SIGINT et al.
6102 int os::fork_and_exec(char* cmd) {
6103   char * argv[4];
6104   argv[0] = (char *)"sh";
6105   argv[1] = (char *)"-c";
6106   argv[2] = cmd;
6107   argv[3] = NULL;
6108 
  // fork() is async-signal-safe; fork1() is not, so it can't be used in a signal handler
6110   pid_t pid;
6111   Thread* t = ThreadLocalStorage::get_thread_slow();
6112   if (t != NULL && t->is_inside_signal_handler()) {
6113     pid = fork();
6114   } else {
6115     pid = fork1();
6116   }
6117 
6118   if (pid < 0) {
6119     // fork failed
6120     warning("fork failed: %s", strerror(errno));
6121     return -1;
6122 
6123   } else if (pid == 0) {
6124     // child process
6125 
6126     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
6127     execve("/usr/bin/sh", argv, environ);
6128 
6129     // execve failed
6130     _exit(-1);
6131 
6132   } else  {
6133     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
6134     // care about the actual exit code, for now.
6135 
6136     int status;
6137 
6138     // Wait for the child process to exit.  This returns immediately if
    // the child has already exited.
6140     while (waitpid(pid, &status, 0) < 0) {
6141         switch (errno) {
6142         case ECHILD: return 0;
6143         case EINTR: break;
6144         default: return -1;
6145         }
6146     }
6147 
6148     if (WIFEXITED(status)) {
6149        // The child exited normally; get its exit code.
6150        return WEXITSTATUS(status);
6151     } else if (WIFSIGNALED(status)) {
6152        // The child exited because of a signal
6153        // The best value to return is 0x80 + signal number,
6154        // because that is what all Unix shells do, and because
6155        // it allows callers to distinguish between process exit and
6156        // process death by signal.
6157        return 0x80 + WTERMSIG(status);
6158     } else {
6159        // Unknown exit code; pass it through
6160        return status;
6161     }
6162   }
6163 }
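
// Illustrative use of os::fork_and_exec() (sketch only; the command string
// is hypothetical and this call site does not exist in this file):
//
//   char cmd[] = "ulimit -c";
//   int st = os::fork_and_exec(cmd);
//   if (st < 0) {
//     // could not fork or exec the shell
//   } else if (st >= 0x80) {
//     // the command was killed by signal (st - 0x80)
//   } else {
//     // normal exit with code st
//   }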
6164 
6165 // is_headless_jre()
6166 //
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
// in order to report whether we are running in a headless JRE.
//
// Since JDK 8, xawt/libmawt.so has been moved into the same directory
// as libawt.so and renamed libawt_xawt.so.
6172 //
6173 bool os::is_headless_jre() {
6174     struct stat statbuf;
6175     char buf[MAXPATHLEN];
6176     char libmawtpath[MAXPATHLEN];
6177     const char *xawtstr  = "/xawt/libmawt.so";
6178     const char *new_xawtstr = "/libawt_xawt.so";
6179     char *p;
6180 
6181     // Get path to libjvm.so
6182     os::jvm_path(buf, sizeof(buf));
6183 
6184     // Get rid of libjvm.so
6185     p = strrchr(buf, '/');
6186     if (p == NULL) return false;
6187     else *p = '\0';
6188 
6189     // Get rid of client or server
6190     p = strrchr(buf, '/');
6191     if (p == NULL) return false;
6192     else *p = '\0';
6193 
6194     // check xawt/libmawt.so
6195     strcpy(libmawtpath, buf);
6196     strcat(libmawtpath, xawtstr);
6197     if (::stat(libmawtpath, &statbuf) == 0) return false;
6198 
6199     // check libawt_xawt.so
6200     strcpy(libmawtpath, buf);
6201     strcat(libmawtpath, new_xawtstr);
6202     if (::stat(libmawtpath, &statbuf) == 0) return false;
6203 
6204     return true;
6205 }
6206 
6207 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
6208   INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted);
6209 }
6210 
6211 int os::close(int fd) {
6212   return ::close(fd);
6213 }
6214 
6215 int os::socket_close(int fd) {
6216   return ::close(fd);
6217 }
6218 
6219 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
6220   INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
6221 }
6222 
6223 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
6224   INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
6225 }
6226 
6227 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
6228   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
6229 }
6230 
6231 // As both poll and select can be interrupted by signals, we have to be
6232 // prepared to restart the system call after updating the timeout, unless
6233 // a poll() is done with timeout == -1, in which case we repeat with this
6234 // "wait forever" value.
6235 
6236 int os::timeout(int fd, long timeout) {
6237   int res;
6238   struct timeval t;
6239   julong prevtime, newtime;
6240   static const char* aNull = 0;
6241   struct pollfd pfd;
6242   pfd.fd = fd;
6243   pfd.events = POLLIN;
6244 
6245   gettimeofday(&t, &aNull);
6246   prevtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;
6247 
6248   for(;;) {
6249     INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
6250     if(res == OS_ERR && errno == EINTR) {
6251         if(timeout != -1) {
6252           gettimeofday(&t, &aNull);
6253           newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec /1000;
6254           timeout -= newtime - prevtime;
6255           if(timeout <= 0)
6256             return OS_OK;
6257           prevtime = newtime;
6258         }
6259     } else return res;
6260   }
6261 }
6262 
6263 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
6264   int _result;
6265   INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\
6266                           os::Solaris::clear_interrupted);
6267 
6268   // Depending on when thread interruption is reset, _result could be
6269   // one of two values when errno == EINTR
6270 
6271   if (((_result == OS_INTRPT) || (_result == OS_ERR))
6272       && (errno == EINTR)) {
6273      /* restarting a connect() changes its errno semantics */
6274      INTERRUPTIBLE(::connect(fd, him, len), _result,\
6275                    os::Solaris::clear_interrupted);
6276      /* undo these changes */
6277      if (_result == OS_ERR) {
6278        if (errno == EALREADY) {
6279          errno = EINPROGRESS; /* fall through */
6280        } else if (errno == EISCONN) {
6281          errno = 0;
6282          return OS_OK;
6283        }
6284      }
6285    }
6286    return _result;
6287  }
6288 
6289 int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
6290   if (fd < 0) {
6291     return OS_ERR;
6292   }
6293   INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\
6294                            os::Solaris::clear_interrupted);
6295 }
6296 
6297 int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
6298                  sockaddr* from, socklen_t* fromlen) {
6299   INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\
6300                            os::Solaris::clear_interrupted);
6301 }
6302 
6303 int os::sendto(int fd, char* buf, size_t len, uint flags,
6304                struct sockaddr* to, socklen_t tolen) {
6305   INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\
6306                            os::Solaris::clear_interrupted);
6307 }
6308 
6309 int os::socket_available(int fd, jint *pbytes) {
6310   if (fd < 0) {
6311     return OS_OK;
6312   }
6313   int ret;
6314   RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
  // Note: ioctl can return 0 when successful; JVM_SocketAvailable
  // is expected to return 0 on failure and 1 on success to the JDK.
6317   return (ret == OS_ERR) ? 0 : 1;
6318 }
6319 
6320 int os::bind(int fd, struct sockaddr* him, socklen_t len) {
6321    INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
6322                                       os::Solaris::clear_interrupted);
6323 }
6324 
6325 // Get the default path to the core file
6326 // Returns the length of the string
6327 int os::get_core_path(char* buffer, size_t bufferSize) {
6328   const char* p = get_current_directory(buffer, bufferSize);
6329 
6330   if (p == NULL) {
6331     assert(p != NULL, "failed to get current directory");
6332     return 0;
6333   }
6334 
6335   return strlen(buffer);
6336 }
6337 
6338 #ifndef PRODUCT
6339 void TestReserveMemorySpecial_test() {
6340   // No tests available for this platform
6341 }
6342 #endif