1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "jvm_solaris.h"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/filemap.hpp"
  37 #include "mutex_solaris.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "os_share_solaris.hpp"
  40 #include "prims/jniFastGetField.hpp"
  41 #include "prims/jvm.h"
  42 #include "prims/jvm_misc.hpp"
  43 #include "runtime/arguments.hpp"
  44 #include "runtime/atomic.inline.hpp"
  45 #include "runtime/extendedPC.hpp"
  46 #include "runtime/globals.hpp"
  47 #include "runtime/interfaceSupport.hpp"
  48 #include "runtime/java.hpp"
  49 #include "runtime/javaCalls.hpp"
  50 #include "runtime/mutexLocker.hpp"
  51 #include "runtime/objectMonitor.hpp"
  52 #include "runtime/orderAccess.inline.hpp"
  53 #include "runtime/osThread.hpp"
  54 #include "runtime/perfMemory.hpp"
  55 #include "runtime/sharedRuntime.hpp"
  56 #include "runtime/statSampler.hpp"
  57 #include "runtime/stubRoutines.hpp"
  58 #include "runtime/thread.inline.hpp"
  59 #include "runtime/threadCritical.hpp"
  60 #include "runtime/timer.hpp"
  61 #include "services/attachListener.hpp"
  62 #include "services/memTracker.hpp"
  63 #include "services/runtimeService.hpp"
  64 #include "utilities/decoder.hpp"
  65 #include "utilities/defaultStream.hpp"
  66 #include "utilities/events.hpp"
  67 #include "utilities/growableArray.hpp"
  68 #include "utilities/vmError.hpp"
  69 
  70 // put OS-includes here
  71 # include <dlfcn.h>
  72 # include <errno.h>
  73 # include <exception>
  74 # include <link.h>
  75 # include <poll.h>
  76 # include <pthread.h>
  77 # include <pwd.h>
  78 # include <schedctl.h>
  79 # include <setjmp.h>
  80 # include <signal.h>
  81 # include <stdio.h>
  82 # include <alloca.h>
  83 # include <sys/filio.h>
  84 # include <sys/ipc.h>
  85 # include <sys/lwp.h>
  86 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
  87 # include <sys/mman.h>
  88 # include <sys/processor.h>
  89 # include <sys/procset.h>
  90 # include <sys/pset.h>
  91 # include <sys/resource.h>
  92 # include <sys/shm.h>
  93 # include <sys/socket.h>
  94 # include <sys/stat.h>
  95 # include <sys/systeminfo.h>
  96 # include <sys/time.h>
  97 # include <sys/times.h>
  98 # include <sys/types.h>
  99 # include <sys/wait.h>
 100 # include <sys/utsname.h>
 101 # include <thread.h>
 102 # include <unistd.h>
 103 # include <sys/priocntl.h>
 104 # include <sys/rtpriocntl.h>
 105 # include <sys/tspriocntl.h>
 106 # include <sys/iapriocntl.h>
 107 # include <sys/fxpriocntl.h>
 108 # include <sys/loadavg.h>
 109 # include <string.h>
 110 # include <stdio.h>
 111 
 112 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
 113 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
 114 
 115 #define MAX_PATH (2 * K)
 116 
 117 // for timer info max values which include all bits
 118 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 119 
 120 
// Some liblgrp types from sys/lgrp_user.h, defined here so that this file
// still compiles on older systems that do not have that header file.
 123 
 124 #ifndef MADV_ACCESS_LWP
 125 # define  MADV_ACCESS_LWP         7       /* next LWP to access heavily */
 126 #endif
 127 #ifndef MADV_ACCESS_MANY
 128 # define  MADV_ACCESS_MANY        8       /* many processes to access heavily */
 129 #endif
 130 
 131 #ifndef LGRP_RSRC_CPU
 132 # define LGRP_RSRC_CPU           0       /* CPU resources */
 133 #endif
 134 #ifndef LGRP_RSRC_MEM
 135 # define LGRP_RSRC_MEM           1       /* memory resources */
 136 #endif
 137 
 138 // see thr_setprio(3T) for the basis of these numbers
 139 #define MinimumPriority 0
 140 #define NormalPriority  64
 141 #define MaximumPriority 127
 142 
 143 // Values for ThreadPriorityPolicy == 1
 144 int prio_policy1[CriticalPriority+1] = {
 145   -99999,  0, 16,  32,  48,  64,
 146           80, 96, 112, 124, 127, 127 };
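// A worked example of the table above, assuming the usual indexing by Java
// priority: NormPriority (5) maps to Solaris priority 64 (NormalPriority) and
// MaxPriority (10) to 127 (MaximumPriority); entry 0 (-99999) corresponds to
// the unused Java priority 0.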
 147 
 148 // System parameters used internally
 149 static clock_t clock_tics_per_sec = 100;
 150 
 151 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
 152 static bool enabled_extended_FILE_stdio = false;
 153 
// For diagnostics: print a message only once; see run_periodic_checks
 155 static bool check_addr0_done = false;
 156 static sigset_t check_signal_done;
 157 static bool check_signals = true;
 158 
 159 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
 160 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
 161 
 162 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
 163 
 164 
 165 // "default" initializers for missing libc APIs
 166 extern "C" {
 167   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 168   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
 169 
 170   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 171   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
 172 }
 173 
 174 // "default" initializers for pthread-based synchronization
 175 extern "C" {
 176   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 177   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 178 }
 179 
 180 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 181 
 182 // Thread Local Storage
 183 // This is common to all Solaris platforms so it is defined here,
 184 // in this common file.
 185 // The declarations are in the os_cpu threadLS*.hpp files.
 186 //
 187 // Static member initialization for TLS
 188 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
 189 
 190 #ifndef PRODUCT
 191 #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
 192 
 193 int ThreadLocalStorage::_tcacheHit = 0;
 194 int ThreadLocalStorage::_tcacheMiss = 0;
 195 
 196 void ThreadLocalStorage::print_statistics() {
 197   int total = _tcacheMiss+_tcacheHit;
 198   tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
 199                 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
 200 }
 201 #undef _PCT
 202 #endif // PRODUCT
 203 
 204 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
 205                                                         int index) {
 206   Thread *thread = get_thread_slow();
 207   if (thread != NULL) {
 208     address sp = os::current_stack_pointer();
 209     guarantee(thread->_stack_base == NULL ||
 210               (sp <= thread->_stack_base &&
 211                  sp >= thread->_stack_base - thread->_stack_size) ||
 212                is_error_reported(),
 213               "sp must be inside of selected thread stack");
 214 
 215     thread->set_self_raw_id(raw_id);  // mark for quick retrieval
 216     _get_thread_cache[ index ] = thread;
 217   }
 218   return thread;
 219 }
 220 
 221 
 222 static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
 223 #define NO_CACHED_THREAD ((Thread*)all_zero)
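// Note: all_zero is sized as (sizeof(Thread) / sizeof(double) + 1) doubles, so
// it spans at least sizeof(Thread) bytes of zeros. NO_CACHED_THREAD therefore
// points at a Thread-sized block of zeroed memory -- non-NULL, yet distinct
// from any live Thread* -- which is presumably why it is used as the "empty
// slot" sentinel instead of NULL.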
 224 
 225 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
 226 
 227   // Store the new value before updating the cache to prevent a race
 228   // between get_thread_via_cache_slowly() and this store operation.
 229   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
 230 
 231   // Update thread cache with new thread if setting on thread create,
 232   // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
 233   uintptr_t raw = pd_raw_thread_id();
 234   int ix = pd_cache_index(raw);
 235   _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
 236 }
 237 
 238 void ThreadLocalStorage::pd_init() {
 239   for (int i = 0; i < _pd_cache_size; i++) {
 240     _get_thread_cache[i] = NO_CACHED_THREAD;
 241   }
 242 }
 243 
 244 // Invalidate all the caches (happens to be the same as pd_init).
 245 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
 246 
 247 #undef NO_CACHED_THREAD
 248 
 249 // END Thread Local Storage
 250 
 251 static inline size_t adjust_stack_size(address base, size_t size) {
 252   if ((ssize_t)size < 0) {
 253     // 4759953: Compensate for ridiculous stack size.
 254     size = max_intx;
 255   }
 256   if (size > (size_t)base) {
 257     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
 258     size = (size_t)base;
 259   }
 260   return size;
 261 }
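// A concrete instance of the 4812466 clamp above: with base == 0x00100000 and
// a bogus reported size larger than that, size is clamped to 0x00100000, so
// (base - size) bottoms out at address 0 instead of wrapping around the
// address space.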
 262 
 263 static inline stack_t get_stack_info() {
 264   stack_t st;
 265   int retval = thr_stksegment(&st);
 266   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
 267   assert(retval == 0, "incorrect return value from thr_stksegment");
 268   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
 269   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
 270   return st;
 271 }
 272 
 273 address os::current_stack_base() {
 274   int r = thr_main() ;
 275   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
 276   bool is_primordial_thread = r;
 277 
 278   // Workaround 4352906, avoid calls to thr_stksegment by
 279   // thr_main after the first one (it looks like we trash
 280   // some data, causing the value for ss_sp to be incorrect).
 281   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
 282     stack_t st = get_stack_info();
 283     if (is_primordial_thread) {
 284       // cache initial value of stack base
 285       os::Solaris::_main_stack_base = (address)st.ss_sp;
 286     }
 287     return (address)st.ss_sp;
 288   } else {
 289     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
 290     return os::Solaris::_main_stack_base;
 291   }
 292 }
 293 
 294 size_t os::current_stack_size() {
 295   size_t size;
 296 
 297   int r = thr_main() ;
 298   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
 299   if(!r) {
 300     size = get_stack_info().ss_size;
 301   } else {
 302     struct rlimit limits;
 303     getrlimit(RLIMIT_STACK, &limits);
 304     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
 305   }
 306   // base may not be page aligned
 307   address base = current_stack_base();
  address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
 309   return (size_t)(base - bottom);
 310 }
 311 
 312 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
 313   return localtime_r(clock, res);
 314 }
 315 
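// Note: enable_extended_FILE_stdio() exists only on Solaris 10u4 and later
// (see the tracking flag above), so it is looked up with dlsym() rather than
// linked directly; on older releases dlsym() returns NULL and the call is
// simply skipped.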
 316 void os::Solaris::try_enable_extended_io() {
 317   typedef int (*enable_extended_FILE_stdio_t)(int, int);
 318 
 319   if (!UseExtendedFileIO) {
 320     return;
 321   }
 322 
 323   enable_extended_FILE_stdio_t enabler =
 324     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
 325                                          "enable_extended_FILE_stdio");
 326   if (enabler) {
 327     enabler(-1, -1);
 328   }
 329 }
 330 
 331 static int _processors_online = 0;
 332 
 333          jint os::Solaris::_os_thread_limit = 0;
 334 volatile jint os::Solaris::_os_thread_count = 0;
 335 
 336 julong os::available_memory() {
 337   return Solaris::available_memory();
 338 }
 339 
 340 julong os::Solaris::available_memory() {
 341   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
 342 }
 343 
 344 julong os::Solaris::_physical_memory = 0;
 345 
 346 julong os::physical_memory() {
 347    return Solaris::physical_memory();
 348 }
 349 
 350 static hrtime_t first_hrtime = 0;
 351 static const hrtime_t hrtime_hz = 1000*1000*1000;
 352 static volatile hrtime_t max_hrtime = 0;
 353 
 354 
 355 void os::Solaris::initialize_system_info() {
 356   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
 357   _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
 358   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
 359 }
 360 
 361 int os::active_processor_count() {
 362   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 363   pid_t pid = getpid();
 364   psetid_t pset = PS_NONE;
 365   // Are we running in a processor set or is there any processor set around?
 366   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
 367     uint_t pset_cpus;
 368     // Query the number of cpus available to us.
 369     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
 370       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
 371       _processors_online = pset_cpus;
 372       return pset_cpus;
 373     }
 374   }
 375   // Otherwise return number of online cpus
 376   return online_cpus;
 377 }
 378 
 379 static bool find_processors_in_pset(psetid_t        pset,
 380                                     processorid_t** id_array,
 381                                     uint_t*         id_length) {
 382   bool result = false;
 383   // Find the number of processors in the processor set.
 384   if (pset_info(pset, NULL, id_length, NULL) == 0) {
 385     // Make up an array to hold their ids.
 386     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 387     // Fill in the array with their processor ids.
 388     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
 389       result = true;
 390     }
 391   }
 392   return result;
 393 }
 394 
 395 // Callers of find_processors_online() must tolerate imprecise results --
 396 // the system configuration can change asynchronously because of DR
 397 // or explicit psradm operations.
 398 //
 399 // We also need to take care that the loop (below) terminates as the
 400 // number of processors online can change between the _SC_NPROCESSORS_ONLN
 401 // request and the loop that builds the list of processor ids.   Unfortunately
 402 // there's no reliable way to determine the maximum valid processor id,
 403 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
 404 // man pages, which claim the processor id set is "sparse, but
 405 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
 406 // exit the loop.
 407 //
 408 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
 409 // not available on S8.0.
 410 
 411 static bool find_processors_online(processorid_t** id_array,
 412                                    uint*           id_length) {
 413   const processorid_t MAX_PROCESSOR_ID = 100000 ;
 414   // Find the number of processors online.
 415   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
 416   // Make up an array to hold their ids.
 417   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 418   // Processors need not be numbered consecutively.
 419   long found = 0;
 420   processorid_t next = 0;
 421   while (found < *id_length && next < MAX_PROCESSOR_ID) {
 422     processor_info_t info;
 423     if (processor_info(next, &info) == 0) {
 424       // NB, PI_NOINTR processors are effectively online ...
 425       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
 426         (*id_array)[found] = next;
 427         found += 1;
 428       }
 429     }
 430     next += 1;
 431   }
 432   if (found < *id_length) {
 433       // The loop above didn't identify the expected number of processors.
 434       // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
 435       // and re-running the loop, above, but there's no guarantee of progress
 436       // if the system configuration is in flux.  Instead, we just return what
 437       // we've got.  Note that in the worst case find_processors_online() could
 438       // return an empty set.  (As a fall-back in the case of the empty set we
 439       // could just return the ID of the current processor).
 440       *id_length = found ;
 441   }
 442 
 443   return true;
 444 }
 445 
 446 static bool assign_distribution(processorid_t* id_array,
 447                                 uint           id_length,
 448                                 uint*          distribution,
 449                                 uint           distribution_length) {
 450   // We assume we can assign processorid_t's to uint's.
 451   assert(sizeof(processorid_t) == sizeof(uint),
 452          "can't convert processorid_t to uint");
 453   // Quick check to see if we won't succeed.
 454   if (id_length < distribution_length) {
 455     return false;
 456   }
 457   // Assign processor ids to the distribution.
 458   // Try to shuffle processors to distribute work across boards,
 459   // assuming 4 processors per board.
 460   const uint processors_per_board = ProcessDistributionStride;
 461   // Find the maximum processor id.
 462   processorid_t max_id = 0;
 463   for (uint m = 0; m < id_length; m += 1) {
 464     max_id = MAX2(max_id, id_array[m]);
 465   }
 466   // The next id, to limit loops.
 467   const processorid_t limit_id = max_id + 1;
 468   // Make up markers for available processors.
 469   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
 470   for (uint c = 0; c < limit_id; c += 1) {
 471     available_id[c] = false;
 472   }
 473   for (uint a = 0; a < id_length; a += 1) {
 474     available_id[id_array[a]] = true;
 475   }
 476   // Step by "boards", then by "slot", copying to "assigned".
 477   // NEEDS_CLEANUP: The assignment of processors should be stateful,
 478   //                remembering which processors have been assigned by
 479   //                previous calls, etc., so as to distribute several
  //                independent calls of this method.  It would be nice to
  //                have an API that lets us ask how many processes are
  //                bound to a processor, but we don't have that, either.
 484   //                In the short term, "board" is static so that
 485   //                subsequent distributions don't all start at board 0.
 486   static uint board = 0;
 487   uint assigned = 0;
 488   // Until we've found enough processors ....
 489   while (assigned < distribution_length) {
 490     // ... find the next available processor in the board.
 491     for (uint slot = 0; slot < processors_per_board; slot += 1) {
 492       uint try_id = board * processors_per_board + slot;
 493       if ((try_id < limit_id) && (available_id[try_id] == true)) {
 494         distribution[assigned] = try_id;
 495         available_id[try_id] = false;
 496         assigned += 1;
 497         break;
 498       }
 499     }
 500     board += 1;
 501     if (board * processors_per_board + 0 >= limit_id) {
 502       board = 0;
 503     }
 504   }
 505   if (available_id != NULL) {
 506     FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
 507   }
 508   return true;
 509 }
 510 
 511 void os::set_native_thread_name(const char *name) {
 512   // Not yet implemented.
 513   return;
 514 }
 515 
 516 bool os::distribute_processes(uint length, uint* distribution) {
 517   bool result = false;
 518   // Find the processor id's of all the available CPUs.
 519   processorid_t* id_array  = NULL;
 520   uint           id_length = 0;
 521   // There are some races between querying information and using it,
 522   // since processor sets can change dynamically.
 523   psetid_t pset = PS_NONE;
 524   // Are we running in a processor set?
 525   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
 526     result = find_processors_in_pset(pset, &id_array, &id_length);
 527   } else {
 528     result = find_processors_online(&id_array, &id_length);
 529   }
 530   if (result == true) {
 531     if (id_length >= length) {
 532       result = assign_distribution(id_array, id_length, distribution, length);
 533     } else {
 534       result = false;
 535     }
 536   }
 537   if (id_array != NULL) {
 538     FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
 539   }
 540   return result;
 541 }
 542 
 543 bool os::bind_to_processor(uint processor_id) {
 544   // We assume that a processorid_t can be stored in a uint.
 545   assert(sizeof(uint) == sizeof(processorid_t),
 546          "can't convert uint to processorid_t");
 547   int bind_result =
 548     processor_bind(P_LWPID,                       // bind LWP.
 549                    P_MYID,                        // bind current LWP.
 550                    (processorid_t) processor_id,  // id.
 551                    NULL);                         // don't return old binding.
 552   return (bind_result == 0);
 553 }
 554 
 555 bool os::getenv(const char* name, char* buffer, int len) {
 556   char* val = ::getenv( name );
 557   if ( val == NULL
 558   ||   strlen(val) + 1  >  len ) {
 559     if (len > 0)  buffer[0] = 0; // return a null string
 560     return false;
 561   }
 562   strcpy( buffer, val );
 563   return true;
 564 }
 565 
 566 
// Return true if the process is running with special privileges, i.e. its
// effective user or group id differs from the real one (for example, a
// set-uid or set-gid installation).
 568 
 569 bool os::have_special_privileges() {
 570   static bool init = false;
 571   static bool privileges = false;
 572   if (!init) {
 573     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 574     init = true;
 575   }
 576   return privileges;
 577 }
 578 
 579 
 580 void os::init_system_properties_values() {
 581   // The next steps are taken in the product version:
 582   //
 583   // Obtain the JAVA_HOME value from the location of libjvm.so.
 584   // This library should be located at:
 585   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
 586   //
 587   // If "/jre/lib/" appears at the right place in the path, then we
 588   // assume libjvm.so is installed in a JDK and we use this path.
 589   //
 590   // Otherwise exit with message: "Could not create the Java virtual machine."
 591   //
 592   // The following extra steps are taken in the debugging version:
 593   //
  // If "/jre/lib/" does NOT appear at the right place in the path,
  // then instead of exiting, check for the $JAVA_HOME environment variable.
 596   //
 597   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
 598   // then we append a fake suffix "hotspot/libjvm.so" to this path so
 599   // it looks like libjvm.so is installed there
 600   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
 601   //
 602   // Otherwise exit.
 603   //
 604   // Important note: if the location of libjvm.so changes this
 605   // code needs to be changed accordingly.
 606 
 607 // Base path of extensions installed on the system.
 608 #define SYS_EXT_DIR     "/usr/jdk/packages"
 609 #define EXTENSIONS_DIR  "/lib/ext"
 610 #define ENDORSED_DIR    "/lib/endorsed"
 611 
 612   char cpu_arch[12];
 613   // Buffer that fits several sprintfs.
 614   // Note that the space for the colon and the trailing null are provided
 615   // by the nulls included by the sizeof operator.
 616   const size_t bufsize =
 617     MAX4((size_t)MAXPATHLEN,  // For dll_dir & friends.
         sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(cpu_arch), // invariant ld_library_path; cpu_arch is not filled in yet
 619          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
 620          (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
 621   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 622 
 623   // sysclasspath, java_home, dll_dir
 624   {
 625     char *pslash;
 626     os::jvm_path(buf, bufsize);
 627 
 628     // Found the full path to libjvm.so.
 629     // Now cut the path to <java_home>/jre if we can.
 630     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 631     pslash = strrchr(buf, '/');
 632     if (pslash != NULL) {
 633       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 634     }
 635     Arguments::set_dll_dir(buf);
 636 
 637     if (pslash != NULL) {
 638       pslash = strrchr(buf, '/');
 639       if (pslash != NULL) {
 640         *pslash = '\0';          // Get rid of /<arch>.
 641         pslash = strrchr(buf, '/');
 642         if (pslash != NULL) {
 643           *pslash = '\0';        // Get rid of /lib.
 644         }
 645       }
 646     }
 647     Arguments::set_java_home(buf);
 648     set_boot_path('/', ':');
 649   }
 650 
 651   // Where to look for native libraries.
 652   {
 653     // Use dlinfo() to determine the correct java.library.path.
 654     //
 655     // If we're launched by the Java launcher, and the user
 656     // does not set java.library.path explicitly on the commandline,
 657     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
 658     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
 659     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
 660     // /usr/lib), which is exactly what we want.
 661     //
 662     // If the user does set java.library.path, it completely
 663     // overwrites this setting, and always has.
 664     //
 665     // If we're not launched by the Java launcher, we may
 666     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
 667     // settings.  Again, dlinfo does exactly what we want.
 668 
 669     Dl_serinfo     info_sz, *info = &info_sz;
 670     Dl_serpath     *path;
 671     char           *library_path;
 672     char           *common_path = buf;
 673 
 674     // Determine search path count and required buffer size.
 675     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
 676       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
 677       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
 678     }
 679 
 680     // Allocate new buffer and initialize.
 681     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
 682     info->dls_size = info_sz.dls_size;
 683     info->dls_cnt = info_sz.dls_cnt;
 684 
 685     // Obtain search path information.
 686     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
 687       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
 688       FREE_C_HEAP_ARRAY(char, info, mtInternal);
 689       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
 690     }
 691 
 692     path = &info->dls_serpath[0];
 693 
 694     // Note: Due to a legacy implementation, most of the library path
    // is set in the launcher. This was to accommodate linking restrictions
 696     // on legacy Solaris implementations (which are no longer supported).
 697     // Eventually, all the library path setting will be done here.
 698     //
 699     // However, to prevent the proliferation of improperly built native
 700     // libraries, the new path component /usr/jdk/packages is added here.
 701 
 702     // Determine the actual CPU architecture.
 703     sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
 704 #ifdef _LP64
 705     // If we are a 64-bit vm, perform the following translations:
 706     //   sparc   -> sparcv9
 707     //   i386    -> amd64
 708     if (strcmp(cpu_arch, "sparc") == 0) {
 709       strcat(cpu_arch, "v9");
 710     } else if (strcmp(cpu_arch, "i386") == 0) {
 711       strcpy(cpu_arch, "amd64");
 712     }
 713 #endif
 714 
 715     // Construct the invariant part of ld_library_path.
 716     sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
 717 
 718     // Struct size is more than sufficient for the path components obtained
 719     // through the dlinfo() call, so only add additional space for the path
 720     // components explicitly added here.
 721     size_t library_path_size = info->dls_size + strlen(common_path);
 722     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
 723     library_path[0] = '\0';
 724 
 725     // Construct the desired Java library path from the linker's library
 726     // search path.
 727     //
 728     // For compatibility, it is optimal that we insert the additional path
 729     // components specific to the Java VM after those components specified
 730     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
 731     // infrastructure.
 732     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
 733       strcpy(library_path, common_path);
 734     } else {
 735       int inserted = 0;
 736       int i;
 737       for (i = 0; i < info->dls_cnt; i++, path++) {
 738         uint_t flags = path->dls_flags & LA_SER_MASK;
 739         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
 740           strcat(library_path, common_path);
 741           strcat(library_path, os::path_separator());
 742           inserted = 1;
 743         }
 744         strcat(library_path, path->dls_name);
 745         strcat(library_path, os::path_separator());
 746       }
 747       // Eliminate trailing path separator.
 748       library_path[strlen(library_path)-1] = '\0';
 749     }
 750 
 751     // happens before argument parsing - can't use a trace flag
 752     // tty->print_raw("init_system_properties_values: native lib path: ");
 753     // tty->print_raw_cr(library_path);
 754 
 755     // Callee copies into its own buffer.
 756     Arguments::set_library_path(library_path);
 757 
 758     FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
 759     FREE_C_HEAP_ARRAY(char, info, mtInternal);
 760   }
 761 
 762   // Extensions directories.
 763   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
 764   Arguments::set_ext_dirs(buf);
 765 
 766   // Endorsed standards default directory.
 767   sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
 768   Arguments::set_endorsed_dirs(buf);
 769 
 770   FREE_C_HEAP_ARRAY(char, buf, mtInternal);
 771 
 772 #undef SYS_EXT_DIR
 773 #undef EXTENSIONS_DIR
 774 #undef ENDORSED_DIR
 775 }
 776 
 777 void os::breakpoint() {
 778   BREAKPOINT;
 779 }
 780 
 781 bool os::obsolete_option(const JavaVMOption *option)
 782 {
 783   if (!strncmp(option->optionString, "-Xt", 3)) {
 784     return true;
 785   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
 786     return true;
 787   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
 788     return true;
 789   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
 790     return true;
 791   }
 792   return false;
 793 }
 794 
 795 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
 796   address  stackStart  = (address)thread->stack_base();
 797   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
 798   if (sp < stackStart && sp >= stackEnd ) return true;
 799   return false;
 800 }
 801 
 802 extern "C" void breakpoint() {
 803   // use debugger to set breakpoint here
 804 }
 805 
 806 static thread_t main_thread;
 807 
 808 // Thread start routine for all new Java threads
 809 extern "C" void* java_start(void* thread_addr) {
 810   // Try to randomize the cache line index of hot stack frames.
 811   // This helps when threads of the same stack traces evict each other's
  // This helps when threads with the same stack traces evict each other's
 813   // from different JVM instances. The benefit is especially true for
 814   // processors with hyperthreading technology.
 815   static int counter = 0;
 816   int pid = os::current_process_id();
 817   alloca(((pid ^ counter++) & 7) * 128);
 818 
 819   int prio;
 820   Thread* thread = (Thread*)thread_addr;
 821   OSThread* osthr = thread->osthread();
 822 
 823   osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
 824   thread->_schedctl = (void *) schedctl_init () ;
 825 
 826   if (UseNUMA) {
 827     int lgrp_id = os::numa_get_group_id();
 828     if (lgrp_id != -1) {
 829       thread->set_lgrp_id(lgrp_id);
 830     }
 831   }
 832 
 833   // If the creator called set priority before we started,
 834   // we need to call set_native_priority now that we have an lwp.
 835   // We used to get the priority from thr_getprio (we called
 836   // thr_setprio way back in create_thread) and pass it to
 837   // set_native_priority, but Solaris scales the priority
 838   // in java_to_os_priority, so when we read it back here,
 839   // we pass trash to set_native_priority instead of what's
 840   // in java_to_os_priority. So we save the native priority
 841   // in the osThread and recall it here.
 842 
 843   if ( osthr->thread_id() != -1 ) {
 844     if ( UseThreadPriorities ) {
 845       int prio = osthr->native_priority();
 846       if (ThreadPriorityVerbose) {
 847         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
 848                       INTPTR_FORMAT ", setting priority: %d\n",
 849                       osthr->thread_id(), osthr->lwp_id(), prio);
 850       }
 851       os::set_native_priority(thread, prio);
 852     }
 853   } else if (ThreadPriorityVerbose) {
 854     warning("Can't set priority in _start routine, thread id hasn't been set\n");
 855   }
 856 
 857   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 858 
 859   // initialize signal mask for this thread
 860   os::Solaris::hotspot_sigmask(thread);
 861 
 862   thread->run();
 863 
 864   // One less thread is executing
 865   // When the VMThread gets here, the main thread may have already exited
 866   // which frees the CodeHeap containing the Atomic::dec code
 867   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 868     Atomic::dec(&os::Solaris::_os_thread_count);
 869   }
 870 
 871   if (UseDetachedThreads) {
 872     thr_exit(NULL);
 873     ShouldNotReachHere();
 874   }
 875   return NULL;
 876 }
 877 
 878 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
 879   // Allocate the OSThread object
 880   OSThread* osthread = new OSThread(NULL, NULL);
 881   if (osthread == NULL) return NULL;
 882 
 883   // Store info on the Solaris thread into the OSThread
 884   osthread->set_thread_id(thread_id);
 885   osthread->set_lwp_id(_lwp_self());
 886   thread->_schedctl = (void *) schedctl_init () ;
 887 
 888   if (UseNUMA) {
 889     int lgrp_id = os::numa_get_group_id();
 890     if (lgrp_id != -1) {
 891       thread->set_lgrp_id(lgrp_id);
 892     }
 893   }
 894 
 895   if ( ThreadPriorityVerbose ) {
 896     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
 897                   osthread->thread_id(), osthread->lwp_id() );
 898   }
 899 
 900   // Initial thread state is INITIALIZED, not SUSPENDED
 901   osthread->set_state(INITIALIZED);
 902 
 903   return osthread;
 904 }
 905 
 906 void os::Solaris::hotspot_sigmask(Thread* thread) {
 907 
 908   //Save caller's signal mask
 909   sigset_t sigmask;
 910   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
 911   OSThread *osthread = thread->osthread();
 912   osthread->set_caller_sigmask(sigmask);
 913 
 914   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
 915   if (!ReduceSignalUsage) {
 916     if (thread->is_VM_thread()) {
 917       // Only the VM thread handles BREAK_SIGNAL ...
 918       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
 919     } else {
 920       // ... all other threads block BREAK_SIGNAL
 921       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
 922       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
 923     }
 924   }
 925 }
 926 
 927 bool os::create_attached_thread(JavaThread* thread) {
 928 #ifdef ASSERT
 929   thread->verify_not_published();
 930 #endif
 931   OSThread* osthread = create_os_thread(thread, thr_self());
 932   if (osthread == NULL) {
 933      return false;
 934   }
 935 
 936   // Initial thread state is RUNNABLE
 937   osthread->set_state(RUNNABLE);
 938   thread->set_osthread(osthread);
 939 
 940   // initialize signal mask for this thread
 941   // and save the caller's signal mask
 942   os::Solaris::hotspot_sigmask(thread);
 943 
 944   return true;
 945 }
 946 
 947 bool os::create_main_thread(JavaThread* thread) {
 948 #ifdef ASSERT
 949   thread->verify_not_published();
 950 #endif
 951   if (_starting_thread == NULL) {
 952     _starting_thread = create_os_thread(thread, main_thread);
 953      if (_starting_thread == NULL) {
 954         return false;
 955      }
 956   }
 957 
  // The primordial thread is runnable from the start
 959   _starting_thread->set_state(RUNNABLE);
 960 
 961   thread->set_osthread(_starting_thread);
 962 
 963   // initialize signal mask for this thread
 964   // and save the caller's signal mask
 965   os::Solaris::hotspot_sigmask(thread);
 966 
 967   return true;
 968 }
 969 
 970 
 971 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 972   // Allocate the OSThread object
 973   OSThread* osthread = new OSThread(NULL, NULL);
 974   if (osthread == NULL) {
 975     return false;
 976   }
 977 
 978   if ( ThreadPriorityVerbose ) {
 979     char *thrtyp;
 980     switch ( thr_type ) {
 981       case vm_thread:
 982         thrtyp = (char *)"vm";
 983         break;
 984       case cgc_thread:
 985         thrtyp = (char *)"cgc";
 986         break;
 987       case pgc_thread:
 988         thrtyp = (char *)"pgc";
 989         break;
 990       case java_thread:
 991         thrtyp = (char *)"java";
 992         break;
 993       case compiler_thread:
 994         thrtyp = (char *)"compiler";
 995         break;
 996       case watcher_thread:
 997         thrtyp = (char *)"watcher";
 998         break;
 999       default:
1000         thrtyp = (char *)"unknown";
1001         break;
1002     }
1003     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
1004   }
1005 
1006   // Calculate stack size if it's not specified by caller.
1007   if (stack_size == 0) {
1008     // The default stack size 1M (2M for LP64).
1009     stack_size = (BytesPerWord >> 2) * K * K;
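    // (BytesPerWord >> 2) * K * K evaluates to 1*K*K on a 32-bit VM
    // (BytesPerWord == 4) and to 2*K*K under LP64 (BytesPerWord == 8),
    // matching the 1M/2M defaults noted above.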
1010 
1011     switch (thr_type) {
1012     case os::java_thread:
1013       // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
1014       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
1015       break;
1016     case os::compiler_thread:
1017       if (CompilerThreadStackSize > 0) {
1018         stack_size = (size_t)(CompilerThreadStackSize * K);
1019         break;
1020       } // else fall through:
1021         // use VMThreadStackSize if CompilerThreadStackSize is not defined
1022     case os::vm_thread:
1023     case os::pgc_thread:
1024     case os::cgc_thread:
1025     case os::watcher_thread:
1026       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1027       break;
1028     }
1029   }
1030   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1031 
1032   // Initial state is ALLOCATED but not INITIALIZED
1033   osthread->set_state(ALLOCATED);
1034 
1035   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We got lots of threads. Check if we still have some address space left.
    // We need at least 20Mb of unreserved address space, which we check by
    // trying to reserve some.
1039     const size_t VirtualMemoryBangSize = 20*K*K;
1040     char* mem = os::reserve_memory(VirtualMemoryBangSize);
1041     if (mem == NULL) {
1042       delete osthread;
1043       return false;
1044     } else {
1045       // Release the memory again
1046       os::release_memory(mem, VirtualMemoryBangSize);
1047     }
1048   }
1049 
1050   // Setup osthread because the child thread may need it.
1051   thread->set_osthread(osthread);
1052 
1053   // Create the Solaris thread
1054   thread_t tid = 0;
1055   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
1056   int      status;
1057 
  // Mark that we don't have an lwp or thread id yet, in case we attempt
  // to set the priority before the thread starts.
1060   osthread->set_lwp_id(-1);
1061   osthread->set_thread_id(-1);
1062 
1063   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1064   if (status != 0) {
1065     if (PrintMiscellaneous && (Verbose || WizardMode)) {
1066       perror("os::create_thread");
1067     }
1068     thread->set_osthread(NULL);
1069     // Need to clean up stuff we've allocated so far
1070     delete osthread;
1071     return false;
1072   }
1073 
1074   Atomic::inc(&os::Solaris::_os_thread_count);
1075 
1076   // Store info on the Solaris thread into the OSThread
1077   osthread->set_thread_id(tid);
1078 
1079   // Remember that we created this thread so we can set priority on it
1080   osthread->set_vm_created();
1081 
1082   // Initial thread state is INITIALIZED, not SUSPENDED
1083   osthread->set_state(INITIALIZED);
1084 
1085   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1086   return true;
1087 }
1088 
/* SIGJVM1 and SIGJVM2 are defined for Solaris 10 and later. Defining them here
 * allows builds on earlier versions of Solaris to take advantage of the newly
 * reserved Solaris JVM signals. With SIGJVM1 and SIGJVM2, INTERRUPT_SIGNAL is
 * SIGJVM1, ASYNC_SIGNAL is SIGJVM2, and -XX:+UseAltSigs does nothing since
 * these should have no conflict.
 */
1094 #if !defined(SIGJVM1)
1095 #define SIGJVM1 39
1096 #define SIGJVM2 40
1097 #endif
1098 
1099 debug_only(static bool signal_sets_initialized = false);
1100 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1101 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1102 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1103 
bool os::Solaris::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
                                 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  return ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN);
}
1114 
1115 // Note: SIGRTMIN is a macro that calls sysconf() so it will
1116 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime
1117 static bool isJVM1available() {
1118   return SIGJVM1 < SIGRTMIN;
1119 }
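// The intent appears to be: on releases where the JVM signals are genuinely
// reserved (Solaris 10 and later), SIGJVM1 lies below the real-time signal
// range and the check succeeds; on older releases the fallback value 39
// defined above need not, in which case signal_sets_init() falls back to the
// ALT_* or default interrupt/async signals.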
1120 
1121 void os::Solaris::signal_sets_init() {
1122   // Should also have an assertion stating we are still single-threaded.
1123   assert(!signal_sets_initialized, "Already initialized");
1124   // Fill in signals that are necessarily unblocked for all threads in
1125   // the VM. Currently, we unblock the following signals:
1126   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1127   //                         by -Xrs (=ReduceSignalUsage));
1128   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1129   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1130   // the dispositions or masks wrt these signals.
1131   // Programs embedding the VM that want to use the above signals for their
1132   // own purposes must, at this time, use the "-Xrs" option to prevent
1133   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1134   // (See bug 4345157, and other related bugs).
1135   // In reality, though, unblocking these signals is really a nop, since
1136   // these signals are not blocked by default.
1137   sigemptyset(&unblocked_sigs);
1138   sigemptyset(&allowdebug_blocked_sigs);
1139   sigaddset(&unblocked_sigs, SIGILL);
1140   sigaddset(&unblocked_sigs, SIGSEGV);
1141   sigaddset(&unblocked_sigs, SIGBUS);
1142   sigaddset(&unblocked_sigs, SIGFPE);
1143 
  if (isJVM1available()) {
1145     os::Solaris::set_SIGinterrupt(SIGJVM1);
1146     os::Solaris::set_SIGasync(SIGJVM2);
1147   } else if (UseAltSigs) {
1148     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1149     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1150   } else {
1151     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1152     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1153   }
1154 
1155   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1156   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1157 
1158   if (!ReduceSignalUsage) {
1159    if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1160       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1161       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1162    }
1163    if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1164       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1165       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1166    }
1167    if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1168       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1169       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1170    }
1171   }
1172   // Fill in signals that are blocked by all but the VM thread.
1173   sigemptyset(&vm_sigs);
1174   if (!ReduceSignalUsage)
1175     sigaddset(&vm_sigs, BREAK_SIGNAL);
1176   debug_only(signal_sets_initialized = true);
1177 
1178   // For diagnostics only used in run_periodic_checks
1179   sigemptyset(&check_signal_done);
1180 }
1181 
1182 // These are signals that are unblocked while a thread is running Java.
1183 // (For some reason, they get blocked by default.)
1184 sigset_t* os::Solaris::unblocked_signals() {
1185   assert(signal_sets_initialized, "Not initialized");
1186   return &unblocked_sigs;
1187 }
1188 
1189 // These are the signals that are blocked while a (non-VM) thread is
1190 // running Java. Only the VM thread handles these signals.
1191 sigset_t* os::Solaris::vm_signals() {
1192   assert(signal_sets_initialized, "Not initialized");
1193   return &vm_sigs;
1194 }
1195 
1196 // These are signals that are blocked during cond_wait to allow debugger in
1197 sigset_t* os::Solaris::allowdebug_blocked_signals() {
1198   assert(signal_sets_initialized, "Not initialized");
1199   return &allowdebug_blocked_sigs;
1200 }
1201 
1202 
1203 void _handle_uncaught_cxx_exception() {
1204   VMError err("An uncaught C++ exception");
1205   err.report_and_die();
1206 }
1207 
1208 
1209 // First crack at OS-specific initialization, from inside the new thread.
1210 void os::initialize_thread(Thread* thr) {
1211   int r = thr_main() ;
1212   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
1213   if (r) {
1214     JavaThread* jt = (JavaThread *)thr;
1215     assert(jt != NULL,"Sanity check");
1216     size_t stack_size;
1217     address base = jt->stack_base();
1218     if (Arguments::created_by_java_launcher()) {
1219       // Use 2MB to allow for Solaris 7 64 bit mode.
1220       stack_size = JavaThread::stack_size_at_create() == 0
1221         ? 2048*K : JavaThread::stack_size_at_create();
1222 
1223       // There are rare cases when we may have already used more than
1224       // the basic stack size allotment before this method is invoked.
1225       // Attempt to allow for a normally sized java_stack.
1226       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1227       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1228     } else {
1229       // 6269555: If we were not created by a Java launcher, i.e. if we are
1230       // running embedded in a native application, treat the primordial thread
1231       // as much like a native attached thread as possible.  This means using
1232       // the current stack size from thr_stksegment(), unless it is too large
1233       // to reliably setup guard pages.  A reasonable max size is 8MB.
1234       size_t current_size = current_stack_size();
1235       // This should never happen, but just in case....
1236       if (current_size == 0) current_size = 2 * K * K;
1237       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1238     }
    address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
1240     stack_size = (size_t)(base - bottom);
1241 
1242     assert(stack_size > 0, "Stack size calculation problem");
1243 
1244     if (stack_size > jt->stack_size()) {
1245       NOT_PRODUCT(
1246         struct rlimit limits;
1247         getrlimit(RLIMIT_STACK, &limits);
1248         size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1249         assert(size >= jt->stack_size(), "Stack size problem in main thread");
1250       )
1251       tty->print_cr(
1252         "Stack size of %d Kb exceeds current limit of %d Kb.\n"
1253         "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1254         "See limit(1) to increase the stack size limit.",
1255         stack_size / K, jt->stack_size() / K);
1256       vm_exit(1);
1257     }
1258     assert(jt->stack_size() >= stack_size,
1259           "Attempt to map more stack than was allocated");
1260     jt->set_stack_size(stack_size);
1261   }
1262 
1263   // With the T2 libthread (T1 is no longer supported) threads are always bound
1264   // and we use stackbanging in all cases.
1265 
1266   os::Solaris::init_thread_fpu_state();
1267   std::set_terminate(_handle_uncaught_cxx_exception);
1268 }
1269 
1270 
1271 
1272 // Free Solaris resources related to the OSThread
1273 void os::free_thread(OSThread* osthread) {
1274   assert(osthread != NULL, "os::free_thread but osthread not set");
1275 
1276 
1277   // We are told to free resources of the argument thread,
1278   // but we can only really operate on the current thread.
1279   // The main thread must take the VMThread down synchronously
1280   // before the main thread exits and frees up CodeHeap
1281   guarantee((Thread::current()->osthread() == osthread
1282      || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1283   if (Thread::current()->osthread() == osthread) {
1284     // Restore caller's signal mask
1285     sigset_t sigmask = osthread->caller_sigmask();
1286     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1287   }
1288   delete osthread;
1289 }
1290 
1291 void os::pd_start_thread(Thread* thread) {
1292   int status = thr_continue(thread->osthread()->thread_id());
1293   assert_status(status == 0, status, "thr_continue failed");
1294 }
1295 
1296 
1297 intx os::current_thread_id() {
1298   return (intx)thr_self();
1299 }
1300 
1301 static pid_t _initial_pid = 0;
1302 
1303 int os::current_process_id() {
1304   return (int)(_initial_pid ? _initial_pid : getpid());
1305 }
1306 
1307 int os::allocate_thread_local_storage() {
1308   // %%%       in Win32 this allocates a memory segment pointed to by a
1309   //           register.  Dan Stein can implement a similar feature in
1310   //           Solaris.  Alternatively, the VM can do the same thing
1311   //           explicitly: malloc some storage and keep the pointer in a
1312   //           register (which is part of the thread's context) (or keep it
1313   //           in TLS).
1314   // %%%       In current versions of Solaris, thr_self and TSD can
1315   //           be accessed via short sequences of displaced indirections.
1316   //           The value of thr_self is available as %g7(36).
1317   //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
1318   //           assuming that the current thread already has a value bound to k.
1319   //           It may be worth experimenting with such access patterns,
1320   //           and later having the parameters formally exported from a Solaris
1321   //           interface.  I think, however, that it will be faster to
1322   //           maintain the invariant that %g2 always contains the
1323   //           JavaThread in Java code, and have stubs simply
1324   //           treat %g2 as a caller-save register, preserving it in a %lN.
1325   thread_key_t tk;
1326   if (thr_keycreate( &tk, NULL ) )
1327     fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
1328                   "(%s)", strerror(errno)));
1329   return int(tk);
1330 }
1331 
1332 void os::free_thread_local_storage(int index) {
1333   // %%% don't think we need anything here
1334   // if ( pthread_key_delete((pthread_key_t) tk) )
1335   //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
1336 }
1337 
#define SMALLINT 32   // libthread's allocation for tsd_common is a version-specific
                      // small number; the point is that NO swap space is available
1340 void os::thread_local_storage_at_put(int index, void* value) {
1341   // %%% this is used only in threadLocalStorage.cpp
1342   if (thr_setspecific((thread_key_t)index, value)) {
1343     if (errno == ENOMEM) {
1344        vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
1345                              "thr_setspecific: out of swap space");
1346     } else {
1347       fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1348                     "(%s)", strerror(errno)));
1349     }
1350   } else {
1351       ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
1352   }
1353 }
1354 
1355 // This function could be called before TLS is initialized, for example, when
1356 // VM receives an async signal or when VM causes a fatal error during
1357 // initialization. Return NULL if thr_getspecific() fails.
1358 void* os::thread_local_storage_at(int index) {
1359   // %%% this is used only in threadLocalStorage.cpp
1360   void* r = NULL;
1361   return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1362 }
1363 
1364 
1365 // gethrtime() should be monotonic according to the documentation,
1366 // but some virtualized platforms are known to break this guarantee.
1367 // getTimeNanos() must be guaranteed not to move backwards, so we
1368 // are forced to add a check here.
1369 inline hrtime_t getTimeNanos() {
1370   const hrtime_t now = gethrtime();
1371   const hrtime_t prev = max_hrtime;
1372   if (now <= prev) {
1373     return prev;   // same or retrograde time;
1374   }
1375   const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1376   assert(obsv >= prev, "invariant");   // Monotonicity
1377   // If the CAS succeeded then we're done and return "now".
1378   // If the CAS failed and the observed value "obsv" is >= now then
1379   // we should return "obsv".  If the CAS failed and now > obsv > prv then
1380   // some other thread raced this thread and installed a new value, in which case
1381   // we could either (a) retry the entire operation, (b) retry trying to install now
1382   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1383   // we might discard a higher "now" value in deference to a slightly lower but freshly
1384   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1385   // to (a) or (b) -- and greatly reduces coherence traffic.
1386   // We might also condition (c) on the magnitude of the delta between obsv and now.
1387   // Avoiding excessive CAS operations to hot RW locations is critical.
1388   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1389   return (prev == obsv) ? now : obsv;
1390 }
1391 
1392 // Time since start-up in seconds to a fine granularity.
1393 // Used by VMSelfDestructTimer and the MemProfiler.
1394 double os::elapsedTime() {
1395   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1396 }
1397 
1398 jlong os::elapsed_counter() {
1399   return (jlong)(getTimeNanos() - first_hrtime);
1400 }
1401 
1402 jlong os::elapsed_frequency() {
1403    return hrtime_hz;
1404 }
1405 
1406 // Return the real, user, and system times in seconds from an
1407 // arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
1411   struct tms ticks;
1412   clock_t real_ticks = times(&ticks);
1413 
1414   if (real_ticks == (clock_t) (-1)) {
1415     return false;
1416   } else {
1417     double ticks_per_second = (double) clock_tics_per_sec;
1418     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1419     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1420     // For consistency return the real time from getTimeNanos()
1421     // converted to seconds.
1422     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1423 
1424     return true;
1425   }
1426 }
1427 
1428 bool os::supports_vtime() { return true; }
1429 
1430 bool os::enable_vtime() {
1431   int fd = ::open("/proc/self/ctl", O_WRONLY);
1432   if (fd == -1)
1433     return false;
1434 
1435   long cmd[] = { PCSET, PR_MSACCT };
1436   int res = ::write(fd, cmd, sizeof(long) * 2);
1437   ::close(fd);
1438   if (res != sizeof(long) * 2)
1439     return false;
1440 
1441   return true;
1442 }
1443 
1444 bool os::vtime_enabled() {
1445   int fd = ::open("/proc/self/status", O_RDONLY);
1446   if (fd == -1)
1447     return false;
1448 
1449   pstatus_t status;
1450   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1451   ::close(fd);
1452   if (res != sizeof(pstatus_t))
1453     return false;
1454 
1455   return status.pr_flags & PR_MSACCT;
1456 }
1457 
1458 double os::elapsedVTime() {
1459   return (double)gethrvtime() / (double)hrtime_hz;
1460 }
1461 
// Used internally for comparisons only.
// getTimeMillis is guaranteed not to move backwards on Solaris.
1464 jlong getTimeMillis() {
1465   jlong nanotime = getTimeNanos();
1466   return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1467 }
1468 
1469 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1470 jlong os::javaTimeMillis() {
1471   timeval t;
1472   if (gettimeofday( &t, NULL) == -1)
1473     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1474   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1475 }
1476 
1477 jlong os::javaTimeNanos() {
1478   return (jlong)getTimeNanos();
1479 }
1480 
1481 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1482   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1483   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1484   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1485   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1486 }
1487 
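// Format the current local time as "YYYY-MM-DD HH:MM:SS" into buf and return buf.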
1488 char * os::local_time_string(char *buf, size_t buflen) {
1489   struct tm t;
1490   time_t long_time;
1491   time(&long_time);
1492   localtime_r(&long_time, &t);
1493   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1494                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1495                t.tm_hour, t.tm_min, t.tm_sec);
1496   return buf;
1497 }
1498 
1499 // Note: os::shutdown() might be called very early during initialization, or
1500 // called from signal handler. Before adding something to os::shutdown(), make
1501 // sure it is async-safe and can handle partially initialized VM.
1502 void os::shutdown() {
1503 
1504   // allow PerfMemory to attempt cleanup of any persistent resources
1505   perfMemory_exit();
1506 
1507   // needs to remove object in file system
1508   AttachListener::abort();
1509 
1510   // flush buffered output, finish log files
1511   ostream_abort();
1512 
1513   // Check for abort hook
1514   abort_hook_t abort_hook = Arguments::abort_hook();
1515   if (abort_hook != NULL) {
1516     abort_hook();
1517   }
1518 }
1519 
1520 // Note: os::abort() might be called very early during initialization, or
1521 // called from signal handler. Before adding something to os::abort(), make
1522 // sure it is async-safe and can handle partially initialized VM.
1523 void os::abort(bool dump_core) {
1524   os::shutdown();
1525   if (dump_core) {
1526 #ifndef PRODUCT
1527     fdStream out(defaultStream::output_fd());
1528     out.print_raw("Current thread is ");
1529     char buf[16];
1530     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1531     out.print_raw_cr(buf);
1532     out.print_raw_cr("Dumping core ...");
1533 #endif
1534     ::abort(); // dump core (for debugging)
1535   }
1536 
1537   ::exit(1);
1538 }
1539 
1540 // Die immediately, no exit hook, no abort hook, no cleanup.
1541 void os::die() {
1542   ::abort(); // dump core (for debugging)
1543 }
1544 
1545 // unused
1546 void os::set_error_file(const char *logfile) {}
1547 
1548 // DLL functions
1549 
1550 const char* os::dll_file_extension() { return ".so"; }
1551 
// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
1554 const char* os::get_temp_directory() { return "/tmp"; }
1555 
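// Returns true if 'filename' is non-NULL, non-empty and os::stat() succeeds on it.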
1556 static bool file_exists(const char* filename) {
1557   struct stat statbuf;
1558   if (filename == NULL || strlen(filename) == 0) {
1559     return false;
1560   }
1561   return os::stat(filename, &statbuf) == 0;
1562 }
1563 
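// Build a platform-dependent library file name into 'buffer'.  With an empty
// 'pname' the result is simply "lib<fname>.so"; if 'pname' is a list of
// path-separated directories, each element is probed and the first existing
// "lib<fname>.so" wins; otherwise "<pname>/lib<fname>.so" is produced.
// Returns false if the buffer is too small or no candidate on the list exists.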
1564 bool os::dll_build_name(char* buffer, size_t buflen,
1565                         const char* pname, const char* fname) {
1566   bool retval = false;
1567   const size_t pnamelen = pname ? strlen(pname) : 0;
1568 
1569   // Return error on buffer overflow.
1570   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1571     return retval;
1572   }
1573 
1574   if (pnamelen == 0) {
1575     snprintf(buffer, buflen, "lib%s.so", fname);
1576     retval = true;
1577   } else if (strchr(pname, *os::path_separator()) != NULL) {
1578     int n;
1579     char** pelements = split_path(pname, &n);
1580     if (pelements == NULL) {
1581       return false;
1582     }
1583     for (int i = 0 ; i < n ; i++) {
1584       // really shouldn't be NULL but what the heck, check can't hurt
1585       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1586         continue; // skip the empty path values
1587       }
1588       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1589       if (file_exists(buffer)) {
1590         retval = true;
1591         break;
1592       }
1593     }
1594     // release the storage
1595     for (int i = 0 ; i < n ; i++) {
1596       if (pelements[i] != NULL) {
1597         FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1598       }
1599     }
1600     if (pelements != NULL) {
1601       FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1602     }
1603   } else {
1604     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1605     retval = true;
1606   }
1607   return retval;
1608 }
1609 
1610 // check if addr is inside libjvm.so
1611 bool os::address_is_in_vm(address addr) {
1612   static address libjvm_base_addr;
1613   Dl_info dlinfo;
1614 
1615   if (libjvm_base_addr == NULL) {
1616     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1617       libjvm_base_addr = (address)dlinfo.dli_fbase;
1618     }
1619     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1620   }
1621 
1622   if (dladdr((void *)addr, &dlinfo) != 0) {
1623     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1624   }
1625 
1626   return false;
1627 }
1628 
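// dladdr1(), when available, also returns the ELF symbol table entry for the
// matched symbol; its st_size lets dll_address_to_function_name() verify that
// the symbol actually covers the queried address.  The function pointer is
// resolved at runtime (in os::init()) so the VM still works with plain dladdr().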
1629 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
1630 static dladdr1_func_type dladdr1_func = NULL;
1631 
1632 bool os::dll_address_to_function_name(address addr, char *buf,
1633                                       int buflen, int * offset) {
1634   // buf is not optional, but offset is optional
1635   assert(buf != NULL, "sanity check");
1636 
1637   Dl_info dlinfo;
1638 
1639   // dladdr1_func was initialized in os::init()
1640   if (dladdr1_func != NULL) {
1641     // yes, we have dladdr1
1642 
1643     // Support for dladdr1 is checked at runtime; it may be
1644     // available even if the vm is built on a machine that does
1645     // not have dladdr1 support.  Make sure there is a value for
1646     // RTLD_DL_SYMENT.
1647     #ifndef RTLD_DL_SYMENT
1648     #define RTLD_DL_SYMENT 1
1649     #endif
1650 #ifdef _LP64
1651     Elf64_Sym * info;
1652 #else
1653     Elf32_Sym * info;
1654 #endif
1655     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1656                      RTLD_DL_SYMENT) != 0) {
1657       // see if we have a matching symbol that covers our address
1658       if (dlinfo.dli_saddr != NULL &&
1659           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1660         if (dlinfo.dli_sname != NULL) {
1661           if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1662             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1663           }
1664           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1665           return true;
1666         }
1667       }
1668       // no matching symbol so try for just file info
1669       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1670         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1671                             buf, buflen, offset, dlinfo.dli_fname)) {
1672           return true;
1673         }
1674       }
1675     }
1676     buf[0] = '\0';
1677     if (offset != NULL) *offset  = -1;
1678     return false;
1679   }
1680 
1681   // no, only dladdr is available
1682   if (dladdr((void *)addr, &dlinfo) != 0) {
1683     // see if we have a matching symbol
1684     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1685       if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
        jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1687       }
1688       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1689       return true;
1690     }
1691     // no matching symbol so try for just file info
1692     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1693       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1694                           buf, buflen, offset, dlinfo.dli_fname)) {
1695         return true;
1696       }
1697     }
1698   }
1699   buf[0] = '\0';
1700   if (offset != NULL) *offset  = -1;
1701   return false;
1702 }
1703 
1704 bool os::dll_address_to_library_name(address addr, char* buf,
1705                                      int buflen, int* offset) {
1706   // buf is not optional, but offset is optional
1707   assert(buf != NULL, "sanity check");
1708 
1709   Dl_info dlinfo;
1710 
1711   if (dladdr((void*)addr, &dlinfo) != 0) {
1712     if (dlinfo.dli_fname != NULL) {
1713       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1714     }
1715     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1716       *offset = addr - (address)dlinfo.dli_fbase;
1717     }
1718     return true;
1719   }
1720 
1721   buf[0] = '\0';
1722   if (offset) *offset = -1;
1723   return false;
1724 }
1725 
1726 // Prints the names and full paths of all opened dynamic libraries
1727 // for current process
1728 void os::print_dll_info(outputStream * st) {
1729   Dl_info dli;
1730   void *handle;
1731   Link_map *map;
1732   Link_map *p;
1733 
1734   st->print_cr("Dynamic libraries:"); st->flush();
1735 
1736   if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
1737       dli.dli_fname == NULL) {
1738     st->print_cr("Error: Cannot print dynamic libraries.");
1739     return;
1740   }
1741   handle = dlopen(dli.dli_fname, RTLD_LAZY);
1742   if (handle == NULL) {
1743     st->print_cr("Error: Cannot print dynamic libraries.");
1744     return;
1745   }
  if (dlinfo(handle, RTLD_DI_LINKMAP, &map) == -1 || map == NULL) {
    st->print_cr("Error: Cannot print dynamic libraries.");
    dlclose(handle);
    return;
  }
1751 
1752   while (map->l_prev != NULL)
1753     map = map->l_prev;
1754 
1755   while (map != NULL) {
1756     st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
1757     map = map->l_next;
1758   }
1759 
1760   dlclose(handle);
1761 }
1762 
// Loads a .dll/.so and, in case of error, checks whether the .dll/.so was
// built for the same architecture as the HotSpot VM is running on.

void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1769   void * result= ::dlopen(filename, RTLD_LAZY);
1770   if (result != NULL) {
1771     // Successful loading
1772     return result;
1773   }
1774 
1775   Elf32_Ehdr elf_head;
1776 
1777   // Read system error message into ebuf
1778   // It may or may not be overwritten below
  ::strncpy(ebuf, ::dlerror(), ebuflen - 1);
  ebuf[ebuflen - 1] = '\0';
  int diag_msg_max_length = ebuflen - strlen(ebuf);
  char* diag_msg_buf = ebuf + strlen(ebuf);

  if (diag_msg_max_length == 0) {
1785     // No more space in ebuf for additional diagnostics message
1786     return NULL;
1787   }
1788 
1789 
1790   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1791 
1792   if (file_descriptor < 0) {
1793     // Can't open library, report dlerror() message
1794     return NULL;
1795   }
1796 
  bool failed_to_read_elf_head =
    (sizeof(elf_head) !=
        (::read(file_descriptor, &elf_head, sizeof(elf_head))));
1800 
1801   ::close(file_descriptor);
1802   if (failed_to_read_elf_head) {
1803     // file i/o error - report dlerror() msg
1804     return NULL;
1805   }
1806 
1807   typedef struct {
1808     Elf32_Half  code;         // Actual value as defined in elf.h
    Elf32_Half  compat_class; // Compatibility class of the arch, from the VM's point of view
1810     char        elf_class;    // 32 or 64 bit
1811     char        endianess;    // MSB or LSB
1812     char*       name;         // String representation
1813   } arch_t;
1814 
1815   static const arch_t arch_array[]={
1816     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1817     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1818     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1819     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1820     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1821     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1822     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1823     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1824     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1825     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1826   };
1827 
1828   #if  (defined IA32)
1829     static  Elf32_Half running_arch_code=EM_386;
1830   #elif   (defined AMD64)
1831     static  Elf32_Half running_arch_code=EM_X86_64;
1832   #elif  (defined IA64)
1833     static  Elf32_Half running_arch_code=EM_IA_64;
1834   #elif  (defined __sparc) && (defined _LP64)
1835     static  Elf32_Half running_arch_code=EM_SPARCV9;
1836   #elif  (defined __sparc) && (!defined _LP64)
1837     static  Elf32_Half running_arch_code=EM_SPARC;
1838   #elif  (defined __powerpc64__)
1839     static  Elf32_Half running_arch_code=EM_PPC64;
1840   #elif  (defined __powerpc__)
1841     static  Elf32_Half running_arch_code=EM_PPC;
1842   #elif (defined ARM)
1843     static  Elf32_Half running_arch_code=EM_ARM;
1844   #else
1845     #error Method os::dll_load requires that one of following is defined:\
         IA32, AMD64, IA64, __sparc, __powerpc__, ARM
1847   #endif
1848 
  // Identify the compatibility class for the VM's architecture and the
  // library's architecture, and obtain string descriptions for both.
1851 
  arch_t lib_arch = {elf_head.e_machine, 0, elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1853   int running_arch_index=-1;
1854 
1855   for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
1856     if (running_arch_code == arch_array[i].code) {
1857       running_arch_index    = i;
1858     }
1859     if (lib_arch.code == arch_array[i].code) {
1860       lib_arch.compat_class = arch_array[i].compat_class;
1861       lib_arch.name         = arch_array[i].name;
1862     }
1863   }
1864 
1865   assert(running_arch_index != -1,
1866     "Didn't find running architecture code (running_arch_code) in arch_array");
1867   if (running_arch_index == -1) {
1868     // Even though running architecture detection failed
1869     // we may still continue with reporting dlerror() message
1870     return NULL;
1871   }
1872 
1873   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1874     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1875     return NULL;
1876   }
1877 
1878   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1879     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1880     return NULL;
1881   }
1882 
1883   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
    if (lib_arch.name != NULL) {
1885       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1886         " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1887         lib_arch.name, arch_array[running_arch_index].name);
1888     } else {
1889       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1890       " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1891         lib_arch.code,
1892         arch_array[running_arch_index].name);
1893     }
1894   }
1895 
1896   return NULL;
1897 }
1898 
1899 void* os::dll_lookup(void* handle, const char* name) {
1900   return dlsym(handle, name);
1901 }
1902 
1903 void* os::get_default_process_handle() {
1904   return (void*)::dlopen(NULL, RTLD_LAZY);
1905 }
1906 
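// stat() wrapper that rejects over-long paths (ENAMETOOLONG) and converts the
// argument to a native path first.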
1907 int os::stat(const char *path, struct stat *sbuf) {
1908   char pathbuf[MAX_PATH];
1909   if (strlen(path) > MAX_PATH - 1) {
1910     errno = ENAMETOOLONG;
1911     return -1;
1912   }
1913   os::native_path(strcpy(pathbuf, path));
1914   return ::stat(pathbuf, sbuf);
1915 }
1916 
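// Copy the contents of 'filename' to the output stream in small chunks;
// returns false only if the file cannot be opened.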
1917 static bool _print_ascii_file(const char* filename, outputStream* st) {
1918   int fd = ::open(filename, O_RDONLY);
1919   if (fd == -1) {
1920      return false;
1921   }
1922 
1923   char buf[32];
1924   int bytes;
1925   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
1926     st->print_raw(buf, bytes);
1927   }
1928 
1929   ::close(fd);
1930 
1931   return true;
1932 }
1933 
1934 void os::print_os_info_brief(outputStream* st) {
1935   os::Solaris::print_distro_info(st);
1936 
1937   os::Posix::print_uname_info(st);
1938 
1939   os::Solaris::print_libversion_info(st);
1940 }
1941 
1942 void os::print_os_info(outputStream* st) {
1943   st->print("OS:");
1944 
1945   os::Solaris::print_distro_info(st);
1946 
1947   os::Posix::print_uname_info(st);
1948 
1949   os::Solaris::print_libversion_info(st);
1950 
1951   os::Posix::print_rlimit_info(st);
1952 
1953   os::Posix::print_load_average(st);
1954 }
1955 
1956 void os::Solaris::print_distro_info(outputStream* st) {
1957   if (!_print_ascii_file("/etc/release", st)) {
1958       st->print("Solaris");
1959     }
1960     st->cr();
1961 }
1962 
1963 void os::Solaris::print_libversion_info(outputStream* st) {
1964   st->print("  (T2 libthread)");
1965   st->cr();
1966 }
1967 
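// Scan /proc/self/map for a mapping that starts at virtual address 0 (such a
// mapping would hide null pointer dereferences) and print a warning for each
// one found.  Returns true if any such mapping exists.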
1968 static bool check_addr0(outputStream* st) {
  bool status = false;
1970   int fd = ::open("/proc/self/map",O_RDONLY);
1971   if (fd >= 0) {
1972     prmap_t p;
    while (::read(fd, &p, sizeof(p)) > 0) {
      if (p.pr_vaddr == 0x0) {
        st->print("Warning: Address: " PTR_FORMAT ", Size: " SIZE_FORMAT "K, ",
                  p.pr_vaddr, p.pr_size/1024);
1976         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
1977         st->print("Access:");
1978         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
1979         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
1980         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
1981         st->cr();
1982         status = true;
1983       }
1984     }
1985     ::close(fd);
1986   }
1987   return status;
1988 }
1989 
1990 void os::pd_print_cpu_info(outputStream* st) {
1991   // Nothing to do for now.
1992 }
1993 
1994 void os::print_memory_info(outputStream* st) {
1995   st->print("Memory:");
1996   st->print(" %dk page", os::vm_page_size()>>10);
1997   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
1998   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
1999   st->cr();
2000   (void) check_addr0(st);
2001 }
2002 
2003 void os::print_siginfo(outputStream* st, void* siginfo) {
2004   const siginfo_t* si = (const siginfo_t*)siginfo;
2005 
2006   os::Posix::print_siginfo_brief(st, si);
2007 
2008   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2009       UseSharedSpaces) {
2010     FileMapInfo* mapinfo = FileMapInfo::current_info();
2011     if (mapinfo->is_in_shared_space(si->si_addr)) {
2012       st->print("\n\nError accessing class data sharing archive."   \
2013                 " Mapped file inaccessible during execution, "      \
2014                 " possible disk/network problem.");
2015     }
2016   }
2017   st->cr();
2018 }
2019 
// Moved up from the signal handling group because we need them here for
// diagnostic prints.
2022 #define OLDMAXSIGNUM 32
2023 static int Maxsignum = 0;
2024 static int *ourSigFlags = NULL;
2025 
2026 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2027 
2028 int os::Solaris::get_our_sigflags(int sig) {
2029   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2030   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2031   return ourSigFlags[sig];
2032 }
2033 
2034 void os::Solaris::set_our_sigflags(int sig, int flags) {
2035   assert(ourSigFlags!=NULL, "signal data structure not initialized");
2036   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2037   ourSigFlags[sig] = flags;
2038 }
2039 
2040 
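// Resolve a handler address to "<library>+0x<offset>" (directories stripped)
// if possible, otherwise print the raw address into 'buf'.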
2041 static const char* get_signal_handler_name(address handler,
2042                                            char* buf, int buflen) {
2043   int offset;
2044   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2045   if (found) {
2046     // skip directory names
2047     const char *p1, *p2;
2048     p1 = buf;
2049     size_t len = strlen(os::file_separator());
2050     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2051     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2052   } else {
2053     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2054   }
2055   return buf;
2056 }
2057 
2058 static void print_signal_handler(outputStream* st, int sig,
2059                                   char* buf, size_t buflen) {
2060   struct sigaction sa;
2061 
2062   sigaction(sig, NULL, &sa);
2063 
2064   st->print("%s: ", os::exception_name(sig, buf, buflen));
2065 
2066   address handler = (sa.sa_flags & SA_SIGINFO)
2067                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2068                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
2069 
2070   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2071     st->print("SIG_DFL");
2072   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2073     st->print("SIG_IGN");
2074   } else {
2075     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2076   }
2077 
2078   st->print(", sa_mask[0]=");
2079   os::Posix::print_signal_set_short(st, &sa.sa_mask);
2080 
2081   address rh = VMError::get_resetted_sighandler(sig);
  // Maybe the handler was reset by VMError?
  if (rh != NULL) {
2084     handler = rh;
2085     sa.sa_flags = VMError::get_resetted_sigflags(sig);
2086   }
2087 
2088   st->print(", sa_flags=");
2089   os::Posix::print_sa_flags(st, sa.sa_flags);
2090 
2091   // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, signalHandler) ||
      handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
    // It is our signal handler; check the flags.
    if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
      st->print(
        ", flags were changed from " PTR32_FORMAT ", consider using jsig library",
        os::Solaris::get_our_sigflags(sig));
2100     }
2101   }
2102   st->cr();
2103 }
2104 
2105 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2106   st->print_cr("Signal Handlers:");
2107   print_signal_handler(st, SIGSEGV, buf, buflen);
2108   print_signal_handler(st, SIGBUS , buf, buflen);
2109   print_signal_handler(st, SIGFPE , buf, buflen);
2110   print_signal_handler(st, SIGPIPE, buf, buflen);
2111   print_signal_handler(st, SIGXFSZ, buf, buflen);
2112   print_signal_handler(st, SIGILL , buf, buflen);
2113   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2114   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2115   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2116   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2117   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2118   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2119   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2120   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2121 }
2122 
2123 static char saved_jvm_path[MAXPATHLEN] = { 0 };
2124 
2125 // Find the full path to the current module, libjvm.so
2126 void os::jvm_path(char *buf, jint buflen) {
2127   // Error checking.
2128   if (buflen < MAXPATHLEN) {
2129     assert(false, "must use a large-enough buffer");
2130     buf[0] = '\0';
2131     return;
2132   }
2133   // Lazy resolve the path to current module.
2134   if (saved_jvm_path[0] != 0) {
2135     strcpy(buf, saved_jvm_path);
2136     return;
2137   }
2138 
2139   Dl_info dlinfo;
2140   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2141   assert(ret != 0, "cannot locate libjvm");
2142   if (ret != 0 && dlinfo.dli_fname != NULL) {
2143     realpath((char *)dlinfo.dli_fname, buf);
2144   } else {
2145     buf[0] = '\0';
2146     return;
2147   }
2148 
2149   if (Arguments::sun_java_launcher_is_altjvm()) {
2150     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
2151     // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
2152     // If "/jre/lib/" appears at the right place in the string, then
2153     // assume we are installed in a JDK and we're done.  Otherwise, check
2154     // for a JAVA_HOME environment variable and fix up the path so it
2155     // looks like libjvm.so is installed there (append a fake suffix
2156     // hotspot/libjvm.so).
2157     const char *p = buf + strlen(buf) - 1;
2158     for (int count = 0; p > buf && count < 5; ++count) {
2159       for (--p; p > buf && *p != '/'; --p)
2160         /* empty */ ;
2161     }
2162 
2163     if (strncmp(p, "/jre/lib/", 9) != 0) {
2164       // Look for JAVA_HOME in the environment.
2165       char* java_home_var = ::getenv("JAVA_HOME");
2166       if (java_home_var != NULL && java_home_var[0] != 0) {
2167         char cpu_arch[12];
2168         char* jrelib_p;
2169         int   len;
2170         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2171 #ifdef _LP64
2172         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2173         if (strcmp(cpu_arch, "sparc") == 0) {
2174           strcat(cpu_arch, "v9");
2175         } else if (strcmp(cpu_arch, "i386") == 0) {
2176           strcpy(cpu_arch, "amd64");
2177         }
2178 #endif
2179         // Check the current module name "libjvm.so".
2180         p = strrchr(buf, '/');
2181         assert(strstr(p, "/libjvm") == p, "invalid library name");
2182 
2183         realpath(java_home_var, buf);
2184         // determine if this is a legacy image or modules image
2185         // modules image doesn't have "jre" subdirectory
2186         len = strlen(buf);
2187         jrelib_p = buf + len;
2188         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2189         if (0 != access(buf, F_OK)) {
2190           snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2191         }
2192 
2193         if (0 == access(buf, F_OK)) {
2194           // Use current module name "libjvm.so"
2195           len = strlen(buf);
2196           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2197         } else {
2198           // Go back to path of .so
2199           realpath((char *)dlinfo.dli_fname, buf);
2200         }
2201       }
2202     }
2203   }
2204 
2205   strcpy(saved_jvm_path, buf);
2206 }
2207 
2208 
2209 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2210   // no prefix required, not even "_"
2211 }
2212 
2213 
2214 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2215   // no suffix required
2216 }
2217 
2218 // This method is a copy of JDK's sysGetLastErrorString
2219 // from src/solaris/hpi/src/system_md.c
2220 
2221 size_t os::lasterror(char *buf, size_t len) {
2222 
2223   if (errno == 0)  return 0;
2224 
2225   const char *s = ::strerror(errno);
2226   size_t n = ::strlen(s);
2227   if (n >= len) {
2228     n = len - 1;
2229   }
2230   ::strncpy(buf, s, n);
2231   buf[n] = '\0';
2232   return n;
2233 }
2234 
2235 
2236 // sun.misc.Signal
2237 
2238 extern "C" {
2239   static void UserHandler(int sig, void *siginfo, void *context) {
2240     // Ctrl-C is pressed during error reporting, likely because the error
2241     // handler fails to abort. Let VM die immediately.
2242     if (sig == SIGINT && is_error_reported()) {
2243        os::die();
2244     }
2245 
2246     os::signal_notify(sig);
2247     // We do not need to reinstate the signal handler each time...
2248   }
2249 }
2250 
2251 void* os::user_handler() {
2252   return CAST_FROM_FN_PTR(void*, UserHandler);
2253 }
2254 
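// Thin wrapper around a Solaris counting semaphore (sema_t); the count starts
// at zero and the semaphore is destroyed with the object.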
2255 class Semaphore : public StackObj {
2256   public:
2257     Semaphore();
2258     ~Semaphore();
2259     void signal();
2260     void wait();
2261     bool trywait();
2262     bool timedwait(unsigned int sec, int nsec);
2263   private:
2264     sema_t _semaphore;
2265 };
2266 
2267 
2268 Semaphore::Semaphore() {
2269   sema_init(&_semaphore, 0, NULL, NULL);
2270 }
2271 
2272 Semaphore::~Semaphore() {
2273   sema_destroy(&_semaphore);
2274 }
2275 
2276 void Semaphore::signal() {
2277   sema_post(&_semaphore);
2278 }
2279 
2280 void Semaphore::wait() {
2281   sema_wait(&_semaphore);
2282 }
2283 
2284 bool Semaphore::trywait() {
2285   return sema_trywait(&_semaphore) == 0;
2286 }
2287 
2288 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2289   struct timespec ts;
2290   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2291 
2292   while (1) {
2293     int result = sema_timedwait(&_semaphore, &ts);
2294     if (result == 0) {
2295       return true;
    } else if (result == EINTR) {   // sema_timedwait() returns the error number directly
      continue;
    } else if (result == ETIME) {
2299       return false;
2300     } else {
2301       return false;
2302     }
2303   }
2304 }
2305 
2306 extern "C" {
2307   typedef void (*sa_handler_t)(int);
2308   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2309 }
2310 
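// Install 'handler' for 'signal_number' via sigaction() with a fully blocked
// mask while the handler runs; returns the previous handler, or (void *)-1 if
// registration failed.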
2311 void* os::signal(int signal_number, void* handler) {
2312   struct sigaction sigAct, oldSigAct;
2313   sigfillset(&(sigAct.sa_mask));
2314   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2315   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2316 
2317   if (sigaction(signal_number, &sigAct, &oldSigAct))
2318     // -1 means registration failed
2319     return (void *)-1;
2320 
2321   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2322 }
2323 
2324 void os::signal_raise(int signal_number) {
2325   raise(signal_number);
2326 }
2327 
2328 /*
2329  * The following code is moved from os.cpp for making this
2330  * code platform specific, which it is by its very nature.
2331  */
2332 
2333 // a counter for each possible signal value
2334 static int Sigexit = 0;
2335 static int Maxlibjsigsigs;
2336 static jint *pending_signals = NULL;
2337 static int *preinstalled_sigs = NULL;
2338 static struct sigaction *chainedsigactions = NULL;
2339 static sema_t sig_sem;
2340 typedef int (*version_getting_t)();
2341 version_getting_t os::Solaris::get_libjsig_version = NULL;
static int libjsigversion = 0;
2343 
2344 int os::sigexitnum_pd() {
2345   assert(Sigexit > 0, "signal memory not yet initialized");
2346   return Sigexit;
2347 }
2348 
2349 void os::Solaris::init_signal_mem() {
2350   // Initialize signal structures
2351   Maxsignum = SIGRTMAX;
2352   Sigexit = Maxsignum+1;
2353   assert(Maxsignum >0, "Unable to obtain max signal number");
2354 
2355   Maxlibjsigsigs = Maxsignum;
2356 
2357   // pending_signals has one int per signal
2358   // The additional signal is for SIGEXIT - exit signal to signal_thread
2359   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2360   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2361 
2362   if (UseSignalChaining) {
     chainedsigactions = (struct sigaction *)os::malloc(sizeof(struct sigaction)
       * (Maxsignum + 1), mtInternal);
2365      memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2366      preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2367      memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2368   }
  ourSigFlags = (int*)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2370   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2371 }
2372 
2373 void os::signal_init_pd() {
2374   int ret;
2375 
2376   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2377   assert(ret == 0, "sema_init() failed");
2378 }
2379 
2380 void os::signal_notify(int signal_number) {
2381   int ret;
2382 
2383   Atomic::inc(&pending_signals[signal_number]);
2384   ret = ::sema_post(&sig_sem);
2385   assert(ret == 0, "sema_post() failed");
2386 }
2387 
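// Atomically claim one pending occurrence of any signal and return its number.
// If nothing is pending and 'wait_for_signal' is true, block on sig_sem
// (honoring the suspend-equivalent protocol); otherwise return -1.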
2388 static int check_pending_signals(bool wait_for_signal) {
2389   int ret;
2390   while (true) {
2391     for (int i = 0; i < Sigexit + 1; i++) {
2392       jint n = pending_signals[i];
2393       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2394         return i;
2395       }
2396     }
2397     if (!wait_for_signal) {
2398       return -1;
2399     }
2400     JavaThread *thread = JavaThread::current();
2401     ThreadBlockInVM tbivm(thread);
2402 
2403     bool threadIsSuspended;
2404     do {
2405       thread->set_suspend_equivalent();
2406       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      while ((ret = ::sema_wait(&sig_sem)) == EINTR)
        ;
2409       assert(ret == 0, "sema_wait() failed");
2410 
2411       // were we externally suspended while we were waiting?
2412       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2413       if (threadIsSuspended) {
2414         //
2415         // The semaphore has been incremented, but while we were waiting
2416         // another thread suspended us. We don't want to continue running
2417         // while suspended because that would surprise the thread that
2418         // suspended us.
2419         //
2420         ret = ::sema_post(&sig_sem);
2421         assert(ret == 0, "sema_post() failed");
2422 
2423         thread->java_suspend_self();
2424       }
2425     } while (threadIsSuspended);
2426   }
2427 }
2428 
2429 int os::signal_lookup() {
2430   return check_pending_signals(false);
2431 }
2432 
2433 int os::signal_wait() {
2434   return check_pending_signals(true);
2435 }
2436 
2437 ////////////////////////////////////////////////////////////////////////////////
2438 // Virtual Memory
2439 
2440 static int page_size = -1;
2441 
2442 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2443 // clear this var if support is not available.
2444 static bool has_map_align = true;
2445 
2446 int os::vm_page_size() {
2447   assert(page_size != -1, "must call os::init");
2448   return page_size;
2449 }
2450 
2451 // Solaris allocates memory by pages.
2452 int os::vm_allocation_granularity() {
2453   assert(page_size != -1, "must call os::init");
2454   return page_size;
2455 }
2456 
2457 static bool recoverable_mmap_error(int err) {
2458   // See if the error is one we can let the caller handle. This
2459   // list of errno values comes from the Solaris mmap(2) man page.
2460   switch (err) {
2461   case EBADF:
2462   case EINVAL:
2463   case ENOTSUP:
2464     // let the caller deal with these errors
2465     return true;
2466 
2467   default:
2468     // Any remaining errors on this OS can cause our reserved mapping
2469     // to be lost. That can cause confusion where different data
2470     // structures think they have the same memory mapped. The worst
2471     // scenario is if both the VM and a library think they have the
2472     // same memory mapped.
2473     return false;
2474   }
2475 }
2476 
2477 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2478                                     int err) {
2479   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2480           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2481           strerror(err), err);
2482 }
2483 
2484 static void warn_fail_commit_memory(char* addr, size_t bytes,
2485                                     size_t alignment_hint, bool exec,
2486                                     int err) {
2487   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2488           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2489           alignment_hint, exec, strerror(err), err);
2490 }
2491 
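// Commit previously reserved pages by mmap()ing anonymous memory over them
// with MAP_FIXED.  Returns 0 on success, otherwise the errno from mmap();
// unrecoverable errors abort the VM.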
2492 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2493   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2494   size_t size = bytes;
2495   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2496   if (res != NULL) {
2497     if (UseNUMAInterleaving) {
2498       numa_make_global(addr, bytes);
2499     }
2500     return 0;
2501   }
2502 
2503   int err = errno;  // save errno from mmap() call in mmap_chunk()
2504 
2505   if (!recoverable_mmap_error(err)) {
2506     warn_fail_commit_memory(addr, bytes, exec, err);
2507     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2508   }
2509 
2510   return err;
2511 }
2512 
2513 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2514   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2515 }
2516 
2517 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2518                                   const char* mesg) {
2519   assert(mesg != NULL, "mesg must be specified");
2520   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2521   if (err != 0) {
2522     // the caller wants all commit errors to exit with the specified mesg:
2523     warn_fail_commit_memory(addr, bytes, exec, err);
2524     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2525   }
2526 }
2527 
2528 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2529                                     size_t alignment_hint, bool exec) {
2530   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2531   if (err == 0) {
2532     if (UseLargePages && (alignment_hint > (size_t)vm_page_size())) {
2533       // If the large page size has been set and the VM
2534       // is using large pages, use the large page size
2535       // if it is smaller than the alignment hint. This is
2536       // a case where the VM wants to use a larger alignment size
      // for its own reasons but still wants to use large pages
      // (which is what matters for setting the MPSS range).
2539       size_t page_size = 0;
2540       if (large_page_size() < alignment_hint) {
2541         assert(UseLargePages, "Expected to be here for large page use only");
2542         page_size = large_page_size();
2543       } else {
2544         // If the alignment hint is less than the large page
2545         // size, the VM wants a particular alignment (thus the hint)
2546         // for internal reasons.  Try to set the mpss range using
2547         // the alignment_hint.
2548         page_size = alignment_hint;
2549       }
2550       // Since this is a hint, ignore any failures.
2551       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2552     }
2553   }
2554   return err;
2555 }
2556 
2557 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2558                           bool exec) {
2559   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2560 }
2561 
2562 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2563                                   size_t alignment_hint, bool exec,
2564                                   const char* mesg) {
2565   assert(mesg != NULL, "mesg must be specified");
2566   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2567   if (err != 0) {
2568     // the caller wants all commit errors to exit with the specified mesg:
2569     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2570     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2571   }
2572 }
2573 
2574 // Uncommit the pages in a specified region.
2575 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2576   if (madvise(addr, bytes, MADV_FREE) < 0) {
2577     debug_only(warning("MADV_FREE failed."));
2578     return;
2579   }
2580 }
2581 
2582 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2583   return os::commit_memory(addr, size, !ExecMem);
2584 }
2585 
2586 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2587   return os::uncommit_memory(addr, size);
2588 }
2589 
2590 // Change the page size in a given range.
2591 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2592   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2593   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2594   if (UseLargePages) {
2595     Solaris::setup_large_pages(addr, bytes, alignment_hint);
2596   }
2597 }
2598 
2599 // Tell the OS to make the range local to the first-touching LWP
2600 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2601   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2602   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2603     debug_only(warning("MADV_ACCESS_LWP failed."));
2604   }
2605 }
2606 
2607 // Tell the OS that this range would be accessed from different LWPs.
2608 void os::numa_make_global(char *addr, size_t bytes) {
2609   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2610   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2611     debug_only(warning("MADV_ACCESS_MANY failed."));
2612   }
2613 }
2614 
2615 // Get the number of the locality groups.
2616 size_t os::numa_get_groups_num() {
2617   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2618   return n != -1 ? n : 1;
2619 }
2620 
2621 // Get a list of leaf locality groups. A leaf lgroup is group that
2622 // doesn't have any children. Typical leaf group is a CPU or a CPU/memory
2623 // board. An LWP is assigned to one of these groups upon creation.
2624 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2625    if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2626      ids[0] = 0;
2627      return 1;
2628    }
2629    int result_size = 0, top = 1, bottom = 0, cur = 0;
2630    for (int k = 0; k < size; k++) {
2631      int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2632                                     (Solaris::lgrp_id_t*)&ids[top], size - top);
2633      if (r == -1) {
2634        ids[0] = 0;
2635        return 1;
2636      }
2637      if (!r) {
2638        // That's a leaf node.
2639        assert (bottom <= cur, "Sanity check");
2640        // Check if the node has memory
2641        if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2642                                    NULL, 0, LGRP_RSRC_MEM) > 0) {
2643          ids[bottom++] = ids[cur];
2644        }
2645      }
2646      top += r;
2647      cur++;
2648    }
2649    if (bottom == 0) {
     // Handle the situation where the OS reports no memory available;
     // assume a UMA architecture.
2652      ids[0] = 0;
2653      return 1;
2654    }
2655    return bottom;
2656 }
2657 
// Detect a topology change. This typically happens during CPU hot-plugging or unplugging.
2659 bool os::numa_topology_changed() {
2660   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2661   if (is_stale != -1 && is_stale) {
2662     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2663     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2664     assert(c != 0, "Failure to initialize LGRP API");
2665     Solaris::set_lgrp_cookie(c);
2666     return true;
2667   }
2668   return false;
2669 }
2670 
2671 // Get the group id of the current LWP.
2672 int os::numa_get_group_id() {
2673   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2674   if (lgrp_id == -1) {
2675     return 0;
2676   }
2677   const int size = os::numa_get_groups_num();
2678   int *ids = (int*)alloca(size * sizeof(int));
2679 
2680   // Get the ids of all lgroups with memory; r is the count.
2681   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2682                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2683   if (r <= 0) {
2684     return 0;
2685   }
2686   return ids[os::random() % r];
2687 }
2688 
2689 // Request information about the page.
2690 bool os::get_page_info(char *start, page_info* info) {
2691   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2692   uint64_t addr = (uintptr_t)start;
2693   uint64_t outdata[2];
2694   uint_t validity = 0;
2695 
2696   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2697     return false;
2698   }
2699 
2700   info->size = 0;
2701   info->lgrp_id = -1;
2702 
2703   if ((validity & 1) != 0) {
2704     if ((validity & 2) != 0) {
2705       info->lgrp_id = outdata[0];
2706     }
2707     if ((validity & 4) != 0) {
2708       info->size = outdata[1];
2709     }
2710     return true;
2711   }
2712   return false;
2713 }
2714 
2715 // Scan the pages from start to end until a page different than
2716 // the one described in the info parameter is encountered.
2717 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2718   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2719   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2720   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2721   uint_t validity[MAX_MEMINFO_CNT];
2722 
2723   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2724   uint64_t p = (uint64_t)start;
2725   while (p < (uint64_t)end) {
2726     addrs[0] = p;
2727     size_t addrs_count = 1;
2728     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2729       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2730       addrs_count++;
2731     }
2732 
2733     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2734       return NULL;
2735     }
2736 
2737     size_t i = 0;
2738     for (; i < addrs_count; i++) {
2739       if ((validity[i] & 1) != 0) {
2740         if ((validity[i] & 4) != 0) {
2741           if (outdata[types * i + 1] != page_expected->size) {
2742             break;
2743           }
        } else if (page_expected->size != 0) {
          break;
        }
2748 
2749         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2750           if (outdata[types * i] != page_expected->lgrp_id) {
2751             break;
2752           }
2753         }
2754       } else {
2755         return NULL;
2756       }
2757     }
2758 
2759     if (i < addrs_count) {
2760       if ((validity[i] & 2) != 0) {
2761         page_found->lgrp_id = outdata[types * i];
2762       } else {
2763         page_found->lgrp_id = -1;
2764       }
2765       if ((validity[i] & 4) != 0) {
2766         page_found->size = outdata[types * i + 1];
2767       } else {
2768         page_found->size = 0;
2769       }
2770       return (char*)addrs[i];
2771     }
2772 
2773     p = addrs[addrs_count - 1] + page_size;
2774   }
2775   return end;
2776 }
2777 
2778 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2779   size_t size = bytes;
2780   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2781   // uncommitted page. Otherwise, the read/write might succeed if we
2782   // have enough swap space to back the physical page.
2783   return
2784     NULL != Solaris::mmap_chunk(addr, size,
2785                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2786                                 PROT_NONE);
2787 }
2788 
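// mmap() /dev/zero at 'addr' with the given flags and protection;
// returns NULL (instead of MAP_FAILED) on error.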
2789 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2790   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2791 
2792   if (b == MAP_FAILED) {
2793     return NULL;
2794   }
2795   return b;
2796 }
2797 
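// Reserve (but do not commit) an anonymous MAP_NORESERVE mapping.  A fixed
// request uses MAP_FIXED; otherwise, if the kernel supports MAP_ALIGN and an
// alignment hint larger than a page was given, the hint is passed as the
// mapping address with MAP_ALIGN.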
2798 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
2799   char* addr = requested_addr;
2800   int flags = MAP_PRIVATE | MAP_NORESERVE;
2801 
2802   assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
2803 
2804   if (fixed) {
2805     flags |= MAP_FIXED;
2806   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
2807     flags |= MAP_ALIGN;
2808     addr = (char*) alignment_hint;
2809   }
2810 
2811   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2812   // uncommitted page. Otherwise, the read/write might succeed if we
2813   // have enough swap space to back the physical page.
2814   return mmap_chunk(addr, bytes, flags, PROT_NONE);
2815 }
2816 
2817 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2818   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
2819 
2820   guarantee(requested_addr == NULL || requested_addr == addr,
2821             "OS failed to return requested mmap address.");
2822   return addr;
2823 }
2824 
2825 // Reserve memory at an arbitrary address, only if that area is
2826 // available (and not reserved for something else).
2827 
2828 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2829   const int max_tries = 10;
2830   char* base[max_tries];
2831   size_t size[max_tries];
2832 
2833   // Solaris adds a gap between mmap'ed regions.  The size of the gap
2834   // is dependent on the requested size and the MMU.  Our initial gap
2835   // value here is just a guess and will be corrected later.
2836   bool had_top_overlap = false;
2837   bool have_adjusted_gap = false;
2838   size_t gap = 0x400000;
2839 
2840   // Assert only that the size is a multiple of the page size, since
2841   // that's all that mmap requires, and since that's all we really know
2842   // about at this low abstraction level.  If we need higher alignment,
2843   // we can either pass an alignment to this method or verify alignment
2844   // in one of the methods further up the call chain.  See bug 5044738.
2845   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2846 
2847   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2848   // Give it a try, if the kernel honors the hint we can return immediately.
2849   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2850 
2851   volatile int err = errno;
2852   if (addr == requested_addr) {
2853     return addr;
2854   } else if (addr != NULL) {
2855     pd_unmap_memory(addr, bytes);
2856   }
2857 
2858   if (PrintMiscellaneous && Verbose) {
2859     char buf[256];
2860     buf[0] = '\0';
2861     if (addr == NULL) {
2862       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
2863     }
2864     warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2865             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2866             "%s", bytes, requested_addr, addr, buf);
2867   }
2868 
2869   // Address hint method didn't work.  Fall back to the old method.
2870   // In theory, once SNV becomes our oldest supported platform, this
2871   // code will no longer be needed.
2872   //
2873   // Repeatedly allocate blocks until the block is allocated at the
2874   // right spot. Give up after max_tries.
2875   int i;
2876   for (i = 0; i < max_tries; ++i) {
2877     base[i] = reserve_memory(bytes);
2878 
2879     if (base[i] != NULL) {
2880       // Is this the block we wanted?
2881       if (base[i] == requested_addr) {
2882         size[i] = bytes;
2883         break;
2884       }
2885 
2886       // check that the gap value is right
2887       if (had_top_overlap && !have_adjusted_gap) {
2888         size_t actual_gap = base[i-1] - base[i] - bytes;
2889         if (gap != actual_gap) {
2890           // adjust the gap value and retry the last 2 allocations
2891           assert(i > 0, "gap adjustment code problem");
2892           have_adjusted_gap = true;  // adjust the gap only once, just in case
2893           gap = actual_gap;
2894           if (PrintMiscellaneous && Verbose) {
2895             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2896           }
2897           unmap_memory(base[i], bytes);
2898           unmap_memory(base[i-1], size[i-1]);
2899           i-=2;
2900           continue;
2901         }
2902       }
2903 
2904       // Does this overlap the block we wanted? Give back the overlapped
2905       // parts and try again.
2906       //
2907       // There is still a bug in this code: if top_overlap == bytes,
2908       // the overlap is offset from requested region by the value of gap.
2909       // In this case giving back the overlapped part will not work,
2910       // because we'll give back the entire block at base[i] and
2911       // therefore the subsequent allocation will not generate a new gap.
2912       // This could be fixed with a new algorithm that used larger
2913       // or variable size chunks to find the requested region -
2914       // but such a change would introduce additional complications.
2915       // It's rare enough that the planets align for this bug,
2916       // so we'll just wait for a fix for 6204603/5003415 which
2917       // will provide a mmap flag to allow us to avoid this business.
2918 
2919       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2920       if (top_overlap >= 0 && top_overlap < bytes) {
2921         had_top_overlap = true;
2922         unmap_memory(base[i], top_overlap);
2923         base[i] += top_overlap;
2924         size[i] = bytes - top_overlap;
2925       } else {
2926         size_t bottom_overlap = base[i] + bytes - requested_addr;
2927         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2928           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
2929             warning("attempt_reserve_memory_at: possible alignment bug");
2930           }
2931           unmap_memory(requested_addr, bottom_overlap);
2932           size[i] = bytes - bottom_overlap;
2933         } else {
2934           size[i] = bytes;
2935         }
2936       }
2937     }
2938   }
2939 
2940   // Give back the unused reserved pieces.
2941 
2942   for (int j = 0; j < i; ++j) {
2943     if (base[j] != NULL) {
2944       unmap_memory(base[j], size[j]);
2945     }
2946   }
2947 
2948   return (i < max_tries) ? requested_addr : NULL;
2949 }
2950 
2951 bool os::pd_release_memory(char* addr, size_t bytes) {
2952   size_t size = bytes;
2953   return munmap(addr, size) == 0;
2954 }
2955 
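// Change the protection of a page-aligned range; returns true on success.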
2956 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
2957   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
2958          "addr must be page aligned");
2959   int retVal = mprotect(addr, bytes, prot);
2960   return retVal == 0;
2961 }
2962 
2963 // Protect memory (Used to pass readonly pages through
2964 // JNI GetArray<type>Elements with empty arrays.)
2965 // Also, used for serialization page and for compressed oops null pointer
2966 // checking.
2967 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2968                         bool is_committed) {
2969   unsigned int p = 0;
2970   switch (prot) {
2971   case MEM_PROT_NONE: p = PROT_NONE; break;
2972   case MEM_PROT_READ: p = PROT_READ; break;
2973   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2974   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2975   default:
2976     ShouldNotReachHere();
2977   }
2978   // is_committed is unused.
2979   return solaris_mprotect(addr, bytes, p);
2980 }
2981 
2982 // guard_memory and unguard_memory only happen within stack guard pages.
2983 // Since ISM pertains only to the heap, guard and unguard memory should not
2984 // happen with an ISM region.
2985 bool os::guard_memory(char* addr, size_t bytes) {
2986   return solaris_mprotect(addr, bytes, PROT_NONE);
2987 }
2988 
2989 bool os::unguard_memory(char* addr, size_t bytes) {
2990   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
2991 }
2992 
2993 // Large page support
2994 static size_t _large_page_size = 0;
2995 
2996 // Insertion sort for small arrays (descending order).
2997 static void insertion_sort_descending(size_t* array, int len) {
2998   for (int i = 0; i < len; i++) {
2999     size_t val = array[i];
3000     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
3001       size_t tmp = array[key];
3002       array[key] = array[key - 1];
3003       array[key - 1] = tmp;
3004     }
3005   }
3006 }
3007 
3008 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
3009   const unsigned int usable_count = VM_Version::page_size_count();
3010   if (usable_count == 1) {
3011     return false;
3012   }
3013 
3014   // Find the right getpagesizes interface.  When Solaris 11 is the minimum
3015   // build platform, getpagesizes() (without the '2') can be called directly.
3016   typedef int (*gps_t)(size_t[], int);
3017   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3018   if (gps_func == NULL) {
3019     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3020     if (gps_func == NULL) {
3021       if (warn) {
3022         warning("MPSS is not supported by the operating system.");
3023       }
3024       return false;
3025     }
3026   }
3027 
3028   // Fill the array of page sizes.
3029   int n = (*gps_func)(_page_sizes, page_sizes_max);
3030   assert(n > 0, "Solaris bug?");
3031 
3032   if (n == page_sizes_max) {
3033     // Add a sentinel value (necessary only if the array was completely filled,
3034     // since otherwise the static array is already zeroed at initialization).
3035     _page_sizes[--n] = 0;
3036     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3037   }
3038   assert(_page_sizes[n] == 0, "missing sentinel");
3039   trace_page_sizes("available page sizes", _page_sizes, n);
3040 
3041   if (n == 1) return false;     // Only one page size available.
3042 
3043   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3044   // select up to usable_count elements.  First sort the array, find the first
3045   // acceptable value, then copy the usable sizes to the top of the array and
3046   // trim the rest.  Make sure to include the default page size :-).
3047   //
3048   // A better policy could get rid of the 4M limit by taking the sizes of the
3049   // important VM memory regions (java heap and possibly the code cache) into
3050   // account.
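       //
       // Worked example (hypothetical values): if the OS reports page sizes
       // {8K, 64K, 512K, 4M, 256M}, usable_count is 3 and vm_page_size() is 8K,
       // then after sorting, skipping 256M (> 4M) and trimming, the array
       // becomes {4M, 512K, 8K, 0} and *page_size is 4M.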
3051   insertion_sort_descending(_page_sizes, n);
3052   const size_t size_limit =
3053     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3054   int beg;
3055   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
3056   const int end = MIN2((int)usable_count, n) - 1;
3057   for (int cur = 0; cur < end; ++cur, ++beg) {
3058     _page_sizes[cur] = _page_sizes[beg];
3059   }
3060   _page_sizes[end] = vm_page_size();
3061   _page_sizes[end + 1] = 0;
3062 
3063   if (_page_sizes[end] > _page_sizes[end - 1]) {
3064     // Default page size is not the smallest; sort again.
3065     insertion_sort_descending(_page_sizes, end + 1);
3066   }
3067   *page_size = _page_sizes[0];
3068 
3069   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3070   return true;
3071 }
3072 
3073 void os::large_page_init() {
3074   if (UseLargePages) {
3075     // print a warning if any large page related flag is specified on command line
3076     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
3077                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3078 
3079     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3080   }
3081 }
3082 
3083 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3084   // Signal to OS that we want large pages for addresses
3085   // from addr, addr + bytes
3086   struct memcntl_mha mpss_struct;
3087   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3088   mpss_struct.mha_pagesize = align;
3089   mpss_struct.mha_flags = 0;
3090   // Upon successful completion, memcntl() returns 0
3091   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3092     debug_only(warning("Attempt to use MPSS failed."));
3093     return false;
3094   }
3095   return true;
3096 }
3097 
3098 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3099   fatal("os::reserve_memory_special should not be called on Solaris.");
3100   return NULL;
3101 }
3102 
3103 bool os::release_memory_special(char* base, size_t bytes) {
3104   fatal("os::release_memory_special should not be called on Solaris.");
3105   return false;
3106 }
3107 
3108 size_t os::large_page_size() {
3109   return _large_page_size;
3110 }
3111 
3112 // MPSS allows application to commit large page memory on demand; with ISM
3113 // the entire memory region must be allocated as shared memory.
3114 bool os::can_commit_large_page_memory() {
3115   return true;
3116 }
3117 
3118 bool os::can_execute_large_page_memory() {
3119   return true;
3120 }
3121 
3122 // Read calls from inside the vm need to perform state transitions
3123 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3124   size_t res;
3125   JavaThread* thread = (JavaThread*)Thread::current();
3126   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
3127   ThreadBlockInVM tbiv(thread);
3128   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3129   return res;
3130 }
3131 
3132 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3133   size_t res;
3134   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
3135           "Assumed _thread_in_native");
3136   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
3137   return res;
3138 }
3139 
3140 void os::naked_short_sleep(jlong ms) {
3141   assert(ms < 1000, "Uninterruptible sleep, short time use only");
3142 
3143   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3144   // Solaris requires -lrt for this.
3145   usleep((ms * 1000));
3146 
3147   return;
3148 }
3149 
3150 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3151 void os::infinite_sleep() {
3152   while (true) {    // sleep forever ...
3153     ::sleep(100);   // ... 100 seconds at a time
3154   }
3155 }
3156 
3157 // Used to convert frequent JVM_Yield() to nops
3158 bool os::dont_yield() {
3159   if (DontYieldALot) {
3160     static hrtime_t last_time = 0;
3161     hrtime_t diff = getTimeNanos() - last_time;
3162 
3163     if (diff < DontYieldALotInterval * 1000000)
3164       return true;
3165 
3166     last_time += diff;
3167 
3168     return false;
3169   }
3170   else {
3171     return false;
3172   }
3173 }
3174 
3175 // Caveat: Solaris os::yield() causes a thread-state transition whereas
3176 // the linux and win32 implementations do not.  This should be checked.
3177 
3178 void os::yield() {
3179   // Yields to all threads with same or greater priority
3180   os::sleep(Thread::current(), 0, false);
3181 }
3182 
3183 // Note that yield semantics are defined by the scheduling class to which
3184 // the thread currently belongs.  Typically, yield will *not* yield to
3185 // other equal or higher priority threads that reside on the dispatch queues
3186 // of other CPUs.
3187 
3188 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
3189 
3190 void os::yield_all() {
3191   // Yields to all threads, including threads with lower priorities
3192   os::sleep(Thread::current(), 1, false);
3193 }
3194 
3195 // Interface for setting lwp priorities.  If we are using T2 libthread,
3196 // which forces the use of BoundThreads, or we manually set UseBoundThreads,
3197 // all of our threads will be assigned to real lwp's.  Using the thr_setprio
3198 // function is meaningless in this mode, so we must adjust the real lwp's
3199 // priority.  The routines below implement the getting and setting of lwp
3200 // priorities.
3201 //
3202 // Note: T2 is now the only supported libthread.  The UseBoundThreads flag
3203 //       is being deprecated and all threads are now BoundThreads.
3204 //
3205 // Note: There are three priority scales used on Solaris: Java priorities,
3206 //       which range from 1 to 10; the libthread "thr_setprio" scale, which
3207 //       ranges from 0 to 127; and the current scheduling class of the
3208 //       process we are running in, typically from -60 to +60.  The lwp
3209 //       priorities are set after a call to thr_setprio, so Java priorities
3210 //       are mapped to libthread priorities and we map from the latter to
3211 //       lwp priorities.  We don't store priorities as Java priorities since
3212 //       some of our worker threads want priorities higher than all Java threads.
3213 //
3214 // For related information:
3215 // (1)  man -s 2 priocntl
3216 // (2)  man -s 4 priocntl
3217 // (3)  man dispadmin
3218 // =    librt.so
3219 // =    libthread/common/rtsched.c - thrp_setlwpprio().
3220 // =    ps -cL <pid> ... to validate priority.
3221 // =    sched_get_priority_min and _max
3222 //              pthread_create
3223 //              sched_setparam
3224 //              pthread_setschedparam
3225 //
3226 // Assumptions:
3227 // +    We assume that all threads in the process belong to the same
3228 //              scheduling class.   I.e., a homogeneous process.
3229 // +    Must be root or in IA group to change the "interactive" attribute.
3230 //              Priocntl() will fail silently.  The only indication of failure is when
3231 //              we read-back the value and notice that it hasn't changed.
3232 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3233 // +    For RT, change timeslice as well.  Invariant:
3234 //              constant "priority integral"
3235 //              Konst == TimeSlice * (60-Priority)
3236 //              Given a priority, compute appropriate timeslice.
3237 // +    Higher numerical values have higher priority.
3238 
3239 // sched class attributes
3240 typedef struct {
3241         int   schedPolicy;              // classID
3242         int   maxPrio;
3243         int   minPrio;
3244 } SchedInfo;
3245 
3246 
3247 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3248 
3249 #ifdef ASSERT
3250 static int  ReadBackValidate = 1;
3251 #endif
3252 static int  myClass     = 0;
3253 static int  myMin       = 0;
3254 static int  myMax       = 0;
3255 static int  myCur       = 0;
3256 static bool priocntl_enable = false;
3257 
3258 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
3259 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3260 
3261 
3262 // lwp_priocntl_init
3263 //
3264 // Try to determine the priority scale for our process.
3265 //
3266 // Return errno or 0 if OK.
3267 //
3268 static int lwp_priocntl_init () {
3269   int rslt;
3270   pcinfo_t ClassInfo;
3271   pcparms_t ParmInfo;
3272   int i;
3273 
3274   if (!UseThreadPriorities) return 0;
3275 
3276   // If ThreadPriorityPolicy is 1, switch tables
3277   if (ThreadPriorityPolicy == 1) {
3278     for (i = 0 ; i < CriticalPriority+1; i++)
3279       os::java_to_os_priority[i] = prio_policy1[i];
3280   }
3281   if (UseCriticalJavaThreadPriority) {
3282     // MaxPriority always maps to the FX scheduling class and criticalPrio.
3283     // See set_native_priority() and set_lwp_class_and_priority().
3284     // Save original MaxPriority mapping in case attempt to
3285     // use critical priority fails.
3286     java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3287     // Set negative to distinguish from other priorities
3288     os::java_to_os_priority[MaxPriority] = -criticalPrio;
3289   }
3290 
3291   // Get IDs for a set of well-known scheduling classes.
3292   // TODO-FIXME: GETCLINFO returns the current # of classes in the
3293   // system.  We should have a loop that iterates over the
3294   // classID values, which are known to be "small" integers.
3295 
3296   strcpy(ClassInfo.pc_clname, "TS");
3297   ClassInfo.pc_cid = -1;
3298   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3299   if (rslt < 0) return errno;
3300   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3301   tsLimits.schedPolicy = ClassInfo.pc_cid;
3302   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3303   tsLimits.minPrio = -tsLimits.maxPrio;
3304 
3305   strcpy(ClassInfo.pc_clname, "IA");
3306   ClassInfo.pc_cid = -1;
3307   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3308   if (rslt < 0) return errno;
3309   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3310   iaLimits.schedPolicy = ClassInfo.pc_cid;
3311   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3312   iaLimits.minPrio = -iaLimits.maxPrio;
3313 
3314   strcpy(ClassInfo.pc_clname, "RT");
3315   ClassInfo.pc_cid = -1;
3316   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3317   if (rslt < 0) return errno;
3318   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3319   rtLimits.schedPolicy = ClassInfo.pc_cid;
3320   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3321   rtLimits.minPrio = 0;
3322 
3323   strcpy(ClassInfo.pc_clname, "FX");
3324   ClassInfo.pc_cid = -1;
3325   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3326   if (rslt < 0) return errno;
3327   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3328   fxLimits.schedPolicy = ClassInfo.pc_cid;
3329   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3330   fxLimits.minPrio = 0;
3331 
3332   // Query our "current" scheduling class.
3333   // This will normally be IA, TS or, rarely, FX or RT.
3334   memset(&ParmInfo, 0, sizeof(ParmInfo));
3335   ParmInfo.pc_cid = PC_CLNULL;
3336   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3337   if (rslt < 0) return errno;
3338   myClass = ParmInfo.pc_cid;
3339 
3340   // We now know our scheduling classId, get specific information
3341   // about the class.
3342   ClassInfo.pc_cid = myClass;
3343   ClassInfo.pc_clname[0] = 0;
3344   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3345   if (rslt < 0) return errno;
3346 
3347   if (ThreadPriorityVerbose) {
3348     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3349   }
3350 
3351   memset(&ParmInfo, 0, sizeof(pcparms_t));
3352   ParmInfo.pc_cid = PC_CLNULL;
3353   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3354   if (rslt < 0) return errno;
3355 
3356   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3357     myMin = rtLimits.minPrio;
3358     myMax = rtLimits.maxPrio;
3359   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3360     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3361     myMin = iaLimits.minPrio;
3362     myMax = iaLimits.maxPrio;
3363     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3364   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3365     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3366     myMin = tsLimits.minPrio;
3367     myMax = tsLimits.maxPrio;
3368     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3369   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3370     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3371     myMin = fxLimits.minPrio;
3372     myMax = fxLimits.maxPrio;
3373     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3374   } else {
3375     // No clue - punt
3376     if (ThreadPriorityVerbose)
3377       tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
3378     return EINVAL;      // no clue, punt
3379   }
3380 
3381   if (ThreadPriorityVerbose) {
3382     tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
3383   }
3384 
3385   priocntl_enable = true;  // Enable changing priorities
3386   return 0;
3387 }
3388 
3389 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3390 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3391 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3392 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3393 
3394 
3395 // scale_to_lwp_priority
3396 //
3397 // Convert from the libthread "thr_setprio" scale to our current
3398 // lwp scheduling class scale.
3399 //
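     //
     // For example (assuming a TS-style range of rMin == -60 and rMax == 60),
     // the midpoint x == 64 maps to v == ((64 * 120) / 128) + (-60) == 0.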
3400 static
3401 int     scale_to_lwp_priority (int rMin, int rMax, int x)
3402 {
3403   int v;
3404 
3405   if (x == 127) return rMax;            // avoid round-down
3406   v = ((x * (rMax - rMin)) / 128) + rMin;
3407   return v;
3408 }
3409 
3410 
3411 // set_lwp_class_and_priority
3412 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3413                                int newPrio, int new_class, bool scale) {
3414   int rslt;
3415   int Actual, Expected, prv;
3416   pcparms_t ParmInfo;                   // for GET-SET
3417 #ifdef ASSERT
3418   pcparms_t ReadBack;                   // for readback
3419 #endif
3420 
3421   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3422   // Query current values.
3423   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3424   // Cache "pcparms_t" in global ParmCache.
3425   // TODO: elide set-to-same-value
3426 
3427   // If something went wrong on init, don't change priorities.
3428   if ( !priocntl_enable ) {
3429     if (ThreadPriorityVerbose)
3430       tty->print_cr("Trying to set priority but init failed, ignoring");
3431     return EINVAL;
3432   }
3433 
3434   // If lwp hasn't started yet, just return
3435   // the _start routine will call us again.
3436   if ( lwpid <= 0 ) {
3437     if (ThreadPriorityVerbose) {
3438       tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
3439                      INTPTR_FORMAT " to %d, lwpid not set",
3440                      ThreadID, newPrio);
3441     }
3442     return 0;
3443   }
3444 
3445   if (ThreadPriorityVerbose) {
3446     tty->print_cr ("set_lwp_class_and_priority("
3447                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3448                    ThreadID, lwpid, newPrio);
3449   }
3450 
3451   memset(&ParmInfo, 0, sizeof(pcparms_t));
3452   ParmInfo.pc_cid = PC_CLNULL;
3453   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3454   if (rslt < 0) return errno;
3455 
3456   int cur_class = ParmInfo.pc_cid;
3457   ParmInfo.pc_cid = (id_t)new_class;
3458 
3459   if (new_class == rtLimits.schedPolicy) {
3460     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3461     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3462                                                        rtLimits.maxPrio, newPrio)
3463                                : newPrio;
3464     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3465     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3466     if (ThreadPriorityVerbose) {
3467       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3468     }
3469   } else if (new_class == iaLimits.schedPolicy) {
3470     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3471     int maxClamped     = MIN2(iaLimits.maxPrio,
3472                               cur_class == new_class
3473                                 ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3474     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3475                                                        maxClamped, newPrio)
3476                                : newPrio;
3477     iaInfo->ia_uprilim = cur_class == new_class
3478                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3479     iaInfo->ia_mode    = IA_NOCHANGE;
3480     if (ThreadPriorityVerbose) {
3481       tty->print_cr("IA: [%d...%d] %d->%d\n",
3482                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3483     }
3484   } else if (new_class == tsLimits.schedPolicy) {
3485     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3486     int maxClamped     = MIN2(tsLimits.maxPrio,
3487                               cur_class == new_class
3488                                 ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3489     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3490                                                        maxClamped, newPrio)
3491                                : newPrio;
3492     tsInfo->ts_uprilim = cur_class == new_class
3493                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3494     if (ThreadPriorityVerbose) {
3495       tty->print_cr("TS: [%d...%d] %d->%d\n",
3496                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3497     }
3498   } else if (new_class == fxLimits.schedPolicy) {
3499     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3500     int maxClamped     = MIN2(fxLimits.maxPrio,
3501                               cur_class == new_class
3502                                 ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3503     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3504                                                        maxClamped, newPrio)
3505                                : newPrio;
3506     fxInfo->fx_uprilim = cur_class == new_class
3507                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3508     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3509     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3510     if (ThreadPriorityVerbose) {
3511       tty->print_cr("FX: [%d...%d] %d->%d\n",
3512                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3513     }
3514   } else {
3515     if (ThreadPriorityVerbose) {
3516       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3517     }
3518     return EINVAL;    // no clue, punt
3519   }
3520 
3521   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3522   if (ThreadPriorityVerbose && rslt) {
3523     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3524   }
3525   if (rslt < 0) return errno;
3526 
3527 #ifdef ASSERT
3528   // Sanity check: read back what we just attempted to set.
3529   // In theory it could have changed in the interim ...
3530   //
3531   // The priocntl system call is tricky.
3532   // Sometimes it'll validate the priority value argument and
3533   // return EINVAL if unhappy.  At other times it fails silently.
3534   // Readbacks are prudent.
3535 
3536   if (!ReadBackValidate) return 0;
3537 
3538   memset(&ReadBack, 0, sizeof(pcparms_t));
3539   ReadBack.pc_cid = PC_CLNULL;
3540   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3541   assert(rslt >= 0, "priocntl failed");
3542   Actual = Expected = 0xBAD;
3543   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3544   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3545     Actual   = RTPRI(ReadBack)->rt_pri;
3546     Expected = RTPRI(ParmInfo)->rt_pri;
3547   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3548     Actual   = IAPRI(ReadBack)->ia_upri;
3549     Expected = IAPRI(ParmInfo)->ia_upri;
3550   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3551     Actual   = TSPRI(ReadBack)->ts_upri;
3552     Expected = TSPRI(ParmInfo)->ts_upri;
3553   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3554     Actual   = FXPRI(ReadBack)->fx_upri;
3555     Expected = FXPRI(ParmInfo)->fx_upri;
3556   } else {
3557     if (ThreadPriorityVerbose) {
3558       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3559                     ParmInfo.pc_cid);
3560     }
3561   }
3562 
3563   if (Actual != Expected) {
3564     if (ThreadPriorityVerbose) {
3565       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3566                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3567     }
3568   }
3569 #endif
3570 
3571   return 0;
3572 }
3573 
3574 // Solaris only gives access to 128 real priorities at a time,
3575 // so we expand Java's ten to fill this range.  This would be better
3576 // if we dynamically adjusted relative priorities.
3577 //
3578 // The ThreadPriorityPolicy option allows us to select 2 different
3579 // priority scales.
3580 //
3581 // ThreadPriorityPolicy=0
3582 // Since Solaris' default priority is MaximumPriority, we do not
3583 // set a priority lower than Max unless a priority lower than
3584 // NormPriority is requested.
3585 //
3586 // ThreadPriorityPolicy=1
3587 // This mode causes the priority table to get filled with
3588 // linear values.  NormPriority gets mapped to 50% of the
3589 // maximum priority and so on.  This will cause VM threads
3590 // to be treated unfairly relative to other Solaris processes
3591 // which do not explicitly alter their thread priorities.
3592 //
3593 
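     // Default mapping (ThreadPriorityPolicy=0).  When ThreadPriorityPolicy=1,
     // lwp_priocntl_init() above overwrites this table with the linear
     // prio_policy1[] mapping instead.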
3594 int os::java_to_os_priority[CriticalPriority + 1] = {
3595   -99999,         // 0 Entry should never be used
3596 
3597   0,              // 1 MinPriority
3598   32,             // 2
3599   64,             // 3
3600 
3601   96,             // 4
3602   127,            // 5 NormPriority
3603   127,            // 6
3604 
3605   127,            // 7
3606   127,            // 8
3607   127,            // 9 NearMaxPriority
3608 
3609   127,            // 10 MaxPriority
3610 
3611   -criticalPrio   // 11 CriticalPriority
3612 };
3613 
3614 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3615   OSThread* osthread = thread->osthread();
3616 
3617   // Save requested priority in case the thread hasn't been started
3618   osthread->set_native_priority(newpri);
3619 
3620   // Check for critical priority request
3621   bool fxcritical = false;
3622   if (newpri == -criticalPrio) {
3623     fxcritical = true;
3624     newpri = criticalPrio;
3625   }
3626 
3627   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3628   if (!UseThreadPriorities) return OS_OK;
3629 
3630   int status = 0;
3631 
3632   if (!fxcritical) {
3633     // Use thr_setprio only if we have a priority that thr_setprio understands
3634     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3635   }
3636 
3637   int lwp_status =
3638           set_lwp_class_and_priority(osthread->thread_id(),
3639           osthread->lwp_id(),
3640           newpri,
3641           fxcritical ? fxLimits.schedPolicy : myClass,
3642           !fxcritical);
3643   if (lwp_status != 0 && fxcritical) {
3644     // Try again, this time without changing the scheduling class
3645     newpri = java_MaxPriority_to_os_priority;
3646     lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3647             osthread->lwp_id(),
3648             newpri, myClass, false);
3649   }
3650   status |= lwp_status;
3651   return (status == 0) ? OS_OK : OS_ERR;
3652 }
3653 
3654 
3655 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
3656   int p;
3657   if ( !UseThreadPriorities ) {
3658     *priority_ptr = NormalPriority;
3659     return OS_OK;
3660   }
3661   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3662   if (status != 0) {
3663     return OS_ERR;
3664   }
3665   *priority_ptr = p;
3666   return OS_OK;
3667 }
3668 
3669 
3670 // Hint to the underlying OS that a task switch would not be good.
3671 // Void return because it's a hint and can fail.
3672 void os::hint_no_preempt() {
3673   schedctl_start(schedctl_init());
3674 }
3675 
3676 static void resume_clear_context(OSThread *osthread) {
3677   osthread->set_ucontext(NULL);
3678 }
3679 
3680 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3681   osthread->set_ucontext(context);
3682 }
3683 
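     // Semaphore used for the suspend/resume handshake between do_suspend(),
     // do_resume() and SR_handler() below.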
3684 static Semaphore sr_semaphore;
3685 
3686 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
3687   // Save and restore errno to avoid confusing native code with EINTR
3688   // after sigsuspend.
3689   int old_errno = errno;
3690 
3691   OSThread* osthread = thread->osthread();
3692   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3693 
3694   os::SuspendResume::State current = osthread->sr.state();
3695   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3696     suspend_save_context(osthread, uc);
3697 
3698     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3699     os::SuspendResume::State state = osthread->sr.suspended();
3700     if (state == os::SuspendResume::SR_SUSPENDED) {
3701       sigset_t suspend_set;  // signals for sigsuspend()
3702 
3703       // get current set of blocked signals and unblock resume signal
3704       thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
3705       sigdelset(&suspend_set, os::Solaris::SIGasync());
3706 
3707       sr_semaphore.signal();
3708       // wait here until we are resumed
3709       while (1) {
3710         sigsuspend(&suspend_set);
3711 
3712         os::SuspendResume::State result = osthread->sr.running();
3713         if (result == os::SuspendResume::SR_RUNNING) {
3714           sr_semaphore.signal();
3715           break;
3716         }
3717       }
3718 
3719     } else if (state == os::SuspendResume::SR_RUNNING) {
3720       // request was cancelled, continue
3721     } else {
3722       ShouldNotReachHere();
3723     }
3724 
3725     resume_clear_context(osthread);
3726   } else if (current == os::SuspendResume::SR_RUNNING) {
3727     // request was cancelled, continue
3728   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3729     // ignore
3730   } else {
3731     // ignore
3732   }
3733 
3734   errno = old_errno;
3735 }
3736 
3737 void os::print_statistics() {
3738 }
3739 
3740 int os::message_box(const char* title, const char* message) {
3741   int i;
3742   fdStream err(defaultStream::error_fd());
3743   for (i = 0; i < 78; i++) err.print_raw("=");
3744   err.cr();
3745   err.print_raw_cr(title);
3746   for (i = 0; i < 78; i++) err.print_raw("-");
3747   err.cr();
3748   err.print_raw_cr(message);
3749   for (i = 0; i < 78; i++) err.print_raw("=");
3750   err.cr();
3751 
3752   char buf[16];
3753   // Prevent process from exiting upon "read error" without consuming all CPU
3754   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3755 
3756   return buf[0] == 'y' || buf[0] == 'Y';
3757 }
3758 
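     // Deliver the async signal to the target thread; SR_handler() runs in that
     // thread and performs the actual suspend/resume state transition.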
3759 static int sr_notify(OSThread* osthread) {
3760   int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
3761   assert_status(status == 0, status, "thr_kill");
3762   return status;
3763 }
3764 
3765 // "Randomly" selected value for how long we want to spin
3766 // before bailing out on suspending a thread, and also for how often
3767 // we send a signal to a thread we want to resume.
3768 static const int RANDOMLY_LARGE_INTEGER = 1000000;
3769 static const int RANDOMLY_LARGE_INTEGER2 = 100;
3770 
3771 static bool do_suspend(OSThread* osthread) {
3772   assert(osthread->sr.is_running(), "thread should be running");
3773   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
3774 
3775   // mark as suspended and send signal
3776   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3777     // failed to switch, state wasn't running?
3778     ShouldNotReachHere();
3779     return false;
3780   }
3781 
3782   if (sr_notify(osthread) != 0) {
3783     ShouldNotReachHere();
3784   }
3785 
3786   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3787   while (true) {
3788     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
3789       break;
3790     } else {
3791       // timeout
3792       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3793       if (cancelled == os::SuspendResume::SR_RUNNING) {
3794         return false;
3795       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3796         // make sure that we consume the signal on the semaphore as well
3797         sr_semaphore.wait();
3798         break;
3799       } else {
3800         ShouldNotReachHere();
3801         return false;
3802       }
3803     }
3804   }
3805 
3806   guarantee(osthread->sr.is_suspended(), "Must be suspended");
3807   return true;
3808 }
3809 
3810 static void do_resume(OSThread* osthread) {
3811   assert(osthread->sr.is_suspended(), "thread should be suspended");
3812   assert(!sr_semaphore.trywait(), "invalid semaphore state");
3813 
3814   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3815     // failed to switch to WAKEUP_REQUEST
3816     ShouldNotReachHere();
3817     return;
3818   }
3819 
3820   while (true) {
3821     if (sr_notify(osthread) == 0) {
3822       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
3823         if (osthread->sr.is_running()) {
3824           return;
3825         }
3826       }
3827     } else {
3828       ShouldNotReachHere();
3829     }
3830   }
3831 
3832   guarantee(osthread->sr.is_running(), "Must be running!");
3833 }
3834 
3835 void os::SuspendedThreadTask::internal_do_task() {
3836   if (do_suspend(_thread->osthread())) {
3837     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3838     do_task(context);
3839     do_resume(_thread->osthread());
3840   }
3841 }
3842 
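     // A SuspendedThreadTask that samples the program counter of the suspended
     // target thread.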
3843 class PcFetcher : public os::SuspendedThreadTask {
3844 public:
3845   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3846   ExtendedPC result();
3847 protected:
3848   void do_task(const os::SuspendedThreadTaskContext& context);
3849 private:
3850   ExtendedPC _epc;
3851 };
3852 
3853 ExtendedPC PcFetcher::result() {
3854   guarantee(is_done(), "task is not done yet.");
3855   return _epc;
3856 }
3857 
3858 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3859   Thread* thread = context.thread();
3860   OSThread* osthread = thread->osthread();
3861   if (osthread->ucontext() != NULL) {
3862     _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
3863   } else {
3864     // NULL context is unexpected, double-check this is the VMThread
3865     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3866   }
3867 }
3868 
3869 // Returns only a hint of the thread's pc (the target thread is briefly
3870 // suspended while the sample is taken).  Used for profiling only!
3871 ExtendedPC os::get_thread_pc(Thread* thread) {
3872   // Make sure that it is called by the watcher and the Threads lock is owned.
3873   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
3874   // For now, is only used to profile the VM Thread
3875   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3876   PcFetcher fetcher(thread);
3877   fetcher.run();
3878   return fetcher.result();
3879 }
3880 
3881 
3882 // This does not do anything on Solaris. This is basically a hook for being
3883 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
3884 void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
3885   f(value, method, args, thread);
3886 }
3887 
3888 // This routine may be used by user applications as a "hook" to catch signals.
3889 // The user-defined signal handler must pass unrecognized signals to this
3890 // routine, and if it returns true (non-zero), then the signal handler must
3891 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
3892 // routine will never return false (zero), but instead will execute a VM panic
3893 // routine to kill the process.
3894 //
3895 // If this routine returns false, it is OK to call it again.  This allows
3896 // the user-defined signal handler to perform checks either before or after
3897 // the VM performs its own checks.  Naturally, the user code would be making
3898 // a serious error if it tried to handle an exception (such as a null check
3899 // or breakpoint) that the VM was generating for its own correct operation.
3900 //
3901 // This routine may recognize any of the following kinds of signals:
3902 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
3903 // os::Solaris::SIGasync
3904 // It should be consulted by handlers for any of those signals.
3905 // It explicitly does not recognize os::Solaris::SIGinterrupt
3906 //
3907 // The caller of this routine must pass in the three arguments supplied
3908 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
3909 // field of the structure passed to sigaction().  This routine assumes that
3910 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3911 //
3912 // Note that the VM will print warnings if it detects conflicting signal
3913 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3914 //
3915 extern "C" JNIEXPORT int
3916 JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
3917                           int abort_if_unrecognized);
3918 
3919 
3920 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
3921   int orig_errno = errno;  // Preserve errno value over signal handler.
3922   JVM_handle_solaris_signal(sig, info, ucVoid, true);
3923   errno = orig_errno;
3924 }
3925 
3926 /* Do not delete - even if the guarantee is ever removed, a signal handler
3927    (even an empty one) is needed to provoke threads blocked on I/O to return
3928    with EINTR.
3929    Note: this explicitly does NOT call JVM_handle_solaris_signal and does NOT
3930    participate in signal chaining, because EINTR requires SA_RESTART NOT to be set. */
3931 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
3932    if (UseSignalChaining) {
3933       struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
3934       if (actp && actp->sa_handler) {
3935         vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
3936       }
3937    }
3938 }
3939 
3940 // This boolean allows users to forward their own non-matching signals
3941 // to JVM_handle_solaris_signal, harmlessly.
3942 bool os::Solaris::signal_handlers_are_installed = false;
3943 
3944 // For signal-chaining
3945 bool os::Solaris::libjsig_is_loaded = false;
3946 typedef struct sigaction *(*get_signal_t)(int);
3947 get_signal_t os::Solaris::get_signal_action = NULL;
3948 
3949 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
3950   struct sigaction *actp = NULL;
3951 
3952   if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
3953     // Retrieve the old signal handler from libjsig
3954     actp = (*get_signal_action)(sig);
3955   }
3956   if (actp == NULL) {
3957     // Retrieve the preinstalled signal handler from jvm
3958     actp = get_preinstalled_handler(sig);
3959   }
3960 
3961   return actp;
3962 }
3963 
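     // Invoke the application's previously installed handler for sig, honoring
     // its SA_SIGINFO, SA_NODEFER and SA_RESETHAND flags and its signal mask.
     // Returns true unless the old handler was SIG_DFL.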
3964 static bool call_chained_handler(struct sigaction *actp, int sig,
3965                                  siginfo_t *siginfo, void *context) {
3966   // Call the old signal handler
3967   if (actp->sa_handler == SIG_DFL) {
3968     // It's more reasonable to let jvm treat it as an unexpected exception
3969     // instead of taking the default action.
3970     return false;
3971   } else if (actp->sa_handler != SIG_IGN) {
3972     if ((actp->sa_flags & SA_NODEFER) == 0) {
3973       // automatically block the signal
3974       sigaddset(&(actp->sa_mask), sig);
3975     }
3976 
3977     sa_handler_t hand;
3978     sa_sigaction_t sa;
3979     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3980     // retrieve the chained handler
3981     if (siginfo_flag_set) {
3982       sa = actp->sa_sigaction;
3983     } else {
3984       hand = actp->sa_handler;
3985     }
3986 
3987     if ((actp->sa_flags & SA_RESETHAND) != 0) {
3988       actp->sa_handler = SIG_DFL;
3989     }
3990 
3991     // try to honor the signal mask
3992     sigset_t oset;
3993     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3994 
3995     // call into the chained handler
3996     if (siginfo_flag_set) {
3997       (*sa)(sig, siginfo, context);
3998     } else {
3999       (*hand)(sig);
4000     }
4001 
4002     // restore the signal mask
4003     thr_sigsetmask(SIG_SETMASK, &oset, 0);
4004   }
4005   // Tell jvm's signal handler the signal is taken care of.
4006   return true;
4007 }
4008 
4009 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4010   bool chained = false;
4011   // signal-chaining
4012   if (UseSignalChaining) {
4013     struct sigaction *actp = get_chained_signal_action(sig);
4014     if (actp != NULL) {
4015       chained = call_chained_handler(actp, sig, siginfo, context);
4016     }
4017   }
4018   return chained;
4019 }
4020 
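     // chainedsigactions[] and preinstalled_sigs[] record handlers that were
     // installed before the VM set its own, so chained_handler() above can
     // forward signals to them.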
4021 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4022   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4023   if (preinstalled_sigs[sig] != 0) {
4024     return &chainedsigactions[sig];
4025   }
4026   return NULL;
4027 }
4028 
4029 void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
4030 
4031   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4032   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4033   chainedsigactions[sig] = oldAct;
4034   preinstalled_sigs[sig] = 1;
4035 }
4036 
4037 void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
4038   // Check for overwrite.
4039   struct sigaction oldAct;
4040   sigaction(sig, (struct sigaction*)NULL, &oldAct);
4041   void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
4042                                       : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
4043   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4044       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4045       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4046     if (AllowUserSignalHandlers || !set_installed) {
4047       // Do not overwrite; user takes responsibility to forward to us.
4048       return;
4049     } else if (UseSignalChaining) {
4050       if (oktochain) {
4051         // save the old handler in jvm
4052         save_preinstalled_handler(sig, oldAct);
4053       } else {
4054         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4055       }
4056       // libjsig also interposes the sigaction() call below and saves the
4057       // old sigaction on its own.
4058     } else {
4059       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4060                     "%#lx for signal %d.", (long)oldhand, sig));
4061     }
4062   }
4063 
4064   struct sigaction sigAct;
4065   sigfillset(&(sigAct.sa_mask));
4066   sigAct.sa_handler = SIG_DFL;
4067 
4068   sigAct.sa_sigaction = signalHandler;
4069   // Handle SIGSEGV on alternate signal stack if
4070   // not using stack banging
4071   if (!UseStackBanging && sig == SIGSEGV) {
4072     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4073   // Interruptible i/o requires SA_RESTART cleared so EINTR
4074   // is returned instead of restarting system calls
4075   } else if (sig == os::Solaris::SIGinterrupt()) {
4076     sigemptyset(&sigAct.sa_mask);
4077     sigAct.sa_handler = NULL;
4078     sigAct.sa_flags = SA_SIGINFO;
4079     sigAct.sa_sigaction = sigINTRHandler;
4080   } else {
4081     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4082   }
4083   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4084 
4085   sigaction(sig, &sigAct, &oldAct);
4086 
4087   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4088                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4089   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4090 }
4091 
4092 
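     // Check a signal's current handler unless a mismatch has already been
     // reported for it; check_signal_handler() below adds a signal to
     // check_signal_done once it has been flagged.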
4093 #define DO_SIGNAL_CHECK(sig) \
4094   if (!sigismember(&check_signal_done, sig)) \
4095     os::Solaris::check_signal_handler(sig)
4096 
4097 // This method is a periodic task to check for misbehaving JNI applications
4098 // under CheckJNI; we can add any other periodic checks here.
4099 
4100 void os::run_periodic_checks() {
4101   // A big source of grief is hijacking virtual address 0x0 on Solaris,
4102   // thereby preventing NULL checks.
4103   if(!check_addr0_done) check_addr0_done = check_addr0(tty);
4104 
4105   if (check_signals == false) return;
4106 
4107   // If the SEGV or BUS handlers are overridden, generation of hs*.log in
4108   // the event of a crash could be prevented.  Debugging such a case can be
4109   // very challenging, so for good measure we absolutely check the
4110   // following:
4111   DO_SIGNAL_CHECK(SIGSEGV);
4112   DO_SIGNAL_CHECK(SIGILL);
4113   DO_SIGNAL_CHECK(SIGFPE);
4114   DO_SIGNAL_CHECK(SIGBUS);
4115   DO_SIGNAL_CHECK(SIGPIPE);
4116   DO_SIGNAL_CHECK(SIGXFSZ);
4117 
4118   // ReduceSignalUsage allows the user to override these handlers
4119   // see comments at the very top and jvm_solaris.h
4120   if (!ReduceSignalUsage) {
4121     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4122     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4123     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4124     DO_SIGNAL_CHECK(BREAK_SIGNAL);
4125   }
4126 
4127   // See comments above for using JVM1/JVM2 and UseAltSigs
4128   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4129   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4130 
4131 }
4132 
4133 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4134 
4135 static os_sigaction_t os_sigaction = NULL;
4136 
4137 void os::Solaris::check_signal_handler(int sig) {
4138   char buf[O_BUFLEN];
4139   address jvmHandler = NULL;
4140 
4141   struct sigaction act;
4142   if (os_sigaction == NULL) {
4143     // only trust the default sigaction, in case it has been interposed
4144     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4145     if (os_sigaction == NULL) return;
4146   }
4147 
4148   os_sigaction(sig, (struct sigaction*)NULL, &act);
4149 
4150   address thisHandler = (act.sa_flags & SA_SIGINFO)
4151     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4152     : CAST_FROM_FN_PTR(address, act.sa_handler) ;
4153 
4154 
4155   switch(sig) {
4156     case SIGSEGV:
4157     case SIGBUS:
4158     case SIGFPE:
4159     case SIGPIPE:
4160     case SIGXFSZ:
4161     case SIGILL:
4162       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4163       break;
4164 
4165     case SHUTDOWN1_SIGNAL:
4166     case SHUTDOWN2_SIGNAL:
4167     case SHUTDOWN3_SIGNAL:
4168     case BREAK_SIGNAL:
4169       jvmHandler = (address)user_handler();
4170       break;
4171 
4172     default:
4173       int intrsig = os::Solaris::SIGinterrupt();
4174       int asynsig = os::Solaris::SIGasync();
4175 
4176       if (sig == intrsig) {
4177         jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4178       } else if (sig == asynsig) {
4179         jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4180       } else {
4181         return;
4182       }
4183       break;
4184   }
4185 
4186 
4187   if (thisHandler != jvmHandler) {
4188     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4189     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4190     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4191     // No need to check this sig any longer
4192     sigaddset(&check_signal_done, sig);
4193     // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned to SIG_IGN
4194     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
4195       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
4196                     exception_name(sig, buf, O_BUFLEN));
4197     }
4198   } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4199     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4200     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4201     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
4202     // No need to check this sig any longer
4203     sigaddset(&check_signal_done, sig);
4204   }
4205 
4206   // Print all the signal handler state
4207   if (sigismember(&check_signal_done, sig)) {
4208     print_signal_handlers(tty, buf, O_BUFLEN);
4209   }
4210 
4211 }
4212 
4213 void os::Solaris::install_signal_handlers() {
4214   bool libjsigdone = false;
4215   signal_handlers_are_installed = true;
4216 
4217   // signal-chaining
4218   typedef void (*signal_setting_t)();
4219   signal_setting_t begin_signal_setting = NULL;
4220   signal_setting_t end_signal_setting = NULL;
4221   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4222                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4223   if (begin_signal_setting != NULL) {
4224     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4225                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4226     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4227                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4228     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4229                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4230     libjsig_is_loaded = true;
4231     if (os::Solaris::get_libjsig_version != NULL) {
4232       libjsigversion =  (*os::Solaris::get_libjsig_version)();
4233     }
4234     assert(UseSignalChaining, "should enable signal-chaining");
4235   }
4236   if (libjsig_is_loaded) {
4237     // Tell libjsig the jvm is setting signal handlers
4238     (*begin_signal_setting)();
4239   }
4240 
4241   set_signal_handler(SIGSEGV, true, true);
4242   set_signal_handler(SIGPIPE, true, true);
4243   set_signal_handler(SIGXFSZ, true, true);
4244   set_signal_handler(SIGBUS, true, true);
4245   set_signal_handler(SIGILL, true, true);
4246   set_signal_handler(SIGFPE, true, true);
4247 
4248 
4249   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4250 
4251     // Pre-1.4.1 libjsig limited signal chaining to signals <= 32, so it
4252     // cannot register overridable signals which might be > 32.
4253     if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
4254       // Tell libjsig the jvm has finished setting signal handlers
4255       (*end_signal_setting)();
4256       libjsigdone = true;
4257     }
4258   }
4259 
4260   // Never ok to chain our SIGinterrupt
4261   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4262   set_signal_handler(os::Solaris::SIGasync(), true, true);
4263 
4264   if (libjsig_is_loaded && !libjsigdone) {
4265     // Tell libjsig the jvm has finished setting signal handlers
4266     (*end_signal_setting)();
4267   }
4268 
4269   // We don't activate the signal checker if libjsig is in place; we trust
4270   // ourselves, and if a user signal handler is installed all bets are off.
4271   // Log that signal checking is off only if -verbose:jni is specified.
4272   if (CheckJNICalls) {
4273     if (libjsig_is_loaded) {
4274       if (PrintJNIResolving) {
4275         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4276       }
4277       check_signals = false;
4278     }
4279     if (AllowUserSignalHandlers) {
4280       if (PrintJNIResolving) {
4281         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4282       }
4283       check_signals = false;
4284     }
4285   }
4286 }
4287 
4288 
4289 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
4290 
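     // Printable names for Solaris signal numbers; index 0 is unused.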
4291 const char * signames[] = {
4292   "SIG0",
4293   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4294   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4295   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4296   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4297   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4298   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4299   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4300   "SIGCANCEL", "SIGLOST"
4301 };
4302 
4303 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4304   if (0 < exception_code && exception_code <= SIGRTMAX) {
4305     // signal
4306     if ((size_t)exception_code < sizeof(signames)/sizeof(const char*)) {
4307        jio_snprintf(buf, size, "%s", signames[exception_code]);
4308     } else {
4309        jio_snprintf(buf, size, "SIG%d", exception_code);
4310     }
4311     return buf;
4312   } else {
4313     return NULL;
4314   }
4315 }
4316 
4317 // (Static) wrapper for getisax(2) call.
4318 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4319 
4320 // (Static) wrappers for the liblgrp API
4321 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4322 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4323 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4324 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4325 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4326 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4327 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4328 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4329 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4330 
4331 // (Static) wrapper for meminfo() call.
4332 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4333 
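     // Look up a symbol without treating a missing symbol as fatal;
     // resolve_symbol() below calls fatal() if the symbol cannot be found.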
4334 static address resolve_symbol_lazy(const char* name) {
4335   address addr = (address) dlsym(RTLD_DEFAULT, name);
4336   if(addr == NULL) {
4337     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4338     addr = (address) dlsym(RTLD_NEXT, name);
4339   }
4340   return addr;
4341 }
4342 
4343 static address resolve_symbol(const char* name) {
4344   address addr = resolve_symbol_lazy(name);
4345   if(addr == NULL) {
4346     fatal(dlerror());
4347   }
4348   return addr;
4349 }
4350 
4351 void os::Solaris::libthread_init() {
4352   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4353 
4354   lwp_priocntl_init();
4355 
4356   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4357   if(func == NULL) {
4358     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
4359     // Guarantee that this VM is running on a new enough OS (5.6 or
4360     // later) that it will have a new enough libthread.so.
4361     guarantee(func != NULL, "libthread.so is too old.");
4362   }
4363 
4364   int size;
4365   void (*handler_info_func)(address *, int *);
4366   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4367   handler_info_func(&handler_start, &size);
4368   handler_end = handler_start + size;
4369 }
4370 
4371 
4372 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4373 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4374 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4375 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4376 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4377 int os::Solaris::_mutex_scope = USYNC_THREAD;
4378 
4379 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4380 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4381 int_fnP_cond_tP os::Solaris::_cond_signal;
4382 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4383 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4384 int_fnP_cond_tP os::Solaris::_cond_destroy;
4385 int os::Solaris::_cond_scope = USYNC_THREAD;
4386 
4387 void os::Solaris::synchronization_init() {
4388   if(UseLWPSynchronization) {
4389     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4390     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4391     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4392     os::Solaris::set_mutex_init(lwp_mutex_init);
4393     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4394     os::Solaris::set_mutex_scope(USYNC_THREAD);
4395 
4396     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4397     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4398     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4399     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4400     os::Solaris::set_cond_init(lwp_cond_init);
4401     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4402     os::Solaris::set_cond_scope(USYNC_THREAD);
4403   }
4404   else {
4405     os::Solaris::set_mutex_scope(USYNC_THREAD);
4406     os::Solaris::set_cond_scope(USYNC_THREAD);
4407 
4408     if(UsePthreads) {
4409       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4410       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4411       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4412       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4413       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4414 
4415       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4416       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4417       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4418       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4419       os::Solaris::set_cond_init(pthread_cond_default_init);
4420       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4421     }
4422     else {
4423       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4424       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4425       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4426       os::Solaris::set_mutex_init(::mutex_init);
4427       os::Solaris::set_mutex_destroy(::mutex_destroy);
4428 
4429       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4430       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4431       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4432       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4433       os::Solaris::set_cond_init(::cond_init);
4434       os::Solaris::set_cond_destroy(::cond_destroy);
4435     }
4436   }
4437 }
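// Once synchronization_init() has run, the rest of the VM goes through the
// os::Solaris::mutex_*/cond_* wrappers above, so the choice between the
// _lwp_*, pthread_* and default libthread primitives is made exactly once.
// Minimal usage sketch (mirrors os::PlatformEvent::park() further below;
// _mutex and _cond stand for any mutex_t/cond_t pair initialized through the
// wrappers):
//
//   int status = os::Solaris::mutex_lock(_mutex);
//   assert_status(status == 0, status, "mutex_lock");
//   status = os::Solaris::cond_wait(_cond, _mutex);   // or cond_timedwait()
//   status = os::Solaris::mutex_unlock(_mutex);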
4438 
4439 bool os::Solaris::liblgrp_init() {
4440   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4441   if (handle != NULL) {
4442     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4443     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4444     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4445     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4446     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4447     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4448     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4449     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4450                                        dlsym(handle, "lgrp_cookie_stale")));
4451 
4452     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4453     set_lgrp_cookie(c);
4454     return true;
4455   }
4456   return false;
4457 }
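// Note: liblgrp.so.1 is loaded lazily here; if the dlopen() fails, init_2()
// below simply turns UseNUMA off instead of treating it as a fatal error.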
4458 
4459 void os::Solaris::misc_sym_init() {
4460   address func;
4461 
4462   // getisax
4463   func = resolve_symbol_lazy("getisax");
4464   if (func != NULL) {
4465     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4466   }
4467 
4468   // meminfo
4469   func = resolve_symbol_lazy("meminfo");
4470   if (func != NULL) {
4471     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4472   }
4473 }
4474 
4475 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4476   assert(_getisax != NULL, "_getisax not set");
4477   return _getisax(array, n);
4478 }
4479 
4480 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4481 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4482 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4483 
4484 void init_pset_getloadavg_ptr(void) {
4485   pset_getloadavg_ptr =
4486     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4487   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4488     warning("pset_getloadavg function not found");
4489   }
4490 }
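// os::loadavg() (further below) prefers this per-processor-set primitive and
// falls back to the system-wide getloadavg(3C) when the symbol is absent
// (i.e. on releases older than Solaris 9).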
4491 
4492 int os::Solaris::_dev_zero_fd = -1;
4493 
4494 // this is called _before_ the global arguments have been parsed
4495 void os::init(void) {
4496   _initial_pid = getpid();
4497 
4498   max_hrtime = first_hrtime = gethrtime();
4499 
4500   init_random(1234567);
4501 
4502   page_size = sysconf(_SC_PAGESIZE);
4503   if (page_size == -1)
4504     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4505                   strerror(errno)));
4506   init_page_sizes((size_t) page_size);
4507 
4508   Solaris::initialize_system_info();
4509 
4510   // Initialize misc. symbols as soon as possible, so we can use them
4511   // if we need them.
4512   Solaris::misc_sym_init();
4513 
4514   int fd = ::open("/dev/zero", O_RDWR);
4515   if (fd < 0) {
4516     fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4517   } else {
4518     Solaris::set_dev_zero_fd(fd);
4519 
4520     // Close on exec, child won't inherit.
4521     fcntl(fd, F_SETFD, FD_CLOEXEC);
4522   }
4523 
4524   clock_tics_per_sec = CLK_TCK;
4525 
4526   // check if dladdr1() exists; dladdr1 can provide more information than
4527   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4528   // and is available on linker patches for 5.7 and 5.8.
4529   // libdl.so must have been loaded, this call is just an entry lookup
4530   void * hdl = dlopen("libdl.so", RTLD_NOW);
4531   if (hdl)
4532     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4533 
4534   // (Solaris only) this switches to calls that actually do locking.
4535   ThreadCritical::initialize();
4536 
4537   main_thread = thr_self();
4538 
4539   // Constant minimum stack size allowed. It must be at least
4540   // the minimum of what the OS supports (thr_min_stack()), and
4541   // enough to allow the thread to get to user bytecode execution.
4542   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4543   // If the pagesize of the VM is greater than 8K determine the appropriate
4544   // number of initial guard pages.  The user can change this with the
4545   // command line arguments, if needed.
4546   if (vm_page_size() > 8*K) {
4547     StackYellowPages = 1;
4548     StackRedPages = 1;
4549     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4550   }
4551 }
4552 
4553 // To install functions for atexit system call
4554 extern "C" {
4555   static void perfMemory_exit_helper() {
4556     perfMemory_exit();
4557   }
4558 }
4559 
4560 // this is called _after_ the global arguments have been parsed
4561 jint os::init_2(void) {
4562   // try to enable extended file IO ASAP, see 6431278
4563   os::Solaris::try_enable_extended_io();
4564 
4565   // Allocate a single page and mark it as readable for safepoint polling.  Also
4566   // use this first mmap call to check support for MAP_ALIGN.
4567   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4568                                                       page_size,
4569                                                       MAP_PRIVATE | MAP_ALIGN,
4570                                                       PROT_READ);
4571   if (polling_page == NULL) {
4572     has_map_align = false;
4573     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
4574                                                 PROT_READ);
4575   }
4576 
4577   os::set_polling_page(polling_page);
4578 
4579 #ifndef PRODUCT
4580   if( Verbose && PrintMiscellaneous )
4581     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
4582 #endif
4583 
4584   if (!UseMembar) {
4585     address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
4586     guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
4587     os::set_memory_serialize_page( mem_serialize_page );
4588 
4589 #ifndef PRODUCT
4590     if(Verbose && PrintMiscellaneous)
4591       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
4592 #endif
4593   }
4594 
4595   // Check minimum allowable stack size for thread creation and to initialize
4596   // the java system classes, including StackOverflowError - depends on page
4597   // size.  Add a page for compiler2 recursion in main thread.
4598   // Add in 2*BytesPerWord times page size to account for VM stack during
4599   // class initialization depending on 32 or 64 bit VM.
4600   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
4601             (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
4602                     2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
4603 
4604   size_t threadStackSizeInBytes = ThreadStackSize * K;
4605   if (threadStackSizeInBytes != 0 &&
4606     threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small. Specify at least %dk",
4608                   os::Solaris::min_stack_allowed/K);
4609     return JNI_ERR;
4610   }
4611 
  // On systems with a 64kb page size the usable default stack size is
  // quite a bit less.  Increase the stack for 64kb (or any larger than
  // 8kb) pages; this increases virtual memory fragmentation (since we're
  // not creating the stack on a power of 2 boundary).  The real fix for
  // this should be to fix the guard page mechanism.
4618 
4619   if (vm_page_size() > 8*K) {
4620       threadStackSizeInBytes = (threadStackSizeInBytes != 0)
4621          ? threadStackSizeInBytes +
4622            ((StackYellowPages + StackRedPages) * vm_page_size())
4623          : 0;
4624       ThreadStackSize = threadStackSizeInBytes/K;
4625   }
4626 
4627   // Make the stack size a multiple of the page size so that
4628   // the yellow/red zones can be guarded.
4629   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
4630         vm_page_size()));
4631 
4632   Solaris::libthread_init();
4633 
4634   if (UseNUMA) {
4635     if (!Solaris::liblgrp_init()) {
4636       UseNUMA = false;
4637     } else {
4638       size_t lgrp_limit = os::numa_get_groups_num();
4639       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
4640       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
4641       FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
4642       if (lgrp_num < 2) {
4643         // There's only one locality group, disable NUMA.
4644         UseNUMA = false;
4645       }
4646     }
4647     if (!UseNUMA && ForceNUMA) {
4648       UseNUMA = true;
4649     }
4650   }
4651 
4652   Solaris::signal_sets_init();
4653   Solaris::init_signal_mem();
4654   Solaris::install_signal_handlers();
4655 
4656   if (libjsigversion < JSIG_VERSION_1_4_1) {
4657     Maxlibjsigsigs = OLDMAXSIGNUM;
4658   }
4659 
4660   // initialize synchronization primitives to use either thread or
4661   // lwp synchronization (controlled by UseLWPSynchronization)
4662   Solaris::synchronization_init();
4663 
4664   if (MaxFDLimit) {
4665     // set the number of file descriptors to max. print out error
4666     // if getrlimit/setrlimit fails but continue regardless.
4667     struct rlimit nbr_files;
4668     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4669     if (status != 0) {
4670       if (PrintMiscellaneous && (Verbose || WizardMode))
4671         perror("os::init_2 getrlimit failed");
4672     } else {
4673       nbr_files.rlim_cur = nbr_files.rlim_max;
4674       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4675       if (status != 0) {
4676         if (PrintMiscellaneous && (Verbose || WizardMode))
4677           perror("os::init_2 setrlimit failed");
4678       }
4679     }
4680   }
4681 
  // Calculate the theoretical max. number of threads to guard against
  // artificial out-of-memory situations, where all available address
  // space has been reserved by thread stacks. Default stack size is 1Mb.
4685   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
4686     JavaThread::stack_size_at_create() : (1*K*K);
4687   assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris limits user programs to a maximum of 4Gb of address space. Calculate
  // the thread limit at which we should start doing virtual memory banging;
  // currently this is when the threads will have used all but 200Mb of space.
4691   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
4692   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
4693 
4694   // at-exit methods are called in the reverse order of their registration.
4695   // In Solaris 7 and earlier, atexit functions are called on return from
4696   // main or as a result of a call to exit(3C). There can be only 32 of
4697   // these functions registered and atexit() does not set errno. In Solaris
4698   // 8 and later, there is no limit to the number of functions registered
4699   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
4700   // functions are called upon dlclose(3DL) in addition to return from main
4701   // and exit(3C).
4702 
4703   if (PerfAllowAtExitRegistration) {
4704     // only register atexit functions if PerfAllowAtExitRegistration is set.
4705     // atexit functions can be delayed until process exit time, which
4706     // can be problematic for embedded VM situations. Embedded VMs should
4707     // call DestroyJavaVM() to assure that VM resources are released.
4708 
4709     // note: perfMemory_exit_helper atexit function may be removed in
4710     // the future if the appropriate cleanup code can be added to the
4711     // VM_Exit VMOperation's doit method.
4712     if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4714     }
4715   }
4716 
4717   // Init pset_loadavg function pointer
4718   init_pset_getloadavg_ptr();
4719 
4720   return JNI_OK;
4721 }
4722 
4723 void os::init_3(void) {
4724   return;
4725 }
4726 
4727 // Mark the polling page as unreadable
4728 void os::make_polling_page_unreadable(void) {
4729   if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
4730     fatal("Could not disable polling page");
4731 };
4732 
4733 // Mark the polling page as readable
4734 void os::make_polling_page_readable(void) {
4735   if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
4736     fatal("Could not enable polling page");
4737 };
4738 
4739 // OS interface.
4740 
4741 bool os::check_heap(bool force) { return true; }
4742 
4743 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
4744 static vsnprintf_t sol_vsnprintf = NULL;
4745 
4746 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
4747   if (!sol_vsnprintf) {
    // search for the named symbol in the objects that were loaded after libjvm
4749     void* where = RTLD_NEXT;
4750     if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
4751         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
4752     if (!sol_vsnprintf){
      // search for the named symbol in the objects that were loaded before libjvm
4754       where = RTLD_DEFAULT;
4755       if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
4756         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
4757       assert(sol_vsnprintf != NULL, "vsnprintf not found");
4758     }
4759   }
4760   return (*sol_vsnprintf)(buf, count, fmt, argptr);
4761 }
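// Illustrative sketch of a varargs caller (the name example_vprintf_user is
// hypothetical, not part of this file); the va_list plumbing below is all a
// caller needs:
//
//   static int example_vprintf_user(char* buf, size_t len, const char* fmt, ...) {
//     va_list ap;
//     va_start(ap, fmt);
//     int n = local_vsnprintf(buf, len, fmt, ap);
//     va_end(ap);
//     return n;
//   }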
4762 
4763 
4764 // Is a (classpath) directory empty?
4765 bool os::dir_is_empty(const char* path) {
4766   DIR *dir = NULL;
4767   struct dirent *ptr;
4768 
4769   dir = opendir(path);
4770   if (dir == NULL) return true;
4771 
4772   /* Scan the directory */
4773   bool result = true;
4774   char buf[sizeof(struct dirent) + MAX_PATH];
4775   struct dirent *dbuf = (struct dirent *) buf;
4776   while (result && (ptr = readdir(dir, dbuf)) != NULL) {
4777     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4778       result = false;
4779     }
4780   }
4781   closedir(dir);
4782   return result;
4783 }
4784 
4785 // This code originates from JDK's sysOpen and open64_w
4786 // from src/solaris/hpi/src/system_md.c
4787 
4788 #ifndef O_DELETE
4789 #define O_DELETE 0x10000
4790 #endif
4791 
4792 // Open a file. Unlink the file immediately after open returns
4793 // if the specified oflag has the O_DELETE flag set.
4794 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
4795 
4796 int os::open(const char *path, int oflag, int mode) {
4797   if (strlen(path) > MAX_PATH - 1) {
4798     errno = ENAMETOOLONG;
4799     return -1;
4800   }
4801   int fd;
4802   int o_delete = (oflag & O_DELETE);
4803   oflag = oflag & ~O_DELETE;
4804 
4805   fd = ::open64(path, oflag, mode);
4806   if (fd == -1) return -1;
4807 
  // If the open succeeded, the file might still be a directory
4809   {
4810     struct stat64 buf64;
4811     int ret = ::fstat64(fd, &buf64);
4812     int st_mode = buf64.st_mode;
4813 
4814     if (ret != -1) {
4815       if ((st_mode & S_IFMT) == S_IFDIR) {
4816         errno = EISDIR;
4817         ::close(fd);
4818         return -1;
4819       }
4820     } else {
4821       ::close(fd);
4822       return -1;
4823     }
4824   }
4825     /*
4826      * 32-bit Solaris systems suffer from:
4827      *
4828      * - an historical default soft limit of 256 per-process file
4829      *   descriptors that is too low for many Java programs.
4830      *
4831      * - a design flaw where file descriptors created using stdio
4832      *   fopen must be less than 256, _even_ when the first limit above
4833      *   has been raised.  This can cause calls to fopen (but not calls to
4834      *   open, for example) to fail mysteriously, perhaps in 3rd party
4835      *   native code (although the JDK itself uses fopen).  One can hardly
4836      *   criticize them for using this most standard of all functions.
4837      *
     * We attempt to make everything work anyway by:
4839      *
4840      * - raising the soft limit on per-process file descriptors beyond
4841      *   256
4842      *
4843      * - As of Solaris 10u4, we can request that Solaris raise the 256
4844      *   stdio fopen limit by calling function enable_extended_FILE_stdio.
4845      *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
4846      *
4847      * - If we are stuck on an old (pre 10u4) Solaris system, we can
4848      *   workaround the bug by remapping non-stdio file descriptors below
4849      *   256 to ones beyond 256, which is done below.
4850      *
4851      * See:
4852      * 1085341: 32-bit stdio routines should support file descriptors >255
4853      * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
4854      * 6431278: Netbeans crash on 32 bit Solaris: need to call
4855      *          enable_extended_FILE_stdio() in VM initialisation
4856      * Giri Mandalika's blog
4857      * http://technopark02.blogspot.com/2005_05_01_archive.html
4858      */
4859 #ifndef  _LP64
4860      if ((!enabled_extended_FILE_stdio) && fd < 256) {
4861          int newfd = ::fcntl(fd, F_DUPFD, 256);
4862          if (newfd != -1) {
4863              ::close(fd);
4864              fd = newfd;
4865          }
4866      }
4867 #endif // 32-bit Solaris
4868     /*
4869      * All file descriptors that are opened in the JVM and not
4870      * specifically destined for a subprocess should have the
4871      * close-on-exec flag set.  If we don't set it, then careless 3rd
4872      * party native code might fork and exec without closing all
4873      * appropriate file descriptors (e.g. as we do in closeDescriptors in
4874      * UNIXProcess.c), and this in turn might:
4875      *
4876      * - cause end-of-file to fail to be detected on some file
4877      *   descriptors, resulting in mysterious hangs, or
4878      *
4879      * - might cause an fopen in the subprocess to fail on a system
4880      *   suffering from bug 1085341.
4881      *
4882      * (Yes, the default setting of the close-on-exec flag is a Unix
4883      * design flaw)
4884      *
4885      * See:
4886      * 1085341: 32-bit stdio routines should support file descriptors >255
4887      * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4888      * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4889      */
4890 #ifdef FD_CLOEXEC
4891     {
4892         int flags = ::fcntl(fd, F_GETFD);
4893         if (flags != -1)
4894             ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4895     }
4896 #endif
4897 
4898   if (o_delete != 0) {
4899     ::unlink(path);
4900   }
4901   return fd;
4902 }
4903 
4904 // create binary file, rewriting existing file if required
4905 int os::create_binary_file(const char* path, bool rewrite_existing) {
4906   int oflags = O_WRONLY | O_CREAT;
4907   if (!rewrite_existing) {
4908     oflags |= O_EXCL;
4909   }
4910   return ::open64(path, oflags, S_IREAD | S_IWRITE);
4911 }
4912 
4913 // return current position of file pointer
4914 jlong os::current_file_offset(int fd) {
4915   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4916 }
4917 
4918 // move file pointer to the specified offset
4919 jlong os::seek_to_file_offset(int fd, jlong offset) {
4920   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4921 }
4922 
4923 jlong os::lseek(int fd, jlong offset, int whence) {
4924   return (jlong) ::lseek64(fd, offset, whence);
4925 }
4926 
4927 char * os::native_path(char *path) {
4928   return path;
4929 }
4930 
4931 int os::ftruncate(int fd, jlong length) {
4932   return ::ftruncate64(fd, length);
4933 }
4934 
4935 int os::fsync(int fd)  {
4936   RESTARTABLE_RETURN_INT(::fsync(fd));
4937 }
4938 
4939 int os::available(int fd, jlong *bytes) {
4940   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
4941           "Assumed _thread_in_native");
4942   jlong cur, end;
4943   int mode;
4944   struct stat64 buf64;
4945 
4946   if (::fstat64(fd, &buf64) >= 0) {
4947     mode = buf64.st_mode;
4948     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      int n, ioctl_return;

      RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
      if (ioctl_return >= 0) {
        *bytes = n;
        return 1;
4955       }
4956     }
4957   }
4958   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4959     return 0;
4960   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4961     return 0;
4962   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4963     return 0;
4964   }
4965   *bytes = end - cur;
4966   return 1;
4967 }
4968 
4969 // Map a block of memory.
4970 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4971                      char *addr, size_t bytes, bool read_only,
4972                      bool allow_exec) {
4973   int prot;
4974   int flags;
4975 
4976   if (read_only) {
4977     prot = PROT_READ;
4978     flags = MAP_SHARED;
4979   } else {
4980     prot = PROT_READ | PROT_WRITE;
4981     flags = MAP_PRIVATE;
4982   }
4983 
4984   if (allow_exec) {
4985     prot |= PROT_EXEC;
4986   }
4987 
4988   if (addr != NULL) {
4989     flags |= MAP_FIXED;
4990   }
4991 
4992   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
4993                                      fd, file_offset);
4994   if (mapped_address == MAP_FAILED) {
4995     return NULL;
4996   }
4997   return mapped_address;
4998 }
4999 
5000 
5001 // Remap a block of memory.
5002 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5003                        char *addr, size_t bytes, bool read_only,
5004                        bool allow_exec) {
5005   // same as map_memory() on this OS
5006   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5007                         allow_exec);
5008 }
5009 
5010 
5011 // Unmap a block of memory.
5012 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5013   return munmap(addr, bytes) == 0;
5014 }
5015 
5016 void os::pause() {
5017   char filename[MAX_PATH];
5018   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);  // don't treat the path as a format string
5020   } else {
5021     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5022   }
5023 
5024   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5025   if (fd != -1) {
5026     struct stat buf;
5027     ::close(fd);
5028     while (::stat(filename, &buf) == 0) {
5029       (void)::poll(NULL, 0, 100);
5030     }
5031   } else {
5032     jio_fprintf(stderr,
5033       "Could not open pause file '%s', continuing immediately.\n", filename);
5034   }
5035 }
5036 
5037 #ifndef PRODUCT
5038 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5039 // Turn this on if you need to trace synch operations.
5040 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5041 // and call record_synch_enable and record_synch_disable
5042 // around the computation of interest.
5043 
5044 void record_synch(char* name, bool returning);  // defined below
5045 
5046 class RecordSynch {
5047   char* _name;
5048  public:
5049   RecordSynch(char* name) :_name(name)
5050                  { record_synch(_name, false); }
5051   ~RecordSynch() { record_synch(_name,   true);  }
5052 };
5053 
5054 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
5055 extern "C" ret name params {                                    \
5056   typedef ret name##_t params;                                  \
5057   static name##_t* implem = NULL;                               \
5058   static int callcount = 0;                                     \
5059   if (implem == NULL) {                                         \
5060     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
5061     if (implem == NULL)  fatal(dlerror());                      \
5062   }                                                             \
5063   ++callcount;                                                  \
5064   RecordSynch _rs(#name);                                       \
5065   inner;                                                        \
5066   return implem args;                                           \
5067 }
5068 // in dbx, examine callcounts this way:
5069 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
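// For reference, CHECK_MUTEX(mutex_lock) expands to an interposing wrapper of
// roughly this shape (sketch only; the macro above is authoritative):
//
//   extern "C" int mutex_lock(mutex_t* mu) {
//     typedef int mutex_lock_t(mutex_t* mu);
//     static mutex_lock_t* implem = NULL;        // the real libc entry point
//     static int callcount = 0;
//     if (implem == NULL) {
//       implem = (mutex_lock_t*) dlsym(RTLD_NEXT, "mutex_lock");
//       if (implem == NULL) fatal(dlerror());
//     }
//     ++callcount;
//     RecordSynch _rs("mutex_lock");             // records entry and return
//     CHECK_MU;                                  // mutex must live in C heap
//     return implem(mu);
//   }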
5070 
5071 #define CHECK_POINTER_OK(p) \
5072   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
5073 #define CHECK_MU \
5074   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5075 #define CHECK_CV \
5076   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5077 #define CHECK_P(p) \
  if (!CHECK_POINTER_OK(p))  fatal("Pointer must be in C heap only.");
5079 
5080 #define CHECK_MUTEX(mutex_op) \
5081 CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5082 
5083 CHECK_MUTEX(   mutex_lock)
5084 CHECK_MUTEX(  _mutex_lock)
5085 CHECK_MUTEX( mutex_unlock)
5086 CHECK_MUTEX(_mutex_unlock)
5087 CHECK_MUTEX( mutex_trylock)
5088 CHECK_MUTEX(_mutex_trylock)
5089 
5090 #define CHECK_COND(cond_op) \
5091 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);
5092 
5093 CHECK_COND( cond_wait);
5094 CHECK_COND(_cond_wait);
5095 CHECK_COND(_cond_wait_cancel);
5096 
5097 #define CHECK_COND2(cond_op) \
5098 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);
5099 
5100 CHECK_COND2( cond_timedwait);
5101 CHECK_COND2(_cond_timedwait);
5102 CHECK_COND2(_cond_timedwait_cancel);
5103 
5104 // do the _lwp_* versions too
5105 #define mutex_t lwp_mutex_t
5106 #define cond_t  lwp_cond_t
5107 CHECK_MUTEX(  _lwp_mutex_lock)
5108 CHECK_MUTEX(  _lwp_mutex_unlock)
5109 CHECK_MUTEX(  _lwp_mutex_trylock)
5110 CHECK_MUTEX( __lwp_mutex_lock)
5111 CHECK_MUTEX( __lwp_mutex_unlock)
5112 CHECK_MUTEX( __lwp_mutex_trylock)
5113 CHECK_MUTEX(___lwp_mutex_lock)
5114 CHECK_MUTEX(___lwp_mutex_unlock)
5115 
5116 CHECK_COND(  _lwp_cond_wait);
5117 CHECK_COND( __lwp_cond_wait);
5118 CHECK_COND(___lwp_cond_wait);
5119 
5120 CHECK_COND2(  _lwp_cond_timedwait);
5121 CHECK_COND2( __lwp_cond_timedwait);
5122 #undef mutex_t
5123 #undef cond_t
5124 
5125 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5126 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
5127 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5128 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
5129 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5130 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
5131 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5132 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5133 
5134 
5135 // recording machinery:
5136 
5137 enum { RECORD_SYNCH_LIMIT = 200 };
5138 char* record_synch_name[RECORD_SYNCH_LIMIT];
5139 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5140 bool record_synch_returning[RECORD_SYNCH_LIMIT];
5141 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5142 int record_synch_count = 0;
5143 bool record_synch_enabled = false;
5144 
5145 // in dbx, examine recorded data this way:
5146 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5147 
5148 void record_synch(char* name, bool returning) {
5149   if (record_synch_enabled) {
5150     if (record_synch_count < RECORD_SYNCH_LIMIT) {
5151       record_synch_name[record_synch_count] = name;
5152       record_synch_returning[record_synch_count] = returning;
5153       record_synch_thread[record_synch_count] = thr_self();
5154       record_synch_arg0ptr[record_synch_count] = &name;
5155       record_synch_count++;
5156     }
5157     // put more checking code here:
5158     // ...
5159   }
5160 }
5161 
5162 void record_synch_enable() {
5163   // start collecting trace data, if not already doing so
5164   if (!record_synch_enabled)  record_synch_count = 0;
5165   record_synch_enabled = true;
5166 }
5167 
5168 void record_synch_disable() {
5169   // stop collecting trace data
5170   record_synch_enabled = false;
5171 }
5172 
5173 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5174 #endif // PRODUCT
5175 
5176 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5177 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5178                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
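// The two constants above are a hand-rolled offsetof(): thr_time_off is the
// byte offset of pr_utime inside prusage_t, and thr_time_size covers the
// region from pr_utime up to pr_ttime (i.e. pr_utime and pr_stime), which is
// exactly the slice that os::thread_cpu_time() below reads with pread().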
5179 
5180 
5181 // JVMTI & JVM monitoring and management support
5182 // The thread_cpu_time() and current_thread_cpu_time() are only
5183 // supported if is_thread_cpu_time_supported() returns true.
5184 // They are not supported on Solaris T1.
5185 
5186 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5187 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5188 // of a thread.
5189 //
5190 // current_thread_cpu_time() and thread_cpu_time(Thread *)
5191 // returns the fast estimate available on the platform.
5192 
5193 // hrtime_t gethrvtime() return value includes
5194 // user time but does not include system time
5195 jlong os::current_thread_cpu_time() {
5196   return (jlong) gethrvtime();
5197 }
5198 
5199 jlong os::thread_cpu_time(Thread *thread) {
5200   // return user level CPU time only to be consistent with
5201   // what current_thread_cpu_time returns.
5202   // thread_cpu_time_info() must be changed if this changes
5203   return os::thread_cpu_time(thread, false /* user time only */);
5204 }
5205 
5206 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5207   if (user_sys_cpu_time) {
5208     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5209   } else {
5210     return os::current_thread_cpu_time();
5211   }
5212 }
5213 
5214 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5215   char proc_name[64];
5216   int count;
5217   prusage_t prusage;
5218   jlong lwp_time;
5219   int fd;
5220 
5221   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5222                      getpid(),
5223                      thread->osthread()->lwp_id());
5224   fd = ::open(proc_name, O_RDONLY);
5225   if ( fd == -1 ) return -1;
5226 
5227   do {
5228     count = ::pread(fd,
5229                   (void *)&prusage.pr_utime,
5230                   thr_time_size,
5231                   thr_time_off);
5232   } while (count < 0 && errno == EINTR);
5233   ::close(fd);
5234   if ( count < 0 ) return -1;
5235 
5236   if (user_sys_cpu_time) {
5237     // user + system CPU time
5238     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5239                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5240                  (jlong)prusage.pr_stime.tv_nsec +
5241                  (jlong)prusage.pr_utime.tv_nsec;
5242   } else {
5243     // user level CPU time only
5244     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5245                 (jlong)prusage.pr_utime.tv_nsec;
5246   }
5247 
5248   return(lwp_time);
5249 }
5250 
5251 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5252   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5253   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5254   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5255   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5256 }
5257 
5258 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5259   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
5260   info_ptr->may_skip_backward = false;    // elapsed time not wall time
5261   info_ptr->may_skip_forward = false;     // elapsed time not wall time
5262   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
5263 }
5264 
5265 bool os::is_thread_cpu_time_supported() {
5266   return true;
5267 }
5268 
5269 // System loadavg support.  Returns -1 if load average cannot be obtained.
5270 // Return the load average for our processor set if the primitive exists
5271 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
5272 int os::loadavg(double loadavg[], int nelem) {
5273   if (pset_getloadavg_ptr != NULL) {
5274     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5275   } else {
5276     return ::getloadavg(loadavg, nelem);
5277   }
5278 }
5279 
5280 //---------------------------------------------------------------------------------
5281 
5282 bool os::find(address addr, outputStream* st) {
5283   Dl_info dlinfo;
5284   memset(&dlinfo, 0, sizeof(dlinfo));
5285   if (dladdr(addr, &dlinfo) != 0) {
5286     st->print(PTR_FORMAT ": ", addr);
5287     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5288       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5289     } else if (dlinfo.dli_fbase != NULL)
5290       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5291     else
5292       st->print("<absolute address>");
5293     if (dlinfo.dli_fname != NULL) {
5294       st->print(" in %s", dlinfo.dli_fname);
5295     }
5296     if (dlinfo.dli_fbase != NULL) {
5297       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5298     }
5299     st->cr();
5300 
5301     if (Verbose) {
5302       // decode some bytes around the PC
5303       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5304       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5305       address       lowest = (address) dlinfo.dli_sname;
5306       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
5307       if (begin < lowest)  begin = lowest;
5308       Dl_info dlinfo2;
5309       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5310           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
5311         end = (address) dlinfo2.dli_saddr;
5312       Disassembler::decode(begin, end, st);
5313     }
5314     return true;
5315   }
5316   return false;
5317 }
5318 
5319 // Following function has been added to support HotSparc's libjvm.so running
5320 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
5321 // src/solaris/hpi/native_threads in the EVM codebase.
5322 //
5323 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5324 // libraries and should thus be removed. We will leave it behind for a while
// until we no longer want to be able to run on top of 1.3.0 Solaris production
5326 // JDK. See 4341971.
5327 
5328 #define STACK_SLACK 0x800
5329 
5330 extern "C" {
5331   intptr_t sysThreadAvailableStackWithSlack() {
5332     stack_t st;
5333     intptr_t retval, stack_top;
5334     retval = thr_stksegment(&st);
5335     assert(retval == 0, "incorrect return value from thr_stksegment");
5336     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5337     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
5338     stack_top=(intptr_t)st.ss_sp-st.ss_size;
5339     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5340   }
5341 }
5342 
5343 // ObjectMonitor park-unpark infrastructure ...
5344 //
5345 // We implement Solaris and Linux PlatformEvents with the
5346 // obvious condvar-mutex-flag triple.
5347 // Another alternative that works quite well is pipes:
5348 // Each PlatformEvent consists of a pipe-pair.
5349 // The thread associated with the PlatformEvent
5350 // calls park(), which reads from the input end of the pipe.
5351 // Unpark() writes into the other end of the pipe.
5352 // The write-side of the pipe must be set NDELAY.
5353 // Unfortunately pipes consume a large # of handles.
5354 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5355 // Using pipes for the 1st few threads might be workable, however.
5356 //
5357 // park() is permitted to return spuriously.
5358 // Callers of park() should wrap the call to park() in
5359 // an appropriate loop.  A litmus test for the correct
5360 // usage of park is the following: if park() were modified
5361 // to immediately return 0 your code should still work,
5362 // albeit degenerating to a spin loop.
5363 //
5364 // An interesting optimization for park() is to use a trylock()
5365 // to attempt to acquire the mutex.  If the trylock() fails
// then we know that a concurrent unpark() operation is in-progress.
// In that case the park() code could simply set _count to 0
// and return immediately.  The subsequent park() operation *might*
// return immediately.  That's harmless as the caller of park() is
// expected to loop.  By using trylock() we will have avoided a
// context switch caused by contention on the per-thread mutex.
5372 //
5373 // TODO-FIXME:
5374 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
5375 //     objectmonitor implementation.
5376 // 2.  Collapse the JSR166 parker event, and the
5377 //     objectmonitor ParkEvent into a single "Event" construct.
5378 // 3.  In park() and unpark() add:
5379 //     assert (Thread::current() == AssociatedWith).
5380 // 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
5381 //     1-out-of-N park() operations will return immediately.
5382 //
5383 // _Event transitions in park()
5384 //   -1 => -1 : illegal
5385 //    1 =>  0 : pass - return immediately
5386 //    0 => -1 : block
5387 //
5388 // _Event serves as a restricted-range semaphore.
5389 //
5390 // Another possible encoding of _Event would be with
5391 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5392 //
5393 // TODO-FIXME: add DTRACE probes for:
5394 // 1.   Tx parks
5395 // 2.   Ty unparks Tx
5396 // 3.   Tx resumes from park
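// The caller-side loop prescribed above looks like this (sketch only; the
// names ev and ready_to_proceed are placeholders, real callers live in the
// objectMonitor/Parker code):
//
//   while (!ready_to_proceed()) {
//     ev->park();     // may return spuriously; re-check and loop
//   }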
5397 
5398 
5399 // value determined through experimentation
5400 #define ROUNDINGFIX 11
5401 
5402 // utility to compute the abstime argument to timedwait.
5403 // TODO-FIXME: switch from compute_abstime() to unpackTime().
5404 
5405 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5406   // millis is the relative timeout time
5407   // abstime will be the absolute timeout time
5408   if (millis < 0)  millis = 0;
5409   struct timeval now;
5410   int status = gettimeofday(&now, NULL);
5411   assert(status == 0, "gettimeofday");
5412   jlong seconds = millis / 1000;
5413   jlong max_wait_period;
5414 
5415   if (UseLWPSynchronization) {
5416     // forward port of fix for 4275818 (not sleeping long enough)
5417     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5418     // _lwp_cond_timedwait() used a round_down algorithm rather
5419     // than a round_up. For millis less than our roundfactor
5420     // it rounded down to 0 which doesn't meet the spec.
5421     // For millis > roundfactor we may return a bit sooner, but
5422     // since we can not accurately identify the patch level and
5423     // this has already been fixed in Solaris 9 and 8 we will
5424     // leave it alone rather than always rounding down.
5425 
    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
5430   } else {
5431     max_wait_period = 50000000;
5432   }
5433   millis %= 1000;
5434   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5435      seconds = max_wait_period;
5436   }
5437   abstime->tv_sec = now.tv_sec  + seconds;
5438   long       usec = now.tv_usec + millis * 1000;
5439   if (usec >= 1000000) {
5440     abstime->tv_sec += 1;
5441     usec -= 1000000;
5442   }
5443   abstime->tv_nsec = usec * 1000;
5444   return abstime;
5445 }
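// Worked example (assume gettimeofday() yields tv_sec = S, tv_usec = 0):
//   millis = 2500  ->  seconds = 2, millis becomes 500,
//                      *abstime = { tv_sec = S + 2, tv_nsec = 500000000 }.
// With UseLWPSynchronization a request of 1..10 ms is first bumped up to
// ROUNDINGFIX (11 ms) to compensate for the old _lwp_cond_timedwait()
// round-down bug described above.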
5446 
5447 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
5448 // Conceptually TryPark() should be equivalent to park(0).
5449 
5450 int os::PlatformEvent::TryPark() {
5451   for (;;) {
5452     const int v = _Event ;
5453     guarantee ((v == 0) || (v == 1), "invariant") ;
5454     if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
5455   }
5456 }
5457 
5458 void os::PlatformEvent::park() {           // AKA: down()
5459   // Invariant: Only the thread associated with the Event/PlatformEvent
5460   // may call park().
5461   int v ;
5462   for (;;) {
5463       v = _Event ;
5464       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5465   }
5466   guarantee (v >= 0, "invariant") ;
5467   if (v == 0) {
5468      // Do this the hard way by blocking ...
5469      // See http://monaco.sfbay/detail.jsf?cr=5094058.
5470      // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5471      // Only for SPARC >= V8PlusA
5472 #if defined(__sparc) && defined(COMPILER2)
5473      if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5474 #endif
5475      int status = os::Solaris::mutex_lock(_mutex);
5476      assert_status(status == 0, status,  "mutex_lock");
5477      guarantee (_nParked == 0, "invariant") ;
5478      ++ _nParked ;
5479      while (_Event < 0) {
5480         // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5481         // Treat this the same as if the wait was interrupted
5482         // With usr/lib/lwp going to kernel, always handle ETIME
5483         status = os::Solaris::cond_wait(_cond, _mutex);
5484         if (status == ETIME) status = EINTR ;
5485         assert_status(status == 0 || status == EINTR, status, "cond_wait");
5486      }
5487      -- _nParked ;
5488      _Event = 0 ;
5489      status = os::Solaris::mutex_unlock(_mutex);
5490      assert_status(status == 0, status, "mutex_unlock");
5491     // Paranoia to ensure our locked and lock-free paths interact
5492     // correctly with each other.
5493     OrderAccess::fence();
5494   }
5495 }
5496 
5497 int os::PlatformEvent::park(jlong millis) {
5498   guarantee (_nParked == 0, "invariant") ;
5499   int v ;
5500   for (;;) {
5501       v = _Event ;
5502       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5503   }
5504   guarantee (v >= 0, "invariant") ;
5505   if (v != 0) return OS_OK ;
5506 
5507   int ret = OS_TIMEOUT;
5508   timestruc_t abst;
5509   compute_abstime (&abst, millis);
5510 
5511   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5512   // For Solaris SPARC set fprs.FEF=0 prior to parking.
5513   // Only for SPARC >= V8PlusA
5514 #if defined(__sparc) && defined(COMPILER2)
5515  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5516 #endif
5517   int status = os::Solaris::mutex_lock(_mutex);
5518   assert_status(status == 0, status, "mutex_lock");
5519   guarantee (_nParked == 0, "invariant") ;
5520   ++ _nParked ;
5521   while (_Event < 0) {
5522      int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5523      assert_status(status == 0 || status == EINTR ||
5524                    status == ETIME || status == ETIMEDOUT,
5525                    status, "cond_timedwait");
5526      if (!FilterSpuriousWakeups) break ;                // previous semantics
5527      if (status == ETIME || status == ETIMEDOUT) break ;
5528      // We consume and ignore EINTR and spurious wakeups.
5529   }
5530   -- _nParked ;
5531   if (_Event >= 0) ret = OS_OK ;
5532   _Event = 0 ;
5533   status = os::Solaris::mutex_unlock(_mutex);
5534   assert_status(status == 0, status, "mutex_unlock");
5535   // Paranoia to ensure our locked and lock-free paths interact
5536   // correctly with each other.
5537   OrderAccess::fence();
5538   return ret;
5539 }
5540 
5541 void os::PlatformEvent::unpark() {
5542   // Transitions for _Event:
5543   //    0 :=> 1
5544   //    1 :=> 1
5545   //   -1 :=> either 0 or 1; must signal target thread
5546   //          That is, we can safely transition _Event from -1 to either
5547   //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
5548   //          unpark() calls.
5549   // See also: "Semaphores in Plan 9" by Mullender & Cox
5550   //
5551   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5552   // that it will take two back-to-back park() calls for the owning
5553   // thread to block. This has the benefit of forcing a spurious return
5554   // from the first park() call after an unpark() call which will help
5555   // shake out uses of park() and unpark() without condition variables.
5556 
5557   if (Atomic::xchg(1, &_Event) >= 0) return;
5558 
5559   // If the thread associated with the event was parked, wake it.
5560   // Wait for the thread assoc with the PlatformEvent to vacate.
5561   int status = os::Solaris::mutex_lock(_mutex);
5562   assert_status(status == 0, status, "mutex_lock");
5563   int AnyWaiters = _nParked;
5564   status = os::Solaris::mutex_unlock(_mutex);
5565   assert_status(status == 0, status, "mutex_unlock");
5566   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5567   if (AnyWaiters != 0) {
    // We intentionally signal *after* dropping the lock
5569     // to avoid a common class of futile wakeups.
5570     status = os::Solaris::cond_signal(_cond);
5571     assert_status(status == 0, status, "cond_signal");
5572   }
5573 }
5574 
5575 // JSR166
5576 // -------------------------------------------------------
5577 
5578 /*
5579  * The solaris and linux implementations of park/unpark are fairly
5580  * conservative for now, but can be improved. They currently use a
5581  * mutex/condvar pair, plus _counter.
5582  * Park decrements _counter if > 0, else does a condvar wait.  Unpark
5583  * sets count to 1 and signals condvar.  Only one thread ever waits
5584  * on the condvar. Contention seen when trying to park implies that someone
5585  * is unparking you, so don't wait. And spurious returns are fine, so there
5586  * is no need to track notifications.
5587  */
5588 
5589 #define MAX_SECS 100000000
5590 /*
5591  * This code is common to linux and solaris and will be moved to a
5592  * common place in dolphin.
5593  *
5594  * The passed in time value is either a relative time in nanoseconds
5595  * or an absolute time in milliseconds. Either way it has to be unpacked
5596  * into suitable seconds and nanoseconds components and stored in the
5597  * given timespec structure.
5598  * Given time is a 64-bit value and the time_t used in the timespec is only
5599  * a signed-32-bit value (except on 64-bit Linux) we have to watch for
 * overflow if times far in the future are given. Further, on Solaris versions
5601  * prior to 10 there is a restriction (see cond_timedwait) that the specified
5602  * number of seconds, in abstime, is less than current_time  + 100,000,000.
5603  * As it will be 28 years before "now + 100000000" will overflow we can
5604  * ignore overflow and just impose a hard-limit on seconds using the value
5605  * of "now + 100,000,000". This places a limit on the timeout of about 3.17
5606  * years from "now".
5607  */
5608 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5609   assert (time > 0, "convertTime");
5610 
5611   struct timeval now;
5612   int status = gettimeofday(&now, NULL);
5613   assert(status == 0, "gettimeofday");
5614 
5615   time_t max_secs = now.tv_sec + MAX_SECS;
5616 
5617   if (isAbsolute) {
5618     jlong secs = time / 1000;
5619     if (secs > max_secs) {
5620       absTime->tv_sec = max_secs;
5621     }
5622     else {
5623       absTime->tv_sec = secs;
5624     }
5625     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5626   }
5627   else {
5628     jlong secs = time / NANOSECS_PER_SEC;
5629     if (secs >= MAX_SECS) {
5630       absTime->tv_sec = max_secs;
5631       absTime->tv_nsec = 0;
5632     }
5633     else {
5634       absTime->tv_sec = now.tv_sec + secs;
5635       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5636       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5637         absTime->tv_nsec -= NANOSECS_PER_SEC;
5638         ++absTime->tv_sec; // note: this must be <= max_secs
5639       }
5640     }
5641   }
5642   assert(absTime->tv_sec >= 0, "tv_sec < 0");
5643   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5644   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5645   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5646 }
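// Worked example (assume gettimeofday() yields tv_sec = S, tv_usec = 250000):
//   relative, time = 1500000000 ns -> secs = 1,
//       *absTime = { S + 1, 500000000 + 250000 * 1000 } = { S + 1, 750000000 }
//   absolute, time = T ms          -> *absTime = { T / 1000, (T % 1000) * 1000000 },
//       with tv_sec clamped to now + MAX_SECS.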
5647 
5648 void Parker::park(bool isAbsolute, jlong time) {
5649   // Ideally we'd do something useful while spinning, such
5650   // as calling unpackTime().
5651 
5652   // Optional fast-path check:
5653   // Return immediately if a permit is available.
5654   // We depend on Atomic::xchg() having full barrier semantics
5655   // since we are doing a lock-free update to _counter.
5656   if (Atomic::xchg(0, &_counter) > 0) return;
5657 
5658   // Optional fast-exit: Check interrupt before trying to wait
5659   Thread* thread = Thread::current();
5660   assert(thread->is_Java_thread(), "Must be JavaThread");
5661   JavaThread *jt = (JavaThread *)thread;
5662   if (Thread::is_interrupted(thread, false)) {
5663     return;
5664   }
5665 
5666   // First, demultiplex/decode time arguments
5667   timespec absTime;
5668   if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
5669     return;
5670   }
5671   if (time > 0) {
5672     // Warning: this code might be exposed to the old Solaris time
    // round-down bugs.  Grep "ROUNDINGFIX" for details.
5674     unpackTime(&absTime, isAbsolute, time);
5675   }
5676 
5677   // Enter safepoint region
5678   // Beware of deadlocks such as 6317397.
5679   // The per-thread Parker:: _mutex is a classic leaf-lock.
5680   // In particular a thread must never block on the Threads_lock while
5681   // holding the Parker:: mutex.  If safepoints are pending both the
5682   // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
5683   ThreadBlockInVM tbivm(jt);
5684 
  // Don't wait if we cannot get the lock, since interference arises from
  // unblocking.  Also check for a pending interrupt before trying to wait.
5687   if (Thread::is_interrupted(thread, false) ||
5688       os::Solaris::mutex_trylock(_mutex) != 0) {
5689     return;
5690   }
5691 
5692   int status ;
5693 
5694   if (_counter > 0)  { // no wait needed
5695     _counter = 0;
5696     status = os::Solaris::mutex_unlock(_mutex);
5697     assert (status == 0, "invariant") ;
5698     // Paranoia to ensure our locked and lock-free paths interact
5699     // correctly with each other and Java-level accesses.
5700     OrderAccess::fence();
5701     return;
5702   }
5703 
5704 #ifdef ASSERT
5705   // Don't catch signals while blocked; let the running threads have the signals.
5706   // (This allows a debugger to break into the running thread.)
5707   sigset_t oldsigs;
5708   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
5709   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
5710 #endif
5711 
5712   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5713   jt->set_suspend_equivalent();
5714   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5715 
5716   // Do this the hard way by blocking ...
5717   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5718   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5719   // Only for SPARC >= V8PlusA
5720 #if defined(__sparc) && defined(COMPILER2)
5721   if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5722 #endif
5723 
5724   if (time == 0) {
5725     status = os::Solaris::cond_wait (_cond, _mutex) ;
5726   } else {
5727     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
5728   }
5729   // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of Solaris.
5731   assert_status(status == 0 || status == EINTR ||
5732                 status == ETIME || status == ETIMEDOUT,
5733                 status, "cond_timedwait");
5734 
5735 #ifdef ASSERT
5736   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
5737 #endif
5738   _counter = 0 ;
5739   status = os::Solaris::mutex_unlock(_mutex);
5740   assert_status(status == 0, status, "mutex_unlock") ;
5741   // Paranoia to ensure our locked and lock-free paths interact
5742   // correctly with each other and Java-level accesses.
5743   OrderAccess::fence();
5744 
5745   // If externally suspended while waiting, re-suspend
5746   if (jt->handle_special_suspend_equivalent_condition()) {
5747     jt->java_suspend_self();
5748   }
5749 }
5750 
5751 void Parker::unpark() {
5752   int s, status;
5753   status = os::Solaris::mutex_lock(_mutex);
5754   assert(status == 0, "invariant");
5755   s = _counter;
5756   _counter = 1;
5757   status = os::Solaris::mutex_unlock(_mutex);
5758   assert(status == 0, "invariant");
5759 
5760   if (s < 1) {
5761     status = os::Solaris::cond_signal(_cond);
5762     assert(status == 0, "invariant");
5763   }
5764 }
5765 
5766 extern char** environ;
5767 
5768 // Run the specified command in a separate process. Return its exit value,
5769 // or -1 on failure (e.g. can't fork a new process).
5770 // Unlike system(), this function can be called from a signal handler, and it
5771 // doesn't block SIGINT et al.
5772 int os::fork_and_exec(char* cmd) {
5773   char * argv[4];
5774   argv[0] = (char *)"sh";
5775   argv[1] = (char *)"-c";
5776   argv[2] = cmd;
5777   argv[3] = NULL;
5778 
5779   // fork() is async-signal-safe; fork1() is not, so it can't be used in a signal handler
5780   pid_t pid;
5781   Thread* t = ThreadLocalStorage::get_thread_slow();
5782   if (t != NULL && t->is_inside_signal_handler()) {
5783     pid = fork();
5784   } else {
5785     pid = fork1();
5786   }
5787 
5788   if (pid < 0) {
5789     // fork failed
5790     warning("fork failed: %s", strerror(errno));
5791     return -1;
5792 
5793   } else if (pid == 0) {
5794     // child process
5795 
5796     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
5797     execve("/usr/bin/sh", argv, environ);
5798 
5799     // execve failed
5800     _exit(-1);
5801 
5802   } else  {
5803     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
5804     // care about the actual exit code, for now.
5805 
5806     int status;
5807 
5808     // Wait for the child process to exit.  This returns immediately if
5809     // the child has already exited.
5810     while (waitpid(pid, &status, 0) < 0) {
5811         switch (errno) {
5812         case ECHILD: return 0;
5813         case EINTR: break;
5814         default: return -1;
5815         }
5816     }
5817 
5818     if (WIFEXITED(status)) {
5819        // The child exited normally; get its exit code.
5820        return WEXITSTATUS(status);
5821     } else if (WIFSIGNALED(status)) {
5822        // The child exited because of a signal
5823        // The best value to return is 0x80 + signal number,
5824        // because that is what all Unix shells do, and because
5825        // it allows callers to distinguish between process exit and
5826        // process death by signal.
5827        return 0x80 + WTERMSIG(status);
5828     } else {
5829        // Unknown exit code; pass it through
5830        return status;
5831     }
5832   }
5833 }
5834 
5835 // is_headless_jre()
5836 //
5837 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
5838 // in order to report whether we are running in a headless jre.
5839 //
5840 // Since JDK8, xawt/libmawt.so has been moved into the same directory
5841 // as libawt.so and renamed libawt_xawt.so.
5842 //
5843 bool os::is_headless_jre() {
5844     struct stat statbuf;
5845     char buf[MAXPATHLEN];
5846     char libmawtpath[MAXPATHLEN];
5847     const char *xawtstr  = "/xawt/libmawt.so";
5848     const char *new_xawtstr = "/libawt_xawt.so";
5849     char *p;
5850 
5851     // Get path to libjvm.so
5852     os::jvm_path(buf, sizeof(buf));
5853 
5854     // Get rid of libjvm.so
5855     p = strrchr(buf, '/');
5856     if (p == NULL) return false;
5857     else *p = '\0';
5858 
5859     // Get rid of client or server
5860     p = strrchr(buf, '/');
5861     if (p == NULL) return false;
5862     else *p = '\0';
5863 
5864     // check xawt/libmawt.so
5865     strcpy(libmawtpath, buf);
5866     strcat(libmawtpath, xawtstr);
5867     if (::stat(libmawtpath, &statbuf) == 0) return false;
5868 
5869     // check libawt_xawt.so
5870     strcpy(libmawtpath, buf);
5871     strcat(libmawtpath, new_xawtstr);
5872     if (::stat(libmawtpath, &statbuf) == 0) return false;
5873 
5874     return true;
5875 }
5876 
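     // The I/O and socket wrappers below rely on the RESTARTABLE macro, which
     // retries the wrapped system call while it fails with EINTR.  Wrappers that
     // can block in the kernel also assert _thread_in_native: blocking calls must
     // be made in the native thread state so that they do not delay safepoints.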
5877 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
5878   size_t res;
5879   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5880           "Assumed _thread_in_native");
5881   RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
5882   return res;
5883 }
5884 
5885 int os::close(int fd) {
5886   return ::close(fd);
5887 }
5888 
5889 int os::socket_close(int fd) {
5890   return ::close(fd);
5891 }
5892 
5893 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5894   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5895           "Assumed _thread_in_native");
5896   RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
5897 }
5898 
5899 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5900   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5901           "Assumed _thread_in_native");
5902   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5903 }
5904 
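     // raw_send() is identical to send() except that it omits the thread-state
     // assertion, so it may be used from threads that are not _thread_in_native.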
5905 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5906   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5907 }
5908 
5909 // As both poll() and select() can be interrupted by signals, we have to be
5910 // prepared to restart the system call after updating the timeout, unless
5911 // the poll() is done with timeout == -1, in which case we simply repeat with
5912 // this "wait forever" value.
5913 
5914 int os::timeout(int fd, long timeout) {
5915   int res;
5916   struct timeval t;
5917   julong prevtime, newtime;
5918   static const char* aNull = 0;
5919   struct pollfd pfd;
5920   pfd.fd = fd;
5921   pfd.events = POLLIN;
5922 
5923   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5924           "Assumed _thread_in_native");
5925 
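       // Record the current wall-clock time in milliseconds so the remaining
       // timeout can be recomputed if poll() is interrupted by a signal.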
5926   gettimeofday(&t, &aNull);
5927   prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
5928 
5929   for (;;) {
5930     res = ::poll(&pfd, 1, timeout);
5931     if (res == OS_ERR && errno == EINTR) {
5932       if (timeout != -1) {
5933         gettimeofday(&t, &aNull);
5934         newtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
5935         timeout -= newtime - prevtime;
5936         if (timeout <= 0)
5937           return OS_OK;
5938         prevtime = newtime;
5939       }
5940     } else return res;
5941   }
5942 }
5943 
5944 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
5945   int _result;
5946   _result = ::connect(fd, him, len);
5947 
5948   // On Solaris, when a connect() call is interrupted, the connection
5949   // can be established asynchronously (see 6343810). Subsequent calls
5950   // to connect() must check the errno value which has the semantic
5951   // described below (copied from the connect() man page). Handling
5952   // of asynchronously established connections is required for both
5953   // blocking and non-blocking sockets.
5954   //     EINTR            The  connection  attempt  was   interrupted
5955   //                      before  any data arrived by the delivery of
5956   //                      a signal. The connection, however, will  be
5957   //                      established asynchronously.
5958   //
5959   //     EINPROGRESS      The socket is non-blocking, and the connec-
5960   //                      tion  cannot  be completed immediately.
5961   //
5962   //     EALREADY         The socket is non-blocking,  and a previous
5963   //                      connection  attempt  has  not yet been com-
5964   //                      pleted.
5965   //
5966   //     EISCONN          The socket is already connected.
5967   if (_result == OS_ERR && errno == EINTR) {
5968     /* restarting a connect() changes its errno semantics */
5969     RESTARTABLE(::connect(fd, him, len), _result);
5970     /* undo these changes */
5971     if (_result == OS_ERR) {
5972       if (errno == EALREADY) {
5973         errno = EINPROGRESS; /* fall through */
5974       } else if (errno == EISCONN) {
5975         errno = 0;
5976         return OS_OK;
5977       }
5978     }
5979   }
5980   return _result;
5981 }
5982 
5983 int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
5984   if (fd < 0) {
5985     return OS_ERR;
5986   }
5987   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5988           "Assumed _thread_in_native");
5989   RESTARTABLE_RETURN_INT((int)::accept(fd, him, len));
5990 }
5991 
5992 int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
5993                  sockaddr* from, socklen_t* fromlen) {
5994   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5995           "Assumed _thread_in_native");
5996   RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
5997 }
5998 
5999 int os::sendto(int fd, char* buf, size_t len, uint flags,
6000                struct sockaddr* to, socklen_t tolen) {
6001   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
6002           "Assumed _thread_in_native");
6003   RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
6004 }
6005 
6006 int os::socket_available(int fd, jint *pbytes) {
6007   if (fd < 0) {
6008     return OS_OK;
6009   }
6010   int ret;
6011   RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
6012   // Note: ioctl() can return 0 when successful; JVM_SocketAvailable
6013   // is expected to return 0 on failure and 1 on success to the JDK.
6014   return (ret == OS_ERR) ? 0 : 1;
6015 }
6016 
6017 int os::bind(int fd, struct sockaddr* him, socklen_t len) {
6018   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
6019           "Assumed _thread_in_native");
6020   return ::bind(fd, him, len);
6021 }
6022 
6023 // Get the default path to the core file
6024 // Returns the length of the string
6025 int os::get_core_path(char* buffer, size_t bufferSize) {
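       // With default coreadm(1M) settings Solaris writes the core file ("core")
       // into the process's current working directory, so the cwd is used here
       // as the default core path.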
6026   const char* p = get_current_directory(buffer, bufferSize);
6027 
6028   if (p == NULL) {
6029     assert(p != NULL, "failed to get current directory");
6030     return 0;
6031   }
6032 
6033   return strlen(buffer);
6034 }
6035 
6036 #ifndef PRODUCT
6037 void TestReserveMemorySpecial_test() {
6038   // No tests available for this platform
6039 }
6040 #endif