1 /*
   2  * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "jvm_solaris.h"
  35 #include "logging/log.hpp"
  36 #include "memory/allocation.inline.hpp"
  37 #include "memory/filemap.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "os_share_solaris.hpp"
  40 #include "os_solaris.inline.hpp"
  41 #include "prims/jniFastGetField.hpp"
  42 #include "prims/jvm.h"
  43 #include "prims/jvm_misc.hpp"
  44 #include "runtime/arguments.hpp"
  45 #include "runtime/atomic.hpp"
  46 #include "runtime/extendedPC.hpp"
  47 #include "runtime/globals.hpp"
  48 #include "runtime/interfaceSupport.hpp"
  49 #include "runtime/java.hpp"
  50 #include "runtime/javaCalls.hpp"
  51 #include "runtime/mutexLocker.hpp"
  52 #include "runtime/objectMonitor.hpp"
  53 #include "runtime/orderAccess.inline.hpp"
  54 #include "runtime/osThread.hpp"
  55 #include "runtime/perfMemory.hpp"
  56 #include "runtime/sharedRuntime.hpp"
  57 #include "runtime/statSampler.hpp"
  58 #include "runtime/stubRoutines.hpp"
  59 #include "runtime/thread.inline.hpp"
  60 #include "runtime/threadCritical.hpp"
  61 #include "runtime/timer.hpp"
  62 #include "runtime/vm_version.hpp"
  63 #include "semaphore_posix.hpp"
  64 #include "services/attachListener.hpp"
  65 #include "services/memTracker.hpp"
  66 #include "services/runtimeService.hpp"
  67 #include "utilities/align.hpp"
  68 #include "utilities/decoder.hpp"
  69 #include "utilities/defaultStream.hpp"
  70 #include "utilities/events.hpp"
  71 #include "utilities/growableArray.hpp"
  72 #include "utilities/macros.hpp"
  73 #include "utilities/vmError.hpp"
  74 
  75 // put OS-includes here
  76 # include <dlfcn.h>
  77 # include <errno.h>
  78 # include <exception>
  79 # include <link.h>
  80 # include <poll.h>
  81 # include <pthread.h>
  82 # include <schedctl.h>
  83 # include <setjmp.h>
  84 # include <signal.h>
  85 # include <stdio.h>
  86 # include <alloca.h>
  87 # include <sys/filio.h>
  88 # include <sys/ipc.h>
  89 # include <sys/lwp.h>
  90 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
  91 # include <sys/mman.h>
  92 # include <sys/processor.h>
  93 # include <sys/procset.h>
  94 # include <sys/pset.h>
  95 # include <sys/resource.h>
  96 # include <sys/shm.h>
  97 # include <sys/socket.h>
  98 # include <sys/stat.h>
  99 # include <sys/systeminfo.h>
 100 # include <sys/time.h>
 101 # include <sys/times.h>
 102 # include <sys/types.h>
 103 # include <sys/wait.h>
 104 # include <sys/utsname.h>
 105 # include <thread.h>
 106 # include <unistd.h>
 107 # include <sys/priocntl.h>
 108 # include <sys/rtpriocntl.h>
 109 # include <sys/tspriocntl.h>
 110 # include <sys/iapriocntl.h>
 111 # include <sys/fxpriocntl.h>
 112 # include <sys/loadavg.h>
 113 # include <string.h>
 114 # include <stdio.h>
 115 
 116 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
 117 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
 118 
 119 #define MAX_PATH (2 * K)
 120 
 121 // for timer info max values which include all bits
 122 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 123 
 124 
 125 // Some liblgrp resource types and madvise advice values, defined here so
 126 // we can compile on older systems whose headers do not provide them.
 127 
 128 #ifndef MADV_ACCESS_LWP
 129   #define  MADV_ACCESS_LWP   7       /* next LWP to access heavily */
 130 #endif
 131 #ifndef MADV_ACCESS_MANY
 132   #define  MADV_ACCESS_MANY  8       /* many processes to access heavily */
 133 #endif
 134 
 135 #ifndef LGRP_RSRC_CPU
 136   #define LGRP_RSRC_CPU      0       /* CPU resources */
 137 #endif
 138 #ifndef LGRP_RSRC_MEM
 139   #define LGRP_RSRC_MEM      1       /* memory resources */
 140 #endif
 141 
 142 // Values for ThreadPriorityPolicy == 1
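     // Index 0 is a filler (there is no Java priority 0); indices 1..10 map the
     // Java thread priorities onto native priority values 0..127, and the final
     // entry covers CriticalPriority.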
 143 int prio_policy1[CriticalPriority+1] = {
 144   -99999,  0, 16,  32,  48,  64,
 145           80, 96, 112, 124, 127, 127 };
 146 
 147 // System parameters used internally
 148 static clock_t clock_tics_per_sec = 100;
 149 
 150 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
 151 static bool enabled_extended_FILE_stdio = false;
 152 
 153 // For diagnostics to print a message once. see run_periodic_checks
 154 static bool check_addr0_done = false;
 155 static sigset_t check_signal_done;
 156 static bool check_signals = true;
 157 
 158 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
 159 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
 160 
 161 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
 162 
 163 os::Solaris::pthread_setname_np_func_t os::Solaris::_pthread_setname_np = NULL;
 164 
 165 // "default" initializers for missing libc APIs
 166 extern "C" {
 167   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 168   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
 169 
 170   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 171   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
 172 }
 173 
 174 // "default" initializers for pthread-based synchronization
 175 extern "C" {
 176   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
 177   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 178 }
 179 
 180 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 181 
 182 static inline size_t adjust_stack_size(address base, size_t size) {
 183   if ((ssize_t)size < 0) {
 184     // 4759953: Compensate for ridiculous stack size.
 185     size = max_intx;
 186   }
 187   if (size > (size_t)base) {
 188     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
 189     size = (size_t)base;
 190   }
 191   return size;
 192 }
 193 
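     // thr_stksegment() reports the current thread's stack: ss_sp is the stack
     // base (highest address) and ss_size its size, sanity-adjusted above.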
 194 static inline stack_t get_stack_info() {
 195   stack_t st;
 196   int retval = thr_stksegment(&st);
 197   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
 198   assert(retval == 0, "incorrect return value from thr_stksegment");
 199   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
 200   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
 201   return st;
 202 }
 203 
 204 address os::current_stack_base() {
 205   int r = thr_main();
 206   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
 207   bool is_primordial_thread = r;
 208 
 209   // Workaround 4352906, avoid calls to thr_stksegment by
 210   // thr_main after the first one (it looks like we trash
 211   // some data, causing the value for ss_sp to be incorrect).
 212   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
 213     stack_t st = get_stack_info();
 214     if (is_primordial_thread) {
 215       // cache initial value of stack base
 216       os::Solaris::_main_stack_base = (address)st.ss_sp;
 217     }
 218     return (address)st.ss_sp;
 219   } else {
 220     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
 221     return os::Solaris::_main_stack_base;
 222   }
 223 }
 224 
 225 size_t os::current_stack_size() {
 226   size_t size;
 227 
 228   int r = thr_main();
 229   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
 230   if (!r) {
 231     size = get_stack_info().ss_size;
 232   } else {
 233     struct rlimit limits;
 234     getrlimit(RLIMIT_STACK, &limits);
 235     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
 236   }
 237   // base may not be page aligned
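       // Round the computed bottom up to a page boundary so the size we report
       // is a whole number of pages and never reaches below the real stack limit.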
 238   address base = current_stack_base();
 239   address bottom = align_up(base - size, os::vm_page_size());
 240   return (size_t)(base - bottom);
 241 }
 242 
 243 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
 244   return localtime_r(clock, res);
 245 }
 246 
 247 void os::Solaris::try_enable_extended_io() {
 248   typedef int (*enable_extended_FILE_stdio_t)(int, int);
 249 
 250   if (!UseExtendedFileIO) {
 251     return;
 252   }
 253 
 254   enable_extended_FILE_stdio_t enabler =
 255     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
 256                                          "enable_extended_FILE_stdio");
 257   if (enabler) {
 258     enabler(-1, -1);
 259   }
 260 }
 261 
 262 static int _processors_online = 0;
 263 
 264 jint os::Solaris::_os_thread_limit = 0;
 265 volatile jint os::Solaris::_os_thread_count = 0;
 266 
 267 julong os::available_memory() {
 268   return Solaris::available_memory();
 269 }
 270 
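     // _SC_AVPHYS_PAGES reports the number of physical pages not currently in
     // use; multiplying by the VM page size yields the available bytes.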
 271 julong os::Solaris::available_memory() {
 272   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
 273 }
 274 
 275 julong os::Solaris::_physical_memory = 0;
 276 
 277 julong os::physical_memory() {
 278   return Solaris::physical_memory();
 279 }
 280 
 281 static hrtime_t first_hrtime = 0;
 282 static const hrtime_t hrtime_hz = 1000*1000*1000;
 283 static volatile hrtime_t max_hrtime = 0;
 284 
 285 
 286 void os::Solaris::initialize_system_info() {
 287   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
 288   _processors_online = sysconf(_SC_NPROCESSORS_ONLN);
 289   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) *
 290                                      (julong)sysconf(_SC_PAGESIZE);
 291 }
 292 
 293 int os::active_processor_count() {
 294   // User has overridden the number of active processors
 295   if (ActiveProcessorCount > 0) {
 296     log_trace(os)("active_processor_count: "
 297                   "active processor count set by user : %d",
 298                   ActiveProcessorCount);
 299     return ActiveProcessorCount;
 300   }
 301 
 302   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 303   pid_t pid = getpid();
 304   psetid_t pset = PS_NONE;
 305   // Are we running in a processor set or is there any processor set around?
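       // pset_bind() with PS_QUERY only queries the current binding of this
       // process; it does not change it.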
 306   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
 307     uint_t pset_cpus;
 308     // Query the number of cpus available to us.
 309     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
 310       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
 311       _processors_online = pset_cpus;
 312       return pset_cpus;
 313     }
 314   }
 315   // Otherwise return number of online cpus
 316   return online_cpus;
 317 }
 318 
 319 static bool find_processors_in_pset(psetid_t        pset,
 320                                     processorid_t** id_array,
 321                                     uint_t*         id_length) {
 322   bool result = false;
 323   // Find the number of processors in the processor set.
 324   if (pset_info(pset, NULL, id_length, NULL) == 0) {
 325     // Make up an array to hold their ids.
 326     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 327     // Fill in the array with their processor ids.
 328     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
 329       result = true;
 330     }
 331   }
 332   return result;
 333 }
 334 
 335 // Callers of find_processors_online() must tolerate imprecise results --
 336 // the system configuration can change asynchronously because of DR
 337 // or explicit psradm operations.
 338 //
 339 // We also need to take care that the loop (below) terminates as the
 340 // number of processors online can change between the _SC_NPROCESSORS_ONLN
 341 // request and the loop that builds the list of processor ids.   Unfortunately
 342 // there's no reliable way to determine the maximum valid processor id,
 343 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
 344 // man pages, which claim the processor id set is "sparse, but
 345 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
 346 // exit the loop.
 347 //
 348 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
 349 // not available on S8.0.
 350 
 351 static bool find_processors_online(processorid_t** id_array,
 352                                    uint*           id_length) {
 353   const processorid_t MAX_PROCESSOR_ID = 100000;
 354   // Find the number of processors online.
 355   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
 356   // Make up an array to hold their ids.
 357   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
 358   // Processors need not be numbered consecutively.
 359   long found = 0;
 360   processorid_t next = 0;
 361   while (found < *id_length && next < MAX_PROCESSOR_ID) {
 362     processor_info_t info;
 363     if (processor_info(next, &info) == 0) {
 364       // NB, PI_NOINTR processors are effectively online ...
 365       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
 366         (*id_array)[found] = next;
 367         found += 1;
 368       }
 369     }
 370     next += 1;
 371   }
 372   if (found < *id_length) {
 373     // The loop above didn't identify the expected number of processors.
 374     // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
 375     // and re-running the loop, above, but there's no guarantee of progress
 376     // if the system configuration is in flux.  Instead, we just return what
 377     // we've got.  Note that in the worst case find_processors_online() could
 378     // return an empty set.  (As a fall-back in the case of the empty set we
 379     // could just return the ID of the current processor).
 380     *id_length = found;
 381   }
 382 
 383   return true;
 384 }
 385 
 386 static bool assign_distribution(processorid_t* id_array,
 387                                 uint           id_length,
 388                                 uint*          distribution,
 389                                 uint           distribution_length) {
 390   // We assume we can assign processorid_t's to uint's.
 391   assert(sizeof(processorid_t) == sizeof(uint),
 392          "can't convert processorid_t to uint");
 393   // Quick check to see if we won't succeed.
 394   if (id_length < distribution_length) {
 395     return false;
 396   }
 397   // Assign processor ids to the distribution.
 398   // Try to shuffle processors to distribute work across boards,
 399   // assuming 4 processors per board.
 400   const uint processors_per_board = ProcessDistributionStride;
 401   // Find the maximum processor id.
 402   processorid_t max_id = 0;
 403   for (uint m = 0; m < id_length; m += 1) {
 404     max_id = MAX2(max_id, id_array[m]);
 405   }
 406   // The next id, to limit loops.
 407   const processorid_t limit_id = max_id + 1;
 408   // Make up markers for available processors.
 409   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
 410   for (uint c = 0; c < limit_id; c += 1) {
 411     available_id[c] = false;
 412   }
 413   for (uint a = 0; a < id_length; a += 1) {
 414     available_id[id_array[a]] = true;
 415   }
 416   // Step by "boards", then by "slot", copying to "assigned".
 417   // NEEDS_CLEANUP: The assignment of processors should be stateful,
 418   //                remembering which processors have been assigned by
 419   //                previous calls, etc., so as to distribute several
 420   //                independent calls of this method.  What we'd like
 421   //                is an API that lets us ask how many processes are
 422   //                bound to a processor, but we don't have that,
 423   //                either.
 424   //                In the short term, "board" is static so that
 425   //                subsequent distributions don't all start at board 0.
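       // For example, with ProcessDistributionStride == 4, online ids {0..7} and
       // board starting at 0, a request for four ids yields 0, 4, 1, 5 --
       // alternating between the two boards.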
 426   static uint board = 0;
 427   uint assigned = 0;
 428   // Until we've found enough processors ....
 429   while (assigned < distribution_length) {
 430     // ... find the next available processor in the board.
 431     for (uint slot = 0; slot < processors_per_board; slot += 1) {
 432       uint try_id = board * processors_per_board + slot;
 433       if ((try_id < limit_id) && (available_id[try_id] == true)) {
 434         distribution[assigned] = try_id;
 435         available_id[try_id] = false;
 436         assigned += 1;
 437         break;
 438       }
 439     }
 440     board += 1;
 441     if (board * processors_per_board + 0 >= limit_id) {
 442       board = 0;
 443     }
 444   }
 445   if (available_id != NULL) {
 446     FREE_C_HEAP_ARRAY(bool, available_id);
 447   }
 448   return true;
 449 }
 450 
 451 void os::set_native_thread_name(const char *name) {
 452   if (Solaris::_pthread_setname_np != NULL) {
 453     // Only the first 31 bytes of 'name' are processed by pthread_setname_np
 454     // but we explicitly copy into a size-limited buffer to avoid any
 455     // possible overflow.
 456     char buf[32];
 457     snprintf(buf, sizeof(buf), "%s", name);
 458     buf[sizeof(buf) - 1] = '\0';
 459     Solaris::_pthread_setname_np(pthread_self(), buf);
 460   }
 461 }
 462 
 463 bool os::distribute_processes(uint length, uint* distribution) {
 464   bool result = false;
 465   // Find the processor id's of all the available CPUs.
 466   processorid_t* id_array  = NULL;
 467   uint           id_length = 0;
 468   // There are some races between querying information and using it,
 469   // since processor sets can change dynamically.
 470   psetid_t pset = PS_NONE;
 471   // Are we running in a processor set?
 472   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
 473     result = find_processors_in_pset(pset, &id_array, &id_length);
 474   } else {
 475     result = find_processors_online(&id_array, &id_length);
 476   }
 477   if (result == true) {
 478     if (id_length >= length) {
 479       result = assign_distribution(id_array, id_length, distribution, length);
 480     } else {
 481       result = false;
 482     }
 483   }
 484   if (id_array != NULL) {
 485     FREE_C_HEAP_ARRAY(processorid_t, id_array);
 486   }
 487   return result;
 488 }
 489 
 490 bool os::bind_to_processor(uint processor_id) {
 491   // We assume that a processorid_t can be stored in a uint.
 492   assert(sizeof(uint) == sizeof(processorid_t),
 493          "can't convert uint to processorid_t");
 494   int bind_result =
 495     processor_bind(P_LWPID,                       // bind LWP.
 496                    P_MYID,                        // bind current LWP.
 497                    (processorid_t) processor_id,  // id.
 498                    NULL);                         // don't return old binding.
 499   return (bind_result == 0);
 500 }
 501 
 502 // Return true if the process is running with elevated (set-uid or set-gid) privileges.
 503 
 504 bool os::have_special_privileges() {
 505   static bool init = false;
 506   static bool privileges = false;
 507   if (!init) {
 508     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 509     init = true;
 510   }
 511   return privileges;
 512 }
 513 
 514 
 515 void os::init_system_properties_values() {
 516   // The next steps are taken in the product version:
 517   //
 518   // Obtain the JAVA_HOME value from the location of libjvm.so.
 519   // This library should be located at:
 520   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
 521   //
 522   // If "/jre/lib/" appears at the right place in the path, then we
 523   // assume libjvm.so is installed in a JDK and we use this path.
 524   //
 525   // Otherwise exit with message: "Could not create the Java virtual machine."
 526   //
 527   // The following extra steps are taken in the debugging version:
 528   //
 529   // If "/jre/lib/" does NOT appear at the right place in the path,
 530   // then instead of exiting we check the $JAVA_HOME environment variable.
 531   //
 532   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
 533   // then we append a fake suffix "hotspot/libjvm.so" to this path so
 534   // it looks like libjvm.so is installed there
 535   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
 536   //
 537   // Otherwise exit.
 538   //
 539   // Important note: if the location of libjvm.so changes this
 540   // code needs to be changed accordingly.
 541 
 542 // Base path of extensions installed on the system.
 543 #define SYS_EXT_DIR     "/usr/jdk/packages"
 544 #define EXTENSIONS_DIR  "/lib/ext"
 545 
 546   // Buffer that fits several sprintfs.
 547   // Note that the space for the colon and the trailing null are provided
 548   // by the nulls included by the sizeof operator.
 549   const size_t bufsize =
 550     MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
 551          sizeof(SYS_EXT_DIR) + sizeof("/lib/"), // invariant ld_library_path
 552          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir
 553   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 554 
 555   // sysclasspath, java_home, dll_dir
 556   {
 557     char *pslash;
 558     os::jvm_path(buf, bufsize);
 559 
 560     // Found the full path to libjvm.so.
 561     // Now cut the path to <java_home>/jre if we can.
 562     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 563     pslash = strrchr(buf, '/');
 564     if (pslash != NULL) {
 565       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 566     }
 567     Arguments::set_dll_dir(buf);
 568 
 569     if (pslash != NULL) {
 570       pslash = strrchr(buf, '/');
 571       if (pslash != NULL) {
 572         *pslash = '\0';        // Get rid of /lib.
 573       }
 574     }
 575     Arguments::set_java_home(buf);
 576     set_boot_path('/', ':');
 577   }
 578 
 579   // Where to look for native libraries.
 580   {
 581     // Use dlinfo() to determine the correct java.library.path.
 582     //
 583     // If we're launched by the Java launcher, and the user
 584     // does not set java.library.path explicitly on the commandline,
 585     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
 586     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
 587     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
 588     // /usr/lib), which is exactly what we want.
 589     //
 590     // If the user does set java.library.path, it completely
 591     // overwrites this setting, and always has.
 592     //
 593     // If we're not launched by the Java launcher, we may
 594     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
 595     // settings.  Again, dlinfo does exactly what we want.
 596 
 597     Dl_serinfo     info_sz, *info = &info_sz;
 598     Dl_serpath     *path;
 599     char           *library_path;
 600     char           *common_path = buf;
 601 
 602     // Determine search path count and required buffer size.
 603     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
 604       FREE_C_HEAP_ARRAY(char, buf);
 605       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
 606     }
 607 
 608     // Allocate new buffer and initialize.
 609     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
 610     info->dls_size = info_sz.dls_size;
 611     info->dls_cnt = info_sz.dls_cnt;
 612 
 613     // Obtain search path information.
 614     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
 615       FREE_C_HEAP_ARRAY(char, buf);
 616       FREE_C_HEAP_ARRAY(char, info);
 617       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
 618     }
 619 
 620     path = &info->dls_serpath[0];
 621 
 622     // Note: Due to a legacy implementation, most of the library path
 623     // is set in the launcher. This was to accommodate linking restrictions
 624     // on legacy Solaris implementations (which are no longer supported).
 625     // Eventually, all the library path setting will be done here.
 626     //
 627     // However, to prevent the proliferation of improperly built native
 628     // libraries, the new path component /usr/jdk/packages is added here.
 629 
 630     // Construct the invariant part of ld_library_path.
 631     sprintf(common_path, SYS_EXT_DIR "/lib");
 632 
 633     // Struct size is more than sufficient for the path components obtained
 634     // through the dlinfo() call, so only add additional space for the path
 635     // components explicitly added here.
 636     size_t library_path_size = info->dls_size + strlen(common_path);
 637     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
 638     library_path[0] = '\0';
 639 
 640     // Construct the desired Java library path from the linker's library
 641     // search path.
 642     //
 643     // For compatibility, it is optimal that we insert the additional path
 644     // components specific to the Java VM after those components specified
 645     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
 646     // infrastructure.
 647     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
 648       strcpy(library_path, common_path);
 649     } else {
 650       int inserted = 0;
 651       int i;
 652       for (i = 0; i < info->dls_cnt; i++, path++) {
 653         uint_t flags = path->dls_flags & LA_SER_MASK;
 654         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
 655           strcat(library_path, common_path);
 656           strcat(library_path, os::path_separator());
 657           inserted = 1;
 658         }
 659         strcat(library_path, path->dls_name);
 660         strcat(library_path, os::path_separator());
 661       }
 662       // Eliminate trailing path separator.
 663       library_path[strlen(library_path)-1] = '\0';
 664     }
 665 
 666     // happens before argument parsing - can't use a trace flag
 667     // tty->print_raw("init_system_properties_values: native lib path: ");
 668     // tty->print_raw_cr(library_path);
 669 
 670     // Callee copies into its own buffer.
 671     Arguments::set_library_path(library_path);
 672 
 673     FREE_C_HEAP_ARRAY(char, library_path);
 674     FREE_C_HEAP_ARRAY(char, info);
 675   }
 676 
 677   // Extensions directories.
 678   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
 679   Arguments::set_ext_dirs(buf);
 680 
 681   FREE_C_HEAP_ARRAY(char, buf);
 682 
 683 #undef SYS_EXT_DIR
 684 #undef EXTENSIONS_DIR
 685 }
 686 
 687 void os::breakpoint() {
 688   BREAKPOINT;
 689 }
 690 
 691 bool os::obsolete_option(const JavaVMOption *option) {
 692   if (!strncmp(option->optionString, "-Xt", 3)) {
 693     return true;
 694   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
 695     return true;
 696   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
 697     return true;
 698   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
 699     return true;
 700   }
 701   return false;
 702 }
 703 
 704 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
 705   address  stackStart  = (address)thread->stack_base();
 706   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
 707   if (sp < stackStart && sp >= stackEnd) return true;
 708   return false;
 709 }
 710 
 711 extern "C" void breakpoint() {
 712   // use debugger to set breakpoint here
 713 }
 714 
 715 static thread_t main_thread;
 716 
 717 // Thread start routine for all newly created threads
 718 extern "C" void* thread_native_entry(void* thread_addr) {
 719   // Try to randomize the cache line index of hot stack frames.
 720   // This helps when threads of the same stack traces evict each other's
 721   // cache lines. The threads can be either from the same JVM instance, or
 722   // from different JVM instances. The benefit is especially true for
 723   // processors with hyperthreading technology.
 724   static int counter = 0;
 725   int pid = os::current_process_id();
 726   alloca(((pid ^ counter++) & 7) * 128);
 727 
 728   int prio;
 729   Thread* thread = (Thread*)thread_addr;
 730 
 731   thread->initialize_thread_current();
 732 
 733   OSThread* osthr = thread->osthread();
 734 
 735   osthr->set_lwp_id(_lwp_self());  // Store lwp in case we are bound
 736   thread->_schedctl = (void *) schedctl_init();
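       // schedctl_init() sets up the scheduler-control (preemption-control)
       // interface for this LWP; the handle is cached on the thread for later use.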
 737 
 738   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").",
 739     os::current_thread_id());
 740 
 741   if (UseNUMA) {
 742     int lgrp_id = os::numa_get_group_id();
 743     if (lgrp_id != -1) {
 744       thread->set_lgrp_id(lgrp_id);
 745     }
 746   }
 747 
 748   // Our priority was set when we were created, and stored in the
 749   // osthread, but couldn't be passed through to our LWP until now.
 750   // So read back the priority and set it again.
 751 
 752   if (osthr->thread_id() != -1) {
 753     if (UseThreadPriorities) {
 754       int prio = osthr->native_priority();
 755       if (ThreadPriorityVerbose) {
 756         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
 757                       INTPTR_FORMAT ", setting priority: %d\n",
 758                       osthr->thread_id(), osthr->lwp_id(), prio);
 759       }
 760       os::set_native_priority(thread, prio);
 761     }
 762   } else if (ThreadPriorityVerbose) {
 763     warning("Can't set priority in _start routine, thread id hasn't been set\n");
 764   }
 765 
 766   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 767 
 768   // initialize signal mask for this thread
 769   os::Solaris::hotspot_sigmask(thread);
 770 
 771   thread->run();
 772 
 773   // One less thread is executing
 774   // When the VMThread gets here, the main thread may have already exited
 775   // which frees the CodeHeap containing the Atomic::dec code
 776   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 777     Atomic::dec(&os::Solaris::_os_thread_count);
 778   }
 779 
 780   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 781 
 782   // If a thread has not deleted itself ("delete this") as part of its
 783   // termination sequence, we have to ensure thread-local-storage is
 784   // cleared before we actually terminate. No threads should ever be
 785   // deleted asynchronously with respect to their termination.
 786   if (Thread::current_or_null_safe() != NULL) {
 787     assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
 788     thread->clear_thread_current();
 789   }
 790 
 791   if (UseDetachedThreads) {
 792     thr_exit(NULL);
 793     ShouldNotReachHere();
 794   }
 795   return NULL;
 796 }
 797 
 798 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
 799   // Allocate the OSThread object
 800   OSThread* osthread = new OSThread(NULL, NULL);
 801   if (osthread == NULL) return NULL;
 802 
 803   // Store info on the Solaris thread into the OSThread
 804   osthread->set_thread_id(thread_id);
 805   osthread->set_lwp_id(_lwp_self());
 806   thread->_schedctl = (void *) schedctl_init();
 807 
 808   if (UseNUMA) {
 809     int lgrp_id = os::numa_get_group_id();
 810     if (lgrp_id != -1) {
 811       thread->set_lgrp_id(lgrp_id);
 812     }
 813   }
 814 
 815   if (ThreadPriorityVerbose) {
 816     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
 817                   osthread->thread_id(), osthread->lwp_id());
 818   }
 819 
 820   // Initial thread state is INITIALIZED, not SUSPENDED
 821   osthread->set_state(INITIALIZED);
 822 
 823   return osthread;
 824 }
 825 
 826 void os::Solaris::hotspot_sigmask(Thread* thread) {
 827   // Save caller's signal mask
 828   sigset_t sigmask;
 829   pthread_sigmask(SIG_SETMASK, NULL, &sigmask);
 830   OSThread *osthread = thread->osthread();
 831   osthread->set_caller_sigmask(sigmask);
 832 
 833   pthread_sigmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
 834   if (!ReduceSignalUsage) {
 835     if (thread->is_VM_thread()) {
 836       // Only the VM thread handles BREAK_SIGNAL ...
 837       pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
 838     } else {
 839       // ... all other threads block BREAK_SIGNAL
 840       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
 841       pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
 842     }
 843   }
 844 }
 845 
 846 bool os::create_attached_thread(JavaThread* thread) {
 847 #ifdef ASSERT
 848   thread->verify_not_published();
 849 #endif
 850   OSThread* osthread = create_os_thread(thread, thr_self());
 851   if (osthread == NULL) {
 852     return false;
 853   }
 854 
 855   // Initial thread state is RUNNABLE
 856   osthread->set_state(RUNNABLE);
 857   thread->set_osthread(osthread);
 858 
 859   // initialize signal mask for this thread
 860   // and save the caller's signal mask
 861   os::Solaris::hotspot_sigmask(thread);
 862 
 863   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 864     os::current_thread_id());
 865 
 866   return true;
 867 }
 868 
 869 bool os::create_main_thread(JavaThread* thread) {
 870 #ifdef ASSERT
 871   thread->verify_not_published();
 872 #endif
 873   if (_starting_thread == NULL) {
 874     _starting_thread = create_os_thread(thread, main_thread);
 875     if (_starting_thread == NULL) {
 876       return false;
 877     }
 878   }
 879 
 880   // The primordial thread is runnable from the start
 881   _starting_thread->set_state(RUNNABLE);
 882 
 883   thread->set_osthread(_starting_thread);
 884 
 885   // initialize signal mask for this thread
 886   // and save the caller's signal mask
 887   os::Solaris::hotspot_sigmask(thread);
 888 
 889   return true;
 890 }
 891 
 892 // Helper function to trace thread attributes, similar to os::Posix::describe_pthread_attr()
 893 static char* describe_thr_create_attributes(char* buf, size_t buflen,
 894                                             size_t stacksize, long flags) {
 895   stringStream ss(buf, buflen);
 896   ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 897   ss.print("flags: ");
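       // X-macro: ALL(PRINT_FLAG) expands to one PRINT_FLAG(...) per flag listed
       // below, appending the flag's name to the stream whenever it is set in 'flags'.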
 898   #define PRINT_FLAG(f) if (flags & f) ss.print( #f " ");
 899   #define ALL(X) \
 900     X(THR_SUSPENDED) \
 901     X(THR_DETACHED) \
 902     X(THR_BOUND) \
 903     X(THR_NEW_LWP) \
 904     X(THR_DAEMON)
 905   ALL(PRINT_FLAG)
 906   #undef ALL
 907   #undef PRINT_FLAG
 908   return buf;
 909 }
 910 
 911 // return default stack size for thr_type
 912 size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
 913   // default stack size when not specified by caller is 1M (2M for LP64)
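       // BytesPerWord is 8 on LP64 and 4 on 32-bit, so (BytesPerWord >> 2) * K * K
       // evaluates to 2M and 1M respectively.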
 914   size_t s = (BytesPerWord >> 2) * K * K;
 915   return s;
 916 }
 917 
 918 bool os::create_thread(Thread* thread, ThreadType thr_type,
 919                        size_t req_stack_size) {
 920   // Allocate the OSThread object
 921   OSThread* osthread = new OSThread(NULL, NULL);
 922   if (osthread == NULL) {
 923     return false;
 924   }
 925 
 926   if (ThreadPriorityVerbose) {
 927     char *thrtyp;
 928     switch (thr_type) {
 929     case vm_thread:
 930       thrtyp = (char *)"vm";
 931       break;
 932     case cgc_thread:
 933       thrtyp = (char *)"cgc";
 934       break;
 935     case pgc_thread:
 936       thrtyp = (char *)"pgc";
 937       break;
 938     case java_thread:
 939       thrtyp = (char *)"java";
 940       break;
 941     case compiler_thread:
 942       thrtyp = (char *)"compiler";
 943       break;
 944     case watcher_thread:
 945       thrtyp = (char *)"watcher";
 946       break;
 947     default:
 948       thrtyp = (char *)"unknown";
 949       break;
 950     }
 951     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
 952   }
 953 
 954   // calculate stack size if it's not specified by caller
 955   size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
 956 
 957   // Initial state is ALLOCATED but not INITIALIZED
 958   osthread->set_state(ALLOCATED);
 959 
 960   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
 961     // We got lots of threads. Check if we still have some address space left
 962     // by trying to reserve a chunk of memory (VirtualMemoryBangSize) and
 963     // releasing it again right away.
 964     const size_t VirtualMemoryBangSize = 20*K*K;
 965     char* mem = os::reserve_memory(VirtualMemoryBangSize);
 966     if (mem == NULL) {
 967       delete osthread;
 968       return false;
 969     } else {
 970       // Release the memory again
 971       os::release_memory(mem, VirtualMemoryBangSize);
 972     }
 973   }
 974 
 975   // Setup osthread because the child thread may need it.
 976   thread->set_osthread(osthread);
 977 
 978   // Create the Solaris thread
 979   thread_t tid = 0;
 980   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
 981   int      status;
 982 
 983   // Mark that we don't have an lwp or thread id yet.
 984   // In case we attempt to set the priority before the thread starts.
 985   osthread->set_lwp_id(-1);
 986   osthread->set_thread_id(-1);
 987 
 988   status = thr_create(NULL, stack_size, thread_native_entry, thread, flags, &tid);
 989 
 990   char buf[64];
 991   if (status == 0) {
 992     log_info(os, thread)("Thread started (tid: " UINTX_FORMAT ", attributes: %s). ",
 993       (uintx) tid, describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags));
 994   } else {
 995     log_warning(os, thread)("Failed to start thread - thr_create failed (%s) for attributes: %s.",
 996       os::errno_name(status), describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags));
 997   }
 998 
 999   if (status != 0) {
1000     thread->set_osthread(NULL);
1001     // Need to clean up stuff we've allocated so far
1002     delete osthread;
1003     return false;
1004   }
1005 
1006   Atomic::inc(&os::Solaris::_os_thread_count);
1007 
1008   // Store info on the Solaris thread into the OSThread
1009   osthread->set_thread_id(tid);
1010 
1011   // Remember that we created this thread so we can set priority on it
1012   osthread->set_vm_created();
1013 
1014   // Most thread types will set an explicit priority before starting the thread,
1015   // but for those that don't we need a valid value to read back in thread_native_entry.
1016   osthread->set_native_priority(NormPriority);
1017 
1018   // Initial thread state is INITIALIZED, not SUSPENDED
1019   osthread->set_state(INITIALIZED);
1020 
1021   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1022   return true;
1023 }
1024 
1025 debug_only(static bool signal_sets_initialized = false);
1026 static sigset_t unblocked_sigs, vm_sigs;
1027 
1028 bool os::Solaris::is_sig_ignored(int sig) {
1029   struct sigaction oact;
1030   sigaction(sig, (struct sigaction*)NULL, &oact);
1031   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
1032                                  : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
1033   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
1034     return true;
1035   } else {
1036     return false;
1037   }
1038 }
1039 
1040 void os::Solaris::signal_sets_init() {
1041   // Should also have an assertion stating we are still single-threaded.
1042   assert(!signal_sets_initialized, "Already initialized");
1043   // Fill in signals that are necessarily unblocked for all threads in
1044   // the VM. Currently, we unblock the following signals:
1045   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1046   //                         by -Xrs (=ReduceSignalUsage));
1047   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1048   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1049   // the dispositions or masks wrt these signals.
1050   // Programs embedding the VM that want to use the above signals for their
1051   // own purposes must, at this time, use the "-Xrs" option to prevent
1052   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1053   // (See bug 4345157, and other related bugs).
1054   // In reality, though, unblocking these signals is really a nop, since
1055   // these signals are not blocked by default.
1056   sigemptyset(&unblocked_sigs);
1057   sigaddset(&unblocked_sigs, SIGILL);
1058   sigaddset(&unblocked_sigs, SIGSEGV);
1059   sigaddset(&unblocked_sigs, SIGBUS);
1060   sigaddset(&unblocked_sigs, SIGFPE);
1061   sigaddset(&unblocked_sigs, ASYNC_SIGNAL);
1062 
1063   if (!ReduceSignalUsage) {
1064     if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1065       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1066     }
1067     if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1068       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1069     }
1070     if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1071       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1072     }
1073   }
1074   // Fill in signals that are blocked by all but the VM thread.
1075   sigemptyset(&vm_sigs);
1076   if (!ReduceSignalUsage) {
1077     sigaddset(&vm_sigs, BREAK_SIGNAL);
1078   }
1079   debug_only(signal_sets_initialized = true);
1080 
1081   // For diagnostics only used in run_periodic_checks
1082   sigemptyset(&check_signal_done);
1083 }
1084 
1085 // These are signals that are unblocked while a thread is running Java.
1086 // (For some reason, they get blocked by default.)
1087 sigset_t* os::Solaris::unblocked_signals() {
1088   assert(signal_sets_initialized, "Not initialized");
1089   return &unblocked_sigs;
1090 }
1091 
1092 // These are the signals that are blocked while a (non-VM) thread is
1093 // running Java. Only the VM thread handles these signals.
1094 sigset_t* os::Solaris::vm_signals() {
1095   assert(signal_sets_initialized, "Not initialized");
1096   return &vm_sigs;
1097 }
1098 
1099 void _handle_uncaught_cxx_exception() {
1100   VMError::report_and_die("An uncaught C++ exception");
1101 }
1102 
1103 
1104 // First crack at OS-specific initialization, from inside the new thread.
1105 void os::initialize_thread(Thread* thr) {
1106   int r = thr_main();
1107   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
1108   if (r) {
1109     JavaThread* jt = (JavaThread *)thr;
1110     assert(jt != NULL, "Sanity check");
1111     size_t stack_size;
1112     address base = jt->stack_base();
1113     if (Arguments::created_by_java_launcher()) {
1114       // Use 2MB to allow for Solaris 7 64 bit mode.
1115       stack_size = JavaThread::stack_size_at_create() == 0
1116         ? 2048*K : JavaThread::stack_size_at_create();
1117 
1118       // There are rare cases when we may have already used more than
1119       // the basic stack size allotment before this method is invoked.
1120       // Attempt to allow for a normally sized java_stack.
1121       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1122       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1123     } else {
1124       // 6269555: If we were not created by a Java launcher, i.e. if we are
1125       // running embedded in a native application, treat the primordial thread
1126       // as much like a native attached thread as possible.  This means using
1127       // the current stack size from thr_stksegment(), unless it is too large
1128       // to reliably setup guard pages.  A reasonable max size is 8MB.
1129       size_t current_size = current_stack_size();
1130       // This should never happen, but just in case....
1131       if (current_size == 0) current_size = 2 * K * K;
1132       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1133     }
1134     address bottom = align_up(base - stack_size, os::vm_page_size());
1135     stack_size = (size_t)(base - bottom);
1136 
1137     assert(stack_size > 0, "Stack size calculation problem");
1138 
1139     if (stack_size > jt->stack_size()) {
1140 #ifndef PRODUCT
1141       struct rlimit limits;
1142       getrlimit(RLIMIT_STACK, &limits);
1143       size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1144       assert(size >= jt->stack_size(), "Stack size problem in main thread");
1145 #endif
1146       tty->print_cr("Stack size of " SIZE_FORMAT " Kb exceeds current limit of " SIZE_FORMAT " Kb.\n"
1147                     "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1148                     "See limit(1) to increase the stack size limit.",
1149                     stack_size / K, jt->stack_size() / K);
1150       vm_exit(1);
1151     }
1152     assert(jt->stack_size() >= stack_size,
1153            "Attempt to map more stack than was allocated");
1154     jt->set_stack_size(stack_size);
1155   }
1156 
1157   // With the T2 libthread (T1 is no longer supported) threads are always bound
1158   // and we use stackbanging in all cases.
1159 
1160   os::Solaris::init_thread_fpu_state();
1161   std::set_terminate(_handle_uncaught_cxx_exception);
1162 }
1163 
1164 
1165 
1166 // Free Solaris resources related to the OSThread
1167 void os::free_thread(OSThread* osthread) {
1168   assert(osthread != NULL, "os::free_thread but osthread not set");
1169 
1170   // We are told to free resources of the argument thread,
1171   // but we can only really operate on the current thread.
1172   assert(Thread::current()->osthread() == osthread,
1173          "os::free_thread but not current thread");
1174 
1175   // Restore caller's signal mask
1176   sigset_t sigmask = osthread->caller_sigmask();
1177   pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
1178 
1179   delete osthread;
1180 }
1181 
1182 void os::pd_start_thread(Thread* thread) {
1183   int status = thr_continue(thread->osthread()->thread_id());
1184   assert_status(status == 0, status, "thr_continue failed");
1185 }
1186 
1187 
1188 intx os::current_thread_id() {
1189   return (intx)thr_self();
1190 }
1191 
1192 static pid_t _initial_pid = 0;
1193 
1194 int os::current_process_id() {
1195   return (int)(_initial_pid ? _initial_pid : getpid());
1196 }
1197 
1198 // gethrtime() should be monotonic according to the documentation,
1199 // but some virtualized platforms are known to break this guarantee.
1200 // getTimeNanos() must be guaranteed not to move backwards, so we
1201 // are forced to add a check here.
1202 inline hrtime_t getTimeNanos() {
1203   const hrtime_t now = gethrtime();
1204   const hrtime_t prev = max_hrtime;
1205   if (now <= prev) {
1206     return prev;   // same or retrograde time;
1207   }
1208   const hrtime_t obsv = Atomic::cmpxchg(now, &max_hrtime, prev);
1209   assert(obsv >= prev, "invariant");   // Monotonicity
1210   // If the CAS succeeded then we're done and return "now".
1211   // If the CAS failed and the observed value "obsv" is >= now then
1212   // we should return "obsv".  If the CAS failed and now > obsv > prev then
1213   // some other thread raced this thread and installed a new value, in which case
1214   // we could either (a) retry the entire operation, (b) retry trying to install now
1215   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1216   // we might discard a higher "now" value in deference to a slightly lower but freshly
1217   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1218   // to (a) or (b) -- and greatly reduces coherence traffic.
1219   // We might also condition (c) on the magnitude of the delta between obsv and now.
1220   // Avoiding excessive CAS operations to hot RW locations is critical.
1221   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1222   return (prev == obsv) ? now : obsv;
1223 }
1224 
1225 // Time since start-up in seconds to a fine granularity.
1226 // Used by VMSelfDestructTimer and the MemProfiler.
1227 double os::elapsedTime() {
1228   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1229 }
1230 
1231 jlong os::elapsed_counter() {
1232   return (jlong)(getTimeNanos() - first_hrtime);
1233 }
1234 
1235 jlong os::elapsed_frequency() {
1236   return hrtime_hz;
1237 }
1238 
1239 // Return the real, user, and system times in seconds from an
1240 // arbitrary fixed point in the past.
1241 bool os::getTimesSecs(double* process_real_time,
1242                       double* process_user_time,
1243                       double* process_system_time) {
1244   struct tms ticks;
1245   clock_t real_ticks = times(&ticks);
1246 
1247   if (real_ticks == (clock_t) (-1)) {
1248     return false;
1249   } else {
1250     double ticks_per_second = (double) clock_tics_per_sec;
1251     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1252     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1253     // For consistency return the real time from getTimeNanos()
1254     // converted to seconds.
1255     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1256 
1257     return true;
1258   }
1259 }
1260 
1261 bool os::supports_vtime() { return true; }
1262 bool os::enable_vtime() { return false; }
1263 bool os::vtime_enabled() { return false; }
1264 
1265 double os::elapsedVTime() {
1266   return (double)gethrvtime() / (double)hrtime_hz;
1267 }
1268 
1269 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1270 jlong os::javaTimeMillis() {
1271   timeval t;
1272   if (gettimeofday(&t, NULL) == -1) {
1273     fatal("os::javaTimeMillis: gettimeofday (%s)", os::strerror(errno));
1274   }
1275   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1276 }
1277 
1278 // Must return seconds+nanos since Jan 1 1970. This must use the same
1279 // time source as javaTimeMillis and can't use get_nsec_fromepoch as
1280 // we need better than 1ms accuracy
1281 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1282   timeval t;
1283   if (gettimeofday(&t, NULL) == -1) {
1284     fatal("os::javaTimeSystemUTC: gettimeofday (%s)", os::strerror(errno));
1285   }
1286   seconds = jlong(t.tv_sec);
1287   nanos = jlong(t.tv_usec) * 1000;
1288 }
1289 
1290 
1291 jlong os::javaTimeNanos() {
1292   return (jlong)getTimeNanos();
1293 }
1294 
1295 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1296   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1297   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1298   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1299   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1300 }
1301 
1302 char * os::local_time_string(char *buf, size_t buflen) {
1303   struct tm t;
1304   time_t long_time;
1305   time(&long_time);
1306   localtime_r(&long_time, &t);
1307   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1308                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1309                t.tm_hour, t.tm_min, t.tm_sec);
1310   return buf;
1311 }
1312 
1313 // Note: os::shutdown() might be called very early during initialization, or
1314 // called from signal handler. Before adding something to os::shutdown(), make
1315 // sure it is async-safe and can handle partially initialized VM.
1316 void os::shutdown() {
1317 
1318   // allow PerfMemory to attempt cleanup of any persistent resources
1319   perfMemory_exit();
1320 
1321   // needs to remove object in file system
1322   AttachListener::abort();
1323 
1324   // flush buffered output, finish log files
1325   ostream_abort();
1326 
1327   // Check for abort hook
1328   abort_hook_t abort_hook = Arguments::abort_hook();
1329   if (abort_hook != NULL) {
1330     abort_hook();
1331   }
1332 }
1333 
1334 // Note: os::abort() might be called very early during initialization, or
1335 // called from signal handler. Before adding something to os::abort(), make
1336 // sure it is async-safe and can handle partially initialized VM.
1337 void os::abort(bool dump_core, void* siginfo, const void* context) {
1338   os::shutdown();
1339   if (dump_core) {
1340 #ifndef PRODUCT
1341     fdStream out(defaultStream::output_fd());
1342     out.print_raw("Current thread is ");
1343     char buf[16];
1344     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1345     out.print_raw_cr(buf);
1346     out.print_raw_cr("Dumping core ...");
1347 #endif
1348     ::abort(); // dump core (for debugging)
1349   }
1350 
1351   ::exit(1);
1352 }
1353 
1354 // Die immediately, no exit hook, no abort hook, no cleanup.
1355 void os::die() {
1356   ::abort(); // dump core (for debugging)
1357 }
1358 
1359 // DLL functions
1360 
1361 const char* os::dll_file_extension() { return ".so"; }
1362 
1363 // This must be hard coded because it's the system's temporary
1364 // directory, not the java application's temp directory, a la java.io.tmpdir.
1365 const char* os::get_temp_directory() { return "/tmp"; }
1366 
1367 // check if addr is inside libjvm.so
1368 bool os::address_is_in_vm(address addr) {
1369   static address libjvm_base_addr;
1370   Dl_info dlinfo;
1371 
1372   if (libjvm_base_addr == NULL) {
1373     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1374       libjvm_base_addr = (address)dlinfo.dli_fbase;
1375     }
1376     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1377   }
1378 
1379   if (dladdr((void *)addr, &dlinfo) != 0) {
1380     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1381   }
1382 
1383   return false;
1384 }
1385 
1386 typedef int (*dladdr1_func_type)(void *, Dl_info *, void **, int);
1387 static dladdr1_func_type dladdr1_func = NULL;
1388 
1389 bool os::dll_address_to_function_name(address addr, char *buf,
1390                                       int buflen, int * offset,
1391                                       bool demangle) {
1392   // buf is not optional, but offset is optional
1393   assert(buf != NULL, "sanity check");
1394 
1395   Dl_info dlinfo;
1396 
1397   // dladdr1_func was initialized in os::init()
1398   if (dladdr1_func != NULL) {
1399     // yes, we have dladdr1
1400 
1401     // Support for dladdr1 is checked at runtime; it may be
1402     // available even if the vm is built on a machine that does
1403     // not have dladdr1 support.  Make sure there is a value for
1404     // RTLD_DL_SYMENT.
1405 #ifndef RTLD_DL_SYMENT
1406   #define RTLD_DL_SYMENT 1
1407 #endif
1408 #ifdef _LP64
1409     Elf64_Sym * info;
1410 #else
1411     Elf32_Sym * info;
1412 #endif
1413     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1414                      RTLD_DL_SYMENT) != 0) {
1415       // see if we have a matching symbol that covers our address
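           // The ELF symbol's st_size gives its extent, so the symbol only covers
           // addr when addr falls within [dli_saddr, dli_saddr + st_size).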
1416       if (dlinfo.dli_saddr != NULL &&
1417           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1418         if (dlinfo.dli_sname != NULL) {
1419           if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
1420             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1421           }
1422           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1423           return true;
1424         }
1425       }
1426       // no matching symbol so try for just file info
1427       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1428         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1429                             buf, buflen, offset, dlinfo.dli_fname, demangle)) {
1430           return true;
1431         }
1432       }
1433     }
1434     buf[0] = '\0';
1435     if (offset != NULL) *offset  = -1;
1436     return false;
1437   }
1438 
1439   // no, only dladdr is available
1440   if (dladdr((void *)addr, &dlinfo) != 0) {
1441     // see if we have a matching symbol
1442     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1443       if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
1444         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1445       }
1446       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1447       return true;
1448     }
1449     // no matching symbol so try for just file info
1450     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1451       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1452                           buf, buflen, offset, dlinfo.dli_fname, demangle)) {
1453         return true;
1454       }
1455     }
1456   }
1457   buf[0] = '\0';
1458   if (offset != NULL) *offset  = -1;
1459   return false;
1460 }
1461 
1462 bool os::dll_address_to_library_name(address addr, char* buf,
1463                                      int buflen, int* offset) {
1464   // buf is not optional, but offset is optional
1465   assert(buf != NULL, "sanity check");
1466 
1467   Dl_info dlinfo;
1468 
1469   if (dladdr((void*)addr, &dlinfo) != 0) {
1470     if (dlinfo.dli_fname != NULL) {
1471       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1472     }
1473     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1474       *offset = addr - (address)dlinfo.dli_fbase;
1475     }
1476     return true;
1477   }
1478 
1479   buf[0] = '\0';
1480   if (offset) *offset = -1;
1481   return false;
1482 }
1483 
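     // Walk the link map of this process (reached by dlopen()ing our own
     // module) and invoke the callback once per loaded object with its name
     // and base address. Returns 0 on success, 1 on failure or when a
     // callback returns non-zero to stop the iteration.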
1484 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1485   Dl_info dli;
1486   // Locate our own module so we can dlopen() it and walk its link map.
1487   if (dladdr(CAST_FROM_FN_PTR(void *, os::get_loaded_modules_info), &dli) == 0 ||
1488       dli.dli_fname == NULL) {
1489     return 1;
1490   }
1491 
1492   void * handle = dlopen(dli.dli_fname, RTLD_LAZY);
1493   if (handle == NULL) {
1494     return 1;
1495   }
1496 
1497   Link_map *map = NULL;
1498   dlinfo(handle, RTLD_DI_LINKMAP, &map);
1499   if (map == NULL) {
1500     dlclose(handle);
1501     return 1;
1502   }
1503 
1504   while (map->l_prev != NULL) {
1505     map = map->l_prev;
1506   }
1507 
1508   while (map != NULL) {
1509     // Iterate through all map entries and call callback with fields of interest
1510     if (callback(map->l_name, (address)map->l_addr, (address)0, param)) {
1511       dlclose(handle);
1512       return 1;
1513     }
1514     map = map->l_next;
1515   }
1516 
1517   dlclose(handle);
1518   return 0;
1519 }
1520 
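     // Callback for print_dll_info() below: print one line per loaded module,
     // giving its base address and name.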
1521 int _print_dll_info_cb(const char * name, address base_address, address top_address, void * param) {
1522   outputStream * out = (outputStream *) param;
1523   out->print_cr(PTR_FORMAT " \t%s", base_address, name);
1524   return 0;
1525 }
1526 
1527 void os::print_dll_info(outputStream * st) {
1528   st->print_cr("Dynamic libraries:"); st->flush();
1529   if (get_loaded_modules_info(_print_dll_info_cb, (void *)st)) {
1530     st->print_cr("Error: Cannot print dynamic libraries.");
1531   }
1532 }
1533 
1534 // Loads .dll/.so and, in case of error, checks whether the
1535 // .dll/.so was built for the same architecture as HotSpot
1536 // is running on.
1537 
1538 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1539   void * result= ::dlopen(filename, RTLD_LAZY);
1540   if (result != NULL) {
1541     // Successful loading
1542     return result;
1543   }
1544 
1545   Elf32_Ehdr elf_head;
1546 
1547   // Read system error message into ebuf
1548   // It may or may not be overwritten below
1549   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1550   ebuf[ebuflen-1]='\0';
1551   int diag_msg_max_length=ebuflen-strlen(ebuf);
1552   char* diag_msg_buf=ebuf+strlen(ebuf);
1553 
1554   if (diag_msg_max_length==0) {
1555     // No more space in ebuf for additional diagnostics message
1556     return NULL;
1557   }
1558 
1559 
1560   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1561 
1562   if (file_descriptor < 0) {
1563     // Can't open library, report dlerror() message
1564     return NULL;
1565   }
1566 
1567   bool failed_to_read_elf_head=
1568     (sizeof(elf_head)!=
1569      (::read(file_descriptor, &elf_head,sizeof(elf_head))));
1570 
1571   ::close(file_descriptor);
1572   if (failed_to_read_elf_head) {
1573     // file i/o error - report dlerror() msg
1574     return NULL;
1575   }
1576 
1577   typedef struct {
1578     Elf32_Half  code;         // Actual value as defined in elf.h
1579     Elf32_Half  compat_class; // Compatibility of archs from the VM's point of view
1580     char        elf_class;    // 32 or 64 bit
1581     char        endianess;    // MSB or LSB
1582     char*       name;         // String representation
1583   } arch_t;
1584 
1585   static const arch_t arch_array[]={
1586     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1587     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1588     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1589     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1590     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1591     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1592     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1593     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1594     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1595     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1596   };
1597 
1598 #if  (defined IA32)
1599   static  Elf32_Half running_arch_code=EM_386;
1600 #elif   (defined AMD64)
1601   static  Elf32_Half running_arch_code=EM_X86_64;
1602 #elif  (defined IA64)
1603   static  Elf32_Half running_arch_code=EM_IA_64;
1604 #elif  (defined __sparc) && (defined _LP64)
1605   static  Elf32_Half running_arch_code=EM_SPARCV9;
1606 #elif  (defined __sparc) && (!defined _LP64)
1607   static  Elf32_Half running_arch_code=EM_SPARC;
1608 #elif  (defined __powerpc64__)
1609   static  Elf32_Half running_arch_code=EM_PPC64;
1610 #elif  (defined __powerpc__)
1611   static  Elf32_Half running_arch_code=EM_PPC;
1612 #elif (defined ARM)
1613   static  Elf32_Half running_arch_code=EM_ARM;
1614 #else
1615   #error Method os::dll_load requires that one of following is defined:\
1616        IA32, AMD64, IA64, __sparc, __powerpc__, ARM
1617 #endif
1618 
1619   // Identify compatibility class for VM's architecture and library's architecture
1620   // Obtain string descriptions for architectures
1621 
1622   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1623   int running_arch_index=-1;
1624 
1625   for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) {
1626     if (running_arch_code == arch_array[i].code) {
1627       running_arch_index    = i;
1628     }
1629     if (lib_arch.code == arch_array[i].code) {
1630       lib_arch.compat_class = arch_array[i].compat_class;
1631       lib_arch.name         = arch_array[i].name;
1632     }
1633   }
1634 
1635   assert(running_arch_index != -1,
1636          "Didn't find running architecture code (running_arch_code) in arch_array");
1637   if (running_arch_index == -1) {
1638     // Even though running architecture detection failed
1639     // we may still continue with reporting dlerror() message
1640     return NULL;
1641   }
1642 
1643   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1644     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1645     return NULL;
1646   }
1647 
1648   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1649     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1650     return NULL;
1651   }
1652 
1653   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1654     if (lib_arch.name!=NULL) {
1655       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1656                  " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1657                  lib_arch.name, arch_array[running_arch_index].name);
1658     } else {
1659       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1660                  " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1661                  lib_arch.code,
1662                  arch_array[running_arch_index].name);
1663     }
1664   }
1665 
1666   return NULL;
1667 }
1668 
1669 void* os::dll_lookup(void* handle, const char* name) {
1670   return dlsym(handle, name);
1671 }
1672 
1673 void* os::get_default_process_handle() {
1674   return (void*)::dlopen(NULL, RTLD_LAZY);
1675 }
1676 
1677 int os::stat(const char *path, struct stat *sbuf) {
1678   char pathbuf[MAX_PATH];
1679   if (strlen(path) > MAX_PATH - 1) {
1680     errno = ENAMETOOLONG;
1681     return -1;
1682   }
1683   os::native_path(strcpy(pathbuf, path));
1684   return ::stat(pathbuf, sbuf);
1685 }
1686 
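     // Return the last-modification time of the given file; asserts that
     // stat() succeeds.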
1687 static inline time_t get_mtime(const char* filename) {
1688   struct stat st;
1689   int ret = os::stat(filename, &st);
1690   assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
1691   return st.st_mtime;
1692 }
1693 
1694 int os::compare_file_modified_times(const char* file1, const char* file2) {
1695   time_t t1 = get_mtime(file1);
1696   time_t t2 = get_mtime(file2);
1697   return t1 - t2;
1698 }
1699 
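     // Copy the contents of an ASCII file to the given output stream.
     // Returns false if the file cannot be opened.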
1700 static bool _print_ascii_file(const char* filename, outputStream* st) {
1701   int fd = ::open(filename, O_RDONLY);
1702   if (fd == -1) {
1703     return false;
1704   }
1705 
1706   char buf[32];
1707   int bytes;
1708   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
1709     st->print_raw(buf, bytes);
1710   }
1711 
1712   ::close(fd);
1713 
1714   return true;
1715 }
1716 
1717 void os::print_os_info_brief(outputStream* st) {
1718   os::Solaris::print_distro_info(st);
1719 
1720   os::Posix::print_uname_info(st);
1721 
1722   os::Solaris::print_libversion_info(st);
1723 }
1724 
1725 void os::print_os_info(outputStream* st) {
1726   st->print("OS:");
1727 
1728   os::Solaris::print_distro_info(st);
1729 
1730   os::Posix::print_uname_info(st);
1731 
1732   os::Solaris::print_libversion_info(st);
1733 
1734   os::Posix::print_rlimit_info(st);
1735 
1736   os::Posix::print_load_average(st);
1737 }
1738 
1739 void os::Solaris::print_distro_info(outputStream* st) {
1740   if (!_print_ascii_file("/etc/release", st)) {
1741     st->print("Solaris");
1742   }
1743   st->cr();
1744 }
1745 
1746 void os::get_summary_os_info(char* buf, size_t buflen) {
1747   strncpy(buf, "Solaris", buflen);  // default to plain solaris
1748   FILE* fp = fopen("/etc/release", "r");
1749   if (fp != NULL) {
1750     char tmp[256];
1751     // Only get the first line and chop out everything but the os name.
1752     if (fgets(tmp, sizeof(tmp), fp)) {
1753       char* ptr = tmp;
1754       // skip past whitespace characters
1755       while (*ptr != '\0' && (*ptr == ' ' || *ptr == '\t' || *ptr == '\n')) ptr++;
1756       if (*ptr != '\0') {
1757         char* nl = strchr(ptr, '\n');
1758         if (nl != NULL) *nl = '\0';
1759         strncpy(buf, ptr, buflen);
1760       }
1761     }
1762     fclose(fp);
1763   }
1764 }
1765 
1766 void os::Solaris::print_libversion_info(outputStream* st) {
1767   st->print("  (T2 libthread)");
1768   st->cr();
1769 }
1770 
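     // Scan /proc/self/map and print a warning for every mapping that starts
     // at virtual address 0x0 (such a mapping can mask null pointer bugs).
     // Returns true if at least one such mapping was found.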
1771 static bool check_addr0(outputStream* st) {
1772   jboolean status = false;
1773   const int read_chunk = 200;
1774   int ret = 0;
1775   int nmap = 0;
1776   int fd = ::open("/proc/self/map",O_RDONLY);
1777   if (fd >= 0) {
1778     prmap_t *p = NULL;
1779     char *mbuff = (char *) calloc(read_chunk, sizeof(prmap_t));
1780     if (NULL == mbuff) {
1781       ::close(fd);
1782       return status;
1783     }
1784     while ((ret = ::read(fd, mbuff, read_chunk*sizeof(prmap_t))) > 0) {
1785       // check that read() has not returned partial data
1786       if (0 != ret % sizeof(prmap_t)) {
1787         break;
1788       }
1789       nmap = ret / sizeof(prmap_t);
1790       p = (prmap_t *)mbuff;
1791       for (int i = 0; i < nmap; i++) {
1792         if (p->pr_vaddr == 0x0) {
1793           st->print("Warning: Address: " PTR_FORMAT ", Size: " SIZE_FORMAT "K, ",p->pr_vaddr, p->pr_size/1024);
1794           st->print("Mapped file: %s, ", p->pr_mapname[0] == '\0' ? "None" : p->pr_mapname);
1795           st->print("Access: ");
1796           st->print("%s",(p->pr_mflags & MA_READ)  ? "r" : "-");
1797           st->print("%s",(p->pr_mflags & MA_WRITE) ? "w" : "-");
1798           st->print("%s",(p->pr_mflags & MA_EXEC)  ? "x" : "-");
1799           st->cr();
1800           status = true;
1801         }
1802         p++;
1803       }
1804     }
1805     free(mbuff);
1806     ::close(fd);
1807   }
1808   return status;
1809 }
1810 
1811 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1812   // Get MHz with system call. We don't seem to already have this.
1813   processor_info_t stats;
1814   processorid_t id = getcpuid();
1815   int clock = 0;
1816   if (processor_info(id, &stats) != -1) {
1817     clock = stats.pi_clock;  // pi_processor_type isn't more informative than below
1818   }
1819 #ifdef AMD64
1820   snprintf(buf, buflen, "x86 64 bit %d MHz", clock);
1821 #else
1822   // must be sparc
1823   snprintf(buf, buflen, "Sparcv9 64 bit %d MHz", clock);
1824 #endif
1825 }
1826 
1827 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1828   // Nothing to do for now.
1829 }
1830 
1831 void os::print_memory_info(outputStream* st) {
1832   st->print("Memory:");
1833   st->print(" %dk page", os::vm_page_size()>>10);
1834   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
1835   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
1836   st->cr();
1837   (void) check_addr0(st);
1838 }
1839 
1840 // These live apart from the rest of the signal handling code because the
1841 // diagnostic printing below needs them.
1842 static int Maxsignum = 0;
1843 static int *ourSigFlags = NULL;
1844 
1845 int os::Solaris::get_our_sigflags(int sig) {
1846   assert(ourSigFlags!=NULL, "signal data structure not initialized");
1847   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
1848   return ourSigFlags[sig];
1849 }
1850 
1851 void os::Solaris::set_our_sigflags(int sig, int flags) {
1852   assert(ourSigFlags!=NULL, "signal data structure not initialized");
1853   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
1854   ourSigFlags[sig] = flags;
1855 }
1856 
1857 
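     // Format a human-readable name for a signal handler address into buf:
     // "library+0xoffset" when the containing library can be determined,
     // otherwise the raw handler address.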
1858 static const char* get_signal_handler_name(address handler,
1859                                            char* buf, int buflen) {
1860   int offset;
1861   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
1862   if (found) {
1863     // skip directory names
1864     const char *p1, *p2;
1865     p1 = buf;
1866     size_t len = strlen(os::file_separator());
1867     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
1868     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
1869   } else {
1870     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
1871   }
1872   return buf;
1873 }
1874 
1875 static void print_signal_handler(outputStream* st, int sig,
1876                                  char* buf, size_t buflen) {
1877   struct sigaction sa;
1878 
1879   sigaction(sig, NULL, &sa);
1880 
1881   st->print("%s: ", os::exception_name(sig, buf, buflen));
1882 
1883   address handler = (sa.sa_flags & SA_SIGINFO)
1884                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
1885                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
1886 
1887   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
1888     st->print("SIG_DFL");
1889   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
1890     st->print("SIG_IGN");
1891   } else {
1892     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
1893   }
1894 
1895   st->print(", sa_mask[0]=");
1896   os::Posix::print_signal_set_short(st, &sa.sa_mask);
1897 
1898   address rh = VMError::get_resetted_sighandler(sig);
1899   // Maybe the handler was reset by VMError?
1900   if (rh != NULL) {
1901     handler = rh;
1902     sa.sa_flags = VMError::get_resetted_sigflags(sig);
1903   }
1904 
1905   st->print(", sa_flags=");
1906   os::Posix::print_sa_flags(st, sa.sa_flags);
1907 
1908   // Check: is it our handler?
1909   if (handler == CAST_FROM_FN_PTR(address, signalHandler)) {
1910     // It is our signal handler
1911     // check for flags
1912     if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
1913       st->print(
1914                 ", flags were changed from " PTR32_FORMAT ", consider using jsig library",
1915                 os::Solaris::get_our_sigflags(sig));
1916     }
1917   }
1918   st->cr();
1919 }
1920 
1921 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1922   st->print_cr("Signal Handlers:");
1923   print_signal_handler(st, SIGSEGV, buf, buflen);
1924   print_signal_handler(st, SIGBUS , buf, buflen);
1925   print_signal_handler(st, SIGFPE , buf, buflen);
1926   print_signal_handler(st, SIGPIPE, buf, buflen);
1927   print_signal_handler(st, SIGXFSZ, buf, buflen);
1928   print_signal_handler(st, SIGILL , buf, buflen);
1929   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
1930   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1931   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
1932   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1933   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
1934 }
1935 
1936 static char saved_jvm_path[MAXPATHLEN] = { 0 };
1937 
1938 // Find the full path to the current module, libjvm.so
1939 void os::jvm_path(char *buf, jint buflen) {
1940   // Error checking.
1941   if (buflen < MAXPATHLEN) {
1942     assert(false, "must use a large-enough buffer");
1943     buf[0] = '\0';
1944     return;
1945   }
1946   // Lazy resolve the path to current module.
1947   if (saved_jvm_path[0] != 0) {
1948     strcpy(buf, saved_jvm_path);
1949     return;
1950   }
1951 
1952   Dl_info dlinfo;
1953   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1954   assert(ret != 0, "cannot locate libjvm");
1955   if (ret != 0 && dlinfo.dli_fname != NULL) {
1956     if (os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen) == NULL) {
1957       return;
1958     }
1959   } else {
1960     buf[0] = '\0';
1961     return;
1962   }
1963 
1964   if (Arguments::sun_java_launcher_is_altjvm()) {
1965     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
1966     // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
1967     // If "/jre/lib/" appears at the right place in the string, then
1968     // assume we are installed in a JDK and we're done.  Otherwise, check
1969     // for a JAVA_HOME environment variable and fix up the path so it
1970     // looks like libjvm.so is installed there (append a fake suffix
1971     // hotspot/libjvm.so).
1972     const char *p = buf + strlen(buf) - 1;
1973     for (int count = 0; p > buf && count < 5; ++count) {
1974       for (--p; p > buf && *p != '/'; --p)
1975         /* empty */ ;
1976     }
1977 
1978     if (strncmp(p, "/jre/lib/", 9) != 0) {
1979       // Look for JAVA_HOME in the environment.
1980       char* java_home_var = ::getenv("JAVA_HOME");
1981       if (java_home_var != NULL && java_home_var[0] != 0) {
1982         char* jrelib_p;
1983         int   len;
1984 
1985         // Check the current module name "libjvm.so".
1986         p = strrchr(buf, '/');
1987         assert(strstr(p, "/libjvm") == p, "invalid library name");
1988 
1989         if (os::Posix::realpath(java_home_var, buf, buflen) == NULL) {
1990           return;
1991         }
1992         // determine if this is a legacy image or modules image
1993         // modules image doesn't have "jre" subdirectory
1994         len = strlen(buf);
1995         assert(len < buflen, "Ran out of buffer space");
1996         jrelib_p = buf + len;
1997         snprintf(jrelib_p, buflen-len, "/jre/lib");
1998         if (0 != access(buf, F_OK)) {
1999           snprintf(jrelib_p, buflen-len, "/lib");
2000         }
2001 
2002         if (0 == access(buf, F_OK)) {
2003           // Use current module name "libjvm.so"
2004           len = strlen(buf);
2005           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2006         } else {
2007           // Go back to path of .so
2008           if (os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen) == NULL) {
2009             return;
2010           }
2011         }
2012       }
2013     }
2014   }
2015 
2016   strncpy(saved_jvm_path, buf, MAXPATHLEN);
2017   saved_jvm_path[MAXPATHLEN - 1] = '\0';
2018 }
2019 
2020 
2021 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2022   // no prefix required, not even "_"
2023 }
2024 
2025 
2026 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2027   // no suffix required
2028 }
2029 
2030 // This method is a copy of JDK's sysGetLastErrorString
2031 // from src/solaris/hpi/src/system_md.c
2032 
2033 size_t os::lasterror(char *buf, size_t len) {
2034   if (errno == 0)  return 0;
2035 
2036   const char *s = os::strerror(errno);
2037   size_t n = ::strlen(s);
2038   if (n >= len) {
2039     n = len - 1;
2040   }
2041   ::strncpy(buf, s, n);
2042   buf[n] = '\0';
2043   return n;
2044 }
2045 
2046 
2047 // sun.misc.Signal
2048 
2049 extern "C" {
2050   static void UserHandler(int sig, void *siginfo, void *context) {
2051     // Ctrl-C is pressed during error reporting, likely because the error
2052     // handler fails to abort. Let VM die immediately.
2053     if (sig == SIGINT && VMError::is_error_reported()) {
2054       os::die();
2055     }
2056 
2057     os::signal_notify(sig);
2058     // We do not need to reinstate the signal handler each time...
2059   }
2060 }
2061 
2062 void* os::user_handler() {
2063   return CAST_FROM_FN_PTR(void*, UserHandler);
2064 }
2065 
2066 struct timespec PosixSemaphore::create_timespec(unsigned int sec, int nsec) {
2067   struct timespec ts;
2068   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2069 
2070   return ts;
2071 }
2072 
2073 extern "C" {
2074   typedef void (*sa_handler_t)(int);
2075   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2076 }
2077 
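     // Install 'handler' for 'signal_number' and return the previously
     // installed handler, or (void *)-1 if sigaction() fails.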
2078 void* os::signal(int signal_number, void* handler) {
2079   struct sigaction sigAct, oldSigAct;
2080   sigfillset(&(sigAct.sa_mask));
2081   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2082   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2083 
2084   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
2085     // -1 means registration failed
2086     return (void *)-1;
2087   }
2088 
2089   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2090 }
2091 
2092 void os::signal_raise(int signal_number) {
2093   raise(signal_number);
2094 }
2095 
2096 // The following code was moved here from os.cpp because it is
2097 // platform specific by its very nature.
2098 
2099 // a counter for each possible signal value
2100 static int Sigexit = 0;
2101 static jint *pending_signals = NULL;
2102 static int *preinstalled_sigs = NULL;
2103 static struct sigaction *chainedsigactions = NULL;
2104 static sema_t sig_sem;
2105 typedef int (*version_getting_t)();
2106 version_getting_t os::Solaris::get_libjsig_version = NULL;
2107 
2108 int os::sigexitnum_pd() {
2109   assert(Sigexit > 0, "signal memory not yet initialized");
2110   return Sigexit;
2111 }
2112 
2113 void os::Solaris::init_signal_mem() {
2114   // Initialize signal structures
2115   Maxsignum = SIGRTMAX;
2116   Sigexit = Maxsignum+1;
2117   assert(Maxsignum >0, "Unable to obtain max signal number");
2118 
2119   // pending_signals has one int per signal
2120   // The additional signal is for SIGEXIT - exit signal to signal_thread
2121   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2122   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2123 
2124   if (UseSignalChaining) {
2125     chainedsigactions = (struct sigaction *)os::malloc(sizeof(struct sigaction)
2126                                                         * (Maxsignum + 1), mtInternal);
2127     memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2128     preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2129     memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2130   }
2131   ourSigFlags = (int*)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2132   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2133 }
2134 
2135 void os::signal_init_pd() {
2136   int ret;
2137 
2138   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2139   assert(ret == 0, "sema_init() failed");
2140 }
2141 
2142 void os::signal_notify(int signal_number) {
2143   int ret;
2144 
2145   Atomic::inc(&pending_signals[signal_number]);
2146   ret = ::sema_post(&sig_sem);
2147   assert(ret == 0, "sema_post() failed");
2148 }
2149 
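     // Look for a pending signal previously recorded by os::signal_notify()
     // and consume it. If wait_for_signal is true, block on sig_sem until one
     // arrives, cooperating with the thread suspend protocol while blocked;
     // otherwise return -1 immediately when nothing is pending.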
2150 static int check_pending_signals(bool wait_for_signal) {
2151   int ret;
2152   while (true) {
2153     for (int i = 0; i < Sigexit + 1; i++) {
2154       jint n = pending_signals[i];
2155       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2156         return i;
2157       }
2158     }
2159     if (!wait_for_signal) {
2160       return -1;
2161     }
2162     JavaThread *thread = JavaThread::current();
2163     ThreadBlockInVM tbivm(thread);
2164 
2165     bool threadIsSuspended;
2166     do {
2167       thread->set_suspend_equivalent();
2168       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2169       while ((ret = ::sema_wait(&sig_sem)) == EINTR)
2170         ;
2171       assert(ret == 0, "sema_wait() failed");
2172 
2173       // were we externally suspended while we were waiting?
2174       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2175       if (threadIsSuspended) {
2176         // The semaphore has been incremented, but while we were waiting
2177         // another thread suspended us. We don't want to continue running
2178         // while suspended because that would surprise the thread that
2179         // suspended us.
2180         ret = ::sema_post(&sig_sem);
2181         assert(ret == 0, "sema_post() failed");
2182 
2183         thread->java_suspend_self();
2184       }
2185     } while (threadIsSuspended);
2186   }
2187 }
2188 
2189 int os::signal_lookup() {
2190   return check_pending_signals(false);
2191 }
2192 
2193 int os::signal_wait() {
2194   return check_pending_signals(true);
2195 }
2196 
2197 ////////////////////////////////////////////////////////////////////////////////
2198 // Virtual Memory
2199 
2200 static int page_size = -1;
2201 
2202 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
2203 // clear this var if support is not available.
2204 static bool has_map_align = true;
2205 
2206 int os::vm_page_size() {
2207   assert(page_size != -1, "must call os::init");
2208   return page_size;
2209 }
2210 
2211 // Solaris allocates memory by pages.
2212 int os::vm_allocation_granularity() {
2213   assert(page_size != -1, "must call os::init");
2214   return page_size;
2215 }
2216 
2217 static bool recoverable_mmap_error(int err) {
2218   // See if the error is one we can let the caller handle. This
2219   // list of errno values comes from the Solaris mmap(2) man page.
2220   switch (err) {
2221   case EBADF:
2222   case EINVAL:
2223   case ENOTSUP:
2224     // let the caller deal with these errors
2225     return true;
2226 
2227   default:
2228     // Any remaining errors on this OS can cause our reserved mapping
2229     // to be lost. That can cause confusion where different data
2230     // structures think they have the same memory mapped. The worst
2231     // scenario is if both the VM and a library think they have the
2232     // same memory mapped.
2233     return false;
2234   }
2235 }
2236 
2237 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2238                                     int err) {
2239   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2240           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2241           os::strerror(err), err);
2242 }
2243 
2244 static void warn_fail_commit_memory(char* addr, size_t bytes,
2245                                     size_t alignment_hint, bool exec,
2246                                     int err) {
2247   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2248           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2249           alignment_hint, exec, os::strerror(err), err);
2250 }
2251 
2252 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2253   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2254   size_t size = bytes;
2255   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2256   if (res != NULL) {
2257     if (UseNUMAInterleaving) {
2258       numa_make_global(addr, bytes);
2259     }
2260     return 0;
2261   }
2262 
2263   int err = errno;  // save errno from mmap() call in mmap_chunk()
2264 
2265   if (!recoverable_mmap_error(err)) {
2266     warn_fail_commit_memory(addr, bytes, exec, err);
2267     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2268   }
2269 
2270   return err;
2271 }
2272 
2273 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2274   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2275 }
2276 
2277 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2278                                   const char* mesg) {
2279   assert(mesg != NULL, "mesg must be specified");
2280   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2281   if (err != 0) {
2282     // the caller wants all commit errors to exit with the specified mesg:
2283     warn_fail_commit_memory(addr, bytes, exec, err);
2284     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "%s", mesg);
2285   }
2286 }
2287 
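     // Return the first entry of _page_sizes that the given alignment is a
     // multiple of, falling back to the default page size.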
2288 size_t os::Solaris::page_size_for_alignment(size_t alignment) {
2289   assert(is_aligned(alignment, (size_t) vm_page_size()),
2290          SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
2291          alignment, (size_t) vm_page_size());
2292 
2293   for (int i = 0; _page_sizes[i] != 0; i++) {
2294     if (is_aligned(alignment, _page_sizes[i])) {
2295       return _page_sizes[i];
2296     }
2297   }
2298 
2299   return (size_t) vm_page_size();
2300 }
2301 
2302 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2303                                     size_t alignment_hint, bool exec) {
2304   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2305   if (err == 0 && UseLargePages && alignment_hint > 0) {
2306     assert(is_aligned(bytes, alignment_hint),
2307            SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint);
2308 
2309     // The syscall memcntl requires an exact page size (see man memcntl for details).
2310     size_t page_size = page_size_for_alignment(alignment_hint);
2311     if (page_size > (size_t) vm_page_size()) {
2312       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2313     }
2314   }
2315   return err;
2316 }
2317 
2318 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2319                           bool exec) {
2320   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2321 }
2322 
2323 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2324                                   size_t alignment_hint, bool exec,
2325                                   const char* mesg) {
2326   assert(mesg != NULL, "mesg must be specified");
2327   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2328   if (err != 0) {
2329     // the caller wants all commit errors to exit with the specified mesg:
2330     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2331     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "%s", mesg);
2332   }
2333 }
2334 
2335 // Uncommit the pages in a specified region.
2336 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2337   if (madvise(addr, bytes, MADV_FREE) < 0) {
2338     debug_only(warning("MADV_FREE failed."));
2339     return;
2340   }
2341 }
2342 
2343 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2344   return os::commit_memory(addr, size, !ExecMem);
2345 }
2346 
2347 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2348   return os::uncommit_memory(addr, size);
2349 }
2350 
2351 // Change the page size in a given range.
2352 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2353   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2354   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2355   if (UseLargePages) {
2356     size_t page_size = Solaris::page_size_for_alignment(alignment_hint);
2357     if (page_size > (size_t) vm_page_size()) {
2358       Solaris::setup_large_pages(addr, bytes, page_size);
2359     }
2360   }
2361 }
2362 
2363 // Tell the OS to make the range local to the first-touching LWP
2364 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2365   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2366   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2367     debug_only(warning("MADV_ACCESS_LWP failed."));
2368   }
2369 }
2370 
2371 // Tell the OS that this range would be accessed from different LWPs.
2372 void os::numa_make_global(char *addr, size_t bytes) {
2373   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2374   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2375     debug_only(warning("MADV_ACCESS_MANY failed."));
2376   }
2377 }
2378 
2379 // Get the number of the locality groups.
2380 size_t os::numa_get_groups_num() {
2381   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2382   return n != -1 ? n : 1;
2383 }
2384 
2385 // Get a list of leaf locality groups. A leaf lgroup is a group that
2386 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
2387 // board. An LWP is assigned to one of these groups upon creation.
2388 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2389   if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2390     ids[0] = 0;
2391     return 1;
2392   }
2393   int result_size = 0, top = 1, bottom = 0, cur = 0;
2394   for (int k = 0; k < size; k++) {
2395     int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2396                                    (Solaris::lgrp_id_t*)&ids[top], size - top);
2397     if (r == -1) {
2398       ids[0] = 0;
2399       return 1;
2400     }
2401     if (!r) {
2402       // That's a leaf node.
2403       assert(bottom <= cur, "Sanity check");
2404       // Check if the node has memory
2405       if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2406                                   NULL, 0, LGRP_RSRC_MEM) > 0) {
2407         ids[bottom++] = ids[cur];
2408       }
2409     }
2410     top += r;
2411     cur++;
2412   }
2413   if (bottom == 0) {
2414     // Handle the situation when the OS reports no memory available.
2415     // Assume UMA architecture.
2416     ids[0] = 0;
2417     return 1;
2418   }
2419   return bottom;
2420 }
2421 
2422 // Detect a topology change. Typically happens during CPU hot plugging/unplugging.
2423 bool os::numa_topology_changed() {
2424   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2425   if (is_stale != -1 && is_stale) {
2426     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2427     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2428     assert(c != 0, "Failure to initialize LGRP API");
2429     Solaris::set_lgrp_cookie(c);
2430     return true;
2431   }
2432   return false;
2433 }
2434 
2435 // Get the group id of the current LWP.
2436 int os::numa_get_group_id() {
2437   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2438   if (lgrp_id == -1) {
2439     return 0;
2440   }
2441   const int size = os::numa_get_groups_num();
2442   int *ids = (int*)alloca(size * sizeof(int));
2443 
2444   // Get the ids of all lgroups with memory; r is the count.
2445   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2446                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2447   if (r <= 0) {
2448     return 0;
2449   }
2450   return ids[os::random() % r];
2451 }
2452 
2453 // Request information about the page.
2454 bool os::get_page_info(char *start, page_info* info) {
2455   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2456   uint64_t addr = (uintptr_t)start;
2457   uint64_t outdata[2];
2458   uint_t validity = 0;
2459 
2460   if (meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2461     return false;
2462   }
2463 
2464   info->size = 0;
2465   info->lgrp_id = -1;
2466 
2467   if ((validity & 1) != 0) {
2468     if ((validity & 2) != 0) {
2469       info->lgrp_id = outdata[0];
2470     }
2471     if ((validity & 4) != 0) {
2472       info->size = outdata[1];
2473     }
2474     return true;
2475   }
2476   return false;
2477 }
2478 
2479 // Scan the pages from start to end until a page different than
2480 // the one described in the info parameter is encountered.
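     // Queries are issued via meminfo(2) in batches of up to MAX_MEMINFO_CNT
     // addresses, asking for the lgroup and page size backing each virtual page.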
2481 char *os::scan_pages(char *start, char* end, page_info* page_expected,
2482                      page_info* page_found) {
2483   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2484   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2485   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2486   uint_t validity[MAX_MEMINFO_CNT];
2487 
2488   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2489   uint64_t p = (uint64_t)start;
2490   while (p < (uint64_t)end) {
2491     addrs[0] = p;
2492     size_t addrs_count = 1;
2493     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2494       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2495       addrs_count++;
2496     }
2497 
2498     if (meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2499       return NULL;
2500     }
2501 
2502     size_t i = 0;
2503     for (; i < addrs_count; i++) {
2504       if ((validity[i] & 1) != 0) {
2505         if ((validity[i] & 4) != 0) {
2506           if (outdata[types * i + 1] != page_expected->size) {
2507             break;
2508           }
2509         } else if (page_expected->size != 0) {
2510           break;
2511         }
2512 
2513         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2514           if (outdata[types * i] != page_expected->lgrp_id) {
2515             break;
2516           }
2517         }
2518       } else {
2519         return NULL;
2520       }
2521     }
2522 
2523     if (i < addrs_count) {
2524       if ((validity[i] & 2) != 0) {
2525         page_found->lgrp_id = outdata[types * i];
2526       } else {
2527         page_found->lgrp_id = -1;
2528       }
2529       if ((validity[i] & 4) != 0) {
2530         page_found->size = outdata[types * i + 1];
2531       } else {
2532         page_found->size = 0;
2533       }
2534       return (char*)addrs[i];
2535     }
2536 
2537     p = addrs[addrs_count - 1] + page_size;
2538   }
2539   return end;
2540 }
2541 
2542 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2543   size_t size = bytes;
2544   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2545   // uncommitted page. Otherwise, the read/write might succeed if we
2546   // have enough swap space to back the physical page.
2547   return
2548     NULL != Solaris::mmap_chunk(addr, size,
2549                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2550                                 PROT_NONE);
2551 }
2552 
2553 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2554   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2555 
2556   if (b == MAP_FAILED) {
2557     return NULL;
2558   }
2559   return b;
2560 }
2561 
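     // Reserve (but do not commit) an anonymous mapping. With fixed == true
     // the mapping is placed exactly at requested_addr; otherwise, when
     // MAP_ALIGN is available, an alignment_hint larger than the default page
     // size is passed to the kernel as the requested mapping alignment.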
2562 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes,
2563                              size_t alignment_hint, bool fixed) {
2564   char* addr = requested_addr;
2565   int flags = MAP_PRIVATE | MAP_NORESERVE;
2566 
2567   assert(!(fixed && (alignment_hint > 0)),
2568          "alignment hint meaningless with fixed mmap");
2569 
2570   if (fixed) {
2571     flags |= MAP_FIXED;
2572   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
2573     flags |= MAP_ALIGN;
2574     addr = (char*) alignment_hint;
2575   }
2576 
2577   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2578   // uncommitted page. Otherwise, the read/write might succeed if we
2579   // have enough swap space to back the physical page.
2580   return mmap_chunk(addr, bytes, flags, PROT_NONE);
2581 }
2582 
2583 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
2584                             size_t alignment_hint) {
2585   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint,
2586                                   (requested_addr != NULL));
2587 
2588   guarantee(requested_addr == NULL || requested_addr == addr,
2589             "OS failed to return requested mmap address.");
2590   return addr;
2591 }
2592 
2593 // Reserve memory at an arbitrary address, only if that area is
2594 // available (and not reserved for something else).
2595 
2596 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2597   const int max_tries = 10;
2598   char* base[max_tries];
2599   size_t size[max_tries];
2600 
2601   // Solaris adds a gap between mmap'ed regions.  The size of the gap
2602   // is dependent on the requested size and the MMU.  Our initial gap
2603   // value here is just a guess and will be corrected later.
2604   bool had_top_overlap = false;
2605   bool have_adjusted_gap = false;
2606   size_t gap = 0x400000;
2607 
2608   // Assert only that the size is a multiple of the page size, since
2609   // that's all that mmap requires, and since that's all we really know
2610   // about at this low abstraction level.  If we need higher alignment,
2611   // we can either pass an alignment to this method or verify alignment
2612   // in one of the methods further up the call chain.  See bug 5044738.
2613   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2614 
2615   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2616   // Give it a try, if the kernel honors the hint we can return immediately.
2617   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2618 
2619   volatile int err = errno;
2620   if (addr == requested_addr) {
2621     return addr;
2622   } else if (addr != NULL) {
2623     pd_unmap_memory(addr, bytes);
2624   }
2625 
2626   if (log_is_enabled(Warning, os)) {
2627     char buf[256];
2628     buf[0] = '\0';
2629     if (addr == NULL) {
2630       jio_snprintf(buf, sizeof(buf), ": %s", os::strerror(err));
2631     }
2632     log_info(os)("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2633             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2634             "%s", bytes, requested_addr, addr, buf);
2635   }
2636 
2637   // Address hint method didn't work.  Fall back to the old method.
2638   // In theory, once SNV becomes our oldest supported platform, this
2639   // code will no longer be needed.
2640   //
2641   // Repeatedly allocate blocks until the block is allocated at the
2642   // right spot. Give up after max_tries.
2643   int i;
2644   for (i = 0; i < max_tries; ++i) {
2645     base[i] = reserve_memory(bytes);
2646 
2647     if (base[i] != NULL) {
2648       // Is this the block we wanted?
2649       if (base[i] == requested_addr) {
2650         size[i] = bytes;
2651         break;
2652       }
2653 
2654       // check that the gap value is right
2655       if (had_top_overlap && !have_adjusted_gap) {
2656         size_t actual_gap = base[i-1] - base[i] - bytes;
2657         if (gap != actual_gap) {
2658           // adjust the gap value and retry the last 2 allocations
2659           assert(i > 0, "gap adjustment code problem");
2660           have_adjusted_gap = true;  // adjust the gap only once, just in case
2661           gap = actual_gap;
2662           log_info(os)("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2663           unmap_memory(base[i], bytes);
2664           unmap_memory(base[i-1], size[i-1]);
2665           i-=2;
2666           continue;
2667         }
2668       }
2669 
2670       // Does this overlap the block we wanted? Give back the overlapped
2671       // parts and try again.
2672       //
2673       // There is still a bug in this code: if top_overlap == bytes,
2674       // the overlap is offset from requested region by the value of gap.
2675       // In this case giving back the overlapped part will not work,
2676       // because we'll give back the entire block at base[i] and
2677       // therefore the subsequent allocation will not generate a new gap.
2678       // This could be fixed with a new algorithm that used larger
2679       // or variable size chunks to find the requested region -
2680       // but such a change would introduce additional complications.
2681       // It's rare enough that the planets align for this bug,
2682       // so we'll just wait for a fix for 6204603/5003415 which
2683       // will provide a mmap flag to allow us to avoid this business.
2684 
2685       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2686       if (top_overlap >= 0 && top_overlap < bytes) {
2687         had_top_overlap = true;
2688         unmap_memory(base[i], top_overlap);
2689         base[i] += top_overlap;
2690         size[i] = bytes - top_overlap;
2691       } else {
2692         size_t bottom_overlap = base[i] + bytes - requested_addr;
2693         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2694           if (bottom_overlap == 0) {
2695             log_info(os)("attempt_reserve_memory_at: possible alignment bug");
2696           }
2697           unmap_memory(requested_addr, bottom_overlap);
2698           size[i] = bytes - bottom_overlap;
2699         } else {
2700           size[i] = bytes;
2701         }
2702       }
2703     }
2704   }
2705 
2706   // Give back the unused reserved pieces.
2707 
2708   for (int j = 0; j < i; ++j) {
2709     if (base[j] != NULL) {
2710       unmap_memory(base[j], size[j]);
2711     }
2712   }
2713 
2714   return (i < max_tries) ? requested_addr : NULL;
2715 }
2716 
2717 bool os::pd_release_memory(char* addr, size_t bytes) {
2718   size_t size = bytes;
2719   return munmap(addr, size) == 0;
2720 }
2721 
2722 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
2723   assert(addr == (char*)align_down((uintptr_t)addr, os::vm_page_size()),
2724          "addr must be page aligned");
2725   int retVal = mprotect(addr, bytes, prot);
2726   return retVal == 0;
2727 }
2728 
2729 // Protect memory (Used to pass readonly pages through
2730 // JNI GetArray<type>Elements with empty arrays.)
2731 // Also, used for serialization page and for compressed oops null pointer
2732 // checking.
2733 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2734                         bool is_committed) {
2735   unsigned int p = 0;
2736   switch (prot) {
2737   case MEM_PROT_NONE: p = PROT_NONE; break;
2738   case MEM_PROT_READ: p = PROT_READ; break;
2739   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2740   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2741   default:
2742     ShouldNotReachHere();
2743   }
2744   // is_committed is unused.
2745   return solaris_mprotect(addr, bytes, p);
2746 }
2747 
2748 // guard_memory and unguard_memory only happens within stack guard pages.
2749 // Since ISM pertains only to the heap, guard and unguard memory should not
2750 // happen with an ISM region.
2751 bool os::guard_memory(char* addr, size_t bytes) {
2752   return solaris_mprotect(addr, bytes, PROT_NONE);
2753 }
2754 
2755 bool os::unguard_memory(char* addr, size_t bytes) {
2756   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
2757 }
2758 
2759 // Large page support
2760 static size_t _large_page_size = 0;
2761 
2762 // Insertion sort for small arrays (descending order).
2763 static void insertion_sort_descending(size_t* array, int len) {
2764   for (int i = 0; i < len; i++) {
2765     size_t val = array[i];
2766     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
2767       size_t tmp = array[key];
2768       array[key] = array[key - 1];
2769       array[key - 1] = tmp;
2770     }
2771   }
2772 }
2773 
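     // Probe the page sizes supported by the system (via the getpagesizes2 or
     // getpagesizes libc entry points), fill _page_sizes in descending order,
     // and return true if more than one usable size is available. The
     // preferred large page size is returned through *page_size.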
2774 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
2775   const unsigned int usable_count = VM_Version::page_size_count();
2776   if (usable_count == 1) {
2777     return false;
2778   }
2779 
2780   // Find the right getpagesizes interface.  When solaris 11 is the minimum
2781   // build platform, getpagesizes() (without the '2') can be called directly.
2782   typedef int (*gps_t)(size_t[], int);
2783   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
2784   if (gps_func == NULL) {
2785     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
2786     if (gps_func == NULL) {
2787       if (warn) {
2788         warning("MPSS is not supported by the operating system.");
2789       }
2790       return false;
2791     }
2792   }
2793 
2794   // Fill the array of page sizes.
2795   int n = (*gps_func)(_page_sizes, page_sizes_max);
2796   assert(n > 0, "Solaris bug?");
2797 
2798   if (n == page_sizes_max) {
2799     // Add a sentinel value (necessary only if the array was completely filled
2800     // since it is static (zeroed at initialization)).
2801     _page_sizes[--n] = 0;
2802     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
2803   }
2804   assert(_page_sizes[n] == 0, "missing sentinel");
2805   trace_page_sizes("available page sizes", _page_sizes, n);
2806 
2807   if (n == 1) return false;     // Only one page size available.
2808 
2809   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
2810   // select up to usable_count elements.  First sort the array, find the first
2811   // acceptable value, then copy the usable sizes to the top of the array and
2812   // trim the rest.  Make sure to include the default page size :-).
2813   //
2814   // A better policy could get rid of the 4M limit by taking the sizes of the
2815   // important VM memory regions (java heap and possibly the code cache) into
2816   // account.
2817   insertion_sort_descending(_page_sizes, n);
2818   const size_t size_limit =
2819     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
2820   int beg;
2821   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */;
2822   const int end = MIN2((int)usable_count, n) - 1;
2823   for (int cur = 0; cur < end; ++cur, ++beg) {
2824     _page_sizes[cur] = _page_sizes[beg];
2825   }
2826   _page_sizes[end] = vm_page_size();
2827   _page_sizes[end + 1] = 0;
2828 
2829   if (_page_sizes[end] > _page_sizes[end - 1]) {
2830     // Default page size is not the smallest; sort again.
2831     insertion_sort_descending(_page_sizes, end + 1);
2832   }
2833   *page_size = _page_sizes[0];
2834 
2835   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
2836   return true;
2837 }
2838 
2839 void os::large_page_init() {
2840   if (UseLargePages) {
2841     // print a warning if any large page related flag is specified on command line
2842     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
2843                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
2844 
2845     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
2846   }
2847 }
2848 
2849 bool os::Solaris::is_valid_page_size(size_t bytes) {
2850   for (int i = 0; _page_sizes[i] != 0; i++) {
2851     if (_page_sizes[i] == bytes) {
2852       return true;
2853     }
2854   }
2855   return false;
2856 }
2857 
2858 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
2859   assert(is_valid_page_size(align), SIZE_FORMAT " is not a valid page size", align);
2860   assert(is_aligned((void*) start, align),
2861          PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align);
2862   assert(is_aligned(bytes, align),
2863          SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align);
2864 
2865   // Signal to OS that we want large pages for addresses
2866   // from addr, addr + bytes
2867   struct memcntl_mha mpss_struct;
2868   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
2869   mpss_struct.mha_pagesize = align;
2870   mpss_struct.mha_flags = 0;
2871   // Upon successful completion, memcntl() returns 0
2872   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
2873     debug_only(warning("Attempt to use MPSS failed."));
2874     return false;
2875   }
2876   return true;
2877 }
2878 
2879 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
2880   fatal("os::reserve_memory_special should not be called on Solaris.");
2881   return NULL;
2882 }
2883 
2884 bool os::release_memory_special(char* base, size_t bytes) {
2885   fatal("os::release_memory_special should not be called on Solaris.");
2886   return false;
2887 }
2888 
2889 size_t os::large_page_size() {
2890   return _large_page_size;
2891 }
2892 
2893 // MPSS allows application to commit large page memory on demand; with ISM
2894 // the entire memory region must be allocated as shared memory.
2895 bool os::can_commit_large_page_memory() {
2896   return true;
2897 }
2898 
2899 bool os::can_execute_large_page_memory() {
2900   return true;
2901 }
2902 
2903 // Read calls from inside the vm need to perform state transitions
2904 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2905   size_t res;
2906   JavaThread* thread = (JavaThread*)Thread::current();
2907   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
2908   ThreadBlockInVM tbiv(thread);
2909   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
2910   return res;
2911 }
2912 
2913 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2914   size_t res;
2915   JavaThread* thread = (JavaThread*)Thread::current();
2916   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
2917   ThreadBlockInVM tbiv(thread);
2918   RESTARTABLE(::pread(fd, buf, (size_t) nBytes, offset), res);
2919   return res;
2920 }
2921 
2922 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
2923   size_t res;
2924   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
2925          "Assumed _thread_in_native");
2926   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
2927   return res;
2928 }
2929 
2930 void os::naked_short_sleep(jlong ms) {
2931   assert(ms < 1000, "Un-interruptable sleep, short time use only");
2932 
2933   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
2934   // Solaris requires -lrt for this.
2935   usleep((ms * 1000));
2936 
2937   return;
2938 }
2939 
2940 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2941 void os::infinite_sleep() {
2942   while (true) {    // sleep forever ...
2943     ::sleep(100);   // ... 100 seconds at a time
2944   }
2945 }
2946 
2947 // Used to convert frequent JVM_Yield() to nops
2948 bool os::dont_yield() {
2949   if (DontYieldALot) {
2950     static hrtime_t last_time = 0;
2951     hrtime_t diff = getTimeNanos() - last_time;
2952 
2953     if (diff < DontYieldALotInterval * 1000000) {
2954       return true;
2955     }
2956 
2957     last_time += diff;
2958 
2959     return false;
2960   } else {
2961     return false;
2962   }
2963 }
2964 
2965 // Note that yield semantics are defined by the scheduling class to which
2966 // the thread currently belongs.  Typically, yield will not yield to
2967 // other equal or higher priority threads that reside on the dispatch queues
2968 // of other CPUs.
2969 
2970 void os::naked_yield() {
2971   thr_yield();
2972 }
2973 
2974 // Interface for setting lwp priorities.  We are using T2 libthread,
2975 // which forces the use of bound threads, so all of our threads will
2976 // be assigned to real lwp's.  Using the thr_setprio function is
2977 // meaningless in this mode so we must adjust the real lwp's priority.
2978 // The routines below implement the getting and setting of lwp priorities.
2979 //
2980 // Note: There are three priority scales used on Solaris.  Java priorities,
2981 //       which range from 1 to 10; the libthread "thr_setprio" scale, which
2982 //       ranges from 0 to 127; and the current scheduling class of the process
2983 //       we are running in, which typically ranges from -60 to +60.
2984 //       The setting of the lwp priorities is done after a call to thr_setprio,
2985 //       so Java priorities are mapped to libthread priorities and we map from
2986 //       the latter to lwp priorities.  We don't keep priorities stored as
2987 //       Java priorities since some of our worker threads want to set priorities
2988 //       higher than all Java threads.
2989 //
2990 // For related information:
2991 // (1)  man -s 2 priocntl
2992 // (2)  man -s 4 priocntl
2993 // (3)  man dispadmin
2994 // =    librt.so
2995 // =    libthread/common/rtsched.c - thrp_setlwpprio().
2996 // =    ps -cL <pid> ... to validate priority.
2997 // =    sched_get_priority_min and _max
2998 //              pthread_create
2999 //              sched_setparam
3000 //              pthread_setschedparam
3001 //
3002 // Assumptions:
3003 // +    We assume that all threads in the process belong to the same
3004 //              scheduling class, i.e., a homogeneous process.
3005 // +    Must be root or in the IA group to change the "interactive" attribute.
3006 //              Priocntl() will fail silently.  The only indication of failure is when
3007 //              we read back the value and notice that it hasn't changed.
3008 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3009 // +    For RT, change timeslice as well.  Invariant:
3010 //              constant "priority integral"
3011 //              Konst == TimeSlice * (60-Priority)
3012 //              Given a priority, compute appropriate timeslice.
3013 // +    Higher numerical values have higher priority.
3014 
3015 // sched class attributes
3016 typedef struct {
3017   int   schedPolicy;              // classID
3018   int   maxPrio;
3019   int   minPrio;
3020 } SchedInfo;
3021 
3022 
3023 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3024 
3025 #ifdef ASSERT
3026 static int  ReadBackValidate = 1;
3027 #endif
3028 static int  myClass     = 0;
3029 static int  myMin       = 0;
3030 static int  myMax       = 0;
3031 static int  myCur       = 0;
3032 static bool priocntl_enable = false;
3033 
3034 static const int criticalPrio = FXCriticalPriority;
3035 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3036 
3037 
3038 // lwp_priocntl_init
3039 //
3040 // Try to determine the priority scale for our process.
3041 //
3042 // Return errno or 0 if OK.
3043 //
3044 static int lwp_priocntl_init() {
3045   int rslt;
3046   pcinfo_t ClassInfo;
3047   pcparms_t ParmInfo;
3048   int i;
3049 
3050   if (!UseThreadPriorities) return 0;
3051 
3052   // If ThreadPriorityPolicy is 1, switch tables
3053   if (ThreadPriorityPolicy == 1) {
3054     for (i = 0; i < CriticalPriority+1; i++)
3055       os::java_to_os_priority[i] = prio_policy1[i];
3056   }
3057   if (UseCriticalJavaThreadPriority) {
3058     // MaxPriority always maps to the FX scheduling class and criticalPrio.
3059     // See set_native_priority() and set_lwp_class_and_priority().
3060     // Save original MaxPriority mapping in case attempt to
3061     // use critical priority fails.
3062     java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3063     // Set negative to distinguish from other priorities
3064     os::java_to_os_priority[MaxPriority] = -criticalPrio;
3065   }
3066 
3067   // Get IDs for a set of well-known scheduling classes.
3068   // TODO-FIXME: GETCLINFO returns the current # of classes in the
3069   // system.  We should have a loop that iterates over the
3070   // classID values, which are known to be "small" integers.
3071 
3072   strcpy(ClassInfo.pc_clname, "TS");
3073   ClassInfo.pc_cid = -1;
3074   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3075   if (rslt < 0) return errno;
3076   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3077   tsLimits.schedPolicy = ClassInfo.pc_cid;
3078   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3079   tsLimits.minPrio = -tsLimits.maxPrio;
3080 
3081   strcpy(ClassInfo.pc_clname, "IA");
3082   ClassInfo.pc_cid = -1;
3083   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3084   if (rslt < 0) return errno;
3085   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3086   iaLimits.schedPolicy = ClassInfo.pc_cid;
3087   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3088   iaLimits.minPrio = -iaLimits.maxPrio;
3089 
3090   strcpy(ClassInfo.pc_clname, "RT");
3091   ClassInfo.pc_cid = -1;
3092   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3093   if (rslt < 0) return errno;
3094   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3095   rtLimits.schedPolicy = ClassInfo.pc_cid;
3096   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3097   rtLimits.minPrio = 0;
3098 
3099   strcpy(ClassInfo.pc_clname, "FX");
3100   ClassInfo.pc_cid = -1;
3101   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3102   if (rslt < 0) return errno;
3103   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3104   fxLimits.schedPolicy = ClassInfo.pc_cid;
3105   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3106   fxLimits.minPrio = 0;
3107 
3108   // Query our "current" scheduling class.
3109   // This will normally be IA, TS or, rarely, FX or RT.
3110   memset(&ParmInfo, 0, sizeof(ParmInfo));
3111   ParmInfo.pc_cid = PC_CLNULL;
3112   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3113   if (rslt < 0) return errno;
3114   myClass = ParmInfo.pc_cid;
3115 
3116   // We now know our scheduling classId, get specific information
3117   // about the class.
3118   ClassInfo.pc_cid = myClass;
3119   ClassInfo.pc_clname[0] = 0;
3120   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3121   if (rslt < 0) return errno;
3122 
3123   if (ThreadPriorityVerbose) {
3124     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3125   }
3126 
3127   memset(&ParmInfo, 0, sizeof(pcparms_t));
3128   ParmInfo.pc_cid = PC_CLNULL;
3129   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3130   if (rslt < 0) return errno;
3131 
3132   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3133     myMin = rtLimits.minPrio;
3134     myMax = rtLimits.maxPrio;
3135   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3136     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3137     myMin = iaLimits.minPrio;
3138     myMax = iaLimits.maxPrio;
3139     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3140   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3141     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3142     myMin = tsLimits.minPrio;
3143     myMax = tsLimits.maxPrio;
3144     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3145   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3146     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3147     myMin = fxLimits.minPrio;
3148     myMax = fxLimits.maxPrio;
3149     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3150   } else {
3151     // No clue - punt
3152     if (ThreadPriorityVerbose) {
3153       tty->print_cr("Unknown scheduling class: %s ... \n",
3154                     ClassInfo.pc_clname);
3155     }
3156     return EINVAL;      // no clue, punt
3157   }
3158 
3159   if (ThreadPriorityVerbose) {
3160     tty->print_cr("Thread priority Range: [%d..%d]\n", myMin, myMax);
3161   }
3162 
3163   priocntl_enable = true;  // Enable changing priorities
3164   return 0;
3165 }
3166 
3167 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3168 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3169 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3170 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3171 
3172 
3173 // scale_to_lwp_priority
3174 //
3175 // Convert from the libthread "thr_setprio" scale to our current
3176 // lwp scheduling class scale.
3177 //
3178 static int scale_to_lwp_priority(int rMin, int rMax, int x) {
3179   int v;
3180 
3181   if (x == 127) return rMax;            // avoid round-down
3182   v = (((x*(rMax-rMin)))/128)+rMin;
3183   return v;
3184 }
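// As an illustrative example (hypothetical values, not tied to a particular
// scheduling class): with rMin == -60 and rMax == 60, a libthread priority of
// x == 64 maps to (64 * (60 - (-60))) / 128 + (-60) == 0, i.e. the middle of
// the lwp range, while x == 127 is returned as rMax directly to avoid the
// round-down.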
3185 
3186 
3187 // set_lwp_class_and_priority
3188 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3189                                int newPrio, int new_class, bool scale) {
3190   int rslt;
3191   int Actual, Expected, prv;
3192   pcparms_t ParmInfo;                   // for GET-SET
3193 #ifdef ASSERT
3194   pcparms_t ReadBack;                   // for readback
3195 #endif
3196 
3197   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3198   // Query current values.
3199   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3200   // Cache "pcparms_t" in global ParmCache.
3201   // TODO: elide set-to-same-value
3202 
3203   // If something went wrong on init, don't change priorities.
3204   if (!priocntl_enable) {
3205     if (ThreadPriorityVerbose) {
3206       tty->print_cr("Trying to set priority but init failed, ignoring");
3207     }
3208     return EINVAL;
3209   }
3210 
3211   // If the lwp hasn't started yet, just return;
3212   // the _start routine will call us again.
3213   if (lwpid <= 0) {
3214     if (ThreadPriorityVerbose) {
3215       tty->print_cr("deferring the set_lwp_class_and_priority of thread "
3216                     INTPTR_FORMAT " to %d, lwpid not set",
3217                     ThreadID, newPrio);
3218     }
3219     return 0;
3220   }
3221 
3222   if (ThreadPriorityVerbose) {
3223     tty->print_cr ("set_lwp_class_and_priority("
3224                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3225                    ThreadID, lwpid, newPrio);
3226   }
3227 
3228   memset(&ParmInfo, 0, sizeof(pcparms_t));
3229   ParmInfo.pc_cid = PC_CLNULL;
3230   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3231   if (rslt < 0) return errno;
3232 
3233   int cur_class = ParmInfo.pc_cid;
3234   ParmInfo.pc_cid = (id_t)new_class;
3235 
3236   if (new_class == rtLimits.schedPolicy) {
3237     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3238     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3239                                                        rtLimits.maxPrio, newPrio)
3240                                : newPrio;
3241     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3242     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3243     if (ThreadPriorityVerbose) {
3244       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3245     }
3246   } else if (new_class == iaLimits.schedPolicy) {
3247     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3248     int maxClamped     = MIN2(iaLimits.maxPrio,
3249                               cur_class == new_class
3250                               ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3251     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3252                                                        maxClamped, newPrio)
3253                                : newPrio;
3254     iaInfo->ia_uprilim = cur_class == new_class
3255                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3256     iaInfo->ia_mode    = IA_NOCHANGE;
3257     if (ThreadPriorityVerbose) {
3258       tty->print_cr("IA: [%d...%d] %d->%d\n",
3259                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3260     }
3261   } else if (new_class == tsLimits.schedPolicy) {
3262     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3263     int maxClamped     = MIN2(tsLimits.maxPrio,
3264                               cur_class == new_class
3265                               ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3266     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3267                                                        maxClamped, newPrio)
3268                                : newPrio;
3269     tsInfo->ts_uprilim = cur_class == new_class
3270                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3271     if (ThreadPriorityVerbose) {
3272       tty->print_cr("TS: [%d...%d] %d->%d\n",
3273                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3274     }
3275   } else if (new_class == fxLimits.schedPolicy) {
3276     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3277     int maxClamped     = MIN2(fxLimits.maxPrio,
3278                               cur_class == new_class
3279                               ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3280     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3281                                                        maxClamped, newPrio)
3282                                : newPrio;
3283     fxInfo->fx_uprilim = cur_class == new_class
3284                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3285     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3286     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3287     if (ThreadPriorityVerbose) {
3288       tty->print_cr("FX: [%d...%d] %d->%d\n",
3289                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3290     }
3291   } else {
3292     if (ThreadPriorityVerbose) {
3293       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3294     }
3295     return EINVAL;    // no clue, punt
3296   }
3297 
3298   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3299   if (ThreadPriorityVerbose && rslt) {
3300     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3301   }
3302   if (rslt < 0) return errno;
3303 
3304 #ifdef ASSERT
3305   // Sanity check: read back what we just attempted to set.
3306   // In theory it could have changed in the interim ...
3307   //
3308   // The priocntl system call is tricky.
3309   // Sometimes it'll validate the priority value argument and
3310   // return EINVAL if unhappy.  At other times it fails silently.
3311   // Readbacks are prudent.
3312 
3313   if (!ReadBackValidate) return 0;
3314 
3315   memset(&ReadBack, 0, sizeof(pcparms_t));
3316   ReadBack.pc_cid = PC_CLNULL;
3317   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3318   assert(rslt >= 0, "priocntl failed");
3319   Actual = Expected = 0xBAD;
3320   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3321   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3322     Actual   = RTPRI(ReadBack)->rt_pri;
3323     Expected = RTPRI(ParmInfo)->rt_pri;
3324   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3325     Actual   = IAPRI(ReadBack)->ia_upri;
3326     Expected = IAPRI(ParmInfo)->ia_upri;
3327   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3328     Actual   = TSPRI(ReadBack)->ts_upri;
3329     Expected = TSPRI(ParmInfo)->ts_upri;
3330   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3331     Actual   = FXPRI(ReadBack)->fx_upri;
3332     Expected = FXPRI(ParmInfo)->fx_upri;
3333   } else {
3334     if (ThreadPriorityVerbose) {
3335       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3336                     ParmInfo.pc_cid);
3337     }
3338   }
3339 
3340   if (Actual != Expected) {
3341     if (ThreadPriorityVerbose) {
3342       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3343                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3344     }
3345   }
3346 #endif
3347 
3348   return 0;
3349 }
3350 
3351 // Solaris only gives access to 128 real priorities at a time,
3352 // so we expand Java's ten to fill this range.  This would be better
3353 // if we dynamically adjusted relative priorities.
3354 //
3355 // The ThreadPriorityPolicy option allows us to select 2 different
3356 // priority scales.
3357 //
3358 // ThreadPriorityPolicy=0
3359 // Since Solaris' default priority is MaximumPriority, we do not
3360 // set a priority lower than Max unless a priority lower than
3361 // NormPriority is requested.
3362 //
3363 // ThreadPriorityPolicy=1
3364 // This mode causes the priority table to get filled with
3365 // linear values.  NormPriority gets mapped to 50% of the
3366 // maximum priority, and so on.  This will cause VM threads
3367 // to get unfair treatment relative to other Solaris processes
3368 // which do not explicitly alter their thread priorities.
3369 
3370 int os::java_to_os_priority[CriticalPriority + 1] = {
3371   -99999,         // 0 Entry should never be used
3372 
3373   0,              // 1 MinPriority
3374   32,             // 2
3375   64,             // 3
3376 
3377   96,             // 4
3378   127,            // 5 NormPriority
3379   127,            // 6
3380 
3381   127,            // 7
3382   127,            // 8
3383   127,            // 9 NearMaxPriority
3384 
3385   127,            // 10 MaxPriority
3386 
3387   -criticalPrio   // 11 CriticalPriority
3388 };
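// For example, with the default ThreadPriorityPolicy=0 table above, a Java
// thread at NormPriority (5) is already mapped to the libthread maximum of
// 127, so only requests below NormPriority actually lower a thread's
// priority.  With UseCriticalJavaThreadPriority, MaxPriority is remapped to
// -criticalPrio so that set_native_priority() below can recognize the
// critical request and switch the thread to the FX class.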
3389 
3390 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3391   OSThread* osthread = thread->osthread();
3392 
3393   // Save requested priority in case the thread hasn't been started
3394   osthread->set_native_priority(newpri);
3395 
3396   // Check for critical priority request
3397   bool fxcritical = false;
3398   if (newpri == -criticalPrio) {
3399     fxcritical = true;
3400     newpri = criticalPrio;
3401   }
3402 
3403   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3404   if (!UseThreadPriorities) return OS_OK;
3405 
3406   int status = 0;
3407 
3408   if (!fxcritical) {
3409     // Use thr_setprio only if we have a priority that thr_setprio understands
3410     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3411   }
3412 
3413   int lwp_status =
3414           set_lwp_class_and_priority(osthread->thread_id(),
3415                                      osthread->lwp_id(),
3416                                      newpri,
3417                                      fxcritical ? fxLimits.schedPolicy : myClass,
3418                                      !fxcritical);
3419   if (lwp_status != 0 && fxcritical) {
3420     // Try again, this time without changing the scheduling class
3421     newpri = java_MaxPriority_to_os_priority;
3422     lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3423                                             osthread->lwp_id(),
3424                                             newpri, myClass, false);
3425   }
3426   status |= lwp_status;
3427   return (status == 0) ? OS_OK : OS_ERR;
3428 }
3429 
3430 
3431 OSReturn os::get_native_priority(const Thread* const thread,
3432                                  int *priority_ptr) {
3433   int p;
3434   if (!UseThreadPriorities) {
3435     *priority_ptr = NormalPriority;
3436     return OS_OK;
3437   }
3438   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3439   if (status != 0) {
3440     return OS_ERR;
3441   }
3442   *priority_ptr = p;
3443   return OS_OK;
3444 }
3445 
3446 
3447 // Hint to the underlying OS that a task switch would not be good.
3448 // Void return because it's a hint and can fail.
3449 void os::hint_no_preempt() {
3450   schedctl_start(schedctl_init());
3451 }
3452 
3453 ////////////////////////////////////////////////////////////////////////////////
3454 // suspend/resume support
3455 
3456 //  The low-level signal-based suspend/resume support is a remnant from the
3457 //  old VM-suspension that used to be for java-suspension, safepoints, etc.,
3458 //  within hotspot.  It is currently used by JFR's OSThreadSampler.
3459 //
3460 //  The remaining code is greatly simplified from the more general suspension
3461 //  code that used to be used.
3462 //
3463 //  The protocol is quite simple:
3464 //  - suspend:
3465 //      - sends a signal to the target thread
3466 //      - polls the suspend state of the osthread using a yield loop
3467 //      - target thread signal handler (SR_handler) sets suspend state
3468 //        and blocks in sigsuspend until continued
3469 //  - resume:
3470 //      - sets target osthread state to continue
3471 //      - sends signal to end the sigsuspend loop in the SR_handler
3472 //
3473 //  Note that the SR_lock plays no role in this suspend/resume protocol,
3474 //  but is checked for NULL in SR_handler as a thread termination indicator.
3475 //  The SR_lock is, however, used by JavaThread::java_suspend()/java_resume() APIs.
3476 //
3477 //  Note that resume_clear_context() and suspend_save_context() are needed
3478 //  by SR_handler(), so that fetch_frame_from_ucontext() works,
3479 //  which in part is used by:
3480 //    - Forte Analyzer: AsyncGetCallTrace()
3481 //    - StackBanging: get_frame_at_stack_banging_point()
3482 //    - JFR: get_topframe()-->....-->get_valid_uc_in_signal_handler()
3483 
3484 static void resume_clear_context(OSThread *osthread) {
3485   osthread->set_ucontext(NULL);
3486 }
3487 
3488 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3489   osthread->set_ucontext(context);
3490 }
3491 
3492 static PosixSemaphore sr_semaphore;
3493 
3494 void os::Solaris::SR_handler(Thread* thread, ucontext_t* context) {
3495   // Save and restore errno to avoid confusing native code with EINTR
3496   // after sigsuspend.
3497   int old_errno = errno;
3498 
3499   OSThread* osthread = thread->osthread();
3500   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3501 
3502   os::SuspendResume::State current = osthread->sr.state();
3503   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3504     suspend_save_context(osthread, context);
3505 
3506     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3507     os::SuspendResume::State state = osthread->sr.suspended();
3508     if (state == os::SuspendResume::SR_SUSPENDED) {
3509       sigset_t suspend_set;  // signals for sigsuspend()
3510 
3511       // get current set of blocked signals and unblock resume signal
3512       pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
3513       sigdelset(&suspend_set, ASYNC_SIGNAL);
3514 
3515       sr_semaphore.signal();
3516       // wait here until we are resumed
3517       while (1) {
3518         sigsuspend(&suspend_set);
3519 
3520         os::SuspendResume::State result = osthread->sr.running();
3521         if (result == os::SuspendResume::SR_RUNNING) {
3522           sr_semaphore.signal();
3523           break;
3524         }
3525       }
3526 
3527     } else if (state == os::SuspendResume::SR_RUNNING) {
3528       // request was cancelled, continue
3529     } else {
3530       ShouldNotReachHere();
3531     }
3532 
3533     resume_clear_context(osthread);
3534   } else if (current == os::SuspendResume::SR_RUNNING) {
3535     // request was cancelled, continue
3536   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3537     // ignore
3538   } else {
3539     // ignore
3540   }
3541 
3542   errno = old_errno;
3543 }
3544 
3545 void os::print_statistics() {
3546 }
3547 
3548 bool os::message_box(const char* title, const char* message) {
3549   int i;
3550   fdStream err(defaultStream::error_fd());
3551   for (i = 0; i < 78; i++) err.print_raw("=");
3552   err.cr();
3553   err.print_raw_cr(title);
3554   for (i = 0; i < 78; i++) err.print_raw("-");
3555   err.cr();
3556   err.print_raw_cr(message);
3557   for (i = 0; i < 78; i++) err.print_raw("=");
3558   err.cr();
3559 
3560   char buf[16];
3561   // Prevent process from exiting upon "read error" without consuming all CPU
3562   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3563 
3564   return buf[0] == 'y' || buf[0] == 'Y';
3565 }
3566 
3567 static int sr_notify(OSThread* osthread) {
3568   int status = thr_kill(osthread->thread_id(), ASYNC_SIGNAL);
3569   assert_status(status == 0, status, "thr_kill");
3570   return status;
3571 }
3572 
3573 // "Randomly" selected value for how long we want to spin
3574 // before bailing out on suspending a thread, also how often
3575 // we send a signal to a thread we want to resume
3576 static const int RANDOMLY_LARGE_INTEGER = 1000000;
3577 static const int RANDOMLY_LARGE_INTEGER2 = 100;
3578 
3579 static bool do_suspend(OSThread* osthread) {
3580   assert(osthread->sr.is_running(), "thread should be running");
3581   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
3582 
3583   // mark as suspended and send signal
3584   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3585     // failed to switch, state wasn't running?
3586     ShouldNotReachHere();
3587     return false;
3588   }
3589 
3590   if (sr_notify(osthread) != 0) {
3591     ShouldNotReachHere();
3592   }
3593 
3594   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3595   while (true) {
3596     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
3597       break;
3598     } else {
3599       // timeout
3600       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3601       if (cancelled == os::SuspendResume::SR_RUNNING) {
3602         return false;
3603       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3604         // make sure that we consume the signal on the semaphore as well
3605         sr_semaphore.wait();
3606         break;
3607       } else {
3608         ShouldNotReachHere();
3609         return false;
3610       }
3611     }
3612   }
3613 
3614   guarantee(osthread->sr.is_suspended(), "Must be suspended");
3615   return true;
3616 }
3617 
3618 static void do_resume(OSThread* osthread) {
3619   assert(osthread->sr.is_suspended(), "thread should be suspended");
3620   assert(!sr_semaphore.trywait(), "invalid semaphore state");
3621 
3622   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3623     // failed to switch to WAKEUP_REQUEST
3624     ShouldNotReachHere();
3625     return;
3626   }
3627 
3628   while (true) {
3629     if (sr_notify(osthread) == 0) {
3630       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
3631         if (osthread->sr.is_running()) {
3632           return;
3633         }
3634       }
3635     } else {
3636       ShouldNotReachHere();
3637     }
3638   }
3639 
3640   guarantee(osthread->sr.is_running(), "Must be running!");
3641 }
3642 
3643 void os::SuspendedThreadTask::internal_do_task() {
3644   if (do_suspend(_thread->osthread())) {
3645     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3646     do_task(context);
3647     do_resume(_thread->osthread());
3648   }
3649 }
3650 
3651 // This does not do anything on Solaris. This is basically a hook for being
3652 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
3653 void os::os_exception_wrapper(java_call_t f, JavaValue* value,
3654                               const methodHandle& method, JavaCallArguments* args,
3655                               Thread* thread) {
3656   f(value, method, args, thread);
3657 }
3658 
3659 // This routine may be used by user applications as a "hook" to catch signals.
3660 // The user-defined signal handler must pass unrecognized signals to this
3661 // routine, and if it returns true (non-zero), then the signal handler must
3662 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
3663 // routine will never return false (zero), but instead will execute a VM panic
3664 // routine that kills the process.
3665 //
3666 // If this routine returns false, it is OK to call it again.  This allows
3667 // the user-defined signal handler to perform checks either before or after
3668 // the VM performs its own checks.  Naturally, the user code would be making
3669 // a serious error if it tried to handle an exception (such as a null check
3670 // or breakpoint) that the VM was generating for its own correct operation.
3671 //
3672 // This routine may recognize any of the following kinds of signals:
3673 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
3674 // ASYNC_SIGNAL.
3675 // It should be consulted by handlers for any of those signals.
3676 //
3677 // The caller of this routine must pass in the three arguments supplied
3678 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
3679 // field of the structure passed to sigaction().  This routine assumes that
3680 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3681 //
3682 // Note that the VM will print warnings if it detects conflicting signal
3683 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3684 //
3685 extern "C" JNIEXPORT int JVM_handle_solaris_signal(int signo,
3686                                                    siginfo_t* siginfo,
3687                                                    void* ucontext,
3688                                                    int abort_if_unrecognized);
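//
// A minimal sketch of the user-side forwarding described above (hypothetical
// application code, not part of the VM; the handler name is illustrative):
//
//   static void my_app_handler(int sig, siginfo_t* info, void* uc) {
//     if (JVM_handle_solaris_signal(sig, info, uc, 0 /* !abort_if_unrecognized */)) {
//       return;   // the VM recognized and handled the signal
//     }
//     // ... application-specific handling of the unrecognized signal ...
//   }
//
// installed with sigaction() using sa_flags that include SA_SIGINFO and
// SA_RESTART, as required by the contract above.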
3689 
3690 
3691 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
3692   int orig_errno = errno;  // Preserve errno value over signal handler.
3693   JVM_handle_solaris_signal(sig, info, ucVoid, true);
3694   errno = orig_errno;
3695 }
3696 
3697 // This boolean allows users to forward their own non-matching signals
3698 // to JVM_handle_solaris_signal, harmlessly.
3699 bool os::Solaris::signal_handlers_are_installed = false;
3700 
3701 // For signal-chaining
3702 bool os::Solaris::libjsig_is_loaded = false;
3703 typedef struct sigaction *(*get_signal_t)(int);
3704 get_signal_t os::Solaris::get_signal_action = NULL;
3705 
3706 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
3707   struct sigaction *actp = NULL;
3708 
3709   if ((libjsig_is_loaded)  && (sig <= Maxsignum)) {
3710     // Retrieve the old signal handler from libjsig
3711     actp = (*get_signal_action)(sig);
3712   }
3713   if (actp == NULL) {
3714     // Retrieve the preinstalled signal handler from jvm
3715     actp = get_preinstalled_handler(sig);
3716   }
3717 
3718   return actp;
3719 }
3720 
3721 static bool call_chained_handler(struct sigaction *actp, int sig,
3722                                  siginfo_t *siginfo, void *context) {
3723   // Call the old signal handler
3724   if (actp->sa_handler == SIG_DFL) {
3725     // It's more reasonable to let jvm treat it as an unexpected exception
3726     // instead of taking the default action.
3727     return false;
3728   } else if (actp->sa_handler != SIG_IGN) {
3729     if ((actp->sa_flags & SA_NODEFER) == 0) {
3730       // automatically block the signal
3731       sigaddset(&(actp->sa_mask), sig);
3732     }
3733 
3734     sa_handler_t hand;
3735     sa_sigaction_t sa;
3736     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3737     // retrieve the chained handler
3738     if (siginfo_flag_set) {
3739       sa = actp->sa_sigaction;
3740     } else {
3741       hand = actp->sa_handler;
3742     }
3743 
3744     if ((actp->sa_flags & SA_RESETHAND) != 0) {
3745       actp->sa_handler = SIG_DFL;
3746     }
3747 
3748     // try to honor the signal mask
3749     sigset_t oset;
3750     pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3751 
3752     // call into the chained handler
3753     if (siginfo_flag_set) {
3754       (*sa)(sig, siginfo, context);
3755     } else {
3756       (*hand)(sig);
3757     }
3758 
3759     // restore the signal mask
3760     pthread_sigmask(SIG_SETMASK, &oset, 0);
3761   }
3762   // Tell jvm's signal handler the signal is taken care of.
3763   return true;
3764 }
3765 
3766 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3767   bool chained = false;
3768   // signal-chaining
3769   if (UseSignalChaining) {
3770     struct sigaction *actp = get_chained_signal_action(sig);
3771     if (actp != NULL) {
3772       chained = call_chained_handler(actp, sig, siginfo, context);
3773     }
3774   }
3775   return chained;
3776 }
3777 
3778 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
3779   assert((chainedsigactions != (struct sigaction *)NULL) &&
3780          (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
3781   if (preinstalled_sigs[sig] != 0) {
3782     return &chainedsigactions[sig];
3783   }
3784   return NULL;
3785 }
3786 
3787 void os::Solaris::save_preinstalled_handler(int sig,
3788                                             struct sigaction& oldAct) {
3789   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
3790   assert((chainedsigactions != (struct sigaction *)NULL) &&
3791          (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
3792   chainedsigactions[sig] = oldAct;
3793   preinstalled_sigs[sig] = 1;
3794 }
3795 
3796 void os::Solaris::set_signal_handler(int sig, bool set_installed,
3797                                      bool oktochain) {
3798   // Check for overwrite.
3799   struct sigaction oldAct;
3800   sigaction(sig, (struct sigaction*)NULL, &oldAct);
3801   void* oldhand =
3802       oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
3803                           : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
3804   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3805       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3806       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
3807     if (AllowUserSignalHandlers || !set_installed) {
3808       // Do not overwrite; user takes responsibility to forward to us.
3809       return;
3810     } else if (UseSignalChaining) {
3811       if (oktochain) {
3812         // save the old handler in jvm
3813         save_preinstalled_handler(sig, oldAct);
3814       } else {
3815         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal.");
3816       }
3817       // libjsig also interposes the sigaction() call below and saves the
3818       // old sigaction on its own.
3819     } else {
3820       fatal("Encountered unexpected pre-existing sigaction handler "
3821             "%#lx for signal %d.", (long)oldhand, sig);
3822     }
3823   }
3824 
3825   struct sigaction sigAct;
3826   sigfillset(&(sigAct.sa_mask));
3827   sigAct.sa_handler = SIG_DFL;
3828 
3829   sigAct.sa_sigaction = signalHandler;
3830   // Handle SIGSEGV on alternate signal stack if
3831   // not using stack banging
3832   if (!UseStackBanging && sig == SIGSEGV) {
3833     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
3834   } else {
3835     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
3836   }
3837   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
3838 
3839   sigaction(sig, &sigAct, &oldAct);
3840 
3841   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3842                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3843   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3844 }
3845 
3846 
3847 #define DO_SIGNAL_CHECK(sig)                      \
3848   do {                                            \
3849     if (!sigismember(&check_signal_done, sig)) {  \
3850       os::Solaris::check_signal_handler(sig);     \
3851     }                                             \
3852   } while (0)
3853 
3854 // This method is a periodic task to check for misbehaving JNI applications
3855 // under CheckJNI; we can add any other periodic checks here.
3856 
3857 void os::run_periodic_checks() {
3858   // A big source of grief is hijacking virtual address 0x0 on Solaris,
3859   // thereby preventing NULL checks.
3860   if (!check_addr0_done) check_addr0_done = check_addr0(tty);
3861 
3862   if (check_signals == false) return;
3863 
3864   // SEGV and BUS, if overridden, could potentially prevent the
3865   // generation of hs*.log in the event of a crash.  Debugging
3866   // such a case can be very challenging, so we absolutely
3867   // check the following for good measure:
3868   DO_SIGNAL_CHECK(SIGSEGV);
3869   DO_SIGNAL_CHECK(SIGILL);
3870   DO_SIGNAL_CHECK(SIGFPE);
3871   DO_SIGNAL_CHECK(SIGBUS);
3872   DO_SIGNAL_CHECK(SIGPIPE);
3873   DO_SIGNAL_CHECK(SIGXFSZ);
3874   DO_SIGNAL_CHECK(ASYNC_SIGNAL);
3875 
3876   // ReduceSignalUsage allows the user to override these handlers
3877   // see comments at the very top and jvm_solaris.h
3878   if (!ReduceSignalUsage) {
3879     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3880     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3881     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3882     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3883   }
3884 }
3885 
3886 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3887 
3888 static os_sigaction_t os_sigaction = NULL;
3889 
3890 void os::Solaris::check_signal_handler(int sig) {
3891   char buf[O_BUFLEN];
3892   address jvmHandler = NULL;
3893 
3894   struct sigaction act;
3895   if (os_sigaction == NULL) {
3896     // only trust the default sigaction, in case it has been interposed
3897     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3898     if (os_sigaction == NULL) return;
3899   }
3900 
3901   os_sigaction(sig, (struct sigaction*)NULL, &act);
3902 
3903   address thisHandler = (act.sa_flags & SA_SIGINFO)
3904     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3905     : CAST_FROM_FN_PTR(address, act.sa_handler);
3906 
3907 
3908   switch (sig) {
3909   case SIGSEGV:
3910   case SIGBUS:
3911   case SIGFPE:
3912   case SIGPIPE:
3913   case SIGXFSZ:
3914   case SIGILL:
3915   case ASYNC_SIGNAL:
3916     jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
3917     break;
3918 
3919   case SHUTDOWN1_SIGNAL:
3920   case SHUTDOWN2_SIGNAL:
3921   case SHUTDOWN3_SIGNAL:
3922   case BREAK_SIGNAL:
3923     jvmHandler = (address)user_handler();
3924     break;
3925 
3926   default:
3927       return;
3928   }
3929 
3930   if (thisHandler != jvmHandler) {
3931     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3932     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3933     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3934     // No need to check this sig any longer
3935     sigaddset(&check_signal_done, sig);
3936     // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
3937     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3938       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3939                     exception_name(sig, buf, O_BUFLEN));
3940     }
3941   } else if (os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
3942     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3943     tty->print("expected:");
3944     os::Posix::print_sa_flags(tty, os::Solaris::get_our_sigflags(sig));
3945     tty->cr();
3946     tty->print("  found:");
3947     os::Posix::print_sa_flags(tty, act.sa_flags);
3948     tty->cr();
3949     // No need to check this sig any longer
3950     sigaddset(&check_signal_done, sig);
3951   }
3952 
3953   // Print all the signal handler state
3954   if (sigismember(&check_signal_done, sig)) {
3955     print_signal_handlers(tty, buf, O_BUFLEN);
3956   }
3957 
3958 }
3959 
3960 void os::Solaris::install_signal_handlers() {
3961   signal_handlers_are_installed = true;
3962 
3963   // signal-chaining
3964   typedef void (*signal_setting_t)();
3965   signal_setting_t begin_signal_setting = NULL;
3966   signal_setting_t end_signal_setting = NULL;
3967   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3968                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3969   if (begin_signal_setting != NULL) {
3970     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3971                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3972     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3973                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3974     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
3975                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
3976     libjsig_is_loaded = true;
3977     if (os::Solaris::get_libjsig_version != NULL) {
3978       int libjsigversion =  (*os::Solaris::get_libjsig_version)();
3979       assert(libjsigversion == JSIG_VERSION_1_4_1, "libjsig version mismatch");
3980     }
3981     assert(UseSignalChaining, "should enable signal-chaining");
3982   }
3983   if (libjsig_is_loaded) {
3984     // Tell libjsig the jvm is setting signal handlers
3985     (*begin_signal_setting)();
3986   }
3987 
3988   set_signal_handler(SIGSEGV, true, true);
3989   set_signal_handler(SIGPIPE, true, true);
3990   set_signal_handler(SIGXFSZ, true, true);
3991   set_signal_handler(SIGBUS, true, true);
3992   set_signal_handler(SIGILL, true, true);
3993   set_signal_handler(SIGFPE, true, true);
3994   set_signal_handler(ASYNC_SIGNAL, true, true);
3995 
3996   if (libjsig_is_loaded) {
3997     // Tell libjsig the jvm has finished setting signal handlers
3998     (*end_signal_setting)();
3999   }
4000 
4001   // We don't activate the signal checker if libjsig is in place; we trust ourselves,
4002   // and if a user signal handler is installed all bets are off.
4003   // Log that signal checking is off only if -verbose:jni is specified.
4004   if (CheckJNICalls) {
4005     if (libjsig_is_loaded) {
4006       if (PrintJNIResolving) {
4007         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4008       }
4009       check_signals = false;
4010     }
4011     if (AllowUserSignalHandlers) {
4012       if (PrintJNIResolving) {
4013         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4014       }
4015       check_signals = false;
4016     }
4017   }
4018 }
4019 
4020 
4021 void report_error(const char* file_name, int line_no, const char* title,
4022                   const char* format, ...);
4023 
4024 // (Static) wrappers for the liblgrp API
4025 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4026 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4027 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4028 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4029 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4030 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4031 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4032 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4033 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4034 
4035 static address resolve_symbol_lazy(const char* name) {
4036   address addr = (address) dlsym(RTLD_DEFAULT, name);
4037   if (addr == NULL) {
4038     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4039     addr = (address) dlsym(RTLD_NEXT, name);
4040   }
4041   return addr;
4042 }
4043 
4044 static address resolve_symbol(const char* name) {
4045   address addr = resolve_symbol_lazy(name);
4046   if (addr == NULL) {
4047     fatal(dlerror());
4048   }
4049   return addr;
4050 }
4051 
4052 void os::Solaris::libthread_init() {
4053   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4054 
4055   lwp_priocntl_init();
4056 
4057   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4058   if (func == NULL) {
4059     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
4060     // Guarantee that this VM is running on a new enough OS (5.6 or
4061     // later) that it will have a new enough libthread.so.
4062     guarantee(func != NULL, "libthread.so is too old.");
4063   }
4064 
4065   int size;
4066   void (*handler_info_func)(address *, int *);
4067   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4068   handler_info_func(&handler_start, &size);
4069   handler_end = handler_start + size;
4070 }
4071 
4072 
4073 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4074 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4075 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4076 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4077 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4078 int os::Solaris::_mutex_scope = USYNC_THREAD;
4079 
4080 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4081 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4082 int_fnP_cond_tP os::Solaris::_cond_signal;
4083 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4084 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4085 int_fnP_cond_tP os::Solaris::_cond_destroy;
4086 int os::Solaris::_cond_scope = USYNC_THREAD;
4087 bool os::Solaris::_synchronization_initialized;
4088 
4089 void os::Solaris::synchronization_init() {
4090   if (UseLWPSynchronization) {
4091     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4092     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4093     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4094     os::Solaris::set_mutex_init(lwp_mutex_init);
4095     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4096     os::Solaris::set_mutex_scope(USYNC_THREAD);
4097 
4098     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4099     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4100     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4101     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4102     os::Solaris::set_cond_init(lwp_cond_init);
4103     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4104     os::Solaris::set_cond_scope(USYNC_THREAD);
4105   } else {
4106     os::Solaris::set_mutex_scope(USYNC_THREAD);
4107     os::Solaris::set_cond_scope(USYNC_THREAD);
4108 
4109     if (UsePthreads) {
4110       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4111       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4112       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4113       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4114       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4115 
4116       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4117       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4118       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4119       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4120       os::Solaris::set_cond_init(pthread_cond_default_init);
4121       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4122     } else {
4123       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4124       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4125       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4126       os::Solaris::set_mutex_init(::mutex_init);
4127       os::Solaris::set_mutex_destroy(::mutex_destroy);
4128 
4129       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4130       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4131       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4132       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4133       os::Solaris::set_cond_init(::cond_init);
4134       os::Solaris::set_cond_destroy(::cond_destroy);
4135     }
4136   }
4137   _synchronization_initialized = true;
4138 }
4139 
4140 bool os::Solaris::liblgrp_init() {
4141   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4142   if (handle != NULL) {
4143     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4144     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4145     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4146     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4147     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4148     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4149     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4150     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4151                                                       dlsym(handle, "lgrp_cookie_stale")));
4152 
4153     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4154     set_lgrp_cookie(c);
4155     return true;
4156   }
4157   return false;
4158 }
4159 
4160 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4161 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4162 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4163 
4164 void init_pset_getloadavg_ptr(void) {
4165   pset_getloadavg_ptr =
4166     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4167   if (pset_getloadavg_ptr == NULL) {
4168     log_warning(os)("pset_getloadavg function not found");
4169   }
4170 }
4171 
4172 int os::Solaris::_dev_zero_fd = -1;
4173 
4174 // this is called _before_ the global arguments have been parsed
4175 void os::init(void) {
4176   _initial_pid = getpid();
4177 
4178   max_hrtime = first_hrtime = gethrtime();
4179 
4180   init_random(1234567);
4181 
4182   page_size = sysconf(_SC_PAGESIZE);
4183   if (page_size == -1) {
4184     fatal("os_solaris.cpp: os::init: sysconf failed (%s)", os::strerror(errno));
4185   }
4186   init_page_sizes((size_t) page_size);
4187 
4188   Solaris::initialize_system_info();
4189 
4190   int fd = ::open("/dev/zero", O_RDWR);
4191   if (fd < 0) {
4192     fatal("os::init: cannot open /dev/zero (%s)", os::strerror(errno));
4193   } else {
4194     Solaris::set_dev_zero_fd(fd);
4195 
4196     // Close on exec, child won't inherit.
4197     fcntl(fd, F_SETFD, FD_CLOEXEC);
4198   }
4199 
4200   clock_tics_per_sec = CLK_TCK;
4201 
4202   // check if dladdr1() exists; dladdr1 can provide more information than
4203   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4204   // and is available on linker patches for 5.7 and 5.8.
4205   // libdl.so must have been loaded, this call is just an entry lookup
4206   void * hdl = dlopen("libdl.so", RTLD_NOW);
4207   if (hdl) {
4208     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4209   }
4210 
4211   main_thread = thr_self();
4212 
4213   // dynamic lookup of functions that may not be available in our lowest
4214   // supported Solaris release
4215   void * handle = dlopen("libc.so.1", RTLD_LAZY);
4216   if (handle != NULL) {
4217     Solaris::_pthread_setname_np =  // from 11.3
4218         (Solaris::pthread_setname_np_func_t)dlsym(handle, "pthread_setname_np");
4219   }
4220 }
4221 
4222 // To install functions for atexit system call
4223 extern "C" {
4224   static void perfMemory_exit_helper() {
4225     perfMemory_exit();
4226   }
4227 }
4228 
4229 // this is called _after_ the global arguments have been parsed
4230 jint os::init_2(void) {
4231   // try to enable extended file IO ASAP, see 6431278
4232   os::Solaris::try_enable_extended_io();
4233 
4234   // Allocate a single page and mark it as readable for safepoint polling.  Also
4235   // use this first mmap call to check support for MAP_ALIGN.
4236   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4237                                                       page_size,
4238                                                       MAP_PRIVATE | MAP_ALIGN,
4239                                                       PROT_READ);
4240   if (polling_page == NULL) {
4241     has_map_align = false;
4242     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
4243                                                 PROT_READ);
4244   }
4245 
4246   os::set_polling_page(polling_page);
4247   log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
4248 
4249   if (!UseMembar) {
4250     address mem_serialize_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE);
4251     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
4252     os::set_memory_serialize_page(mem_serialize_page);
4253     log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
4254   }
4255 
4256   // Check and set minimum stack sizes against command line options
4257   if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
4258     return JNI_ERR;
4259   }
4260 
4261   Solaris::libthread_init();
4262 
4263   if (UseNUMA) {
4264     if (!Solaris::liblgrp_init()) {
4265       UseNUMA = false;
4266     } else {
4267       size_t lgrp_limit = os::numa_get_groups_num();
4268       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
4269       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
4270       FREE_C_HEAP_ARRAY(int, lgrp_ids);
4271       if (lgrp_num < 2) {
4272         // There's only one locality group, disable NUMA.
4273         UseNUMA = false;
4274       }
4275     }
4276     if (!UseNUMA && ForceNUMA) {
4277       UseNUMA = true;
4278     }
4279   }
4280 
4281   Solaris::signal_sets_init();
4282   Solaris::init_signal_mem();
4283   Solaris::install_signal_handlers();
4284 
4285   // initialize synchronization primitives to use either thread or
4286   // lwp synchronization (controlled by UseLWPSynchronization)
4287   Solaris::synchronization_init();
4288 
4289   if (MaxFDLimit) {
4290     // Set the number of file descriptors to the maximum.  Print an error
4291     // if getrlimit/setrlimit fails, but continue regardless.
4292     struct rlimit nbr_files;
4293     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4294     if (status != 0) {
4295       log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
4296     } else {
4297       nbr_files.rlim_cur = nbr_files.rlim_max;
4298       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4299       if (status != 0) {
4300         log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
4301       }
4302     }
4303   }
4304 
4305   // Calculate the theoretical max. number of threads to guard against
4306   // artificial out-of-memory situations, where all available address
4307   // space has been reserved by thread stacks.  Default stack size is 1Mb.
4308   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
4309     JavaThread::stack_size_at_create() : (1*K*K);
4310   assert(pre_thread_stack_size != 0, "Must have a stack");
4311   // Solaris allows a maximum of 4Gb of address space for user programs.  Calculate
4312   // the thread limit at which we should start doing virtual memory banging.
4313   // Currently that is when the threads have used all but 200Mb of the space.
4314   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
4315   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
4316 
4317   // at-exit methods are called in the reverse order of their registration.
4318   // In Solaris 7 and earlier, atexit functions are called on return from
4319   // main or as a result of a call to exit(3C). There can be only 32 of
4320   // these functions registered and atexit() does not set errno. In Solaris
4321   // 8 and later, there is no limit to the number of functions registered
4322   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
4323   // functions are called upon dlclose(3DL) in addition to return from main
4324   // and exit(3C).
4325 
4326   if (PerfAllowAtExitRegistration) {
4327     // only register atexit functions if PerfAllowAtExitRegistration is set.
4328     // atexit functions can be delayed until process exit time, which
4329     // can be problematic for embedded VM situations. Embedded VMs should
4330     // call DestroyJavaVM() to assure that VM resources are released.
4331 
4332     // note: perfMemory_exit_helper atexit function may be removed in
4333     // the future if the appropriate cleanup code can be added to the
4334     // VM_Exit VMOperation's doit method.
4335     if (atexit(perfMemory_exit_helper) != 0) {
4336       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4337     }
4338   }
4339 
4340   // Init pset_loadavg function pointer
4341   init_pset_getloadavg_ptr();
4342 
4343   return JNI_OK;
4344 }
4345 
4346 // Mark the polling page as unreadable
4347 void os::make_polling_page_unreadable(void) {
4348   if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0) {
4349     fatal("Could not disable polling page");
4350   }
4351 }
4352 
4353 // Mark the polling page as readable
4354 void os::make_polling_page_readable(void) {
4355   if (mprotect((char *)_polling_page, page_size, PROT_READ) != 0) {
4356     fatal("Could not enable polling page");
4357   }
4358 }
4359 
4360 // Is a (classpath) directory empty?
4361 bool os::dir_is_empty(const char* path) {
4362   DIR *dir = NULL;
4363   struct dirent *ptr;
4364 
4365   dir = opendir(path);
4366   if (dir == NULL) return true;
4367 
4368   // Scan the directory
4369   bool result = true;
4370   char buf[sizeof(struct dirent) + MAX_PATH];
4371   struct dirent *dbuf = (struct dirent *) buf;
4372   while (result && (ptr = os::readdir(dir, dbuf)) != NULL) {
4373     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4374       result = false;
4375     }
4376   }
4377   closedir(dir);
4378   return result;
4379 }
4380 
4381 // This code originates from JDK's sysOpen and open64_w
4382 // from src/solaris/hpi/src/system_md.c
4383 
4384 int os::open(const char *path, int oflag, int mode) {
4385   if (strlen(path) > MAX_PATH - 1) {
4386     errno = ENAMETOOLONG;
4387     return -1;
4388   }
4389   int fd;
4390 
4391   fd = ::open64(path, oflag, mode);
4392   if (fd == -1) return -1;
4393 
4394   // If the open succeeded, the file might still be a directory
4395   {
4396     struct stat64 buf64;
4397     int ret = ::fstat64(fd, &buf64);
4398     int st_mode = buf64.st_mode;
4399 
4400     if (ret != -1) {
4401       if ((st_mode & S_IFMT) == S_IFDIR) {
4402         errno = EISDIR;
4403         ::close(fd);
4404         return -1;
4405       }
4406     } else {
4407       ::close(fd);
4408       return -1;
4409     }
4410   }
4411 
4412   // 32-bit Solaris systems suffer from:
4413   //
4414   // - an historical default soft limit of 256 per-process file
4415   //   descriptors that is too low for many Java programs.
4416   //
4417   // - a design flaw where file descriptors created using stdio
4418   //   fopen must be less than 256, _even_ when the first limit above
4419   //   has been raised.  This can cause calls to fopen (but not calls to
4420   //   open, for example) to fail mysteriously, perhaps in 3rd party
4421   //   native code (although the JDK itself uses fopen).  One can hardly
4422   //   criticize them for using this most standard of all functions.
4423   //
4424   // We attempt to make everything work anyway by:
4425   //
4426   // - raising the soft limit on per-process file descriptors beyond
4427   //   256
4428   //
4429   // - As of Solaris 10u4, we can request that Solaris raise the 256
4430   //   stdio fopen limit by calling function enable_extended_FILE_stdio.
4431   //   This is done in init_2 and recorded in enabled_extended_FILE_stdio
4432   //
4433   // - If we are stuck on an old (pre 10u4) Solaris system, we can
4434   //   work around the bug by remapping non-stdio file descriptors below
4435   //   256 to ones beyond 256, which is done below.
4436   //
4437   // See:
4438   // 1085341: 32-bit stdio routines should support file descriptors >255
4439   // 6533291: Work around 32-bit Solaris stdio limit of 256 open files
4440   // 6431278: Netbeans crash on 32 bit Solaris: need to call
4441   //          enable_extended_FILE_stdio() in VM initialisation
4442   // Giri Mandalika's blog
4443   // http://technopark02.blogspot.com/2005_05_01_archive.html
4444   //
4445 #ifndef  _LP64
4446   if ((!enabled_extended_FILE_stdio) && fd < 256) {
4447     int newfd = ::fcntl(fd, F_DUPFD, 256);
4448     if (newfd != -1) {
4449       ::close(fd);
4450       fd = newfd;
4451     }
4452   }
4453 #endif // 32-bit Solaris
4454 
4455   // All file descriptors that are opened in the JVM and not
4456   // specifically destined for a subprocess should have the
4457   // close-on-exec flag set.  If we don't set it, then careless 3rd
4458   // party native code might fork and exec without closing all
4459   // appropriate file descriptors (e.g. as we do in closeDescriptors in
4460   // UNIXProcess.c), and this in turn might:
4461   //
4462   // - cause end-of-file to fail to be detected on some file
4463   //   descriptors, resulting in mysterious hangs, or
4464   //
4465   // - cause an fopen in the subprocess to fail on a system
4466   //   suffering from bug 1085341.
4467   //
4468   // (Yes, the default setting of the close-on-exec flag is a Unix
4469   // design flaw)
4470   //
4471   // See:
4472   // 1085341: 32-bit stdio routines should support file descriptors >255
4473   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4474   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4475   //
4476 #ifdef FD_CLOEXEC
4477   {
4478     int flags = ::fcntl(fd, F_GETFD);
4479     if (flags != -1) {
4480       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4481     }
4482   }
4483 #endif
4484 
4485   return fd;
4486 }
4487 
4488 // create binary file, rewriting existing file if required
4489 int os::create_binary_file(const char* path, bool rewrite_existing) {
4490   int oflags = O_WRONLY | O_CREAT;
4491   if (!rewrite_existing) {
4492     oflags |= O_EXCL;
4493   }
4494   return ::open64(path, oflags, S_IREAD | S_IWRITE);
4495 }
4496 
4497 // return current position of file pointer
4498 jlong os::current_file_offset(int fd) {
4499   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4500 }
4501 
4502 // move file pointer to the specified offset
4503 jlong os::seek_to_file_offset(int fd, jlong offset) {
4504   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4505 }
4506 
4507 jlong os::lseek(int fd, jlong offset, int whence) {
4508   return (jlong) ::lseek64(fd, offset, whence);
4509 }
4510 
4511 char * os::native_path(char *path) {
4512   return path;
4513 }
4514 
4515 int os::ftruncate(int fd, jlong length) {
4516   return ::ftruncate64(fd, length);
4517 }
4518 
4519 int os::fsync(int fd)  {
4520   RESTARTABLE_RETURN_INT(::fsync(fd));
4521 }
4522 
4523 int os::available(int fd, jlong *bytes) {
4524   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
4525          "Assumed _thread_in_native");
4526   jlong cur, end;
4527   int mode;
4528   struct stat64 buf64;
4529 
4530   if (::fstat64(fd, &buf64) >= 0) {
4531     mode = buf64.st_mode;
4532     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4533       int n, ioctl_return;
4534 
4535       RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
4536       if (ioctl_return >= 0) {
4537         *bytes = n;
4538         return 1;
4539       }
4540     }
4541   }
4542   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4543     return 0;
4544   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4545     return 0;
4546   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4547     return 0;
4548   }
4549   *bytes = end - cur;
4550   return 1;
4551 }
4552 
4553 // Map a block of memory.
4554 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4555                         char *addr, size_t bytes, bool read_only,
4556                         bool allow_exec) {
4557   int prot;
4558   int flags;
4559 
4560   if (read_only) {
4561     prot = PROT_READ;
4562     flags = MAP_SHARED;
4563   } else {
4564     prot = PROT_READ | PROT_WRITE;
4565     flags = MAP_PRIVATE;
4566   }
4567 
4568   if (allow_exec) {
4569     prot |= PROT_EXEC;
4570   }
4571 
4572   if (addr != NULL) {
4573     flags |= MAP_FIXED;
4574   }
4575 
4576   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
4577                                      fd, file_offset);
4578   if (mapped_address == MAP_FAILED) {
4579     return NULL;
4580   }
4581   return mapped_address;
4582 }
4583 
4584 
4585 // Remap a block of memory.
4586 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4587                           char *addr, size_t bytes, bool read_only,
4588                           bool allow_exec) {
4589   // same as map_memory() on this OS
4590   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4591                         allow_exec);
4592 }
4593 
4594 
4595 // Unmap a block of memory.
4596 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4597   return munmap(addr, bytes) == 0;
4598 }
4599 
4600 void os::pause() {
4601   char filename[MAX_PATH];
4602   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4603     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4604   } else {
4605     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4606   }
4607 
4608   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4609   if (fd != -1) {
4610     struct stat buf;
4611     ::close(fd);
4612     while (::stat(filename, &buf) == 0) {
4613       (void)::poll(NULL, 0, 100);
4614     }
4615   } else {
4616     jio_fprintf(stderr,
4617                 "Could not open pause file '%s', continuing immediately.\n", filename);
4618   }
4619 }
4620 
4621 #ifndef PRODUCT
4622 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
4623 // Turn this on if you need to trace synch operations.
4624 // Set RECORD_SYNCH_LIMIT to a large-enough value,
4625 // and call record_synch_enable and record_synch_disable
4626 // around the computation of interest.
4627 
4628 void record_synch(char* name, bool returning);  // defined below
4629 
4630 class RecordSynch {
4631   char* _name;
4632  public:
4633   RecordSynch(char* name) :_name(name) { record_synch(_name, false); }
4634   ~RecordSynch()                       { record_synch(_name, true); }
4635 };
4636 
4637 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
4638 extern "C" ret name params {                                    \
4639   typedef ret name##_t params;                                  \
4640   static name##_t* implem = NULL;                               \
4641   static int callcount = 0;                                     \
4642   if (implem == NULL) {                                         \
4643     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
4644     if (implem == NULL)  fatal(dlerror());                      \
4645   }                                                             \
4646   ++callcount;                                                  \
4647   RecordSynch _rs(#name);                                       \
4648   inner;                                                        \
4649   return implem args;                                           \
4650 }
4651 // in dbx, examine callcounts this way:
4652 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
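// As an illustration (not additional code), CHECK_MUTEX(mutex_lock) below expands
// roughly to:
//
//   extern "C" int mutex_lock(mutex_t *mu) {
//     typedef int mutex_lock_t(mutex_t *mu);
//     static mutex_lock_t* implem = NULL;
//     static int callcount = 0;
//     if (implem == NULL) {
//       implem = (mutex_lock_t*) dlsym(RTLD_NEXT, "mutex_lock");
//       if (implem == NULL)  fatal(dlerror());
//     }
//     ++callcount;
//     RecordSynch _rs("mutex_lock");
//     CHECK_MU;                    // verify the mutex lives in the C heap
//     return implem(mu);
//   }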
4653 
4654 #define CHECK_POINTER_OK(p) \
4655   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
4656 #define CHECK_MU \
4657   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
4658 #define CHECK_CV \
4659   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
4660 #define CHECK_P(p) \
4661   if (!CHECK_POINTER_OK(p))  fatal(false,  "Pointer must be in C heap only.");
4662 
4663 #define CHECK_MUTEX(mutex_op) \
4664   CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
4665 
4666 CHECK_MUTEX(   mutex_lock)
4667 CHECK_MUTEX(  _mutex_lock)
4668 CHECK_MUTEX( mutex_unlock)
4669 CHECK_MUTEX(_mutex_unlock)
4670 CHECK_MUTEX( mutex_trylock)
4671 CHECK_MUTEX(_mutex_trylock)
4672 
4673 #define CHECK_COND(cond_op) \
4674   CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU; CHECK_CV);
4675 
4676 CHECK_COND( cond_wait);
4677 CHECK_COND(_cond_wait);
4678 CHECK_COND(_cond_wait_cancel);
4679 
4680 #define CHECK_COND2(cond_op) \
4681   CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU; CHECK_CV);
4682 
4683 CHECK_COND2( cond_timedwait);
4684 CHECK_COND2(_cond_timedwait);
4685 CHECK_COND2(_cond_timedwait_cancel);
4686 
4687 // do the _lwp_* versions too
4688 #define mutex_t lwp_mutex_t
4689 #define cond_t  lwp_cond_t
4690 CHECK_MUTEX(  _lwp_mutex_lock)
4691 CHECK_MUTEX(  _lwp_mutex_unlock)
4692 CHECK_MUTEX(  _lwp_mutex_trylock)
4693 CHECK_MUTEX( __lwp_mutex_lock)
4694 CHECK_MUTEX( __lwp_mutex_unlock)
4695 CHECK_MUTEX( __lwp_mutex_trylock)
4696 CHECK_MUTEX(___lwp_mutex_lock)
4697 CHECK_MUTEX(___lwp_mutex_unlock)
4698 
4699 CHECK_COND(  _lwp_cond_wait);
4700 CHECK_COND( __lwp_cond_wait);
4701 CHECK_COND(___lwp_cond_wait);
4702 
4703 CHECK_COND2(  _lwp_cond_timedwait);
4704 CHECK_COND2( __lwp_cond_timedwait);
4705 #undef mutex_t
4706 #undef cond_t
4707 
4708 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
4709 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
4710 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
4711 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
4712 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
4713 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
4714 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
4715 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
4716 
4717 
4718 // recording machinery:
4719 
4720 enum { RECORD_SYNCH_LIMIT = 200 };
4721 char* record_synch_name[RECORD_SYNCH_LIMIT];
4722 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
4723 bool record_synch_returning[RECORD_SYNCH_LIMIT];
4724 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
4725 int record_synch_count = 0;
4726 bool record_synch_enabled = false;
4727 
4728 // in dbx, examine recorded data this way:
4729 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
4730 
4731 void record_synch(char* name, bool returning) {
4732   if (record_synch_enabled) {
4733     if (record_synch_count < RECORD_SYNCH_LIMIT) {
4734       record_synch_name[record_synch_count] = name;
4735       record_synch_returning[record_synch_count] = returning;
4736       record_synch_thread[record_synch_count] = thr_self();
4737       record_synch_arg0ptr[record_synch_count] = &name;
4738       record_synch_count++;
4739     }
4740     // put more checking code here:
4741     // ...
4742   }
4743 }
4744 
4745 void record_synch_enable() {
4746   // start collecting trace data, if not already doing so
4747   if (!record_synch_enabled)  record_synch_count = 0;
4748   record_synch_enabled = true;
4749 }
4750 
4751 void record_synch_disable() {
4752   // stop collecting trace data
4753   record_synch_enabled = false;
4754 }
4755 
4756 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
4757 #endif // PRODUCT
4758 
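// Byte offset of pr_utime within prusage_t, and the size of the span from
// pr_utime up to (but not including) pr_ttime -- i.e. the pr_utime and pr_stime
// fields -- computed with the classic null-pointer "offsetof" idiom. A single
// pread() in os::thread_cpu_time() below uses these to read both timestamps.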
4759 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
4760 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
4761                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
4762 
4763 
4764 // JVMTI & JVM monitoring and management support
4765 // The thread_cpu_time() and current_thread_cpu_time() are only
4766 // supported if is_thread_cpu_time_supported() returns true.
4767 // They are not supported on Solaris T1.
4768 
4769 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4770 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4771 // of a thread.
4772 //
4773 // current_thread_cpu_time() and thread_cpu_time(Thread *)
4774 // return the fast estimate available on the platform.
4775 
4776 // hrtime_t gethrvtime() return value includes
4777 // user time but does not include system time
4778 jlong os::current_thread_cpu_time() {
4779   return (jlong) gethrvtime();
4780 }
4781 
4782 jlong os::thread_cpu_time(Thread *thread) {
4783   // return user level CPU time only to be consistent with
4784   // what current_thread_cpu_time returns.
4785   // thread_cpu_time_info() must be changed if this changes
4786   return os::thread_cpu_time(thread, false /* user time only */);
4787 }
4788 
4789 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4790   if (user_sys_cpu_time) {
4791     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4792   } else {
4793     return os::current_thread_cpu_time();
4794   }
4795 }
4796 
4797 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4798   char proc_name[64];
4799   int count;
4800   prusage_t prusage;
4801   jlong lwp_time;
4802   int fd;
4803 
4804   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
4805           getpid(),
4806           thread->osthread()->lwp_id());
4807   fd = ::open(proc_name, O_RDONLY);
4808   if (fd == -1) return -1;
4809 
4810   do {
4811     count = ::pread(fd,
4812                     (void *)&prusage.pr_utime,
4813                     thr_time_size,
4814                     thr_time_off);
4815   } while (count < 0 && errno == EINTR);
4816   ::close(fd);
4817   if (count < 0) return -1;
4818 
4819   if (user_sys_cpu_time) {
4820     // user + system CPU time
4821     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
4822                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
4823                  (jlong)prusage.pr_stime.tv_nsec +
4824                  (jlong)prusage.pr_utime.tv_nsec;
4825   } else {
4826     // user level CPU time only
4827     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
4828                 (jlong)prusage.pr_utime.tv_nsec;
4829   }
4830 
4831   return (lwp_time);
4832 }
4833 
4834 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4835   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
4836   info_ptr->may_skip_backward = false;    // elapsed time not wall time
4837   info_ptr->may_skip_forward = false;     // elapsed time not wall time
4838   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
4839 }
4840 
4841 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4842   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
4843   info_ptr->may_skip_backward = false;    // elapsed time not wall time
4844   info_ptr->may_skip_forward = false;     // elapsed time not wall time
4845   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
4846 }
4847 
4848 bool os::is_thread_cpu_time_supported() {
4849   return true;
4850 }
4851 
4852 // System loadavg support.  Returns -1 if load average cannot be obtained.
4853 // Return the load average for our processor set if the primitive exists
4854 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
4855 int os::loadavg(double loadavg[], int nelem) {
4856   if (pset_getloadavg_ptr != NULL) {
4857     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
4858   } else {
4859     return ::getloadavg(loadavg, nelem);
4860   }
4861 }
4862 
4863 //---------------------------------------------------------------------------------
4864 
4865 bool os::find(address addr, outputStream* st) {
4866   Dl_info dlinfo;
4867   memset(&dlinfo, 0, sizeof(dlinfo));
4868   if (dladdr(addr, &dlinfo) != 0) {
4869     st->print(PTR_FORMAT ": ", addr);
4870     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
4871       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
4872     } else if (dlinfo.dli_fbase != NULL) {
4873       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
4874     } else {
4875       st->print("<absolute address>");
4876     }
4877     if (dlinfo.dli_fname != NULL) {
4878       st->print(" in %s", dlinfo.dli_fname);
4879     }
4880     if (dlinfo.dli_fbase != NULL) {
4881       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
4882     }
4883     st->cr();
4884 
4885     if (Verbose) {
4886       // decode some bytes around the PC
4887       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
4888       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
4889       address       lowest = (address) dlinfo.dli_sname;
4890       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
4891       if (begin < lowest)  begin = lowest;
4892       Dl_info dlinfo2;
4893       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
4894           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
4895         end = (address) dlinfo2.dli_saddr;
4896       }
4897       Disassembler::decode(begin, end, st);
4898     }
4899     return true;
4900   }
4901   return false;
4902 }
4903 
4904 // Following function has been added to support HotSparc's libjvm.so running
4905 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
4906 // src/solaris/hpi/native_threads in the EVM codebase.
4907 //
4908 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
4909 // libraries and should thus be removed. We will leave it behind for a while
4910 // until we no longer want to be able to run on top of 1.3.0 Solaris production
4911 // JDK. See 4341971.
4912 
4913 #define STACK_SLACK 0x800
4914 
4915 extern "C" {
4916   intptr_t sysThreadAvailableStackWithSlack() {
4917     stack_t st;
4918     intptr_t retval, stack_top;
4919     retval = thr_stksegment(&st);
4920     assert(retval == 0, "incorrect return value from thr_stksegment");
4921     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
4922     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
4923     stack_top = (intptr_t)st.ss_sp - st.ss_size;
4924     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
4925   }
4926 }
4927 
4928 // ObjectMonitor park-unpark infrastructure ...
4929 //
4930 // We implement Solaris and Linux PlatformEvents with the
4931 // obvious condvar-mutex-flag triple.
4932 // Another alternative that works quite well is pipes:
4933 // Each PlatformEvent consists of a pipe-pair.
4934 // The thread associated with the PlatformEvent
4935 // calls park(), which reads from the input end of the pipe.
4936 // Unpark() writes into the other end of the pipe.
4937 // The write-side of the pipe must be set NDELAY.
4938 // Unfortunately pipes consume a large # of handles.
4939 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
4940 // Using pipes for the 1st few threads might be workable, however.
4941 //
4942 // park() is permitted to return spuriously.
4943 // Callers of park() should wrap the call to park() in
4944 // an appropriate loop.  A litmus test for the correct
4945 // usage of park is the following: if park() were modified
4946 // to immediately return 0 your code should still work,
4947 // albeit degenerating to a spin loop.
4948 //
4949 // In a sense, park()-unpark() just provides more polite spinning
4950 // and polling with the key difference over naive spinning being
4951 // that a parked thread needs to be explicitly unparked() in order
4952 // to wake up and to poll the underlying condition.
4953 //
4954 // Assumption:
4955 //    Only one parker can exist on an event, which is why we allocate
4956 //    them per-thread. Multiple unparkers can coexist.
4957 //
4958 // _Event transitions in park()
4959 //   -1 => -1 : illegal
4960 //    1 =>  0 : pass - return immediately
4961 //    0 => -1 : block; then set _Event to 0 before returning
4962 //
4963 // _Event transitions in unpark()
4964 //    0 => 1 : just return
4965 //    1 => 1 : just return
4966 //   -1 => either 0 or 1; must signal target thread
4967 //         That is, we can safely transition _Event from -1 to either
4968 //         0 or 1.
4969 //
4970 // _Event serves as a restricted-range semaphore.
4971 //   -1 : thread is blocked, i.e. there is a waiter
4972 //    0 : neutral: thread is running or ready,
4973 //        could have been signaled after a wait started
4974 //    1 : signaled - thread is running or ready
4975 //
4976 // Another possible encoding of _Event would be with
4977 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
4978 //
4979 // TODO-FIXME: add DTRACE probes for:
4980 // 1.   Tx parks
4981 // 2.   Ty unparks Tx
4982 // 3.   Tx resumes from park
4983 
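// A minimal caller-side sketch of the litmus test above (illustrative only;
// 'conditionHolds' is a hypothetical predicate supplied by the caller):
//
//   while (!conditionHolds()) {
//     ev->park();        // may return spuriously; the loop re-tests the condition
//   }
//
// with the other side calling ev->unpark() after making the condition true.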
4984 
4985 // value determined through experimentation
4986 #define ROUNDINGFIX 11
4987 
4988 // utility to compute the abstime argument to timedwait.
4989 // TODO-FIXME: switch from compute_abstime() to unpackTime().
4990 
4991 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
4992   // millis is the relative timeout time
4993   // abstime will be the absolute timeout time
4994   if (millis < 0)  millis = 0;
4995   struct timeval now;
4996   int status = gettimeofday(&now, NULL);
4997   assert(status == 0, "gettimeofday");
4998   jlong seconds = millis / 1000;
4999   jlong max_wait_period;
5000 
5001   if (UseLWPSynchronization) {
5002     // forward port of fix for 4275818 (not sleeping long enough)
5003     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5004     // _lwp_cond_timedwait() used a round_down algorithm rather
5005     // than a round_up. For millis less than our roundfactor
5006     // it rounded down to 0 which doesn't meet the spec.
5007     // For millis > roundfactor we may return a bit sooner, but
5008     // since we can not accurately identify the patch level and
5009     // this has already been fixed in Solaris 9 and 8 we will
5010     // leave it alone rather than always rounding down.
5011 
5012     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
5013     // It appears that when we go directly through Solaris _lwp_cond_timedwait()
5014     // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
5015     max_wait_period = 21000000;
5016   } else {
5017     max_wait_period = 50000000;
5018   }
5019   millis %= 1000;
5020   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
5021     seconds = max_wait_period;
5022   }
5023   abstime->tv_sec = now.tv_sec  + seconds;
5024   long       usec = now.tv_usec + millis * 1000;
5025   if (usec >= 1000000) {
5026     abstime->tv_sec += 1;
5027     usec -= 1000000;
5028   }
5029   abstime->tv_nsec = usec * 1000;
5030   return abstime;
5031 }
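// Worked example (non-LWP path): compute_abstime(&abst, 1500) with
// now = {tv_sec = 100, tv_usec = 600000} gives seconds = 1, millis = 500,
// usec = 600000 + 500000 = 1100000, so the result is
// abst = {tv_sec = 102, tv_nsec = 100000000}.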
5032 
5033 void os::PlatformEvent::park() {           // AKA: down()
5034   // Transitions for _Event:
5035   //   -1 => -1 : illegal
5036   //    1 =>  0 : pass - return immediately
5037   //    0 => -1 : block; then set _Event to 0 before returning
5038 
5039   // Invariant: Only the thread associated with the Event/PlatformEvent
5040   // may call park().
5041   assert(_nParked == 0, "invariant");
5042 
5043   int v;
5044   for (;;) {
5045     v = _Event;
5046     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5047   }
5048   guarantee(v >= 0, "invariant");
5049   if (v == 0) {
5050     // Do this the hard way by blocking ...
5051     // See http://monaco.sfbay/detail.jsf?cr=5094058.
5052     int status = os::Solaris::mutex_lock(_mutex);
5053     assert_status(status == 0, status, "mutex_lock");
5054     guarantee(_nParked == 0, "invariant");
5055     ++_nParked;
5056     while (_Event < 0) {
5057       // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5058       // Treat this the same as if the wait was interrupted
5059       // With usr/lib/lwp going to kernel, always handle ETIME
5060       status = os::Solaris::cond_wait(_cond, _mutex);
5061       if (status == ETIME) status = EINTR;
5062       assert_status(status == 0 || status == EINTR, status, "cond_wait");
5063     }
5064     --_nParked;
5065     _Event = 0;
5066     status = os::Solaris::mutex_unlock(_mutex);
5067     assert_status(status == 0, status, "mutex_unlock");
5068     // Paranoia to ensure our locked and lock-free paths interact
5069     // correctly with each other.
5070     OrderAccess::fence();
5071   }
5072 }
5073 
5074 int os::PlatformEvent::park(jlong millis) {
5075   // Transitions for _Event:
5076   //   -1 => -1 : illegal
5077   //    1 =>  0 : pass - return immediately
5078   //    0 => -1 : block; then set _Event to 0 before returning
5079 
5080   guarantee(_nParked == 0, "invariant");
5081   int v;
5082   for (;;) {
5083     v = _Event;
5084     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5085   }
5086   guarantee(v >= 0, "invariant");
5087   if (v != 0) return OS_OK;
5088 
5089   int ret = OS_TIMEOUT;
5090   timestruc_t abst;
5091   compute_abstime(&abst, millis);
5092 
5093   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5094   int status = os::Solaris::mutex_lock(_mutex);
5095   assert_status(status == 0, status, "mutex_lock");
5096   guarantee(_nParked == 0, "invariant");
5097   ++_nParked;
5098   while (_Event < 0) {
5099     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5100     assert_status(status == 0 || status == EINTR ||
5101                   status == ETIME || status == ETIMEDOUT,
5102                   status, "cond_timedwait");
5103     if (!FilterSpuriousWakeups) break;                // previous semantics
5104     if (status == ETIME || status == ETIMEDOUT) break;
5105     // We consume and ignore EINTR and spurious wakeups.
5106   }
5107   --_nParked;
5108   if (_Event >= 0) ret = OS_OK;
5109   _Event = 0;
5110   status = os::Solaris::mutex_unlock(_mutex);
5111   assert_status(status == 0, status, "mutex_unlock");
5112   // Paranoia to ensure our locked and lock-free paths interact
5113   // correctly with each other.
5114   OrderAccess::fence();
5115   return ret;
5116 }
5117 
5118 void os::PlatformEvent::unpark() {
5119   // Transitions for _Event:
5120   //    0 => 1 : just return
5121   //    1 => 1 : just return
5122   //   -1 => either 0 or 1; must signal target thread
5123   //         That is, we can safely transition _Event from -1 to either
5124   //         0 or 1.
5125   // See also: "Semaphores in Plan 9" by Mullender & Cox
5126   //
5127   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5128   // that it will take two back-to-back park() calls for the owning
5129   // thread to block. This has the benefit of forcing a spurious return
5130   // from the first park() call after an unpark() call which will help
5131   // shake out uses of park() and unpark() without condition variables.
5132 
5133   if (Atomic::xchg(1, &_Event) >= 0) return;
5134 
5135   // If the thread associated with the event was parked, wake it.
5136   // Wait for the thread assoc with the PlatformEvent to vacate.
5137   int status = os::Solaris::mutex_lock(_mutex);
5138   assert_status(status == 0, status, "mutex_lock");
5139   int AnyWaiters = _nParked;
5140   status = os::Solaris::mutex_unlock(_mutex);
5141   assert_status(status == 0, status, "mutex_unlock");
5142   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5143   if (AnyWaiters != 0) {
5144     // Note that we signal() *after* dropping the lock for "immortal" Events.
5145     // This is safe and avoids a common class of  futile wakeups.  In rare
5146     // circumstances this can cause a thread to return prematurely from
5147     // cond_{timed}wait() but the spurious wakeup is benign and the victim
5148     // will simply re-test the condition and re-park itself.
5149     // This provides particular benefit if the underlying platform does not
5150     // provide wait morphing.
5151     status = os::Solaris::cond_signal(_cond);
5152     assert_status(status == 0, status, "cond_signal");
5153   }
5154 }
5155 
5156 // JSR166
5157 // -------------------------------------------------------
5158 
5159 // The solaris and linux implementations of park/unpark are fairly
5160 // conservative for now, but can be improved. They currently use a
5161 // mutex/condvar pair, plus _counter.
5162 // Park decrements _counter if > 0, else does a condvar wait.  Unpark
5163 // sets count to 1 and signals condvar.  Only one thread ever waits
5164 // on the condvar. Contention seen when trying to park implies that someone
5165 // is unparking you, so don't wait. And spurious returns are fine, so there
5166 // is no need to track notifications.
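// Illustrative timeline of the single-permit semantics above (hypothetical
// threads T1 owning the Parker and T2 unparking it):
//   T2: unpark(T1)  -> _counter = 1, condvar signaled
//   T1: park()      -> sees _counter > 0, consumes the permit, returns at once
//   T1: park()      -> _counter == 0, blocks on the condvar until the next unpark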
5167 
5168 #define MAX_SECS 100000000
5169 
5170 // This code is common to linux and solaris and will be moved to a
5171 // common place in dolphin.
5172 //
5173 // The passed in time value is either a relative time in nanoseconds
5174 // or an absolute time in milliseconds. Either way it has to be unpacked
5175 // into suitable seconds and nanoseconds components and stored in the
5176 // given timespec structure.
5177 // Given time is a 64-bit value and the time_t used in the timespec is only
5178 // a signed-32-bit value (except on 64-bit Linux) we have to watch for
5179 // overflow if times way in the future are given. Further on Solaris versions
5180 // prior to 10 there is a restriction (see cond_timedwait) that the specified
5181 // number of seconds, in abstime, is less than current_time  + 100,000,000.
5182 // As it will be 28 years before "now + 100000000" will overflow we can
5183 // ignore overflow and just impose a hard-limit on seconds using the value
5184 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
5185 // years from "now".
5186 //
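// Worked example (relative case): time = 2,500,000,000 ns (2.5s) with
// now = {tv_sec = 1000, tv_usec = 300000} gives secs = 2 and
// absTime = {tv_sec = 1002, tv_nsec = 500000000 + 300000000 = 800000000}.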
5187 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5188   assert(time > 0, "convertTime");
5189 
5190   struct timeval now;
5191   int status = gettimeofday(&now, NULL);
5192   assert(status == 0, "gettimeofday");
5193 
5194   time_t max_secs = now.tv_sec + MAX_SECS;
5195 
5196   if (isAbsolute) {
5197     jlong secs = time / 1000;
5198     if (secs > max_secs) {
5199       absTime->tv_sec = max_secs;
5200     } else {
5201       absTime->tv_sec = secs;
5202     }
5203     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5204   } else {
5205     jlong secs = time / NANOSECS_PER_SEC;
5206     if (secs >= MAX_SECS) {
5207       absTime->tv_sec = max_secs;
5208       absTime->tv_nsec = 0;
5209     } else {
5210       absTime->tv_sec = now.tv_sec + secs;
5211       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5212       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5213         absTime->tv_nsec -= NANOSECS_PER_SEC;
5214         ++absTime->tv_sec; // note: this must be <= max_secs
5215       }
5216     }
5217   }
5218   assert(absTime->tv_sec >= 0, "tv_sec < 0");
5219   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5220   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5221   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5222 }
5223 
5224 void Parker::park(bool isAbsolute, jlong time) {
5225   // Ideally we'd do something useful while spinning, such
5226   // as calling unpackTime().
5227 
5228   // Optional fast-path check:
5229   // Return immediately if a permit is available.
5230   // We depend on Atomic::xchg() having full barrier semantics
5231   // since we are doing a lock-free update to _counter.
5232   if (Atomic::xchg(0, &_counter) > 0) return;
5233 
5234   // Optional fast-exit: Check interrupt before trying to wait
5235   Thread* thread = Thread::current();
5236   assert(thread->is_Java_thread(), "Must be JavaThread");
5237   JavaThread *jt = (JavaThread *)thread;
5238   if (Thread::is_interrupted(thread, false)) {
5239     return;
5240   }
5241 
5242   // First, demultiplex/decode time arguments
5243   timespec absTime;
5244   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
5245     return;
5246   }
5247   if (time > 0) {
5248     // Warning: this code might be exposed to the old Solaris time
5249     // round-down bugs.  Grep "ROUNDINGFIX" for details.
5250     unpackTime(&absTime, isAbsolute, time);
5251   }
5252 
5253   // Enter safepoint region
5254   // Beware of deadlocks such as 6317397.
5255   // The per-thread Parker:: _mutex is a classic leaf-lock.
5256   // In particular a thread must never block on the Threads_lock while
5257   // holding the Parker:: mutex.  If safepoints are pending, both the
5258   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
5259   ThreadBlockInVM tbivm(jt);
5260 
5261   // Don't wait if we cannot get the lock, since interference arises from
5262   // unblocking.  Also, check for interrupt before trying to wait.
5263   if (Thread::is_interrupted(thread, false) ||
5264       os::Solaris::mutex_trylock(_mutex) != 0) {
5265     return;
5266   }
5267 
5268   int status;
5269 
5270   if (_counter > 0)  { // no wait needed
5271     _counter = 0;
5272     status = os::Solaris::mutex_unlock(_mutex);
5273     assert(status == 0, "invariant");
5274     // Paranoia to ensure our locked and lock-free paths interact
5275     // correctly with each other and Java-level accesses.
5276     OrderAccess::fence();
5277     return;
5278   }
5279 
5280   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5281   jt->set_suspend_equivalent();
5282   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5283 
5284   // Do this the hard way by blocking ...
5285   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5286   if (time == 0) {
5287     status = os::Solaris::cond_wait(_cond, _mutex);
5288   } else {
5289     status = os::Solaris::cond_timedwait(_cond, _mutex, &absTime);
5290   }
5291   // Note that an untimed cond_wait() can sometimes return ETIME on older
5292   // versions of Solaris.
5293   assert_status(status == 0 || status == EINTR ||
5294                 status == ETIME || status == ETIMEDOUT,
5295                 status, "cond_timedwait");
5296 
5297   _counter = 0;
5298   status = os::Solaris::mutex_unlock(_mutex);
5299   assert_status(status == 0, status, "mutex_unlock");
5300   // Paranoia to ensure our locked and lock-free paths interact
5301   // correctly with each other and Java-level accesses.
5302   OrderAccess::fence();
5303 
5304   // If externally suspended while waiting, re-suspend
5305   if (jt->handle_special_suspend_equivalent_condition()) {
5306     jt->java_suspend_self();
5307   }
5308 }
5309 
5310 void Parker::unpark() {
5311   int status = os::Solaris::mutex_lock(_mutex);
5312   assert(status == 0, "invariant");
5313   const int s = _counter;
5314   _counter = 1;
5315   status = os::Solaris::mutex_unlock(_mutex);
5316   assert(status == 0, "invariant");
5317 
5318   if (s < 1) {
5319     status = os::Solaris::cond_signal(_cond);
5320     assert(status == 0, "invariant");
5321   }
5322 }
5323 
5324 extern char** environ;
5325 
5326 // Run the specified command in a separate process. Return its exit value,
5327 // or -1 on failure (e.g. can't fork a new process).
5328 // Unlike system(), this function can be called from a signal handler. It
5329 // doesn't block SIGINT et al.
5330 int os::fork_and_exec(char* cmd) {
5331   char * argv[4];
5332   argv[0] = (char *)"sh";
5333   argv[1] = (char *)"-c";
5334   argv[2] = cmd;
5335   argv[3] = NULL;
5336 
5337   // fork() is async-safe; fork1() is not, so it can't be used in a signal handler
5338   pid_t pid;
5339   Thread* t = Thread::current_or_null_safe();
5340   if (t != NULL && t->is_inside_signal_handler()) {
5341     pid = fork();
5342   } else {
5343     pid = fork1();
5344   }
5345 
5346   if (pid < 0) {
5347     // fork failed
5348     warning("fork failed: %s", os::strerror(errno));
5349     return -1;
5350 
5351   } else if (pid == 0) {
5352     // child process
5353 
5354     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
5355     execve("/usr/bin/sh", argv, environ);
5356 
5357     // execve failed
5358     _exit(-1);
5359 
5360   } else  {
5361     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
5362     // care about the actual exit code, for now.
5363 
5364     int status;
5365 
5366     // Wait for the child process to exit.  This returns immediately if
5367     // the child has already exited.
5368     while (waitpid(pid, &status, 0) < 0) {
5369       switch (errno) {
5370       case ECHILD: return 0;
5371       case EINTR: break;
5372       default: return -1;
5373       }
5374     }
5375 
5376     if (WIFEXITED(status)) {
5377       // The child exited normally; get its exit code.
5378       return WEXITSTATUS(status);
5379     } else if (WIFSIGNALED(status)) {
5380       // The child exited because of a signal
5381       // The best value to return is 0x80 + signal number,
5382       // because that is what all Unix shells do, and because
5383       // it allows callers to distinguish between process exit and
5384       // process death by signal.
5385       return 0x80 + WTERMSIG(status);
5386     } else {
5387       // Unknown exit code; pass it through
5388       return status;
5389     }
5390   }
5391 }
5392 
5393 // is_headless_jre()
5394 //
5395 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
5396 // in order to report if we are running in a headless jre
5397 //
5398 // Since JDK8 xawt/libmawt.so was moved into the same directory
5399 // as libawt.so, and renamed libawt_xawt.so
5400 //
5401 bool os::is_headless_jre() {
5402   struct stat statbuf;
5403   char buf[MAXPATHLEN];
5404   char libmawtpath[MAXPATHLEN];
5405   const char *xawtstr  = "/xawt/libmawt.so";
5406   const char *new_xawtstr = "/libawt_xawt.so";
5407   char *p;
5408 
5409   // Get path to libjvm.so
5410   os::jvm_path(buf, sizeof(buf));
5411 
5412   // Get rid of libjvm.so
5413   p = strrchr(buf, '/');
5414   if (p == NULL) {
5415     return false;
5416   } else {
5417     *p = '\0';
5418   }
5419 
5420   // Get rid of client or server
5421   p = strrchr(buf, '/');
5422   if (p == NULL) {
5423     return false;
5424   } else {
5425     *p = '\0';
5426   }
5427 
5428   // check xawt/libmawt.so
5429   strcpy(libmawtpath, buf);
5430   strcat(libmawtpath, xawtstr);
5431   if (::stat(libmawtpath, &statbuf) == 0) return false;
5432 
5433   // check libawt_xawt.so
5434   strcpy(libmawtpath, buf);
5435   strcat(libmawtpath, new_xawtstr);
5436   if (::stat(libmawtpath, &statbuf) == 0) return false;
5437 
5438   return true;
5439 }
5440 
5441 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
5442   size_t res;
5443   RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
5444   return res;
5445 }
5446 
5447 int os::close(int fd) {
5448   return ::close(fd);
5449 }
5450 
5451 int os::socket_close(int fd) {
5452   return ::close(fd);
5453 }
5454 
5455 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5456   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5457          "Assumed _thread_in_native");
5458   RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
5459 }
5460 
5461 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5462   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5463          "Assumed _thread_in_native");
5464   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5465 }
5466 
5467 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5468   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5469 }
5470 
5471 // As both poll and select can be interrupted by signals, we have to be
5472 // prepared to restart the system call after updating the timeout, unless
5473 // a poll() is done with timeout == -1, in which case we repeat with this
5474 // "wait forever" value.
5475 
5476 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
5477   int _result;
5478   _result = ::connect(fd, him, len);
5479 
5480   // On Solaris, when a connect() call is interrupted, the connection
5481   // can be established asynchronously (see 6343810). Subsequent calls
5482   // to connect() must check the errno value which has the semantic
5483   // described below (copied from the connect() man page). Handling
5484   // of asynchronously established connections is required for both
5485   // blocking and non-blocking sockets.
5486   //     EINTR            The  connection  attempt  was   interrupted
5487   //                      before  any data arrived by the delivery of
5488   //                      a signal. The connection, however, will  be
5489   //                      established asynchronously.
5490   //
5491   //     EINPROGRESS      The socket is non-blocking, and the connec-
5492   //                      tion  cannot  be completed immediately.
5493   //
5494   //     EALREADY         The socket is non-blocking,  and a previous
5495   //                      connection  attempt  has  not yet been com-
5496   //                      pleted.
5497   //
5498   //     EISCONN          The socket is already connected.
5499   if (_result == OS_ERR && errno == EINTR) {
5500     // restarting a connect() changes its errno semantics
5501     RESTARTABLE(::connect(fd, him, len), _result);
5502     // undo these changes
5503     if (_result == OS_ERR) {
5504       if (errno == EALREADY) {
5505         errno = EINPROGRESS; // fall through
5506       } else if (errno == EISCONN) {
5507         errno = 0;
5508         return OS_OK;
5509       }
5510     }
5511   }
5512   return _result;
5513 }
5514 
5515 // Get the default path to the core file
5516 // Returns the length of the string
5517 int os::get_core_path(char* buffer, size_t bufferSize) {
5518   const char* p = get_current_directory(buffer, bufferSize);
5519 
5520   if (p == NULL) {
5521     assert(p != NULL, "failed to get current directory");
5522     return 0;
5523   }
5524 
5525   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
5526                                               p, current_process_id());
5527 
5528   return strlen(buffer);
5529 }
5530 
5531 #ifndef PRODUCT
5532 void TestReserveMemorySpecial_test() {
5533   // No tests available for this platform
5534 }
5535 #endif
5536 
5537 bool os::start_debugging(char *buf, int buflen) {
5538   int len = (int)strlen(buf);
5539   char *p = &buf[len];
5540 
5541   jio_snprintf(p, buflen-len,
5542                "\n\n"
5543                "Do you want to debug the problem?\n\n"
5544                "To debug, run 'dbx - %d'; then switch to thread " INTX_FORMAT "\n"
5545                "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
5546                "Otherwise, press RETURN to abort...",
5547                os::current_process_id(), os::current_thread_id());
5548 
5549   bool yes = os::message_box("Unexpected Error", buf);
5550 
5551   if (yes) {
5552     // yes, user asked VM to launch debugger
5553     jio_snprintf(buf, buflen, "dbx - %d", os::current_process_id());
5554 
5555     os::fork_and_exec(buf);
5556     yes = false;
5557   }
5558   return yes;
5559 }