1 /*
   2  * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "utilities/globalDefinitions.hpp"
  26 #include "prims/jvm.h"
  27 #include "semaphore_posix.hpp"
  28 #include "runtime/frame.inline.hpp"
  29 #include "runtime/interfaceSupport.hpp"
  30 #include "runtime/os.hpp"
  31 #include "utilities/macros.hpp"
  32 #include "utilities/vmError.hpp"
  33 
  34 #include <dlfcn.h>
  35 #include <pthread.h>
  36 #include <semaphore.h>
  37 #include <signal.h>
  38 #include <sys/resource.h>
  39 #include <sys/utsname.h>
  40 #include <time.h>
  41 #include <unistd.h>
  42 
  43 // Todo: provide an os::get_max_process_id() or similar. The maximum number of
  44 // processes may have been configured and can be read more accurately from the proc filesystem, etc.
  45 #ifndef MAX_PID
  46 #define MAX_PID INT_MAX
  47 #endif
  48 #define IS_VALID_PID(p) (p > 0 && p < MAX_PID)
  49 
  50 // Check the core dump limit and report the possible location where a core file can be found
  51 void os::check_dump_limit(char* buffer, size_t bufferSize) {
  52   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
  53     jio_snprintf(buffer, bufferSize, "CreateCoredumpOnCrash is disabled from command line");
  54     VMError::record_coredump_status(buffer, false);
  55     return;
  56   }
  57 
  58   int n;
  59   struct rlimit rlim;
  60   bool success;
  61 
  62   char core_path[PATH_MAX];
  63   n = get_core_path(core_path, PATH_MAX);
  64 
  65   if (n <= 0) {
  66     jio_snprintf(buffer, bufferSize, "core.%d (may not exist)", current_process_id());
  67     success = true;
  68 #ifdef LINUX
  69   } else if (core_path[0] == '"') { // redirect to user process
  70     jio_snprintf(buffer, bufferSize, "Core dumps may be processed with %s", core_path);
  71     success = true;
  72 #endif
  73   } else if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
  74     jio_snprintf(buffer, bufferSize, "%s (may not exist)", core_path);
  75     success = true;
  76   } else {
  77     switch(rlim.rlim_cur) {
  78       case RLIM_INFINITY:
  79         jio_snprintf(buffer, bufferSize, "%s", core_path);
  80         success = true;
  81         break;
  82       case 0:
  83         jio_snprintf(buffer, bufferSize, "Core dumps have been disabled. To enable core dumping, try \"ulimit -c unlimited\" before starting Java again");
  84         success = false;
  85         break;
  86       default:
  87         jio_snprintf(buffer, bufferSize, "%s (max size " UINT64_FORMAT " kB). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", core_path, uint64_t(rlim.rlim_cur) / 1024);
  88         success = true;
  89         break;
  90     }
  91   }
  92 
  93   VMError::record_coredump_status(buffer, success);
  94 }
  95 
  96 int os::get_native_stack(address* stack, int frames, int toSkip) {
  97   int frame_idx = 0;
  98   int num_of_frames;  // number of frames captured
  99   frame fr = os::current_frame();
 100   while (fr.pc() && frame_idx < frames) {
 101     if (toSkip > 0) {
 102       toSkip --;
 103     } else {
 104       stack[frame_idx ++] = fr.pc();
 105     }
 106     if (fr.fp() == NULL || fr.cb() != NULL ||
 107         fr.sender_pc() == NULL || os::is_first_C_frame(&fr)) break;
 108 
  109     // The break above already guarantees a valid sender frame, so step to it.
  110     fr = os::get_sender_for_C_frame(&fr);
 114   }
 115   num_of_frames = frame_idx;
 116   for (; frame_idx < frames; frame_idx ++) {
 117     stack[frame_idx] = NULL;
 118   }
 119 
 120   return num_of_frames;
 121 }
 122 
 123 
 124 bool os::unsetenv(const char* name) {
 125   assert(name != NULL, "Null pointer");
 126   return (::unsetenv(name) == 0);
 127 }
 128 
 129 int os::get_last_error() {
 130   return errno;
 131 }
 132 
 133 bool os::is_debugger_attached() {
 134   // not implemented
 135   return false;
 136 }
 137 
 138 void os::wait_for_keypress_at_exit(void) {
 139   // don't do anything on posix platforms
 140   return;
 141 }
 142 
  143 // Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
  144 // so on POSIX, unmap the sections at the start and at the end of the chunk that we mapped
  145 // rather than unmapping and remapping the whole chunk to get the requested alignment.
 146 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
 147   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
 148       "Alignment must be a multiple of allocation granularity (page size)");
 149   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
 150 
 151   size_t extra_size = size + alignment;
 152   assert(extra_size >= size, "overflow, size is too large to allow alignment");
 153 
 154   char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
 155 
 156   if (extra_base == NULL) {
 157     return NULL;
 158   }
 159 
 160   // Do manual alignment
 161   char* aligned_base = align_up(extra_base, alignment);
 162 
 163   // [  |                                       |  ]
 164   // ^ extra_base
 165   //    ^ extra_base + begin_offset == aligned_base
 166   //     extra_base + begin_offset + size       ^
 167   //                       extra_base + extra_size ^
 168   // |<>| == begin_offset
 169   //                              end_offset == |<>|
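        // For example (hypothetical addresses): with an alignment of 1M and
        // extra_base == 0x10080000, aligned_base would be 0x10100000, giving
        // begin_offset == 0x80000 and end_offset == alignment - begin_offset == 0x80000.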
 170   size_t begin_offset = aligned_base - extra_base;
 171   size_t end_offset = (extra_base + extra_size) - (aligned_base + size);
 172 
 173   if (begin_offset > 0) {
 174       os::release_memory(extra_base, begin_offset);
 175   }
 176 
 177   if (end_offset > 0) {
 178       os::release_memory(extra_base + begin_offset + size, end_offset);
 179   }
 180 
 181   return aligned_base;
 182 }
 183 
 184 int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
 185     return vsnprintf(buf, len, fmt, args);
 186 }
 187 
 188 int os::get_fileno(FILE* fp) {
 189   return NOT_AIX(::)fileno(fp);
 190 }
 191 
 192 struct tm* os::gmtime_pd(const time_t* clock, struct tm*  res) {
 193   return gmtime_r(clock, res);
 194 }
 195 
 196 void os::Posix::print_load_average(outputStream* st) {
 197   st->print("load average:");
 198   double loadavg[3];
 199   os::loadavg(loadavg, 3);
 200   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
 201   st->cr();
 202 }
 203 
 204 void os::Posix::print_rlimit_info(outputStream* st) {
 205   st->print("rlimit:");
 206   struct rlimit rlim;
 207 
 208   st->print(" STACK ");
 209   getrlimit(RLIMIT_STACK, &rlim);
 210   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 211   else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024);
 212 
 213   st->print(", CORE ");
 214   getrlimit(RLIMIT_CORE, &rlim);
 215   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 216   else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024);
 217 
  218   // RLIMIT_NPROC is not available on Solaris.
 219 #if defined(AIX)
 220   st->print(", NPROC ");
 221   st->print("%d", sysconf(_SC_CHILD_MAX));
 222 #elif !defined(SOLARIS)
 223   st->print(", NPROC ");
 224   getrlimit(RLIMIT_NPROC, &rlim);
 225   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 226   else st->print(UINT64_FORMAT, uint64_t(rlim.rlim_cur));
 227 #endif
 228 
 229   st->print(", NOFILE ");
 230   getrlimit(RLIMIT_NOFILE, &rlim);
 231   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 232   else st->print(UINT64_FORMAT, uint64_t(rlim.rlim_cur));
 233 
 234   st->print(", AS ");
 235   getrlimit(RLIMIT_AS, &rlim);
 236   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 237   else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024);
 238 
 239   st->print(", DATA ");
 240   getrlimit(RLIMIT_DATA, &rlim);
 241   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 242   else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024);
 243 
 244   st->print(", FSIZE ");
 245   getrlimit(RLIMIT_FSIZE, &rlim);
 246   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 247   else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024);
 248 
 249   st->cr();
 250 }
 251 
 252 void os::Posix::print_uname_info(outputStream* st) {
 253   // kernel
 254   st->print("uname:");
 255   struct utsname name;
 256   uname(&name);
 257   st->print("%s ", name.sysname);
 258 #ifdef ASSERT
 259   st->print("%s ", name.nodename);
 260 #endif
 261   st->print("%s ", name.release);
 262   st->print("%s ", name.version);
 263   st->print("%s", name.machine);
 264   st->cr();
 265 }
 266 
 267 bool os::get_host_name(char* buf, size_t buflen) {
 268   struct utsname name;
 269   uname(&name);
 270   jio_snprintf(buf, buflen, "%s", name.nodename);
 271   return true;
 272 }
 273 
 274 bool os::has_allocatable_memory_limit(julong* limit) {
 275   struct rlimit rlim;
 276   int getrlimit_res = getrlimit(RLIMIT_AS, &rlim);
 277   // if there was an error when calling getrlimit, assume that there is no limitation
 278   // on virtual memory.
 279   bool result;
 280   if ((getrlimit_res != 0) || (rlim.rlim_cur == RLIM_INFINITY)) {
 281     result = false;
 282   } else {
 283     *limit = (julong)rlim.rlim_cur;
 284     result = true;
 285   }
 286 #ifdef _LP64
 287   return result;
 288 #else
 289   // arbitrary virtual space limit for 32 bit Unices found by testing. If
 290   // getrlimit above returned a limit, bound it with this limit. Otherwise
 291   // directly use it.
 292   const julong max_virtual_limit = (julong)3800*M;
 293   if (result) {
 294     *limit = MIN2(*limit, max_virtual_limit);
 295   } else {
 296     *limit = max_virtual_limit;
 297   }
 298 
 299   // bound by actually allocatable memory. The algorithm uses two bounds, an
 300   // upper and a lower limit. The upper limit is the current highest amount of
 301   // memory that could not be allocated, the lower limit is the current highest
 302   // amount of memory that could be allocated.
 303   // The algorithm iteratively refines the result by halving the difference
 304   // between these limits, updating either the upper limit (if that value could
  305   // not be allocated) or the lower limit (if that value could be allocated)
 306   // until the difference between these limits is "small".
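        // For example (purely illustrative numbers): with a lower limit of 1M and an
        // upper limit of 3800M, the first probe is at roughly 1900M; if that amount can
        // be allocated the lower limit moves up to it, otherwise the upper limit moves
        // down to it, until the two limits are within min_allocation_size of each other.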
 307 
 308   // the minimum amount of memory we care about allocating.
 309   const julong min_allocation_size = M;
 310 
 311   julong upper_limit = *limit;
 312 
 313   // first check a few trivial cases
 314   if (is_allocatable(upper_limit) || (upper_limit <= min_allocation_size)) {
 315     *limit = upper_limit;
 316   } else if (!is_allocatable(min_allocation_size)) {
 317     // we found that not even min_allocation_size is allocatable. Return it
 318     // anyway. There is no point to search for a better value any more.
 319     *limit = min_allocation_size;
 320   } else {
 321     // perform the binary search.
 322     julong lower_limit = min_allocation_size;
 323     while ((upper_limit - lower_limit) > min_allocation_size) {
 324       julong temp_limit = ((upper_limit - lower_limit) / 2) + lower_limit;
 325       temp_limit = align_down(temp_limit, min_allocation_size);
 326       if (is_allocatable(temp_limit)) {
 327         lower_limit = temp_limit;
 328       } else {
 329         upper_limit = temp_limit;
 330       }
 331     }
 332     *limit = lower_limit;
 333   }
 334   return true;
 335 #endif
 336 }
 337 
 338 const char* os::get_current_directory(char *buf, size_t buflen) {
 339   return getcwd(buf, buflen);
 340 }
 341 
 342 FILE* os::open(int fd, const char* mode) {
 343   return ::fdopen(fd, mode);
 344 }
 345 
 346 void os::flockfile(FILE* fp) {
 347   ::flockfile(fp);
 348 }
 349 
 350 void os::funlockfile(FILE* fp) {
 351   ::funlockfile(fp);
 352 }
 353 
  354 // Builds a platform-dependent Agent_OnLoad_<lib_name> function name
  355 // which is used to find statically linked-in agents.
 356 // Parameters:
 357 //            sym_name: Symbol in library we are looking for
 358 //            lib_name: Name of library to look in, NULL for shared libs.
 359 //            is_absolute_path == true if lib_name is absolute path to agent
 360 //                                     such as "/a/b/libL.so"
 361 //            == false if only the base name of the library is passed in
 362 //               such as "L"
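      // For example (assuming a platform where JNI_LIB_PREFIX is "lib" and
      // JNI_LIB_SUFFIX is ".so"): sym_name "Agent_OnLoad" with lib_name "/a/b/libL.so"
      // and is_absolute_path == true yields "Agent_OnLoad_L"; with lib_name == NULL
      // the result is just "Agent_OnLoad".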
 363 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
 364                                     bool is_absolute_path) {
 365   char *agent_entry_name;
 366   size_t len;
 367   size_t name_len;
 368   size_t prefix_len = strlen(JNI_LIB_PREFIX);
 369   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
 370   const char *start;
 371 
 372   if (lib_name != NULL) {
 373     name_len = strlen(lib_name);
 374     if (is_absolute_path) {
 375       // Need to strip path, prefix and suffix
 376       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
 377         lib_name = ++start;
 378       }
 379       if (strlen(lib_name) <= (prefix_len + suffix_len)) {
 380         return NULL;
 381       }
 382       lib_name += prefix_len;
 383       name_len = strlen(lib_name) - suffix_len;
 384     }
 385   }
 386   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
 387   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
 388   if (agent_entry_name == NULL) {
 389     return NULL;
 390   }
 391   strcpy(agent_entry_name, sym_name);
 392   if (lib_name != NULL) {
 393     strcat(agent_entry_name, "_");
 394     strncat(agent_entry_name, lib_name, name_len);
 395   }
 396   return agent_entry_name;
 397 }
 398 
 399 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
 400   assert(thread == Thread::current(),  "thread consistency check");
 401 
 402   ParkEvent * const slp = thread->_SleepEvent ;
 403   slp->reset() ;
 404   OrderAccess::fence() ;
 405 
 406   if (interruptible) {
 407     jlong prevtime = javaTimeNanos();
 408 
 409     for (;;) {
 410       if (os::is_interrupted(thread, true)) {
 411         return OS_INTRPT;
 412       }
 413 
 414       jlong newtime = javaTimeNanos();
 415 
 416       if (newtime - prevtime < 0) {
 417         // time moving backwards, should only happen if no monotonic clock
 418         // not a guarantee() because JVM should not abort on kernel/glibc bugs
 419         assert(!os::supports_monotonic_clock(), "unexpected time moving backwards detected in os::sleep(interruptible)");
 420       } else {
 421         millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
 422       }
 423 
 424       if (millis <= 0) {
 425         return OS_OK;
 426       }
 427 
 428       prevtime = newtime;
 429 
 430       {
 431         assert(thread->is_Java_thread(), "sanity check");
 432         JavaThread *jt = (JavaThread *) thread;
 433         ThreadBlockInVM tbivm(jt);
 434         OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
 435 
 436         jt->set_suspend_equivalent();
 437         // cleared by handle_special_suspend_equivalent_condition() or
 438         // java_suspend_self() via check_and_wait_while_suspended()
 439 
 440         slp->park(millis);
 441 
 442         // were we externally suspended while we were waiting?
 443         jt->check_and_wait_while_suspended();
 444       }
 445     }
 446   } else {
 447     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
 448     jlong prevtime = javaTimeNanos();
 449 
 450     for (;;) {
 451       // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
 452       // the 1st iteration ...
 453       jlong newtime = javaTimeNanos();
 454 
 455       if (newtime - prevtime < 0) {
 456         // time moving backwards, should only happen if no monotonic clock
 457         // not a guarantee() because JVM should not abort on kernel/glibc bugs
 458         assert(!os::supports_monotonic_clock(), "unexpected time moving backwards detected on os::sleep(!interruptible)");
 459       } else {
 460         millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
 461       }
 462 
 463       if (millis <= 0) break ;
 464 
 465       prevtime = newtime;
 466       slp->park(millis);
 467     }
 468     return OS_OK ;
 469   }
 470 }
 471 
 472 ////////////////////////////////////////////////////////////////////////////////
 473 // interrupt support
 474 
 475 void os::interrupt(Thread* thread) {
 476   assert(Thread::current() == thread || Threads_lock->owned_by_self(),
 477     "possibility of dangling Thread pointer");
 478 
 479   OSThread* osthread = thread->osthread();
 480 
 481   if (!osthread->interrupted()) {
 482     osthread->set_interrupted(true);
 483     // More than one thread can get here with the same value of osthread,
 484     // resulting in multiple notifications.  We do, however, want the store
 485     // to interrupted() to be visible to other threads before we execute unpark().
 486     OrderAccess::fence();
 487     ParkEvent * const slp = thread->_SleepEvent ;
 488     if (slp != NULL) slp->unpark() ;
 489   }
 490 
 491   // For JSR166. Unpark even if interrupt status already was set
 492   if (thread->is_Java_thread())
 493     ((JavaThread*)thread)->parker()->unpark();
 494 
 495   ParkEvent * ev = thread->_ParkEvent ;
 496   if (ev != NULL) ev->unpark() ;
 497 
 498 }
 499 
 500 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
 501   assert(Thread::current() == thread || Threads_lock->owned_by_self(),
 502     "possibility of dangling Thread pointer");
 503 
 504   OSThread* osthread = thread->osthread();
 505 
 506   bool interrupted = osthread->interrupted();
 507 
 508   // NOTE that since there is no "lock" around the interrupt and
 509   // is_interrupted operations, there is the possibility that the
 510   // interrupted flag (in osThread) will be "false" but that the
 511   // low-level events will be in the signaled state. This is
 512   // intentional. The effect of this is that Object.wait() and
 513   // LockSupport.park() will appear to have a spurious wakeup, which
 514   // is allowed and not harmful, and the possibility is so rare that
 515   // it is not worth the added complexity to add yet another lock.
 516   // For the sleep event an explicit reset is performed on entry
 517   // to os::sleep, so there is no early return. It has also been
 518   // recommended not to put the interrupted flag into the "event"
 519   // structure because it hides the issue.
 520   if (interrupted && clear_interrupted) {
 521     osthread->set_interrupted(false);
 522     // consider thread->_SleepEvent->reset() ... optional optimization
 523   }
 524 
 525   return interrupted;
 526 }
 527 
 528 
 529 
 530 static const struct {
 531   int sig; const char* name;
 532 }
 533  g_signal_info[] =
 534   {
 535   {  SIGABRT,     "SIGABRT" },
 536 #ifdef SIGAIO
 537   {  SIGAIO,      "SIGAIO" },
 538 #endif
 539   {  SIGALRM,     "SIGALRM" },
 540 #ifdef SIGALRM1
 541   {  SIGALRM1,    "SIGALRM1" },
 542 #endif
 543   {  SIGBUS,      "SIGBUS" },
 544 #ifdef SIGCANCEL
 545   {  SIGCANCEL,   "SIGCANCEL" },
 546 #endif
 547   {  SIGCHLD,     "SIGCHLD" },
 548 #ifdef SIGCLD
 549   {  SIGCLD,      "SIGCLD" },
 550 #endif
 551   {  SIGCONT,     "SIGCONT" },
 552 #ifdef SIGCPUFAIL
 553   {  SIGCPUFAIL,  "SIGCPUFAIL" },
 554 #endif
 555 #ifdef SIGDANGER
 556   {  SIGDANGER,   "SIGDANGER" },
 557 #endif
 558 #ifdef SIGDIL
 559   {  SIGDIL,      "SIGDIL" },
 560 #endif
 561 #ifdef SIGEMT
 562   {  SIGEMT,      "SIGEMT" },
 563 #endif
 564   {  SIGFPE,      "SIGFPE" },
 565 #ifdef SIGFREEZE
 566   {  SIGFREEZE,   "SIGFREEZE" },
 567 #endif
 568 #ifdef SIGGFAULT
 569   {  SIGGFAULT,   "SIGGFAULT" },
 570 #endif
 571 #ifdef SIGGRANT
 572   {  SIGGRANT,    "SIGGRANT" },
 573 #endif
 574   {  SIGHUP,      "SIGHUP" },
 575   {  SIGILL,      "SIGILL" },
 576   {  SIGINT,      "SIGINT" },
 577 #ifdef SIGIO
 578   {  SIGIO,       "SIGIO" },
 579 #endif
 580 #ifdef SIGIOINT
 581   {  SIGIOINT,    "SIGIOINT" },
 582 #endif
 583 #ifdef SIGIOT
 584 // SIGIOT is there for BSD compatibility, but on most Unices just a
 585 // synonym for SIGABRT. The result should be "SIGABRT", not
 586 // "SIGIOT".
 587 #if (SIGIOT != SIGABRT )
 588   {  SIGIOT,      "SIGIOT" },
 589 #endif
 590 #endif
 591 #ifdef SIGKAP
 592   {  SIGKAP,      "SIGKAP" },
 593 #endif
 594   {  SIGKILL,     "SIGKILL" },
 595 #ifdef SIGLOST
 596   {  SIGLOST,     "SIGLOST" },
 597 #endif
 598 #ifdef SIGLWP
 599   {  SIGLWP,      "SIGLWP" },
 600 #endif
 601 #ifdef SIGLWPTIMER
 602   {  SIGLWPTIMER, "SIGLWPTIMER" },
 603 #endif
 604 #ifdef SIGMIGRATE
 605   {  SIGMIGRATE,  "SIGMIGRATE" },
 606 #endif
 607 #ifdef SIGMSG
 608   {  SIGMSG,      "SIGMSG" },
 609 #endif
 610   {  SIGPIPE,     "SIGPIPE" },
 611 #ifdef SIGPOLL
 612   {  SIGPOLL,     "SIGPOLL" },
 613 #endif
 614 #ifdef SIGPRE
 615   {  SIGPRE,      "SIGPRE" },
 616 #endif
 617   {  SIGPROF,     "SIGPROF" },
 618 #ifdef SIGPTY
 619   {  SIGPTY,      "SIGPTY" },
 620 #endif
 621 #ifdef SIGPWR
 622   {  SIGPWR,      "SIGPWR" },
 623 #endif
 624   {  SIGQUIT,     "SIGQUIT" },
 625 #ifdef SIGRECONFIG
 626   {  SIGRECONFIG, "SIGRECONFIG" },
 627 #endif
 628 #ifdef SIGRECOVERY
 629   {  SIGRECOVERY, "SIGRECOVERY" },
 630 #endif
 631 #ifdef SIGRESERVE
 632   {  SIGRESERVE,  "SIGRESERVE" },
 633 #endif
 634 #ifdef SIGRETRACT
 635   {  SIGRETRACT,  "SIGRETRACT" },
 636 #endif
 637 #ifdef SIGSAK
 638   {  SIGSAK,      "SIGSAK" },
 639 #endif
 640   {  SIGSEGV,     "SIGSEGV" },
 641 #ifdef SIGSOUND
 642   {  SIGSOUND,    "SIGSOUND" },
 643 #endif
 644 #ifdef SIGSTKFLT
 645   {  SIGSTKFLT,    "SIGSTKFLT" },
 646 #endif
 647   {  SIGSTOP,     "SIGSTOP" },
 648   {  SIGSYS,      "SIGSYS" },
 649 #ifdef SIGSYSERROR
 650   {  SIGSYSERROR, "SIGSYSERROR" },
 651 #endif
 652 #ifdef SIGTALRM
 653   {  SIGTALRM,    "SIGTALRM" },
 654 #endif
 655   {  SIGTERM,     "SIGTERM" },
 656 #ifdef SIGTHAW
 657   {  SIGTHAW,     "SIGTHAW" },
 658 #endif
 659   {  SIGTRAP,     "SIGTRAP" },
 660 #ifdef SIGTSTP
 661   {  SIGTSTP,     "SIGTSTP" },
 662 #endif
 663   {  SIGTTIN,     "SIGTTIN" },
 664   {  SIGTTOU,     "SIGTTOU" },
 665 #ifdef SIGURG
 666   {  SIGURG,      "SIGURG" },
 667 #endif
 668   {  SIGUSR1,     "SIGUSR1" },
 669   {  SIGUSR2,     "SIGUSR2" },
 670 #ifdef SIGVIRT
 671   {  SIGVIRT,     "SIGVIRT" },
 672 #endif
 673   {  SIGVTALRM,   "SIGVTALRM" },
 674 #ifdef SIGWAITING
 675   {  SIGWAITING,  "SIGWAITING" },
 676 #endif
 677 #ifdef SIGWINCH
 678   {  SIGWINCH,    "SIGWINCH" },
 679 #endif
 680 #ifdef SIGWINDOW
 681   {  SIGWINDOW,   "SIGWINDOW" },
 682 #endif
 683   {  SIGXCPU,     "SIGXCPU" },
 684   {  SIGXFSZ,     "SIGXFSZ" },
 685 #ifdef SIGXRES
 686   {  SIGXRES,     "SIGXRES" },
 687 #endif
 688   { -1, NULL }
 689 };
 690 
  691 // Writes the signal name into the user-provided buffer and returns that buffer. For unknown signals "UNKNOWN" is returned.
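      // For example, SIGSEGV yields "SIGSEGV", a signal strictly between SIGRTMIN and
      // SIGRTMAX (where supported) yields "SIGRTMIN+<n>", and an out-of-range number
      // yields "INVALID".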
 692 const char* os::Posix::get_signal_name(int sig, char* out, size_t outlen) {
 693 
 694   const char* ret = NULL;
 695 
 696 #ifdef SIGRTMIN
 697   if (sig >= SIGRTMIN && sig <= SIGRTMAX) {
 698     if (sig == SIGRTMIN) {
 699       ret = "SIGRTMIN";
 700     } else if (sig == SIGRTMAX) {
 701       ret = "SIGRTMAX";
 702     } else {
 703       jio_snprintf(out, outlen, "SIGRTMIN+%d", sig - SIGRTMIN);
 704       return out;
 705     }
 706   }
 707 #endif
 708 
 709   if (sig > 0) {
 710     for (int idx = 0; g_signal_info[idx].sig != -1; idx ++) {
 711       if (g_signal_info[idx].sig == sig) {
 712         ret = g_signal_info[idx].name;
 713         break;
 714       }
 715     }
 716   }
 717 
 718   if (!ret) {
 719     if (!is_valid_signal(sig)) {
 720       ret = "INVALID";
 721     } else {
 722       ret = "UNKNOWN";
 723     }
 724   }
 725 
 726   if (out && outlen > 0) {
 727     strncpy(out, ret, outlen);
 728     out[outlen - 1] = '\0';
 729   }
 730   return out;
 731 }
 732 
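      // Reverse lookup: returns the signal number for a given signal name. Both the
      // "SIG"-prefixed form ("SIGSEGV") and the short form ("SEGV") are accepted;
      // returns -1 if the name is not known.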
 733 int os::Posix::get_signal_number(const char* signal_name) {
 734   char tmp[30];
 735   const char* s = signal_name;
 736   if (s[0] != 'S' || s[1] != 'I' || s[2] != 'G') {
 737     jio_snprintf(tmp, sizeof(tmp), "SIG%s", signal_name);
 738     s = tmp;
 739   }
 740   for (int idx = 0; g_signal_info[idx].sig != -1; idx ++) {
 741     if (strcmp(g_signal_info[idx].name, s) == 0) {
 742       return g_signal_info[idx].sig;
 743     }
 744   }
 745   return -1;
 746 }
 747 
 748 int os::get_signal_number(const char* signal_name) {
 749   return os::Posix::get_signal_number(signal_name);
 750 }
 751 
 752 // Returns true if signal number is valid.
 753 bool os::Posix::is_valid_signal(int sig) {
  754   // macOS is not really POSIX compliant here: sigaddset does not return
  755   // an error for invalid signal numbers. However, macOS does not
  756   // support real-time signals and simply seems to have just 33
 757   // signals with no holes in the signal range.
 758 #ifdef __APPLE__
 759   return sig >= 1 && sig < NSIG;
 760 #else
 761   // Use sigaddset to check for signal validity.
 762   sigset_t set;
 763   sigemptyset(&set);
 764   if (sigaddset(&set, sig) == -1 && errno == EINVAL) {
 765     return false;
 766   }
 767   return true;
 768 #endif
 769 }
 770 
 771 // Returns:
 772 // NULL for an invalid signal number
 773 // "SIG<num>" for a valid but unknown signal number
 774 // signal name otherwise.
 775 const char* os::exception_name(int sig, char* buf, size_t size) {
 776   if (!os::Posix::is_valid_signal(sig)) {
 777     return NULL;
 778   }
 779   const char* const name = os::Posix::get_signal_name(sig, buf, size);
 780   if (strcmp(name, "UNKNOWN") == 0) {
 781     jio_snprintf(buf, size, "SIG%d", sig);
 782   }
 783   return buf;
 784 }
 785 
 786 #define NUM_IMPORTANT_SIGS 32
 787 // Returns one-line short description of a signal set in a user provided buffer.
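      // For example, an empty set is rendered as 32 '0' characters and a full set as
      // 32 '1' characters; a '?' marks any position that sigismember() rejects.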
 788 const char* os::Posix::describe_signal_set_short(const sigset_t* set, char* buffer, size_t buf_size) {
 789   assert(buf_size == (NUM_IMPORTANT_SIGS + 1), "wrong buffer size");
  790   // Note: for brevity, just print out the first 32. That should
  791   // cover most of the useful ones, apart from real-time signals.
 792   for (int sig = 1; sig <= NUM_IMPORTANT_SIGS; sig++) {
 793     const int rc = sigismember(set, sig);
 794     if (rc == -1 && errno == EINVAL) {
 795       buffer[sig-1] = '?';
 796     } else {
 797       buffer[sig-1] = rc == 0 ? '0' : '1';
 798     }
 799   }
 800   buffer[NUM_IMPORTANT_SIGS] = 0;
 801   return buffer;
 802 }
 803 
 804 // Prints one-line description of a signal set.
 805 void os::Posix::print_signal_set_short(outputStream* st, const sigset_t* set) {
 806   char buf[NUM_IMPORTANT_SIGS + 1];
 807   os::Posix::describe_signal_set_short(set, buf, sizeof(buf));
 808   st->print("%s", buf);
 809 }
 810 
 811 // Writes one-line description of a combination of sigaction.sa_flags into a user
 812 // provided buffer. Returns that buffer.
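      // For example, flags == (SA_RESTART | SA_SIGINFO) would be described as
      // "SA_RESTART|SA_SIGINFO", and flags == 0 as "none".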
 813 const char* os::Posix::describe_sa_flags(int flags, char* buffer, size_t size) {
 814   char* p = buffer;
 815   size_t remaining = size;
 816   bool first = true;
 817   int idx = 0;
 818 
 819   assert(buffer, "invalid argument");
 820 
 821   if (size == 0) {
 822     return buffer;
 823   }
 824 
 825   strncpy(buffer, "none", size);
 826 
 827   const struct {
 828     // NB: i is an unsigned int here because SA_RESETHAND is on some
  829     // systems 0x80000000, which is implicitly unsigned.  Assigning
 830     // it to an int field would be an overflow in unsigned-to-signed
 831     // conversion.
 832     unsigned int i;
 833     const char* s;
 834   } flaginfo [] = {
 835     { SA_NOCLDSTOP, "SA_NOCLDSTOP" },
 836     { SA_ONSTACK,   "SA_ONSTACK"   },
 837     { SA_RESETHAND, "SA_RESETHAND" },
 838     { SA_RESTART,   "SA_RESTART"   },
 839     { SA_SIGINFO,   "SA_SIGINFO"   },
 840     { SA_NOCLDWAIT, "SA_NOCLDWAIT" },
 841     { SA_NODEFER,   "SA_NODEFER"   },
 842 #ifdef AIX
 843     { SA_ONSTACK,   "SA_ONSTACK"   },
 844     { SA_OLDSTYLE,  "SA_OLDSTYLE"  },
 845 #endif
 846     { 0, NULL }
 847   };
 848 
 849   for (idx = 0; flaginfo[idx].s && remaining > 1; idx++) {
 850     if (flags & flaginfo[idx].i) {
 851       if (first) {
 852         jio_snprintf(p, remaining, "%s", flaginfo[idx].s);
 853         first = false;
 854       } else {
 855         jio_snprintf(p, remaining, "|%s", flaginfo[idx].s);
 856       }
 857       const size_t len = strlen(p);
 858       p += len;
 859       remaining -= len;
 860     }
 861   }
 862 
 863   buffer[size - 1] = '\0';
 864 
 865   return buffer;
 866 }
 867 
 868 // Prints one-line description of a combination of sigaction.sa_flags.
 869 void os::Posix::print_sa_flags(outputStream* st, int flags) {
 870   char buffer[0x100];
 871   os::Posix::describe_sa_flags(flags, buffer, sizeof(buffer));
 872   st->print("%s", buffer);
 873 }
 874 
 875 // Helper function for os::Posix::print_siginfo_...():
 876 // return a textual description for signal code.
 877 struct enum_sigcode_desc_t {
 878   const char* s_name;
 879   const char* s_desc;
 880 };
 881 
 882 static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t* out) {
 883 
 884   const struct {
 885     int sig; int code; const char* s_code; const char* s_desc;
 886   } t1 [] = {
 887     { SIGILL,  ILL_ILLOPC,   "ILL_ILLOPC",   "Illegal opcode." },
 888     { SIGILL,  ILL_ILLOPN,   "ILL_ILLOPN",   "Illegal operand." },
 889     { SIGILL,  ILL_ILLADR,   "ILL_ILLADR",   "Illegal addressing mode." },
 890     { SIGILL,  ILL_ILLTRP,   "ILL_ILLTRP",   "Illegal trap." },
 891     { SIGILL,  ILL_PRVOPC,   "ILL_PRVOPC",   "Privileged opcode." },
 892     { SIGILL,  ILL_PRVREG,   "ILL_PRVREG",   "Privileged register." },
 893     { SIGILL,  ILL_COPROC,   "ILL_COPROC",   "Coprocessor error." },
 894     { SIGILL,  ILL_BADSTK,   "ILL_BADSTK",   "Internal stack error." },
 895 #if defined(IA64) && defined(LINUX)
 896     { SIGILL,  ILL_BADIADDR, "ILL_BADIADDR", "Unimplemented instruction address" },
 897     { SIGILL,  ILL_BREAK,    "ILL_BREAK",    "Application Break instruction" },
 898 #endif
 899     { SIGFPE,  FPE_INTDIV,   "FPE_INTDIV",   "Integer divide by zero." },
 900     { SIGFPE,  FPE_INTOVF,   "FPE_INTOVF",   "Integer overflow." },
 901     { SIGFPE,  FPE_FLTDIV,   "FPE_FLTDIV",   "Floating-point divide by zero." },
 902     { SIGFPE,  FPE_FLTOVF,   "FPE_FLTOVF",   "Floating-point overflow." },
 903     { SIGFPE,  FPE_FLTUND,   "FPE_FLTUND",   "Floating-point underflow." },
 904     { SIGFPE,  FPE_FLTRES,   "FPE_FLTRES",   "Floating-point inexact result." },
 905     { SIGFPE,  FPE_FLTINV,   "FPE_FLTINV",   "Invalid floating-point operation." },
 906     { SIGFPE,  FPE_FLTSUB,   "FPE_FLTSUB",   "Subscript out of range." },
 907     { SIGSEGV, SEGV_MAPERR,  "SEGV_MAPERR",  "Address not mapped to object." },
 908     { SIGSEGV, SEGV_ACCERR,  "SEGV_ACCERR",  "Invalid permissions for mapped object." },
 909 #ifdef AIX
  910     // No explanation found for what SEGV_KEYERR means.
 911     { SIGSEGV, SEGV_KEYERR,  "SEGV_KEYERR",  "key error" },
 912 #endif
 913 #if defined(IA64) && !defined(AIX)
 914     { SIGSEGV, SEGV_PSTKOVF, "SEGV_PSTKOVF", "Paragraph stack overflow" },
 915 #endif
 916 #if defined(__sparc) && defined(SOLARIS)
 917 // define Solaris Sparc M7 ADI SEGV signals
 918 #if !defined(SEGV_ACCADI)
 919 #define SEGV_ACCADI 3
 920 #endif
 921     { SIGSEGV, SEGV_ACCADI,  "SEGV_ACCADI",  "ADI not enabled for mapped object." },
 922 #if !defined(SEGV_ACCDERR)
 923 #define SEGV_ACCDERR 4
 924 #endif
 925     { SIGSEGV, SEGV_ACCDERR, "SEGV_ACCDERR", "ADI disrupting exception." },
 926 #if !defined(SEGV_ACCPERR)
 927 #define SEGV_ACCPERR 5
 928 #endif
 929     { SIGSEGV, SEGV_ACCPERR, "SEGV_ACCPERR", "ADI precise exception." },
 930 #endif // defined(__sparc) && defined(SOLARIS)
 931     { SIGBUS,  BUS_ADRALN,   "BUS_ADRALN",   "Invalid address alignment." },
 932     { SIGBUS,  BUS_ADRERR,   "BUS_ADRERR",   "Nonexistent physical address." },
 933     { SIGBUS,  BUS_OBJERR,   "BUS_OBJERR",   "Object-specific hardware error." },
 934     { SIGTRAP, TRAP_BRKPT,   "TRAP_BRKPT",   "Process breakpoint." },
 935     { SIGTRAP, TRAP_TRACE,   "TRAP_TRACE",   "Process trace trap." },
 936     { SIGCHLD, CLD_EXITED,   "CLD_EXITED",   "Child has exited." },
 937     { SIGCHLD, CLD_KILLED,   "CLD_KILLED",   "Child has terminated abnormally and did not create a core file." },
 938     { SIGCHLD, CLD_DUMPED,   "CLD_DUMPED",   "Child has terminated abnormally and created a core file." },
 939     { SIGCHLD, CLD_TRAPPED,  "CLD_TRAPPED",  "Traced child has trapped." },
 940     { SIGCHLD, CLD_STOPPED,  "CLD_STOPPED",  "Child has stopped." },
 941     { SIGCHLD, CLD_CONTINUED,"CLD_CONTINUED","Stopped child has continued." },
 942 #ifdef SIGPOLL
 943     { SIGPOLL, POLL_OUT,     "POLL_OUT",     "Output buffers available." },
 944     { SIGPOLL, POLL_MSG,     "POLL_MSG",     "Input message available." },
 945     { SIGPOLL, POLL_ERR,     "POLL_ERR",     "I/O error." },
 946     { SIGPOLL, POLL_PRI,     "POLL_PRI",     "High priority input available." },
  947     { SIGPOLL, POLL_HUP,     "POLL_HUP",     "Device disconnected." },
 948 #endif
 949     { -1, -1, NULL, NULL }
 950   };
 951 
 952   // Codes valid in any signal context.
 953   const struct {
 954     int code; const char* s_code; const char* s_desc;
 955   } t2 [] = {
 956     { SI_USER,      "SI_USER",     "Signal sent by kill()." },
 957     { SI_QUEUE,     "SI_QUEUE",    "Signal sent by the sigqueue()." },
 958     { SI_TIMER,     "SI_TIMER",    "Signal generated by expiration of a timer set by timer_settime()." },
 959     { SI_ASYNCIO,   "SI_ASYNCIO",  "Signal generated by completion of an asynchronous I/O request." },
 960     { SI_MESGQ,     "SI_MESGQ",    "Signal generated by arrival of a message on an empty message queue." },
 961     // Linux specific
 962 #ifdef SI_TKILL
 963     { SI_TKILL,     "SI_TKILL",    "Signal sent by tkill (pthread_kill)" },
 964 #endif
 965 #ifdef SI_DETHREAD
 966     { SI_DETHREAD,  "SI_DETHREAD", "Signal sent by execve() killing subsidiary threads" },
 967 #endif
 968 #ifdef SI_KERNEL
 969     { SI_KERNEL,    "SI_KERNEL",   "Signal sent by kernel." },
 970 #endif
 971 #ifdef SI_SIGIO
 972     { SI_SIGIO,     "SI_SIGIO",    "Signal sent by queued SIGIO" },
 973 #endif
 974 
 975 #ifdef AIX
 976     { SI_UNDEFINED, "SI_UNDEFINED","siginfo contains partial information" },
 977     { SI_EMPTY,     "SI_EMPTY",    "siginfo contains no useful information" },
 978 #endif
 979 
 980 #ifdef __sun
 981     { SI_NOINFO,    "SI_NOINFO",   "No signal information" },
 982     { SI_RCTL,      "SI_RCTL",     "kernel generated signal via rctl action" },
 983     { SI_LWP,       "SI_LWP",      "Signal sent via lwp_kill" },
 984 #endif
 985 
 986     { -1, NULL, NULL }
 987   };
 988 
 989   const char* s_code = NULL;
 990   const char* s_desc = NULL;
 991 
 992   for (int i = 0; t1[i].sig != -1; i ++) {
 993     if (t1[i].sig == si->si_signo && t1[i].code == si->si_code) {
 994       s_code = t1[i].s_code;
 995       s_desc = t1[i].s_desc;
 996       break;
 997     }
 998   }
 999 
1000   if (s_code == NULL) {
1001     for (int i = 0; t2[i].s_code != NULL; i ++) {
1002       if (t2[i].code == si->si_code) {
1003         s_code = t2[i].s_code;
1004         s_desc = t2[i].s_desc;
1005       }
1006     }
1007   }
1008 
1009   if (s_code == NULL) {
1010     out->s_name = "unknown";
1011     out->s_desc = "unknown";
1012     return false;
1013   }
1014 
1015   out->s_name = s_code;
1016   out->s_desc = s_desc;
1017 
1018   return true;
1019 }
1020 
1021 void os::print_siginfo(outputStream* os, const void* si0) {
1022 
1023   const siginfo_t* const si = (const siginfo_t*) si0;
1024 
1025   char buf[20];
1026   os->print("siginfo:");
1027 
1028   if (!si) {
1029     os->print(" <null>");
1030     return;
1031   }
1032 
1033   const int sig = si->si_signo;
1034 
1035   os->print(" si_signo: %d (%s)", sig, os::Posix::get_signal_name(sig, buf, sizeof(buf)));
1036 
1037   enum_sigcode_desc_t ed;
1038   get_signal_code_description(si, &ed);
1039   os->print(", si_code: %d (%s)", si->si_code, ed.s_name);
1040 
1041   if (si->si_errno) {
1042     os->print(", si_errno: %d", si->si_errno);
1043   }
1044 
1045   // Output additional information depending on the signal code.
1046 
1047   // Note: Many implementations lump si_addr, si_pid, si_uid etc. together as unions,
1048   // so it depends on the context which member to use. For synchronous error signals,
1049   // we print si_addr, unless the signal was sent by another process or thread, in
1050   // which case we print out pid or tid of the sender.
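        // An illustrative output line for a crash (using Linux signal numbers) would be roughly:
        //   siginfo: si_signo: 11 (SIGSEGV), si_code: 1 (SEGV_MAPERR), si_addr: 0x0000000000000008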
1051   if (si->si_code == SI_USER || si->si_code == SI_QUEUE) {
1052     const pid_t pid = si->si_pid;
1053     os->print(", si_pid: %ld", (long) pid);
1054     if (IS_VALID_PID(pid)) {
1055       const pid_t me = getpid();
1056       if (me == pid) {
1057         os->print(" (current process)");
1058       }
1059     } else {
1060       os->print(" (invalid)");
1061     }
1062     os->print(", si_uid: %ld", (long) si->si_uid);
1063     if (sig == SIGCHLD) {
1064       os->print(", si_status: %d", si->si_status);
1065     }
1066   } else if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
1067              sig == SIGTRAP || sig == SIGFPE) {
1068     os->print(", si_addr: " PTR_FORMAT, p2i(si->si_addr));
1069 #ifdef SIGPOLL
1070   } else if (sig == SIGPOLL) {
1071     os->print(", si_band: %ld", si->si_band);
1072 #endif
1073   }
1074 
1075 }
1076 
1077 int os::Posix::unblock_thread_signal_mask(const sigset_t *set) {
1078   return pthread_sigmask(SIG_UNBLOCK, set, NULL);
1079 }
1080 
1081 address os::Posix::ucontext_get_pc(const ucontext_t* ctx) {
1082 #if defined(AIX)
1083    return Aix::ucontext_get_pc(ctx);
1084 #elif defined(BSD)
1085    return Bsd::ucontext_get_pc(ctx);
1086 #elif defined(LINUX)
1087    return Linux::ucontext_get_pc(ctx);
1088 #elif defined(SOLARIS)
1089    return Solaris::ucontext_get_pc(ctx);
1090 #else
1091    VMError::report_and_die("unimplemented ucontext_get_pc");
1092 #endif
1093 }
1094 
1095 void os::Posix::ucontext_set_pc(ucontext_t* ctx, address pc) {
1096 #if defined(AIX)
1097    Aix::ucontext_set_pc(ctx, pc);
1098 #elif defined(BSD)
1099    Bsd::ucontext_set_pc(ctx, pc);
1100 #elif defined(LINUX)
1101    Linux::ucontext_set_pc(ctx, pc);
1102 #elif defined(SOLARIS)
1103    Solaris::ucontext_set_pc(ctx, pc);
1104 #else
 1105    VMError::report_and_die("unimplemented ucontext_set_pc");
1106 #endif
1107 }
1108 
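      // Returns a one-line description of the given pthread attributes in the provided
      // buffer, e.g. (illustrative) "stacksize: 1024k, guardsize: 4k, detached".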
1109 char* os::Posix::describe_pthread_attr(char* buf, size_t buflen, const pthread_attr_t* attr) {
1110   size_t stack_size = 0;
1111   size_t guard_size = 0;
1112   int detachstate = 0;
1113   pthread_attr_getstacksize(attr, &stack_size);
1114   pthread_attr_getguardsize(attr, &guard_size);
 1115   // Work around a Linux NPTL implementation error; see also os::create_thread() in os_linux.cpp.
1116   LINUX_ONLY(stack_size -= guard_size);
1117   pthread_attr_getdetachstate(attr, &detachstate);
1118   jio_snprintf(buf, buflen, "stacksize: " SIZE_FORMAT "k, guardsize: " SIZE_FORMAT "k, %s",
1119     stack_size / 1024, guard_size / 1024,
1120     (detachstate == PTHREAD_CREATE_DETACHED ? "detached" : "joinable"));
1121   return buf;
1122 }
1123 
1124 char* os::Posix::realpath(const char* filename, char* outbuf, size_t outbuflen) {
1125 
1126   if (filename == NULL || outbuf == NULL || outbuflen < 1) {
1127     assert(false, "os::Posix::realpath: invalid arguments.");
1128     errno = EINVAL;
1129     return NULL;
1130   }
1131 
1132   char* result = NULL;
1133 
 1134   // This assumes the platform realpath() is implemented according to POSIX.1-2008.
 1135   // POSIX.1-2008 allows NULL to be specified as the output buffer, in which case
 1136   // the output buffer is dynamically allocated and must be ::free()'d by the caller.
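        // Example (illustrative): realpath("/tmp/../tmp/./foo", buf, sizeof(buf)) would
        // place "/tmp/foo" in buf, provided that path exists and fits into the buffer.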
1137   char* p = ::realpath(filename, NULL);
1138   if (p != NULL) {
1139     if (strlen(p) < outbuflen) {
1140       strcpy(outbuf, p);
1141       result = outbuf;
1142     } else {
1143       errno = ENAMETOOLONG;
1144     }
1145     ::free(p); // *not* os::free
1146   } else {
 1147     // Fallback for platforms struggling with modern POSIX standards (AIX 5.3, 6.1). If realpath
 1148     // returns EINVAL, this may indicate that realpath is not POSIX.1-2008 compatible and
 1149     // that it complains about the NULL we handed down as the user buffer.
1150     // In this case, use the user provided buffer but at least check whether realpath caused
1151     // a memory overwrite.
1152     if (errno == EINVAL) {
1153       outbuf[outbuflen - 1] = '\0';
1154       p = ::realpath(filename, outbuf);
1155       if (p != NULL) {
1156         guarantee(outbuf[outbuflen - 1] == '\0', "realpath buffer overwrite detected.");
1157         result = p;
1158       }
1159     }
1160   }
1161   return result;
1162 
1163 }
1164 
1165 
 1166 // Check minimum allowable stack sizes for thread creation and for initializing
 1167 // the Java system classes, including StackOverflowError - depends on page
 1168 // size.
1169 // The space needed for frames during startup is platform dependent. It
1170 // depends on word size, platform calling conventions, C frame layout and
1171 // interpreter/C1/C2 design decisions. Therefore this is given in a
1172 // platform (os/cpu) dependent constant.
1173 // To this, space for guard mechanisms is added, which depends on the
1174 // page size which again depends on the concrete system the VM is running
1175 // on. Space for libc guard pages is not included in this size.
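      // For example (purely illustrative numbers): with a platform minimum of 40K, a
      // 16K guard zone and a 20K shadow zone on 4K pages, the Java thread minimum
      // becomes align_up(40K + 16K + 20K, 4K) == 76K, or os_min_stack_allowed if that
      // is larger.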
1176 jint os::Posix::set_minimum_stack_sizes() {
1177   size_t os_min_stack_allowed = SOLARIS_ONLY(thr_min_stack()) NOT_SOLARIS(PTHREAD_STACK_MIN);
1178 
1179   _java_thread_min_stack_allowed = _java_thread_min_stack_allowed +
1180                                    JavaThread::stack_guard_zone_size() +
1181                                    JavaThread::stack_shadow_zone_size();
1182 
1183   _java_thread_min_stack_allowed = align_up(_java_thread_min_stack_allowed, vm_page_size());
1184   _java_thread_min_stack_allowed = MAX2(_java_thread_min_stack_allowed, os_min_stack_allowed);
1185 
1186   size_t stack_size_in_bytes = ThreadStackSize * K;
1187   if (stack_size_in_bytes != 0 &&
1188       stack_size_in_bytes < _java_thread_min_stack_allowed) {
1189     // The '-Xss' and '-XX:ThreadStackSize=N' options both set
1190     // ThreadStackSize so we go with "Java thread stack size" instead
1191     // of "ThreadStackSize" to be more friendly.
1192     tty->print_cr("\nThe Java thread stack size specified is too small. "
1193                   "Specify at least " SIZE_FORMAT "k",
1194                   _java_thread_min_stack_allowed / K);
1195     return JNI_ERR;
1196   }
1197 
1198   // Make the stack size a multiple of the page size so that
1199   // the yellow/red zones can be guarded.
1200   JavaThread::set_stack_size_at_create(align_up(stack_size_in_bytes, vm_page_size()));
1201 
1202   // Reminder: a compiler thread is a Java thread.
1203   _compiler_thread_min_stack_allowed = _compiler_thread_min_stack_allowed +
1204                                        JavaThread::stack_guard_zone_size() +
1205                                        JavaThread::stack_shadow_zone_size();
1206 
1207   _compiler_thread_min_stack_allowed = align_up(_compiler_thread_min_stack_allowed, vm_page_size());
1208   _compiler_thread_min_stack_allowed = MAX2(_compiler_thread_min_stack_allowed, os_min_stack_allowed);
1209 
1210   stack_size_in_bytes = CompilerThreadStackSize * K;
1211   if (stack_size_in_bytes != 0 &&
1212       stack_size_in_bytes < _compiler_thread_min_stack_allowed) {
1213     tty->print_cr("\nThe CompilerThreadStackSize specified is too small. "
1214                   "Specify at least " SIZE_FORMAT "k",
1215                   _compiler_thread_min_stack_allowed / K);
1216     return JNI_ERR;
1217   }
1218 
1219   _vm_internal_thread_min_stack_allowed = align_up(_vm_internal_thread_min_stack_allowed, vm_page_size());
1220   _vm_internal_thread_min_stack_allowed = MAX2(_vm_internal_thread_min_stack_allowed, os_min_stack_allowed);
1221 
1222   stack_size_in_bytes = VMThreadStackSize * K;
1223   if (stack_size_in_bytes != 0 &&
1224       stack_size_in_bytes < _vm_internal_thread_min_stack_allowed) {
1225     tty->print_cr("\nThe VMThreadStackSize specified is too small. "
1226                   "Specify at least " SIZE_FORMAT "k",
1227                   _vm_internal_thread_min_stack_allowed / K);
1228     return JNI_ERR;
1229   }
1230   return JNI_OK;
1231 }
1232 
1233 // Called when creating the thread.  The minimum stack sizes have already been calculated
1234 size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) {
1235   size_t stack_size;
1236   if (req_stack_size == 0) {
1237     stack_size = default_stack_size(thr_type);
1238   } else {
1239     stack_size = req_stack_size;
1240   }
1241 
1242   switch (thr_type) {
1243   case os::java_thread:
1244     // Java threads use ThreadStackSize which default value can be
1245     // changed with the flag -Xss
1246     if (req_stack_size == 0 && JavaThread::stack_size_at_create() > 0) {
1247       // no requested size and we have a more specific default value
1248       stack_size = JavaThread::stack_size_at_create();
1249     }
1250     stack_size = MAX2(stack_size,
1251                       _java_thread_min_stack_allowed);
1252     break;
1253   case os::compiler_thread:
1254     if (req_stack_size == 0 && CompilerThreadStackSize > 0) {
1255       // no requested size and we have a more specific default value
1256       stack_size = (size_t)(CompilerThreadStackSize * K);
1257     }
1258     stack_size = MAX2(stack_size,
1259                       _compiler_thread_min_stack_allowed);
1260     break;
1261   case os::vm_thread:
1262   case os::pgc_thread:
1263   case os::cgc_thread:
1264   case os::watcher_thread:
1265   default:  // presume the unknown thr_type is a VM internal
1266     if (req_stack_size == 0 && VMThreadStackSize > 0) {
1267       // no requested size and we have a more specific default value
1268       stack_size = (size_t)(VMThreadStackSize * K);
1269     }
1270 
1271     stack_size = MAX2(stack_size,
1272                       _vm_internal_thread_min_stack_allowed);
1273     break;
1274   }
1275 
1276   // pthread_attr_setstacksize() may require that the size be rounded up to the OS page size.
1277   // Be careful not to round up to 0. Align down in that case.
1278   if (stack_size <= SIZE_MAX - vm_page_size()) {
1279     stack_size = align_up(stack_size, vm_page_size());
1280   } else {
1281     stack_size = align_down(stack_size, vm_page_size());
1282   }
1283 
1284   return stack_size;
1285 }
1286 
1287 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
1288 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
1289 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
1290 
1291 os::ThreadCrashProtection::ThreadCrashProtection() {
1292 }
1293 
1294 /*
1295  * See the caveats for this class in os_posix.hpp
1296  * Protects the callback call so that SIGSEGV / SIGBUS jumps back into this
1297  * method and returns false. If none of the signals are raised, returns true.
1298  * The callback is supposed to provide the method that should be protected.
1299  */
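      // A typical (illustrative) use:
      //   os::ThreadCrashProtection crash_protection;
      //   if (!crash_protection.call(cb)) {   // cb is an os::CrashProtectionCallback
      //     // cb.call() crashed with SIGSEGV/SIGBUS and execution resumed here
      //   }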
1300 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
1301   sigset_t saved_sig_mask;
1302 
1303   Thread::muxAcquire(&_crash_mux, "CrashProtection");
1304 
1305   _protected_thread = Thread::current_or_null();
 1306   assert(_protected_thread != NULL, "Cannot crash protect a NULL Thread");
1307 
1308   // we cannot rely on sigsetjmp/siglongjmp to save/restore the signal mask
1309   // since on at least some systems (OS X) siglongjmp will restore the mask
1310   // for the process, not the thread
1311   pthread_sigmask(0, NULL, &saved_sig_mask);
1312   if (sigsetjmp(_jmpbuf, 0) == 0) {
1313     // make sure we can see in the signal handler that we have crash protection
1314     // installed
1315     _crash_protection = this;
1316     cb.call();
1317     // and clear the crash protection
1318     _crash_protection = NULL;
1319     _protected_thread = NULL;
1320     Thread::muxRelease(&_crash_mux);
1321     return true;
1322   }
1323   // this happens when we siglongjmp() back
1324   pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL);
1325   _crash_protection = NULL;
1326   _protected_thread = NULL;
1327   Thread::muxRelease(&_crash_mux);
1328   return false;
1329 }
1330 
1331 void os::ThreadCrashProtection::restore() {
1332   assert(_crash_protection != NULL, "must have crash protection");
1333   siglongjmp(_jmpbuf, 1);
1334 }
1335 
1336 void os::ThreadCrashProtection::check_crash_protection(int sig,
1337     Thread* thread) {
1338 
1339   if (thread != NULL &&
1340       thread == _protected_thread &&
1341       _crash_protection != NULL) {
1342 
1343     if (sig == SIGSEGV || sig == SIGBUS) {
1344       _crash_protection->restore();
1345     }
1346   }
1347 }
1348 
1349 #define check_with_errno(check_type, cond, msg)                             \
1350   do {                                                                      \
1351     int err = errno;                                                        \
1352     check_type(cond, "%s; error='%s' (errno=%s)", msg, os::strerror(err),   \
1353                os::errno_name(err));                                        \
1354 } while (false)
1355 
1356 #define assert_with_errno(cond, msg)    check_with_errno(assert, cond, msg)
1357 #define guarantee_with_errno(cond, msg) check_with_errno(guarantee, cond, msg)
1358 
 1359 // POSIX unnamed semaphores are not supported on OS X.
1360 #ifndef __APPLE__
1361 
1362 PosixSemaphore::PosixSemaphore(uint value) {
1363   int ret = sem_init(&_semaphore, 0, value);
1364 
1365   guarantee_with_errno(ret == 0, "Failed to initialize semaphore");
1366 }
1367 
1368 PosixSemaphore::~PosixSemaphore() {
1369   sem_destroy(&_semaphore);
1370 }
1371 
1372 void PosixSemaphore::signal(uint count) {
1373   for (uint i = 0; i < count; i++) {
1374     int ret = sem_post(&_semaphore);
1375 
1376     assert_with_errno(ret == 0, "sem_post failed");
1377   }
1378 }
1379 
1380 void PosixSemaphore::wait() {
1381   int ret;
1382 
1383   do {
1384     ret = sem_wait(&_semaphore);
1385   } while (ret != 0 && errno == EINTR);
1386 
1387   assert_with_errno(ret == 0, "sem_wait failed");
1388 }
1389 
1390 bool PosixSemaphore::trywait() {
1391   int ret;
1392 
1393   do {
1394     ret = sem_trywait(&_semaphore);
1395   } while (ret != 0 && errno == EINTR);
1396 
1397   assert_with_errno(ret == 0 || errno == EAGAIN, "trywait failed");
1398 
1399   return ret == 0;
1400 }
1401 
1402 bool PosixSemaphore::timedwait(struct timespec ts) {
1403   while (true) {
1404     int result = sem_timedwait(&_semaphore, &ts);
1405     if (result == 0) {
1406       return true;
1407     } else if (errno == EINTR) {
1408       continue;
1409     } else if (errno == ETIMEDOUT) {
1410       return false;
1411     } else {
1412       assert_with_errno(false, "timedwait failed");
1413       return false;
1414     }
1415   }
1416 }
1417 
1418 #endif // __APPLE__
1419 
1420 
1421 // Shared pthread_mutex/cond based PlatformEvent implementation.
1422 // Not currently usable by Solaris.
1423 
1424 #ifndef SOLARIS
1425 
1426 // Shared condattr object for use with relative timed-waits. Will be associated
1427 // with CLOCK_MONOTONIC if available to avoid issues with time-of-day changes,
1428 // but otherwise whatever default is used by the platform - generally the
1429 // time-of-day clock.
1430 static pthread_condattr_t _condAttr[1];
1431 
1432 // Shared mutexattr to explicitly set the type to PTHREAD_MUTEX_NORMAL as not
1433 // all systems (e.g. FreeBSD) map the default to "normal".
1434 static pthread_mutexattr_t _mutexAttr[1];
1435 
1436 // common basic initialization that is always supported
1437 static void pthread_init_common(void) {
1438   int status;
1439   if ((status = pthread_condattr_init(_condAttr)) != 0) {
1440     fatal("pthread_condattr_init: %s", os::strerror(status));
1441   }
1442   if ((status = pthread_mutexattr_init(_mutexAttr)) != 0) {
1443     fatal("pthread_mutexattr_init: %s", os::strerror(status));
1444   }
1445   if ((status = pthread_mutexattr_settype(_mutexAttr, PTHREAD_MUTEX_NORMAL)) != 0) {
1446     fatal("pthread_mutexattr_settype: %s", os::strerror(status));
1447   }
1448 }
1449 
 1450 // Not all POSIX types and APIs are available on all notionally "POSIX"
1451 // platforms. If we have build-time support then we will check for actual
1452 // runtime support via dlopen/dlsym lookup. This allows for running on an
1453 // older OS version compared to the build platform. But if there is no
1454 // build time support then there cannot be any runtime support as we do not
1455 // know what the runtime types would be (for example clockid_t might be an
1456 // int or int64_t).
1457 //
1458 #ifdef SUPPORTS_CLOCK_MONOTONIC
1459 
1460 // This means we have clockid_t, clock_gettime et al and CLOCK_MONOTONIC
1461 
1462 static int (*_clock_gettime)(clockid_t, struct timespec *);
1463 static int (*_pthread_condattr_setclock)(pthread_condattr_t *, clockid_t);
1464 
1465 static bool _use_clock_monotonic_condattr;
1466 
 1467 // Determine which POSIX APIs are present and do the appropriate
1468 // configuration.
1469 void os::Posix::init(void) {
1470 
1471   // NOTE: no logging available when this is called. Put logging
1472   // statements in init_2().
1473 
1474   // Copied from os::Linux::clock_init(). The duplication is temporary.
1475 
1476   // 1. Check for CLOCK_MONOTONIC support.
1477 
1478   void* handle = NULL;
1479 
 1480   // For Linux we need librt; on other OSes we can find
 1481   // this function in the regular libc.
1482 #ifdef NEEDS_LIBRT
 1483   // We do dlopen's in this particular order due to a bug in the Linux
 1484   // dynamic loader (see 6348968) that leads to a crash on exit.
1485   handle = dlopen("librt.so.1", RTLD_LAZY);
1486   if (handle == NULL) {
1487     handle = dlopen("librt.so", RTLD_LAZY);
1488   }
1489 #endif
1490 
1491   if (handle == NULL) {
1492     handle = RTLD_DEFAULT;
1493   }
1494 
1495   _clock_gettime = NULL;
1496 
1497   int (*clock_getres_func)(clockid_t, struct timespec*) =
1498     (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
1499   int (*clock_gettime_func)(clockid_t, struct timespec*) =
1500     (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime");
1501   if (clock_getres_func != NULL && clock_gettime_func != NULL) {
1502     // We assume that if both clock_gettime and clock_getres support
1503     // CLOCK_MONOTONIC then the OS provides true high-res monotonic clock.
1504     struct timespec res;
1505     struct timespec tp;
1506     if (clock_getres_func(CLOCK_MONOTONIC, &res) == 0 &&
1507         clock_gettime_func(CLOCK_MONOTONIC, &tp) == 0) {
1508       // Yes, monotonic clock is supported.
1509       _clock_gettime = clock_gettime_func;
1510     } else {
1511 #ifdef NEEDS_LIBRT
1512       // Close librt if there is no monotonic clock.
1513       if (handle != RTLD_DEFAULT) {
1514         dlclose(handle);
1515       }
1516 #endif
1517     }
1518   }
1519 
1520   // 2. Check for pthread_condattr_setclock support.
1521 
1522   _pthread_condattr_setclock = NULL;
1523 
1524   // libpthread is already loaded.
1525   int (*condattr_setclock_func)(pthread_condattr_t*, clockid_t) =
1526     (int (*)(pthread_condattr_t*, clockid_t))dlsym(RTLD_DEFAULT,
1527                                                    "pthread_condattr_setclock");
1528   if (condattr_setclock_func != NULL) {
1529     _pthread_condattr_setclock = condattr_setclock_func;
1530   }
1531 
1532   // Now do general initialization.
1533 
1534   pthread_init_common();
1535 
1536   int status;
1537   if (_pthread_condattr_setclock != NULL && _clock_gettime != NULL) {
1538     if ((status = _pthread_condattr_setclock(_condAttr, CLOCK_MONOTONIC)) != 0) {
1539       if (status == EINVAL) {
1540         _use_clock_monotonic_condattr = false;
1541         warning("Unable to use monotonic clock with relative timed-waits" \
1542                 " - changes to the time-of-day clock may have adverse effects");
1543       } else {
1544         fatal("pthread_condattr_setclock: %s", os::strerror(status));
1545       }
1546     } else {
1547       _use_clock_monotonic_condattr = true;
1548     }
1549   } else {
1550     _use_clock_monotonic_condattr = false;
1551   }
1552 }
1553 
1554 void os::Posix::init_2(void) {
1555   log_info(os)("Use of CLOCK_MONOTONIC is%s supported",
1556                (_clock_gettime != NULL ? "" : " not"));
1557   log_info(os)("Use of pthread_condattr_setclock is%s supported",
1558                (_pthread_condattr_setclock != NULL ? "" : " not"));
1559   log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with %s",
1560                _use_clock_monotonic_condattr ? "CLOCK_MONOTONIC" : "the default clock");
1561 }
1562 
1563 #else // !SUPPORTS_CLOCK_MONOTONIC
1564 
1565 void os::Posix::init(void) {
1566   pthread_init_common();
1567 }
1568 
1569 void os::Posix::init_2(void) {
1570   log_info(os)("Use of CLOCK_MONOTONIC is not supported");
1571   log_info(os)("Use of pthread_condattr_setclock is not supported");
1572   log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with the default clock");
1573 }
1574 
1575 #endif // SUPPORTS_CLOCK_MONOTONIC
1576 
1577 os::PlatformEvent::PlatformEvent() {
1578   int status = pthread_cond_init(_cond, _condAttr);
1579   assert_status(status == 0, status, "cond_init");
1580   status = pthread_mutex_init(_mutex, _mutexAttr);
1581   assert_status(status == 0, status, "mutex_init");
1582   _event   = 0;
1583   _nParked = 0;
1584 }
1585 
1586 // Utility to convert the given timeout to an absolute timespec
1587 // (based on the appropriate clock) to use with pthread_cond_timedwait.
1588 // The clock queried here must be the clock used to manage the
1589 // timeout of the condition variable.
1590 //
1591 // The passed in timeout value is either a relative time in nanoseconds
1592 // or an absolute time in milliseconds. A relative timeout will be
1593 // associated with CLOCK_MONOTONIC if available; otherwise, or if absolute,
1594 // the default time-of-day clock will be used.
1595 
1596 // The given time is a 64-bit value, while the time_t used in the timespec is
1597 // sometimes a signed 32-bit value, so we have to watch for overflow if times
1598 // far in the future are given. Further, on Solaris versions
1599 // prior to 10 there is a restriction (see cond_timedwait) that the specified
1600 // number of seconds, in abstime, must be less than current_time + 100000000.
1601 // As it will be over 20 years before "now + 100000000" overflows, we can
1602 // ignore that overflow and simply impose a hard limit on seconds using the
1603 // value of "now + 100000000". This places a limit on the timeout of about
1604 // 3.17 years from "now".
1605 //
1606 #define MAX_SECS 100000000
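// Illustrative arithmetic (not used by the code): MAX_SECS is 100,000,000
// seconds, and 100,000,000 / (365.25 * 24 * 3600) is roughly 3.17, which is
// where the "about 3.17 years" figure above comes from.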
1607 
1608 // Calculate a new absolute time that is "timeout" nanoseconds from "now".
1609 // "unit" indicates the unit of "now_part_sec" (may be nanos or micros depending
1610 // on which clock is being used).
1611 static void calc_rel_time(timespec* abstime, jlong timeout, jlong now_sec,
1612                           jlong now_part_sec, jlong unit) {
1613   time_t max_secs = now_sec + MAX_SECS;
1614 
1615   jlong seconds = timeout / NANOUNITS;
1616   timeout %= NANOUNITS; // remaining nanos
1617 
1618   if (seconds >= MAX_SECS) {
1619     // More seconds than we can add, so pin to max_secs.
1620     abstime->tv_sec = max_secs;
1621     abstime->tv_nsec = 0;
1622   } else {
1623     abstime->tv_sec = now_sec  + seconds;
1624     long nanos = (now_part_sec * (NANOUNITS / unit)) + timeout;
1625     if (nanos >= NANOUNITS) { // overflow
1626       abstime->tv_sec += 1;
1627       nanos -= NANOUNITS;
1628     }
1629     abstime->tv_nsec = nanos;
1630   }
1631 }
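
// A worked example of calc_rel_time (hypothetical input values, shown only
// for illustration): timeout = 2,500,000,000 ns, now_sec = 100,
// now_part_sec = 900,000,000, unit = NANOUNITS.
//   seconds = 2, remaining timeout = 500,000,000 ns,
//   nanos   = 900,000,000 + 500,000,000 = 1,400,000,000 >= NANOUNITS,
// so after the carry: abstime = { tv_sec = 103, tv_nsec = 400,000,000 }.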
1632 
1633 // Unpack the given deadline, in milliseconds since the epoch, into the given timespec.
1634 // The current time in seconds is also passed in to enforce an upper bound as discussed above.
1635 static void unpack_abs_time(timespec* abstime, jlong deadline, jlong now_sec) {
1636   time_t max_secs = now_sec + MAX_SECS;
1637 
1638   jlong seconds = deadline / MILLIUNITS;
1639   jlong millis = deadline % MILLIUNITS;
1640 
1641   if (seconds >= max_secs) {
1642     // Absolute seconds exceeds allowed max, so pin to max_secs.
1643     abstime->tv_sec = max_secs;
1644     abstime->tv_nsec = 0;
1645   } else {
1646     abstime->tv_sec = seconds;
1647     abstime->tv_nsec = millis * (NANOUNITS / MILLIUNITS);
1648   }
1649 }
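
// A worked example of unpack_abs_time (hypothetical input values, shown only
// for illustration): deadline = 1,500,000,000,123 ms since the epoch, which
// is below the "now_sec + MAX_SECS" cap:
//   seconds = 1,500,000,000, millis = 123,
// so abstime = { tv_sec = 1,500,000,000, tv_nsec = 123,000,000 }.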
1650 
1651 static void to_abstime(timespec* abstime, jlong timeout, bool isAbsolute) {
1652   DEBUG_ONLY(int max_secs = MAX_SECS;)
1653 
1654   if (timeout < 0) {
1655     timeout = 0;
1656   }
1657 
1658 #ifdef SUPPORTS_CLOCK_MONOTONIC
1659 
1660   if (_use_clock_monotonic_condattr && !isAbsolute) {
1661     struct timespec now;
1662     int status = _clock_gettime(CLOCK_MONOTONIC, &now);
1663     assert_status(status == 0, status, "clock_gettime");
1664     calc_rel_time(abstime, timeout, now.tv_sec, now.tv_nsec, NANOUNITS);
1665     DEBUG_ONLY(max_secs += now.tv_sec;)
1666   } else {
1667 
1668 #else
1669 
1670   { // Match the block scope.
1671 
1672 #endif // SUPPORTS_CLOCK_MONOTONIC
1673 
1674     // Time-of-day clock is all we can reliably use.
1675     struct timeval now;
1676     int status = gettimeofday(&now, NULL);
1677     assert_status(status == 0, errno, "gettimeofday");
1678     if (isAbsolute) {
1679       unpack_abs_time(abstime, timeout, now.tv_sec);
1680     } else {
1681       calc_rel_time(abstime, timeout, now.tv_sec, now.tv_usec, MICROUNITS);
1682     }
1683     DEBUG_ONLY(max_secs += now.tv_sec;)
1684   }
1685 
1686   assert(abstime->tv_sec >= 0, "tv_sec < 0");
1687   assert(abstime->tv_sec <= max_secs, "tv_sec > max_secs");
1688   assert(abstime->tv_nsec >= 0, "tv_nsec < 0");
1689   assert(abstime->tv_nsec < NANOUNITS, "tv_nsec >= NANOUNITS");
1690 }
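
// For illustration, the two ways to_abstime is used in this file:
//   - a relative wait of "millis" milliseconds (PlatformEvent::park(jlong)):
//       to_abstime(&abst, millis * (NANOUNITS / MILLIUNITS), false);
//   - an absolute deadline of "time" milliseconds since the epoch
//     (Parker::park when isAbsolute is true):
//       to_abstime(&absTime, time, isAbsolute);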
1691 
1692 // PlatformEvent
1693 //
1694 // Assumption:
1695 //    Only one parker can exist on an event, which is why we allocate
1696 //    them per-thread. Multiple unparkers can coexist.
1697 //
1698 // _event serves as a restricted-range semaphore.
1699 //   -1 : thread is blocked, i.e. there is a waiter
1700 //    0 : neutral: thread is running or ready,
1701 //        could have been signaled after a wait started
1702 //    1 : signaled - thread is running or ready
1703 //
1704 //    Having three states allows for some detection of bad usage - see
1705 //    comments on unpark().
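//
// Illustrative usage sketch (not part of this file): the associated thread
// blocks in a condition-checking loop while any other thread releases it.
// "condition_is_set" and "set_condition" are hypothetical helpers, shown
// only to illustrate the intended pairing of park() and unpark():
//
//   // owning thread:                     // some other thread:
//   while (!condition_is_set()) {         //   set_condition();
//     event->park();                      //   event->unpark();
//   }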
1706 
1707 void os::PlatformEvent::park() {       // AKA "down()"
1708   // Transitions for _event:
1709   //   -1 => -1 : illegal
1710   //    1 =>  0 : pass - return immediately
1711   //    0 => -1 : block; then set _event to 0 before returning
1712 
1713   // Invariant: Only the thread associated with the PlatformEvent
1714   // may call park().
1715   assert(_nParked == 0, "invariant");
1716 
1717   int v;
1718 
1719   // atomically decrement _event
1720   for (;;) {
1721     v = _event;
1722     if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
1723   }
1724   guarantee(v >= 0, "invariant");
1725 
1726   if (v == 0) { // Do this the hard way by blocking ...
1727     int status = pthread_mutex_lock(_mutex);
1728     assert_status(status == 0, status, "mutex_lock");
1729     guarantee(_nParked == 0, "invariant");
1730     ++_nParked;
1731     while (_event < 0) {
1732       // OS-level "spurious wakeups" are ignored
1733       status = pthread_cond_wait(_cond, _mutex);
1734       assert_status(status == 0, status, "cond_wait");
1735     }
1736     --_nParked;
1737 
1738     _event = 0;
1739     status = pthread_mutex_unlock(_mutex);
1740     assert_status(status == 0, status, "mutex_unlock");
1741     // Paranoia to ensure our locked and lock-free paths interact
1742     // correctly with each other.
1743     OrderAccess::fence();
1744   }
1745   guarantee(_event >= 0, "invariant");
1746 }
1747 
1748 int os::PlatformEvent::park(jlong millis) {
1749   // Transitions for _event:
1750   //   -1 => -1 : illegal
1751   //    1 =>  0 : pass - return immediately
1752   //    0 => -1 : block; then set _event to 0 before returning
1753 
1754   // Invariant: Only the thread associated with the Event/PlatformEvent
1755   // may call park().
1756   assert(_nParked == 0, "invariant");
1757 
1758   int v;
1759   // atomically decrement _event
1760   for (;;) {
1761     v = _event;
1762     if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
1763   }
1764   guarantee(v >= 0, "invariant");
1765 
1766   if (v == 0) { // Do this the hard way by blocking ...
1767     struct timespec abst;
1768     to_abstime(&abst, millis * (NANOUNITS / MILLIUNITS), false);
1769 
1770     int ret = OS_TIMEOUT;
1771     int status = pthread_mutex_lock(_mutex);
1772     assert_status(status == 0, status, "mutex_lock");
1773     guarantee(_nParked == 0, "invariant");
1774     ++_nParked;
1775 
1776     while (_event < 0) {
1777       status = pthread_cond_timedwait(_cond, _mutex, &abst);
1778       assert_status(status == 0 || status == ETIMEDOUT,
1779                     status, "cond_timedwait");
1780       // OS-level "spurious wakeups" are ignored unless the archaic
1781       // FilterSpuriousWakeups flag is set to false. That flag should be obsoleted.
1782       if (!FilterSpuriousWakeups) break;
1783       if (status == ETIMEDOUT) break;
1784     }
1785     --_nParked;
1786 
1787     if (_event >= 0) {
1788       ret = OS_OK;
1789     }
1790 
1791     _event = 0;
1792     status = pthread_mutex_unlock(_mutex);
1793     assert_status(status == 0, status, "mutex_unlock");
1794     // Paranoia to ensure our locked and lock-free paths interact
1795     // correctly with each other.
1796     OrderAccess::fence();
1797     return ret;
1798   }
1799   return OS_OK;
1800 }
1801 
1802 void os::PlatformEvent::unpark() {
1803   // Transitions for _event:
1804   //    0 => 1 : just return
1805   //    1 => 1 : just return
1806   //   -1 => either 0 or 1; must signal target thread
1807   //         That is, we can safely transition _event from -1 to either
1808   //         0 or 1.
1809   // See also: "Semaphores in Plan 9" by Mullender & Cox
1810   //
1811   // Note: Forcing a transition from "-1" to "1" on an unpark() means
1812   // that it will take two back-to-back park() calls for the owning
1813   // thread to block. This has the benefit of forcing a spurious return
1814   // from the first park() call after an unpark() call, which helps
1815   // shake out uses of park() and unpark() that do not check state
1816   // conditions properly. This spurious return doesn't manifest itself in
1817   // any user code but only in the correctly written condition-checking
1818   // loops of ObjectMonitor, Mutex/Monitor, Thread::muxAcquire and os::sleep.
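  //
  // Illustrative consequence (a hypothetical sequence, for explanation only):
  //   _event == 0;  unpark()  ->  _event == 1;
  //   park()   consumes the permit: _event == 0, returns immediately;
  //   park()   again: _event == -1, the thread blocks until the next unpark().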
1819 
1820   if (Atomic::xchg(1, &_event) >= 0) return;
1821 
1822   int status = pthread_mutex_lock(_mutex);
1823   assert_status(status == 0, status, "mutex_lock");
1824   int anyWaiters = _nParked;
1825   assert(anyWaiters == 0 || anyWaiters == 1, "invariant");
1826   status = pthread_mutex_unlock(_mutex);
1827   assert_status(status == 0, status, "mutex_unlock");
1828 
1829   // Note that we signal() *after* dropping the lock for "immortal" Events.
1830   // This is safe and avoids a common class of futile wakeups.  In rare
1831   // circumstances this can cause a thread to return prematurely from
1832   // cond_{timed}wait() but the spurious wakeup is benign and the victim
1833   // will simply re-test the condition and re-park itself.
1834   // This provides particular benefit if the underlying platform does not
1835   // provide wait morphing.
1836 
1837   if (anyWaiters != 0) {
1838     status = pthread_cond_signal(_cond);
1839     assert_status(status == 0, status, "cond_signal");
1840   }
1841 }
1842 
1843 // JSR166 support
1844 
1845 os::PlatformParker::PlatformParker() {
1846   int status;
1847   status = pthread_cond_init(&_cond[REL_INDEX], _condAttr);
1848   assert_status(status == 0, status, "cond_init rel");
1849   status = pthread_cond_init(&_cond[ABS_INDEX], NULL);
1850   assert_status(status == 0, status, "cond_init abs");
1851   status = pthread_mutex_init(_mutex, _mutexAttr);
1852   assert_status(status == 0, status, "mutex_init");
1853   _cur_index = -1; // mark as unused
1854 }
1855 
1856 // Parker::park decrements count if > 0, else does a condvar wait.  Unpark
1857 // sets count to 1 and signals condvar.  Only one thread ever waits
1858 // on the condvar. Contention seen when trying to park implies that someone
1859 // is unparking you, so don't wait. And spurious returns are fine, so there
1860 // is no need to track notifications.
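//
// Illustrative mapping (an assumption, not spelled out in this file): these
// Parker operations back java.util.concurrent.locks.LockSupport.park(),
// parkNanos(), parkUntil() and unpark(Thread), with "time" interpreted as
// relative nanoseconds when isAbsolute is false and as milliseconds since
// the epoch when isAbsolute is true (see to_abstime above).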
1861 
1862 void Parker::park(bool isAbsolute, jlong time) {
1863 
1864   // Optional fast-path check:
1865   // Return immediately if a permit is available.
1866   // We depend on Atomic::xchg() having full barrier semantics
1867   // since we are doing a lock-free update to _counter.
1868   if (Atomic::xchg(0, &_counter) > 0) return;
1869 
1870   Thread* thread = Thread::current();
1871   assert(thread->is_Java_thread(), "Must be JavaThread");
1872   JavaThread *jt = (JavaThread *)thread;
1873 
1874   // Optional optimization -- avoid state transitions if there's
1875   // an interrupt pending.
1876   if (Thread::is_interrupted(thread, false)) {
1877     return;
1878   }
1879 
1880   // Next, demultiplex/decode time arguments
1881   struct timespec absTime;
1882   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
1883     return;
1884   }
1885   if (time > 0) {
1886     to_abstime(&absTime, time, isAbsolute);
1887   }
1888 
1889   // Enter safepoint region
1890   // Beware of deadlocks such as 6317397.
1891   // The per-thread Parker:: mutex is a classic leaf-lock.
1892   // In particular a thread must never block on the Threads_lock while
1893   // holding the Parker:: mutex.  If safepoints are pending, both the
1894   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
1895   ThreadBlockInVM tbivm(jt);
1896 
1897   // Don't wait if we cannot get the lock, since interference arises from
1898   // unparking. Also re-check the interrupt before trying to wait.
1899   if (Thread::is_interrupted(thread, false) ||
1900       pthread_mutex_trylock(_mutex) != 0) {
1901     return;
1902   }
1903 
1904   int status;
1905   if (_counter > 0)  { // no wait needed
1906     _counter = 0;
1907     status = pthread_mutex_unlock(_mutex);
1908     assert_status(status == 0, status, "invariant");
1909     // Paranoia to ensure our locked and lock-free paths interact
1910     // correctly with each other and Java-level accesses.
1911     OrderAccess::fence();
1912     return;
1913   }
1914 
1915   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
1916   jt->set_suspend_equivalent();
1917   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1918 
1919   assert(_cur_index == -1, "invariant");
1920   if (time == 0) {
1921     _cur_index = REL_INDEX; // arbitrary choice when not timed
1922     status = pthread_cond_wait(&_cond[_cur_index], _mutex);
1923     assert_status(status == 0, status, "cond_wait");
1924   }
1925   else {
1926     _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
1927     status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
1928     assert_status(status == 0 || status == ETIMEDOUT,
1929                   status, "cond_timedwait");
1930   }
1931   _cur_index = -1;
1932 
1933   _counter = 0;
1934   status = pthread_mutex_unlock(_mutex);
1935   assert_status(status == 0, status, "invariant");
1936   // Paranoia to ensure our locked and lock-free paths interact
1937   // correctly with each other and Java-level accesses.
1938   OrderAccess::fence();
1939 
1940   // If externally suspended while waiting, re-suspend
1941   if (jt->handle_special_suspend_equivalent_condition()) {
1942     jt->java_suspend_self();
1943   }
1944 }
1945 
1946 void Parker::unpark() {
1947   int status = pthread_mutex_lock(_mutex);
1948   assert_status(status == 0, status, "invariant");
1949   const int s = _counter;
1950   _counter = 1;
1951   // must capture correct index before unlocking
1952   int index = _cur_index;
1953   status = pthread_mutex_unlock(_mutex);
1954   assert_status(status == 0, status, "invariant");
1955 
1956   // Note that we signal() *after* dropping the lock for "immortal" Events.
1957   // This is safe and avoids a common class of futile wakeups.  In rare
1958   // circumstances this can cause a thread to return prematurely from
1959   // cond_{timed}wait() but the spurious wakeup is benign and the victim
1960   // will simply re-test the condition and re-park itself.
1961   // This provides particular benefit if the underlying platform does not
1962   // provide wait morphing.
1963 
1964   if (s < 1 && index != -1) {
1965     // thread is definitely parked
1966     status = pthread_cond_signal(&_cond[index]);
1967     assert_status(status == 0, status, "invariant");
1968   }
1969 }
1970 
1971 
1972 #endif // !SOLARIS