1 /*
   2  * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "utilities/globalDefinitions.hpp"
  26 #include "prims/jvm.h"
  27 #include "semaphore_posix.hpp"
  28 #include "runtime/frame.inline.hpp"
  29 #include "runtime/interfaceSupport.hpp"
  30 #include "runtime/os.hpp"
  31 #include "utilities/align.hpp"
  32 #include "utilities/macros.hpp"
  33 #include "utilities/vmError.hpp"
  34 
  35 #include <dlfcn.h>
  36 #include <pthread.h>
  37 #include <semaphore.h>
  38 #include <signal.h>
  39 #include <sys/resource.h>
  40 #include <sys/utsname.h>
  41 #include <time.h>
  42 #include <unistd.h>
  43 
  44 // Todo: provide an os::get_max_process_id() or similar. The maximum number of
  45 // processes may have been configured and can be read more accurately from the proc filesystem etc.
  46 #ifndef MAX_PID
  47 #define MAX_PID INT_MAX
  48 #endif
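     // Sanity check used below when printing pid values taken from siginfo (see os::print_siginfo).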
  49 #define IS_VALID_PID(p) (p > 0 && p < MAX_PID)
  50 
  51 // Check the core dump limit and report where a core file may be found.
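     // The resulting one-line message is written into 'buffer' and recorded via
     // VMError::record_coredump_status() together with a flag indicating whether
     // a core file can be expected at all.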
  52 void os::check_dump_limit(char* buffer, size_t bufferSize) {
  53   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
  54     jio_snprintf(buffer, bufferSize, "CreateCoredumpOnCrash is disabled from command line");
  55     VMError::record_coredump_status(buffer, false);
  56     return;
  57   }
  58 
  59   int n;
  60   struct rlimit rlim;
  61   bool success;
  62 
  63   char core_path[PATH_MAX];
  64   n = get_core_path(core_path, PATH_MAX);
  65 
  66   if (n <= 0) {
  67     jio_snprintf(buffer, bufferSize, "core.%d (may not exist)", current_process_id());
  68     success = true;
  69 #ifdef LINUX
  70   } else if (core_path[0] == '"') { // redirect to user process
  71     jio_snprintf(buffer, bufferSize, "Core dumps may be processed with %s", core_path);
  72     success = true;
  73 #endif
  74   } else if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
  75     jio_snprintf(buffer, bufferSize, "%s (may not exist)", core_path);
  76     success = true;
  77   } else {
  78     switch(rlim.rlim_cur) {
  79       case RLIM_INFINITY:
  80         jio_snprintf(buffer, bufferSize, "%s", core_path);
  81         success = true;
  82         break;
  83       case 0:
  84         jio_snprintf(buffer, bufferSize, "Core dumps have been disabled. To enable core dumping, try \"ulimit -c unlimited\" before starting Java again");
  85         success = false;
  86         break;
  87       default:
  88         jio_snprintf(buffer, bufferSize, "%s (max size " UINT64_FORMAT " kB). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", core_path, uint64_t(rlim.rlim_cur) / 1024);
  89         success = true;
  90         break;
  91     }
  92   }
  93 
  94   VMError::record_coredump_status(buffer, success);
  95 }
  96 
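     // Captures up to 'frames' pcs from the current thread's native call stack into
     // 'stack', skipping the first 'toSkip' frames. Unused slots are set to NULL and
     // the number of frames actually captured is returned.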
  97 int os::get_native_stack(address* stack, int frames, int toSkip) {
  98   int frame_idx = 0;
  99   int num_of_frames;  // number of frames captured
 100   frame fr = os::current_frame();
 101   while (fr.pc() && frame_idx < frames) {
 102     if (toSkip > 0) {
 103       toSkip --;
 104     } else {
 105       stack[frame_idx ++] = fr.pc();
 106     }
 107     if (fr.fp() == NULL || fr.cb() != NULL ||
 108         fr.sender_pc() == NULL || os::is_first_C_frame(&fr)) break;
 109 
 110     if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
 111       fr = os::get_sender_for_C_frame(&fr);
 112     } else {
 113       break;
 114     }
 115   }
 116   num_of_frames = frame_idx;
 117   for (; frame_idx < frames; frame_idx ++) {
 118     stack[frame_idx] = NULL;
 119   }
 120 
 121   return num_of_frames;
 122 }
 123 
 124 
 125 bool os::unsetenv(const char* name) {
 126   assert(name != NULL, "Null pointer");
 127   return (::unsetenv(name) == 0);
 128 }
 129 
 130 int os::get_last_error() {
 131   return errno;
 132 }
 133 
 134 bool os::is_debugger_attached() {
 135   // not implemented
 136   return false;
 137 }
 138 
 139 void os::wait_for_keypress_at_exit(void) {
 140   // don't do anything on posix platforms
 141   return;
 142 }
 143 
 144 // Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
 145 // so on POSIX we unmap the section at the start and at the end of the chunk that we mapped,
 146 // rather than unmapping and remapping the whole chunk, to get the requested alignment.
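     // Concretely: reserve 'size + alignment' bytes, find the first aligned address in
     // that range, then release the unaligned head and tail so that exactly 'size'
     // bytes starting at the aligned address remain reserved.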
 147 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
 148   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
 149       "Alignment must be a multiple of allocation granularity (page size)");
 150   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
 151 
 152   size_t extra_size = size + alignment;
 153   assert(extra_size >= size, "overflow, size is too large to allow alignment");
 154 
 155   char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
 156 
 157   if (extra_base == NULL) {
 158     return NULL;
 159   }
 160 
 161   // Do manual alignment
 162   char* aligned_base = align_up(extra_base, alignment);
 163 
 164   // [  |                                       |  ]
 165   // ^ extra_base
 166   //    ^ extra_base + begin_offset == aligned_base
 167   //     extra_base + begin_offset + size       ^
 168   //                       extra_base + extra_size ^
 169   // |<>| == begin_offset
 170   //                              end_offset == |<>|
 171   size_t begin_offset = aligned_base - extra_base;
 172   size_t end_offset = (extra_base + extra_size) - (aligned_base + size);
 173 
 174   if (begin_offset > 0) {
 175       os::release_memory(extra_base, begin_offset);
 176   }
 177 
 178   if (end_offset > 0) {
 179       os::release_memory(extra_base + begin_offset + size, end_offset);
 180   }
 181 
 182   return aligned_base;
 183 }
 184 
 185 int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
 186     return vsnprintf(buf, len, fmt, args);
 187 }
 188 
 189 int os::get_fileno(FILE* fp) {
 190   return NOT_AIX(::)fileno(fp);
 191 }
 192 
 193 struct tm* os::gmtime_pd(const time_t* clock, struct tm*  res) {
 194   return gmtime_r(clock, res);
 195 }
 196 
 197 void os::Posix::print_load_average(outputStream* st) {
 198   st->print("load average:");
 199   double loadavg[3];
 200   os::loadavg(loadavg, 3);
 201   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
 202   st->cr();
 203 }
 204 
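     // Prints the current soft resource limits (rlim_cur) of this process on one line.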
 205 void os::Posix::print_rlimit_info(outputStream* st) {
 206   st->print("rlimit:");
 207   struct rlimit rlim;
 208 
 209   st->print(" STACK ");
 210   getrlimit(RLIMIT_STACK, &rlim);
 211   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 212   else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024);
 213 
 214   st->print(", CORE ");
 215   getrlimit(RLIMIT_CORE, &rlim);
 216   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 217   else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024);
 218 
 219   // RLIMIT_NPROC is not available on Solaris.
 220 #if defined(AIX)
 221   st->print(", NPROC ");
 222   st->print("%d", sysconf(_SC_CHILD_MAX));
 223 #elif !defined(SOLARIS)
 224   st->print(", NPROC ");
 225   getrlimit(RLIMIT_NPROC, &rlim);
 226   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 227   else st->print(UINT64_FORMAT, uint64_t(rlim.rlim_cur));
 228 #endif
 229 
 230   st->print(", NOFILE ");
 231   getrlimit(RLIMIT_NOFILE, &rlim);
 232   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 233   else st->print(UINT64_FORMAT, uint64_t(rlim.rlim_cur));
 234 
 235   st->print(", AS ");
 236   getrlimit(RLIMIT_AS, &rlim);
 237   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 238   else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024);
 239 
 240   st->print(", DATA ");
 241   getrlimit(RLIMIT_DATA, &rlim);
 242   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 243   else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024);
 244 
 245   st->print(", FSIZE ");
 246   getrlimit(RLIMIT_FSIZE, &rlim);
 247   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 248   else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024);
 249 
 250   st->cr();
 251 }
 252 
 253 void os::Posix::print_uname_info(outputStream* st) {
 254   // kernel
 255   st->print("uname:");
 256   struct utsname name;
 257   uname(&name);
 258   st->print("%s ", name.sysname);
 259 #ifdef ASSERT
 260   st->print("%s ", name.nodename);
 261 #endif
 262   st->print("%s ", name.release);
 263   st->print("%s ", name.version);
 264   st->print("%s", name.machine);
 265   st->cr();
 266 }
 267 
 268 bool os::get_host_name(char* buf, size_t buflen) {
 269   struct utsname name;
 270   uname(&name);
 271   jio_snprintf(buf, buflen, "%s", name.nodename);
 272   return true;
 273 }
 274 
 275 bool os::has_allocatable_memory_limit(julong* limit) {
 276   struct rlimit rlim;
 277   int getrlimit_res = getrlimit(RLIMIT_AS, &rlim);
 278   // if there was an error when calling getrlimit, assume that there is no limitation
 279   // on virtual memory.
 280   bool result;
 281   if ((getrlimit_res != 0) || (rlim.rlim_cur == RLIM_INFINITY)) {
 282     result = false;
 283   } else {
 284     *limit = (julong)rlim.rlim_cur;
 285     result = true;
 286   }
 287 #ifdef _LP64
 288   return result;
 289 #else
 290   // arbitrary virtual space limit for 32 bit Unices found by testing. If
 291   // getrlimit above returned a limit, bound it with this limit. Otherwise
 292   // directly use it.
 293   const julong max_virtual_limit = (julong)3800*M;
 294   if (result) {
 295     *limit = MIN2(*limit, max_virtual_limit);
 296   } else {
 297     *limit = max_virtual_limit;
 298   }
 299 
 300   // bound by actually allocatable memory. The algorithm uses two bounds, an
 301   // upper and a lower limit. The upper limit is the current highest amount of
 302   // memory that could not be allocated, the lower limit is the current highest
 303   // amount of memory that could be allocated.
 304   // The algorithm iteratively refines the result by halving the difference
 305   // between these limits, updating either the upper limit (if that value could
 306   // not be allocated) or the lower limit (if that value could be allocated)
 307   // until the difference between these limits is "small".
 308 
 309   // the minimum amount of memory we care about allocating.
 310   const julong min_allocation_size = M;
 311 
 312   julong upper_limit = *limit;
 313 
 314   // first check a few trivial cases
 315   if (is_allocatable(upper_limit) || (upper_limit <= min_allocation_size)) {
 316     *limit = upper_limit;
 317   } else if (!is_allocatable(min_allocation_size)) {
 318     // we found that not even min_allocation_size is allocatable. Return it
 319     // anyway. There is no point to search for a better value any more.
 320     *limit = min_allocation_size;
 321   } else {
 322     // perform the binary search.
 323     julong lower_limit = min_allocation_size;
 324     while ((upper_limit - lower_limit) > min_allocation_size) {
 325       julong temp_limit = ((upper_limit - lower_limit) / 2) + lower_limit;
 326       temp_limit = align_down(temp_limit, min_allocation_size);
 327       if (is_allocatable(temp_limit)) {
 328         lower_limit = temp_limit;
 329       } else {
 330         upper_limit = temp_limit;
 331       }
 332     }
 333     *limit = lower_limit;
 334   }
 335   return true;
 336 #endif
 337 }
 338 
 339 const char* os::get_current_directory(char *buf, size_t buflen) {
 340   return getcwd(buf, buflen);
 341 }
 342 
 343 FILE* os::open(int fd, const char* mode) {
 344   return ::fdopen(fd, mode);
 345 }
 346 
 347 void os::flockfile(FILE* fp) {
 348   ::flockfile(fp);
 349 }
 350 
 351 void os::funlockfile(FILE* fp) {
 352   ::funlockfile(fp);
 353 }
 354 
 355 // Builds a platform-dependent Agent_OnLoad_<lib_name> function name
 356 // which is used to find statically linked-in agents.
 357 // Parameters:
 358 //            sym_name: Symbol in library we are looking for
 359 //            lib_name: Name of library to look in, NULL for shared libs.
 360 //            is_absolute_path == true if lib_name is absolute path to agent
 361 //                                     such as "/a/b/libL.so"
 362 //            == false if only the base name of the library is passed in
 363 //               such as "L"
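     // For example, assuming the platform uses the "lib" prefix and ".so" suffix,
     // sym_name "Agent_OnLoad" with lib_name "/a/b/libL.so" and is_absolute_path == true
     // yields "Agent_OnLoad_L", as does lib_name "L" with is_absolute_path == false.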
 364 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
 365                                     bool is_absolute_path) {
 366   char *agent_entry_name;
 367   size_t len;
 368   size_t name_len;
 369   size_t prefix_len = strlen(JNI_LIB_PREFIX);
 370   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
 371   const char *start;
 372 
 373   if (lib_name != NULL) {
 374     name_len = strlen(lib_name);
 375     if (is_absolute_path) {
 376       // Need to strip path, prefix and suffix
 377       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
 378         lib_name = ++start;
 379       }
 380       if (strlen(lib_name) <= (prefix_len + suffix_len)) {
 381         return NULL;
 382       }
 383       lib_name += prefix_len;
 384       name_len = strlen(lib_name) - suffix_len;
 385     }
 386   }
 387   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
 388   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
 389   if (agent_entry_name == NULL) {
 390     return NULL;
 391   }
 392   strcpy(agent_entry_name, sym_name);
 393   if (lib_name != NULL) {
 394     strcat(agent_entry_name, "_");
 395     strncat(agent_entry_name, lib_name, name_len);
 396   }
 397   return agent_entry_name;
 398 }
 399 
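     // Sleeps for 'millis' milliseconds. In interruptible mode (used for Java threads)
     // the sleep can be cut short by os::interrupt(), in which case OS_INTRPT is
     // returned; otherwise OS_OK is returned once the time has elapsed. Early wakeups
     // are handled by re-checking the remaining time on every loop iteration.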
 400 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
 401   assert(thread == Thread::current(),  "thread consistency check");
 402 
 403   ParkEvent * const slp = thread->_SleepEvent;
 404   slp->reset();
 405   OrderAccess::fence();
 406 
 407   if (interruptible) {
 408     jlong prevtime = javaTimeNanos();
 409 
 410     for (;;) {
 411       if (os::is_interrupted(thread, true)) {
 412         return OS_INTRPT;
 413       }
 414 
 415       jlong newtime = javaTimeNanos();
 416 
 417       if (newtime - prevtime < 0) {
 418         // time moving backwards, should only happen if no monotonic clock
 419         // not a guarantee() because JVM should not abort on kernel/glibc bugs
 420         assert(!os::supports_monotonic_clock(), "unexpected time moving backwards detected in os::sleep(interruptible)");
 421       } else {
 422         millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
 423       }
 424 
 425       if (millis <= 0) {
 426         return OS_OK;
 427       }
 428 
 429       prevtime = newtime;
 430 
 431       {
 432         assert(thread->is_Java_thread(), "sanity check");
 433         JavaThread *jt = (JavaThread *) thread;
 434         ThreadBlockInVM tbivm(jt);
 435         OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
 436 
 437         jt->set_suspend_equivalent();
 438         // cleared by handle_special_suspend_equivalent_condition() or
 439         // java_suspend_self() via check_and_wait_while_suspended()
 440 
 441         slp->park(millis);
 442 
 443         // were we externally suspended while we were waiting?
 444         jt->check_and_wait_while_suspended();
 445       }
 446     }
 447   } else {
 448     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
 449     jlong prevtime = javaTimeNanos();
 450 
 451     for (;;) {
 452       // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
 453       // the 1st iteration ...
 454       jlong newtime = javaTimeNanos();
 455 
 456       if (newtime - prevtime < 0) {
 457         // time moving backwards, should only happen if no monotonic clock
 458         // not a guarantee() because JVM should not abort on kernel/glibc bugs
 459         assert(!os::supports_monotonic_clock(), "unexpected time moving backwards detected on os::sleep(!interruptible)");
 460       } else {
 461         millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
 462       }
 463 
 464       if (millis <= 0) break;
 465 
 466       prevtime = newtime;
 467       slp->park(millis);
 468     }
 469     return OS_OK;
 470   }
 471 }
 472 
 473 ////////////////////////////////////////////////////////////////////////////////
 474 // interrupt support
 475 
 476 void os::interrupt(Thread* thread) {
 477   assert(Thread::current() == thread || Threads_lock->owned_by_self(),
 478     "possibility of dangling Thread pointer");
 479 
 480   OSThread* osthread = thread->osthread();
 481 
 482   if (!osthread->interrupted()) {
 483     osthread->set_interrupted(true);
 484     // More than one thread can get here with the same value of osthread,
 485     // resulting in multiple notifications.  We do, however, want the store
 486     // to interrupted() to be visible to other threads before we execute unpark().
 487     OrderAccess::fence();
 488     ParkEvent * const slp = thread->_SleepEvent;
 489     if (slp != NULL) slp->unpark();
 490   }
 491 
 492   // For JSR166. Unpark even if interrupt status already was set
 493   if (thread->is_Java_thread())
 494     ((JavaThread*)thread)->parker()->unpark();
 495 
 496   ParkEvent * ev = thread->_ParkEvent;
 497   if (ev != NULL) ev->unpark();
 498 
 499 }
 500 
 501 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
 502   assert(Thread::current() == thread || Threads_lock->owned_by_self(),
 503     "possibility of dangling Thread pointer");
 504 
 505   OSThread* osthread = thread->osthread();
 506 
 507   bool interrupted = osthread->interrupted();
 508 
 509   // NOTE that since there is no "lock" around the interrupt and
 510   // is_interrupted operations, there is the possibility that the
 511   // interrupted flag (in osThread) will be "false" but that the
 512   // low-level events will be in the signaled state. This is
 513   // intentional. The effect of this is that Object.wait() and
 514   // LockSupport.park() will appear to have a spurious wakeup, which
 515   // is allowed and not harmful, and the possibility is so rare that
 516   // it is not worth the added complexity to add yet another lock.
 517   // For the sleep event an explicit reset is performed on entry
 518   // to os::sleep, so there is no early return. It has also been
 519   // recommended not to put the interrupted flag into the "event"
 520   // structure because it hides the issue.
 521   if (interrupted && clear_interrupted) {
 522     osthread->set_interrupted(false);
 523     // consider thread->_SleepEvent->reset() ... optional optimization
 524   }
 525 
 526   return interrupted;
 527 }
 528 
 529 
 530 
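     // Table mapping signal numbers to their conventional names. Entries for signals
     // that exist only on some platforms are guarded by the corresponding #ifdef.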
 531 static const struct {
 532   int sig; const char* name;
 533 }
 534  g_signal_info[] =
 535   {
 536   {  SIGABRT,     "SIGABRT" },
 537 #ifdef SIGAIO
 538   {  SIGAIO,      "SIGAIO" },
 539 #endif
 540   {  SIGALRM,     "SIGALRM" },
 541 #ifdef SIGALRM1
 542   {  SIGALRM1,    "SIGALRM1" },
 543 #endif
 544   {  SIGBUS,      "SIGBUS" },
 545 #ifdef SIGCANCEL
 546   {  SIGCANCEL,   "SIGCANCEL" },
 547 #endif
 548   {  SIGCHLD,     "SIGCHLD" },
 549 #ifdef SIGCLD
 550   {  SIGCLD,      "SIGCLD" },
 551 #endif
 552   {  SIGCONT,     "SIGCONT" },
 553 #ifdef SIGCPUFAIL
 554   {  SIGCPUFAIL,  "SIGCPUFAIL" },
 555 #endif
 556 #ifdef SIGDANGER
 557   {  SIGDANGER,   "SIGDANGER" },
 558 #endif
 559 #ifdef SIGDIL
 560   {  SIGDIL,      "SIGDIL" },
 561 #endif
 562 #ifdef SIGEMT
 563   {  SIGEMT,      "SIGEMT" },
 564 #endif
 565   {  SIGFPE,      "SIGFPE" },
 566 #ifdef SIGFREEZE
 567   {  SIGFREEZE,   "SIGFREEZE" },
 568 #endif
 569 #ifdef SIGGFAULT
 570   {  SIGGFAULT,   "SIGGFAULT" },
 571 #endif
 572 #ifdef SIGGRANT
 573   {  SIGGRANT,    "SIGGRANT" },
 574 #endif
 575   {  SIGHUP,      "SIGHUP" },
 576   {  SIGILL,      "SIGILL" },
 577   {  SIGINT,      "SIGINT" },
 578 #ifdef SIGIO
 579   {  SIGIO,       "SIGIO" },
 580 #endif
 581 #ifdef SIGIOINT
 582   {  SIGIOINT,    "SIGIOINT" },
 583 #endif
 584 #ifdef SIGIOT
 585 // SIGIOT is there for BSD compatibility, but on most Unices just a
 586 // synonym for SIGABRT. The result should be "SIGABRT", not
 587 // "SIGIOT".
 588 #if (SIGIOT != SIGABRT )
 589   {  SIGIOT,      "SIGIOT" },
 590 #endif
 591 #endif
 592 #ifdef SIGKAP
 593   {  SIGKAP,      "SIGKAP" },
 594 #endif
 595   {  SIGKILL,     "SIGKILL" },
 596 #ifdef SIGLOST
 597   {  SIGLOST,     "SIGLOST" },
 598 #endif
 599 #ifdef SIGLWP
 600   {  SIGLWP,      "SIGLWP" },
 601 #endif
 602 #ifdef SIGLWPTIMER
 603   {  SIGLWPTIMER, "SIGLWPTIMER" },
 604 #endif
 605 #ifdef SIGMIGRATE
 606   {  SIGMIGRATE,  "SIGMIGRATE" },
 607 #endif
 608 #ifdef SIGMSG
 609   {  SIGMSG,      "SIGMSG" },
 610 #endif
 611   {  SIGPIPE,     "SIGPIPE" },
 612 #ifdef SIGPOLL
 613   {  SIGPOLL,     "SIGPOLL" },
 614 #endif
 615 #ifdef SIGPRE
 616   {  SIGPRE,      "SIGPRE" },
 617 #endif
 618   {  SIGPROF,     "SIGPROF" },
 619 #ifdef SIGPTY
 620   {  SIGPTY,      "SIGPTY" },
 621 #endif
 622 #ifdef SIGPWR
 623   {  SIGPWR,      "SIGPWR" },
 624 #endif
 625   {  SIGQUIT,     "SIGQUIT" },
 626 #ifdef SIGRECONFIG
 627   {  SIGRECONFIG, "SIGRECONFIG" },
 628 #endif
 629 #ifdef SIGRECOVERY
 630   {  SIGRECOVERY, "SIGRECOVERY" },
 631 #endif
 632 #ifdef SIGRESERVE
 633   {  SIGRESERVE,  "SIGRESERVE" },
 634 #endif
 635 #ifdef SIGRETRACT
 636   {  SIGRETRACT,  "SIGRETRACT" },
 637 #endif
 638 #ifdef SIGSAK
 639   {  SIGSAK,      "SIGSAK" },
 640 #endif
 641   {  SIGSEGV,     "SIGSEGV" },
 642 #ifdef SIGSOUND
 643   {  SIGSOUND,    "SIGSOUND" },
 644 #endif
 645 #ifdef SIGSTKFLT
 646   {  SIGSTKFLT,    "SIGSTKFLT" },
 647 #endif
 648   {  SIGSTOP,     "SIGSTOP" },
 649   {  SIGSYS,      "SIGSYS" },
 650 #ifdef SIGSYSERROR
 651   {  SIGSYSERROR, "SIGSYSERROR" },
 652 #endif
 653 #ifdef SIGTALRM
 654   {  SIGTALRM,    "SIGTALRM" },
 655 #endif
 656   {  SIGTERM,     "SIGTERM" },
 657 #ifdef SIGTHAW
 658   {  SIGTHAW,     "SIGTHAW" },
 659 #endif
 660   {  SIGTRAP,     "SIGTRAP" },
 661 #ifdef SIGTSTP
 662   {  SIGTSTP,     "SIGTSTP" },
 663 #endif
 664   {  SIGTTIN,     "SIGTTIN" },
 665   {  SIGTTOU,     "SIGTTOU" },
 666 #ifdef SIGURG
 667   {  SIGURG,      "SIGURG" },
 668 #endif
 669   {  SIGUSR1,     "SIGUSR1" },
 670   {  SIGUSR2,     "SIGUSR2" },
 671 #ifdef SIGVIRT
 672   {  SIGVIRT,     "SIGVIRT" },
 673 #endif
 674   {  SIGVTALRM,   "SIGVTALRM" },
 675 #ifdef SIGWAITING
 676   {  SIGWAITING,  "SIGWAITING" },
 677 #endif
 678 #ifdef SIGWINCH
 679   {  SIGWINCH,    "SIGWINCH" },
 680 #endif
 681 #ifdef SIGWINDOW
 682   {  SIGWINDOW,   "SIGWINDOW" },
 683 #endif
 684   {  SIGXCPU,     "SIGXCPU" },
 685   {  SIGXFSZ,     "SIGXFSZ" },
 686 #ifdef SIGXRES
 687   {  SIGXRES,     "SIGXRES" },
 688 #endif
 689   { -1, NULL }
 690 };
 691 
 692 // Returns the signal name copied into the caller-provided buffer 'out'. For unknown signals "UNKNOWN" is returned.
 693 const char* os::Posix::get_signal_name(int sig, char* out, size_t outlen) {
 694 
 695   const char* ret = NULL;
 696 
 697 #ifdef SIGRTMIN
 698   if (sig >= SIGRTMIN && sig <= SIGRTMAX) {
 699     if (sig == SIGRTMIN) {
 700       ret = "SIGRTMIN";
 701     } else if (sig == SIGRTMAX) {
 702       ret = "SIGRTMAX";
 703     } else {
 704       jio_snprintf(out, outlen, "SIGRTMIN+%d", sig - SIGRTMIN);
 705       return out;
 706     }
 707   }
 708 #endif
 709 
 710   if (sig > 0) {
 711     for (int idx = 0; g_signal_info[idx].sig != -1; idx ++) {
 712       if (g_signal_info[idx].sig == sig) {
 713         ret = g_signal_info[idx].name;
 714         break;
 715       }
 716     }
 717   }
 718 
 719   if (!ret) {
 720     if (!is_valid_signal(sig)) {
 721       ret = "INVALID";
 722     } else {
 723       ret = "UNKNOWN";
 724     }
 725   }
 726 
 727   if (out && outlen > 0) {
 728     strncpy(out, ret, outlen);
 729     out[outlen - 1] = '\0';
 730   }
 731   return out;
 732 }
 733 
 734 int os::Posix::get_signal_number(const char* signal_name) {
 735   char tmp[30];
 736   const char* s = signal_name;
 737   if (s[0] != 'S' || s[1] != 'I' || s[2] != 'G') {
 738     jio_snprintf(tmp, sizeof(tmp), "SIG%s", signal_name);
 739     s = tmp;
 740   }
 741   for (int idx = 0; g_signal_info[idx].sig != -1; idx ++) {
 742     if (strcmp(g_signal_info[idx].name, s) == 0) {
 743       return g_signal_info[idx].sig;
 744     }
 745   }
 746   return -1;
 747 }
 748 
 749 int os::get_signal_number(const char* signal_name) {
 750   return os::Posix::get_signal_number(signal_name);
 751 }
 752 
 753 // Returns true if signal number is valid.
 754 bool os::Posix::is_valid_signal(int sig) {
 755   // MacOS is not really POSIX compliant: sigaddset does not return
 756   // an error for invalid signal numbers. However, MacOS does not
 757   // support real-time signals and simply seems to have just 33
 758   // signals with no holes in the signal range.
 759 #ifdef __APPLE__
 760   return sig >= 1 && sig < NSIG;
 761 #else
 762   // Use sigaddset to check for signal validity.
 763   sigset_t set;
 764   sigemptyset(&set);
 765   if (sigaddset(&set, sig) == -1 && errno == EINVAL) {
 766     return false;
 767   }
 768   return true;
 769 #endif
 770 }
 771 
 772 // Returns:
 773 // NULL for an invalid signal number
 774 // "SIG<num>" for a valid but unknown signal number
 775 // signal name otherwise.
 776 const char* os::exception_name(int sig, char* buf, size_t size) {
 777   if (!os::Posix::is_valid_signal(sig)) {
 778     return NULL;
 779   }
 780   const char* const name = os::Posix::get_signal_name(sig, buf, size);
 781   if (strcmp(name, "UNKNOWN") == 0) {
 782     jio_snprintf(buf, size, "SIG%d", sig);
 783   }
 784   return buf;
 785 }
 786 
 787 #define NUM_IMPORTANT_SIGS 32
 788 // Returns a one-line short description of a signal set in a user-provided buffer.
 789 const char* os::Posix::describe_signal_set_short(const sigset_t* set, char* buffer, size_t buf_size) {
 790   assert(buf_size == (NUM_IMPORTANT_SIGS + 1), "wrong buffer size");
 791   // Note: for shortness, just print out the first 32. That should
 792   // cover most of the useful ones, apart from realtime signals.
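       // Position i (0-based) of the result describes signal number i + 1: '1' means the
       // signal is a member of the set, '0' means it is not, and '?' means membership
       // could not be determined.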
 793   for (int sig = 1; sig <= NUM_IMPORTANT_SIGS; sig++) {
 794     const int rc = sigismember(set, sig);
 795     if (rc == -1 && errno == EINVAL) {
 796       buffer[sig-1] = '?';
 797     } else {
 798       buffer[sig-1] = rc == 0 ? '0' : '1';
 799     }
 800   }
 801   buffer[NUM_IMPORTANT_SIGS] = 0;
 802   return buffer;
 803 }
 804 
 805 // Prints one-line description of a signal set.
 806 void os::Posix::print_signal_set_short(outputStream* st, const sigset_t* set) {
 807   char buf[NUM_IMPORTANT_SIGS + 1];
 808   os::Posix::describe_signal_set_short(set, buf, sizeof(buf));
 809   st->print("%s", buf);
 810 }
 811 
 812 // Writes a one-line description of a combination of sigaction.sa_flags into a
 813 // user-provided buffer. Returns that buffer.
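     // For example, flags == (SA_RESTART | SA_SIGINFO) produces "SA_RESTART|SA_SIGINFO";
     // a zero flags value leaves the initial "none" in place.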
 814 const char* os::Posix::describe_sa_flags(int flags, char* buffer, size_t size) {
 815   char* p = buffer;
 816   size_t remaining = size;
 817   bool first = true;
 818   int idx = 0;
 819 
 820   assert(buffer, "invalid argument");
 821 
 822   if (size == 0) {
 823     return buffer;
 824   }
 825 
 826   strncpy(buffer, "none", size);
 827 
 828   const struct {
 829     // NB: i is an unsigned int here because SA_RESETHAND is on some
 830     // systems 0x80000000, which is implicitly unsigned.  Assigning
 831     // it to an int field would be an overflow in unsigned-to-signed
 832     // conversion.
 833     unsigned int i;
 834     const char* s;
 835   } flaginfo [] = {
 836     { SA_NOCLDSTOP, "SA_NOCLDSTOP" },
 837     { SA_ONSTACK,   "SA_ONSTACK"   },
 838     { SA_RESETHAND, "SA_RESETHAND" },
 839     { SA_RESTART,   "SA_RESTART"   },
 840     { SA_SIGINFO,   "SA_SIGINFO"   },
 841     { SA_NOCLDWAIT, "SA_NOCLDWAIT" },
 842     { SA_NODEFER,   "SA_NODEFER"   },
 843 #ifdef AIX
 844     { SA_ONSTACK,   "SA_ONSTACK"   },
 845     { SA_OLDSTYLE,  "SA_OLDSTYLE"  },
 846 #endif
 847     { 0, NULL }
 848   };
 849 
 850   for (idx = 0; flaginfo[idx].s && remaining > 1; idx++) {
 851     if (flags & flaginfo[idx].i) {
 852       if (first) {
 853         jio_snprintf(p, remaining, "%s", flaginfo[idx].s);
 854         first = false;
 855       } else {
 856         jio_snprintf(p, remaining, "|%s", flaginfo[idx].s);
 857       }
 858       const size_t len = strlen(p);
 859       p += len;
 860       remaining -= len;
 861     }
 862   }
 863 
 864   buffer[size - 1] = '\0';
 865 
 866   return buffer;
 867 }
 868 
 869 // Prints one-line description of a combination of sigaction.sa_flags.
 870 void os::Posix::print_sa_flags(outputStream* st, int flags) {
 871   char buffer[0x100];
 872   os::Posix::describe_sa_flags(flags, buffer, sizeof(buffer));
 873   st->print("%s", buffer);
 874 }
 875 
 876 // Helper function for os::Posix::print_siginfo_...():
 877 // return a textual description for signal code.
 878 struct enum_sigcode_desc_t {
 879   const char* s_name;
 880   const char* s_desc;
 881 };
 882 
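     // Looks up a description for si->si_code: first in the per-signal table t1 (keyed
     // on si_signo and si_code), then in the signal-independent table t2. Returns false
     // and fills in "unknown" if no match is found.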
 883 static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t* out) {
 884 
 885   const struct {
 886     int sig; int code; const char* s_code; const char* s_desc;
 887   } t1 [] = {
 888     { SIGILL,  ILL_ILLOPC,   "ILL_ILLOPC",   "Illegal opcode." },
 889     { SIGILL,  ILL_ILLOPN,   "ILL_ILLOPN",   "Illegal operand." },
 890     { SIGILL,  ILL_ILLADR,   "ILL_ILLADR",   "Illegal addressing mode." },
 891     { SIGILL,  ILL_ILLTRP,   "ILL_ILLTRP",   "Illegal trap." },
 892     { SIGILL,  ILL_PRVOPC,   "ILL_PRVOPC",   "Privileged opcode." },
 893     { SIGILL,  ILL_PRVREG,   "ILL_PRVREG",   "Privileged register." },
 894     { SIGILL,  ILL_COPROC,   "ILL_COPROC",   "Coprocessor error." },
 895     { SIGILL,  ILL_BADSTK,   "ILL_BADSTK",   "Internal stack error." },
 896 #if defined(IA64) && defined(LINUX)
 897     { SIGILL,  ILL_BADIADDR, "ILL_BADIADDR", "Unimplemented instruction address" },
 898     { SIGILL,  ILL_BREAK,    "ILL_BREAK",    "Application Break instruction" },
 899 #endif
 900     { SIGFPE,  FPE_INTDIV,   "FPE_INTDIV",   "Integer divide by zero." },
 901     { SIGFPE,  FPE_INTOVF,   "FPE_INTOVF",   "Integer overflow." },
 902     { SIGFPE,  FPE_FLTDIV,   "FPE_FLTDIV",   "Floating-point divide by zero." },
 903     { SIGFPE,  FPE_FLTOVF,   "FPE_FLTOVF",   "Floating-point overflow." },
 904     { SIGFPE,  FPE_FLTUND,   "FPE_FLTUND",   "Floating-point underflow." },
 905     { SIGFPE,  FPE_FLTRES,   "FPE_FLTRES",   "Floating-point inexact result." },
 906     { SIGFPE,  FPE_FLTINV,   "FPE_FLTINV",   "Invalid floating-point operation." },
 907     { SIGFPE,  FPE_FLTSUB,   "FPE_FLTSUB",   "Subscript out of range." },
 908     { SIGSEGV, SEGV_MAPERR,  "SEGV_MAPERR",  "Address not mapped to object." },
 909     { SIGSEGV, SEGV_ACCERR,  "SEGV_ACCERR",  "Invalid permissions for mapped object." },
 910 #ifdef AIX
 911     // No explanation found for what SEGV_KEYERR means.
 912     { SIGSEGV, SEGV_KEYERR,  "SEGV_KEYERR",  "key error" },
 913 #endif
 914 #if defined(IA64) && !defined(AIX)
 915     { SIGSEGV, SEGV_PSTKOVF, "SEGV_PSTKOVF", "Paragraph stack overflow" },
 916 #endif
 917 #if defined(__sparc) && defined(SOLARIS)
 918 // define Solaris Sparc M7 ADI SEGV signals
 919 #if !defined(SEGV_ACCADI)
 920 #define SEGV_ACCADI 3
 921 #endif
 922     { SIGSEGV, SEGV_ACCADI,  "SEGV_ACCADI",  "ADI not enabled for mapped object." },
 923 #if !defined(SEGV_ACCDERR)
 924 #define SEGV_ACCDERR 4
 925 #endif
 926     { SIGSEGV, SEGV_ACCDERR, "SEGV_ACCDERR", "ADI disrupting exception." },
 927 #if !defined(SEGV_ACCPERR)
 928 #define SEGV_ACCPERR 5
 929 #endif
 930     { SIGSEGV, SEGV_ACCPERR, "SEGV_ACCPERR", "ADI precise exception." },
 931 #endif // defined(__sparc) && defined(SOLARIS)
 932     { SIGBUS,  BUS_ADRALN,   "BUS_ADRALN",   "Invalid address alignment." },
 933     { SIGBUS,  BUS_ADRERR,   "BUS_ADRERR",   "Nonexistent physical address." },
 934     { SIGBUS,  BUS_OBJERR,   "BUS_OBJERR",   "Object-specific hardware error." },
 935     { SIGTRAP, TRAP_BRKPT,   "TRAP_BRKPT",   "Process breakpoint." },
 936     { SIGTRAP, TRAP_TRACE,   "TRAP_TRACE",   "Process trace trap." },
 937     { SIGCHLD, CLD_EXITED,   "CLD_EXITED",   "Child has exited." },
 938     { SIGCHLD, CLD_KILLED,   "CLD_KILLED",   "Child has terminated abnormally and did not create a core file." },
 939     { SIGCHLD, CLD_DUMPED,   "CLD_DUMPED",   "Child has terminated abnormally and created a core file." },
 940     { SIGCHLD, CLD_TRAPPED,  "CLD_TRAPPED",  "Traced child has trapped." },
 941     { SIGCHLD, CLD_STOPPED,  "CLD_STOPPED",  "Child has stopped." },
 942     { SIGCHLD, CLD_CONTINUED,"CLD_CONTINUED","Stopped child has continued." },
 943 #ifdef SIGPOLL
 944     { SIGPOLL, POLL_OUT,     "POLL_OUT",     "Output buffers available." },
 945     { SIGPOLL, POLL_MSG,     "POLL_MSG",     "Input message available." },
 946     { SIGPOLL, POLL_ERR,     "POLL_ERR",     "I/O error." },
 947     { SIGPOLL, POLL_PRI,     "POLL_PRI",     "High priority input available." },
 948     { SIGPOLL, POLL_HUP,     "POLL_HUP",     "Device disconnected." },
 949 #endif
 950     { -1, -1, NULL, NULL }
 951   };
 952 
 953   // Codes valid in any signal context.
 954   const struct {
 955     int code; const char* s_code; const char* s_desc;
 956   } t2 [] = {
 957     { SI_USER,      "SI_USER",     "Signal sent by kill()." },
 958     { SI_QUEUE,     "SI_QUEUE",    "Signal sent by sigqueue()." },
 959     { SI_TIMER,     "SI_TIMER",    "Signal generated by expiration of a timer set by timer_settime()." },
 960     { SI_ASYNCIO,   "SI_ASYNCIO",  "Signal generated by completion of an asynchronous I/O request." },
 961     { SI_MESGQ,     "SI_MESGQ",    "Signal generated by arrival of a message on an empty message queue." },
 962     // Linux specific
 963 #ifdef SI_TKILL
 964     { SI_TKILL,     "SI_TKILL",    "Signal sent by tkill (pthread_kill)" },
 965 #endif
 966 #ifdef SI_DETHREAD
 967     { SI_DETHREAD,  "SI_DETHREAD", "Signal sent by execve() killing subsidiary threads" },
 968 #endif
 969 #ifdef SI_KERNEL
 970     { SI_KERNEL,    "SI_KERNEL",   "Signal sent by kernel." },
 971 #endif
 972 #ifdef SI_SIGIO
 973     { SI_SIGIO,     "SI_SIGIO",    "Signal sent by queued SIGIO" },
 974 #endif
 975 
 976 #ifdef AIX
 977     { SI_UNDEFINED, "SI_UNDEFINED","siginfo contains partial information" },
 978     { SI_EMPTY,     "SI_EMPTY",    "siginfo contains no useful information" },
 979 #endif
 980 
 981 #ifdef __sun
 982     { SI_NOINFO,    "SI_NOINFO",   "No signal information" },
 983     { SI_RCTL,      "SI_RCTL",     "kernel generated signal via rctl action" },
 984     { SI_LWP,       "SI_LWP",      "Signal sent via lwp_kill" },
 985 #endif
 986 
 987     { -1, NULL, NULL }
 988   };
 989 
 990   const char* s_code = NULL;
 991   const char* s_desc = NULL;
 992 
 993   for (int i = 0; t1[i].sig != -1; i ++) {
 994     if (t1[i].sig == si->si_signo && t1[i].code == si->si_code) {
 995       s_code = t1[i].s_code;
 996       s_desc = t1[i].s_desc;
 997       break;
 998     }
 999   }
1000 
1001   if (s_code == NULL) {
1002     for (int i = 0; t2[i].s_code != NULL; i ++) {
1003       if (t2[i].code == si->si_code) {
1004         s_code = t2[i].s_code;
1005         s_desc = t2[i].s_desc;
1006       }
1007     }
1008   }
1009 
1010   if (s_code == NULL) {
1011     out->s_name = "unknown";
1012     out->s_desc = "unknown";
1013     return false;
1014   }
1015 
1016   out->s_name = s_code;
1017   out->s_desc = s_desc;
1018 
1019   return true;
1020 }
1021 
1022 void os::print_siginfo(outputStream* os, const void* si0) {
1023 
1024   const siginfo_t* const si = (const siginfo_t*) si0;
1025 
1026   char buf[20];
1027   os->print("siginfo:");
1028 
1029   if (!si) {
1030     os->print(" <null>");
1031     return;
1032   }
1033 
1034   const int sig = si->si_signo;
1035 
1036   os->print(" si_signo: %d (%s)", sig, os::Posix::get_signal_name(sig, buf, sizeof(buf)));
1037 
1038   enum_sigcode_desc_t ed;
1039   get_signal_code_description(si, &ed);
1040   os->print(", si_code: %d (%s)", si->si_code, ed.s_name);
1041 
1042   if (si->si_errno) {
1043     os->print(", si_errno: %d", si->si_errno);
1044   }
1045 
1046   // Output additional information depending on the signal code.
1047 
1048   // Note: Many implementations lump si_addr, si_pid, si_uid etc. together as unions,
1049   // so it depends on the context which member to use. For synchronous error signals,
1050   // we print si_addr, unless the signal was sent by another process or thread, in
1051   // which case we print out pid or tid of the sender.
1052   if (si->si_code == SI_USER || si->si_code == SI_QUEUE) {
1053     const pid_t pid = si->si_pid;
1054     os->print(", si_pid: %ld", (long) pid);
1055     if (IS_VALID_PID(pid)) {
1056       const pid_t me = getpid();
1057       if (me == pid) {
1058         os->print(" (current process)");
1059       }
1060     } else {
1061       os->print(" (invalid)");
1062     }
1063     os->print(", si_uid: %ld", (long) si->si_uid);
1064     if (sig == SIGCHLD) {
1065       os->print(", si_status: %d", si->si_status);
1066     }
1067   } else if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
1068              sig == SIGTRAP || sig == SIGFPE) {
1069     os->print(", si_addr: " PTR_FORMAT, p2i(si->si_addr));
1070 #ifdef SIGPOLL
1071   } else if (sig == SIGPOLL) {
1072     os->print(", si_band: %ld", si->si_band);
1073 #endif
1074   }
1075 
1076 }
1077 
1078 int os::Posix::unblock_thread_signal_mask(const sigset_t *set) {
1079   return pthread_sigmask(SIG_UNBLOCK, set, NULL);
1080 }
1081 
1082 address os::Posix::ucontext_get_pc(const ucontext_t* ctx) {
1083 #if defined(AIX)
1084    return Aix::ucontext_get_pc(ctx);
1085 #elif defined(BSD)
1086    return Bsd::ucontext_get_pc(ctx);
1087 #elif defined(LINUX)
1088    return Linux::ucontext_get_pc(ctx);
1089 #elif defined(SOLARIS)
1090    return Solaris::ucontext_get_pc(ctx);
1091 #else
1092    VMError::report_and_die("unimplemented ucontext_get_pc");
1093 #endif
1094 }
1095 
1096 void os::Posix::ucontext_set_pc(ucontext_t* ctx, address pc) {
1097 #if defined(AIX)
1098    Aix::ucontext_set_pc(ctx, pc);
1099 #elif defined(BSD)
1100    Bsd::ucontext_set_pc(ctx, pc);
1101 #elif defined(LINUX)
1102    Linux::ucontext_set_pc(ctx, pc);
1103 #elif defined(SOLARIS)
1104    Solaris::ucontext_set_pc(ctx, pc);
1105 #else
1106    VMError::report_and_die("unimplemented ucontext_set_pc");
1107 #endif
1108 }
1109 
1110 char* os::Posix::describe_pthread_attr(char* buf, size_t buflen, const pthread_attr_t* attr) {
1111   size_t stack_size = 0;
1112   size_t guard_size = 0;
1113   int detachstate = 0;
1114   pthread_attr_getstacksize(attr, &stack_size);
1115   pthread_attr_getguardsize(attr, &guard_size);
1116   // Work around linux NPTL implementation error, see also os::create_thread() in os_linux.cpp.
1117   LINUX_ONLY(stack_size -= guard_size);
1118   pthread_attr_getdetachstate(attr, &detachstate);
1119   jio_snprintf(buf, buflen, "stacksize: " SIZE_FORMAT "k, guardsize: " SIZE_FORMAT "k, %s",
1120     stack_size / 1024, guard_size / 1024,
1121     (detachstate == PTHREAD_CREATE_DETACHED ? "detached" : "joinable"));
1122   return buf;
1123 }
1124 
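     // Canonicalizes 'filename' into the caller-provided buffer 'outbuf' of size
     // 'outbuflen'. Returns 'outbuf' on success; on failure returns NULL and sets errno
     // (for example ENAMETOOLONG if the resolved path does not fit into the buffer).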
1125 char* os::Posix::realpath(const char* filename, char* outbuf, size_t outbuflen) {
1126 
1127   if (filename == NULL || outbuf == NULL || outbuflen < 1) {
1128     assert(false, "os::Posix::realpath: invalid arguments.");
1129     errno = EINVAL;
1130     return NULL;
1131   }
1132 
1133   char* result = NULL;
1134 
1135   // This assumes platform realpath() is implemented according to POSIX.1-2008.
1136   // POSIX.1-2008 allows NULL to be specified for the output buffer, in which case
1137   // the output buffer is dynamically allocated and must be ::free()'d by the caller.
1138   char* p = ::realpath(filename, NULL);
1139   if (p != NULL) {
1140     if (strlen(p) < outbuflen) {
1141       strcpy(outbuf, p);
1142       result = outbuf;
1143     } else {
1144       errno = ENAMETOOLONG;
1145     }
1146     ::free(p); // *not* os::free
1147   } else {
1148     // Fallback for platforms struggling with modern POSIX standards (AIX 5.3, 6.1). If realpath
1149     // returns EINVAL, this may indicate that realpath is not POSIX.1-2008 compatible and
1150     // that it complains about the NULL we handed down as user buffer.
1151     // In this case, use the user-provided buffer but at least check whether realpath caused
1152     // a memory overwrite.
1153     if (errno == EINVAL) {
1154       outbuf[outbuflen - 1] = '\0';
1155       p = ::realpath(filename, outbuf);
1156       if (p != NULL) {
1157         guarantee(outbuf[outbuflen - 1] == '\0', "realpath buffer overwrite detected.");
1158         result = p;
1159       }
1160     }
1161   }
1162   return result;
1163 
1164 }
1165 
1166 
1167 // Check the minimum allowable stack sizes for thread creation and for initializing
1168 // the Java system classes, including StackOverflowError - this depends on the page
1169 // size.
1170 // The space needed for frames during startup is platform dependent. It
1171 // depends on word size, platform calling conventions, C frame layout and
1172 // interpreter/C1/C2 design decisions. Therefore this is given in a
1173 // platform (os/cpu) dependent constant.
1174 // To this, space for guard mechanisms is added, which depends on the
1175 // page size which again depends on the concrete system the VM is running
1176 // on. Space for libc guard pages is not included in this size.
1177 jint os::Posix::set_minimum_stack_sizes() {
1178   size_t os_min_stack_allowed = SOLARIS_ONLY(thr_min_stack()) NOT_SOLARIS(PTHREAD_STACK_MIN);
1179 
1180   _java_thread_min_stack_allowed = _java_thread_min_stack_allowed +
1181                                    JavaThread::stack_guard_zone_size() +
1182                                    JavaThread::stack_shadow_zone_size();
1183 
1184   _java_thread_min_stack_allowed = align_up(_java_thread_min_stack_allowed, vm_page_size());
1185   _java_thread_min_stack_allowed = MAX2(_java_thread_min_stack_allowed, os_min_stack_allowed);
1186 
1187   size_t stack_size_in_bytes = ThreadStackSize * K;
1188   if (stack_size_in_bytes != 0 &&
1189       stack_size_in_bytes < _java_thread_min_stack_allowed) {
1190     // The '-Xss' and '-XX:ThreadStackSize=N' options both set
1191     // ThreadStackSize so we go with "Java thread stack size" instead
1192     // of "ThreadStackSize" to be more friendly.
1193     tty->print_cr("\nThe Java thread stack size specified is too small. "
1194                   "Specify at least " SIZE_FORMAT "k",
1195                   _java_thread_min_stack_allowed / K);
1196     return JNI_ERR;
1197   }
1198 
1199   // Make the stack size a multiple of the page size so that
1200   // the yellow/red zones can be guarded.
1201   JavaThread::set_stack_size_at_create(align_up(stack_size_in_bytes, vm_page_size()));
1202 
1203   // Reminder: a compiler thread is a Java thread.
1204   _compiler_thread_min_stack_allowed = _compiler_thread_min_stack_allowed +
1205                                        JavaThread::stack_guard_zone_size() +
1206                                        JavaThread::stack_shadow_zone_size();
1207 
1208   _compiler_thread_min_stack_allowed = align_up(_compiler_thread_min_stack_allowed, vm_page_size());
1209   _compiler_thread_min_stack_allowed = MAX2(_compiler_thread_min_stack_allowed, os_min_stack_allowed);
1210 
1211   stack_size_in_bytes = CompilerThreadStackSize * K;
1212   if (stack_size_in_bytes != 0 &&
1213       stack_size_in_bytes < _compiler_thread_min_stack_allowed) {
1214     tty->print_cr("\nThe CompilerThreadStackSize specified is too small. "
1215                   "Specify at least " SIZE_FORMAT "k",
1216                   _compiler_thread_min_stack_allowed / K);
1217     return JNI_ERR;
1218   }
1219 
1220   _vm_internal_thread_min_stack_allowed = align_up(_vm_internal_thread_min_stack_allowed, vm_page_size());
1221   _vm_internal_thread_min_stack_allowed = MAX2(_vm_internal_thread_min_stack_allowed, os_min_stack_allowed);
1222 
1223   stack_size_in_bytes = VMThreadStackSize * K;
1224   if (stack_size_in_bytes != 0 &&
1225       stack_size_in_bytes < _vm_internal_thread_min_stack_allowed) {
1226     tty->print_cr("\nThe VMThreadStackSize specified is too small. "
1227                   "Specify at least " SIZE_FORMAT "k",
1228                   _vm_internal_thread_min_stack_allowed / K);
1229     return JNI_ERR;
1230   }
1231   return JNI_OK;
1232 }
1233 
1234 // Called when creating the thread. The minimum stack sizes have already been calculated.
1235 size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) {
1236   size_t stack_size;
1237   if (req_stack_size == 0) {
1238     stack_size = default_stack_size(thr_type);
1239   } else {
1240     stack_size = req_stack_size;
1241   }
1242 
1243   switch (thr_type) {
1244   case os::java_thread:
1245     // Java threads use ThreadStackSize which default value can be
1246     // changed with the flag -Xss
1247     if (req_stack_size == 0 && JavaThread::stack_size_at_create() > 0) {
1248       // no requested size and we have a more specific default value
1249       stack_size = JavaThread::stack_size_at_create();
1250     }
1251     stack_size = MAX2(stack_size,
1252                       _java_thread_min_stack_allowed);
1253     break;
1254   case os::compiler_thread:
1255     if (req_stack_size == 0 && CompilerThreadStackSize > 0) {
1256       // no requested size and we have a more specific default value
1257       stack_size = (size_t)(CompilerThreadStackSize * K);
1258     }
1259     stack_size = MAX2(stack_size,
1260                       _compiler_thread_min_stack_allowed);
1261     break;
1262   case os::vm_thread:
1263   case os::pgc_thread:
1264   case os::cgc_thread:
1265   case os::watcher_thread:
1266   default:  // presume the unknown thr_type is a VM internal
1267     if (req_stack_size == 0 && VMThreadStackSize > 0) {
1268       // no requested size and we have a more specific default value
1269       stack_size = (size_t)(VMThreadStackSize * K);
1270     }
1271 
1272     stack_size = MAX2(stack_size,
1273                       _vm_internal_thread_min_stack_allowed);
1274     break;
1275   }
1276 
1277   // pthread_attr_setstacksize() may require that the size be rounded up to the OS page size.
1278   // Be careful not to round up to 0. Align down in that case.
1279   if (stack_size <= SIZE_MAX - vm_page_size()) {
1280     stack_size = align_up(stack_size, vm_page_size());
1281   } else {
1282     stack_size = align_down(stack_size, vm_page_size());
1283   }
1284 
1285   return stack_size;
1286 }
1287 
1288 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
1289   assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
1290 }
1291 
1292 /*
1293  * See the caveats for this class in os_posix.hpp
1294  * Protects the callback call so that SIGSEGV / SIGBUS jumps back into this
1295  * method and returns false. If none of the signals are raised, returns true.
1296  * The callback is supposed to provide the method that should be protected.
1297  */
1298 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
1299   sigset_t saved_sig_mask;
1300 
1301   assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
1302   assert(!WatcherThread::watcher_thread()->has_crash_protection(),
1303       "crash_protection already set?");
1304 
1305   // we cannot rely on sigsetjmp/siglongjmp to save/restore the signal mask
1306   // since on at least some systems (OS X) siglongjmp will restore the mask
1307   // for the process, not the thread
1308   pthread_sigmask(0, NULL, &saved_sig_mask);
1309   if (sigsetjmp(_jmpbuf, 0) == 0) {
1310     // make sure we can see in the signal handler that we have crash protection
1311     // installed
1312     WatcherThread::watcher_thread()->set_crash_protection(this);
1313     cb.call();
1314     // and clear the crash protection
1315     WatcherThread::watcher_thread()->set_crash_protection(NULL);
1316     return true;
1317   }
1318   // this happens when we siglongjmp() back
1319   pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL);
1320   WatcherThread::watcher_thread()->set_crash_protection(NULL);
1321   return false;
1322 }
1323 
1324 void os::WatcherThreadCrashProtection::restore() {
1325   assert(WatcherThread::watcher_thread()->has_crash_protection(),
1326       "must have crash protection");
1327 
1328   siglongjmp(_jmpbuf, 1);
1329 }
1330 
1331 void os::WatcherThreadCrashProtection::check_crash_protection(int sig,
1332     Thread* thread) {
1333 
1334   if (thread != NULL &&
1335       thread->is_Watcher_thread() &&
1336       WatcherThread::watcher_thread()->has_crash_protection()) {
1337 
1338     if (sig == SIGSEGV || sig == SIGBUS) {
1339       WatcherThread::watcher_thread()->crash_protection()->restore();
1340     }
1341   }
1342 }
1343 
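     // Helper macros that append the current errno value and its textual description to
     // an assert/guarantee failure message.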
1344 #define check_with_errno(check_type, cond, msg)                             \
1345   do {                                                                      \
1346     int err = errno;                                                        \
1347     check_type(cond, "%s; error='%s' (errno=%s)", msg, os::strerror(err),   \
1348                os::errno_name(err));                                        \
1349 } while (false)
1350 
1351 #define assert_with_errno(cond, msg)    check_with_errno(assert, cond, msg)
1352 #define guarantee_with_errno(cond, msg) check_with_errno(guarantee, cond, msg)
1353 
1354 // POSIX unnamed semaphores are not supported on OS X.
1355 #ifndef __APPLE__
1356 
1357 PosixSemaphore::PosixSemaphore(uint value) {
1358   int ret = sem_init(&_semaphore, 0, value);
1359 
1360   guarantee_with_errno(ret == 0, "Failed to initialize semaphore");
1361 }
1362 
1363 PosixSemaphore::~PosixSemaphore() {
1364   sem_destroy(&_semaphore);
1365 }
1366 
1367 void PosixSemaphore::signal(uint count) {
1368   for (uint i = 0; i < count; i++) {
1369     int ret = sem_post(&_semaphore);
1370 
1371     assert_with_errno(ret == 0, "sem_post failed");
1372   }
1373 }
1374 
1375 void PosixSemaphore::wait() {
1376   int ret;
1377 
1378   do {
1379     ret = sem_wait(&_semaphore);
1380   } while (ret != 0 && errno == EINTR);
1381 
1382   assert_with_errno(ret == 0, "sem_wait failed");
1383 }
1384 
1385 bool PosixSemaphore::trywait() {
1386   int ret;
1387 
1388   do {
1389     ret = sem_trywait(&_semaphore);
1390   } while (ret != 0 && errno == EINTR);
1391 
1392   assert_with_errno(ret == 0 || errno == EAGAIN, "trywait failed");
1393 
1394   return ret == 0;
1395 }
1396 
1397 bool PosixSemaphore::timedwait(struct timespec ts) {
1398   while (true) {
1399     int result = sem_timedwait(&_semaphore, &ts);
1400     if (result == 0) {
1401       return true;
1402     } else if (errno == EINTR) {
1403       continue;
1404     } else if (errno == ETIMEDOUT) {
1405       return false;
1406     } else {
1407       assert_with_errno(false, "timedwait failed");
1408       return false;
1409     }
1410   }
1411 }
1412 
1413 #endif // __APPLE__
1414 
1415 
1416 // Shared pthread_mutex/cond based PlatformEvent implementation.
1417 // Not currently usable by Solaris.
1418 
1419 #ifndef SOLARIS
1420 
1421 // Shared condattr object for use with relative timed-waits. Will be associated
1422 // with CLOCK_MONOTONIC if available to avoid issues with time-of-day changes,
1423 // but otherwise whatever default is used by the platform - generally the
1424 // time-of-day clock.
1425 static pthread_condattr_t _condAttr[1];
1426 
1427 // Shared mutexattr to explicitly set the type to PTHREAD_MUTEX_NORMAL as not
1428 // all systems (e.g. FreeBSD) map the default to "normal".
1429 static pthread_mutexattr_t _mutexAttr[1];
1430 
1431 // common basic initialization that is always supported
1432 static void pthread_init_common(void) {
1433   int status;
1434   if ((status = pthread_condattr_init(_condAttr)) != 0) {
1435     fatal("pthread_condattr_init: %s", os::strerror(status));
1436   }
1437   if ((status = pthread_mutexattr_init(_mutexAttr)) != 0) {
1438     fatal("pthread_mutexattr_init: %s", os::strerror(status));
1439   }
1440   if ((status = pthread_mutexattr_settype(_mutexAttr, PTHREAD_MUTEX_NORMAL)) != 0) {
1441     fatal("pthread_mutexattr_settype: %s", os::strerror(status));
1442   }
1443 }
1444 
1445 // Not all POSIX types and APIs are available on all notionally "posix"
1446 // platforms. If we have build-time support then we will check for actual
1447 // runtime support via dlopen/dlsym lookup. This allows for running on an
1448 // older OS version compared to the build platform. But if there is no
1449 // build time support then there cannot be any runtime support as we do not
1450 // know what the runtime types would be (for example clockid_t might be an
1451 // int or int64_t).
1452 //
1453 #ifdef SUPPORTS_CLOCK_MONOTONIC
1454 
1455 // This means we have clockid_t, clock_gettime et al and CLOCK_MONOTONIC
1456 
1457 static int (*_clock_gettime)(clockid_t, struct timespec *);
1458 static int (*_pthread_condattr_setclock)(pthread_condattr_t *, clockid_t);
1459 
1460 static bool _use_clock_monotonic_condattr;
1461 
1462 // Determine which POSIX APIs are present and do the appropriate
1463 // configuration.
1464 void os::Posix::init(void) {
1465 
1466   // NOTE: no logging available when this is called. Put logging
1467   // statements in init_2().
1468 
1469   // Copied from os::Linux::clock_init(). The duplication is temporary.
1470 
1471   // 1. Check for CLOCK_MONOTONIC support.
1472 
1473   void* handle = NULL;
1474 
1475   // For Linux we need librt; on other OSes we can find
1476   // these functions in the regular libc.
1477 #ifdef NEEDS_LIBRT
1478   // We do dlopen's in this particular order due to a bug in the Linux
1479   // dynamic loader (see 6348968) leading to a crash on exit.
1480   handle = dlopen("librt.so.1", RTLD_LAZY);
1481   if (handle == NULL) {
1482     handle = dlopen("librt.so", RTLD_LAZY);
1483   }
1484 #endif
1485 
1486   if (handle == NULL) {
1487     handle = RTLD_DEFAULT;
1488   }
1489 
1490   _clock_gettime = NULL;
1491 
1492   int (*clock_getres_func)(clockid_t, struct timespec*) =
1493     (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
1494   int (*clock_gettime_func)(clockid_t, struct timespec*) =
1495     (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime");
1496   if (clock_getres_func != NULL && clock_gettime_func != NULL) {
1497     // We assume that if both clock_gettime and clock_getres support
1498     // CLOCK_MONOTONIC then the OS provides true high-res monotonic clock.
1499     struct timespec res;
1500     struct timespec tp;
1501     if (clock_getres_func(CLOCK_MONOTONIC, &res) == 0 &&
1502         clock_gettime_func(CLOCK_MONOTONIC, &tp) == 0) {
1503       // Yes, monotonic clock is supported.
1504       _clock_gettime = clock_gettime_func;
1505     } else {
1506 #ifdef NEEDS_LIBRT
1507       // Close librt if there is no monotonic clock.
1508       if (handle != RTLD_DEFAULT) {
1509         dlclose(handle);
1510       }
1511 #endif
1512     }
1513   }
1514 
1515   // 2. Check for pthread_condattr_setclock support.
1516 
1517   _pthread_condattr_setclock = NULL;
1518 
1519   // libpthread is already loaded.
1520   int (*condattr_setclock_func)(pthread_condattr_t*, clockid_t) =
1521     (int (*)(pthread_condattr_t*, clockid_t))dlsym(RTLD_DEFAULT,
1522                                                    "pthread_condattr_setclock");
1523   if (condattr_setclock_func != NULL) {
1524     _pthread_condattr_setclock = condattr_setclock_func;
1525   }
1526 
1527   // Now do general initialization.
1528 
1529   pthread_init_common();
1530 
1531   int status;
1532   if (_pthread_condattr_setclock != NULL && _clock_gettime != NULL) {
1533     if ((status = _pthread_condattr_setclock(_condAttr, CLOCK_MONOTONIC)) != 0) {
1534       if (status == EINVAL) {
1535         _use_clock_monotonic_condattr = false;
1536         warning("Unable to use monotonic clock with relative timed-waits" \
1537                 " - changes to the time-of-day clock may have adverse effects");
1538       } else {
1539         fatal("pthread_condattr_setclock: %s", os::strerror(status));
1540       }
1541     } else {
1542       _use_clock_monotonic_condattr = true;
1543     }
1544   } else {
1545     _use_clock_monotonic_condattr = false;
1546   }
1547 }
1548 
1549 void os::Posix::init_2(void) {
1550   log_info(os)("Use of CLOCK_MONOTONIC is%s supported",
1551                (_clock_gettime != NULL ? "" : " not"));
1552   log_info(os)("Use of pthread_condattr_setclock is%s supported",
1553                (_pthread_condattr_setclock != NULL ? "" : " not"));
1554   log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with %s",
1555                _use_clock_monotonic_condattr ? "CLOCK_MONOTONIC" : "the default clock");
1556 }
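     // For example, with Unified Logging enabled for the "os" tag
     // (e.g. -Xlog:os=info) a typical modern Linux would be expected to show:
     //   Use of CLOCK_MONOTONIC is supported
     //   Use of pthread_condattr_setclock is supported
     //   Relative timed-wait using pthread_cond_timedwait is associated with CLOCK_MONOTONIC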
1557 
1558 #else // !SUPPORTS_CLOCK_MONOTONIC
1559 
1560 void os::Posix::init(void) {
1561   pthread_init_common();
1562 }
1563 
1564 void os::Posix::init_2(void) {
1565   log_info(os)("Use of CLOCK_MONOTONIC is not supported");
1566   log_info(os)("Use of pthread_condattr_setclock is not supported");
1567   log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with the default clock");
1568 }
1569 
1570 #endif // SUPPORTS_CLOCK_MONOTONIC
1571 
1572 os::PlatformEvent::PlatformEvent() {
1573   int status = pthread_cond_init(_cond, _condAttr);
1574   assert_status(status == 0, status, "cond_init");
1575   status = pthread_mutex_init(_mutex, _mutexAttr);
1576   assert_status(status == 0, status, "mutex_init");
1577   _event   = 0;
1578   _nParked = 0;
1579 }
1580 
1581 // Utility to convert the given timeout to an absolute timespec
1582 // (based on the appropriate clock) to use with pthread_cond_timedwait.
1583 // The clock queried here must be the clock used to manage the
1584 // timeout of the condition variable.
1585 //
1586 // The passed in timeout value is either a relative time in nanoseconds
1587 // or an absolute time in milliseconds. A relative timeout will be
1588 // associated with CLOCK_MONOTONIC if available; otherwise, or if absolute,
1589 // the default time-of-day clock will be used.
1590 
1591 // The given time is a 64-bit value, but the time_t used in the timespec is
1592 // sometimes a signed 32-bit value, so we have to watch for overflow if times
1593 // far in the future are given. Further, on Solaris versions
1594 // prior to 10 there is a restriction (see cond_timedwait) that the specified
1595 // number of seconds, in abstime, is less than current_time + 100000000.
1596 // As it will be over 20 years before "now + 100000000" overflows, we can
1597 // ignore overflow and just impose a hard limit on seconds using the value
1598 // of "now + 100000000". This places a limit on the timeout of about 3.17
1599 // years from "now".
1600 //
1601 #define MAX_SECS 100000000
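     // Worked example: MAX_SECS is 100,000,000 seconds, and
     // 100,000,000 / (365.25 * 24 * 3600) ~= 3.17, hence the ~3.17 year
     // limit on timeouts mentioned above.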
1602 
1603 // Calculate a new absolute time that is "timeout" nanoseconds from "now".
1604 // "unit" indicates the unit of "now_part_sec" (may be nanos or micros depending
1605 // on which clock is being used).
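     // Illustrative example (hypothetical values): with now_sec = 1000,
     // now_part_sec = 900000 (micros, so unit = MICROUNITS) and a timeout of
     // 2500000000 ns (2.5 s): seconds = 2, remaining nanos = 500000000;
     // 900000 * 1000 + 500000000 = 1400000000 >= NANOUNITS, so the overflow is
     // carried into tv_sec: abstime = { tv_sec = 1003, tv_nsec = 400000000 }.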
1606 static void calc_rel_time(timespec* abstime, jlong timeout, jlong now_sec,
1607                           jlong now_part_sec, jlong unit) {
1608   time_t max_secs = now_sec + MAX_SECS;
1609 
1610   jlong seconds = timeout / NANOUNITS;
1611   timeout %= NANOUNITS; // remaining nanos
1612 
1613   if (seconds >= MAX_SECS) {
1614     // More seconds than we can add, so pin to max_secs.
1615     abstime->tv_sec = max_secs;
1616     abstime->tv_nsec = 0;
1617   } else {
1618     abstime->tv_sec = now_sec  + seconds;
1619     long nanos = (now_part_sec * (NANOUNITS / unit)) + timeout;
1620     if (nanos >= NANOUNITS) { // overflow
1621       abstime->tv_sec += 1;
1622       nanos -= NANOUNITS;
1623     }
1624     abstime->tv_nsec = nanos;
1625   }
1626 }
1627 
1628 // Unpack the given deadline in milliseconds since the epoch, into the given timespec.
1629 // The current time in seconds is also passed in to enforce an upper bound as discussed above.
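     // Illustrative example (hypothetical values): a deadline of 1500000000250 ms
     // since the epoch unpacks to seconds = 1500000000 and millis = 250, giving
     // abstime = { tv_sec = 1500000000, tv_nsec = 250000000 }, provided that
     // value does not exceed now_sec + MAX_SECS.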
1630 static void unpack_abs_time(timespec* abstime, jlong deadline, jlong now_sec) {
1631   time_t max_secs = now_sec + MAX_SECS;
1632 
1633   jlong seconds = deadline / MILLIUNITS;
1634   jlong millis = deadline % MILLIUNITS;
1635 
1636   if (seconds >= max_secs) {
1637     // Absolute seconds exceeds allowed max, so pin to max_secs.
1638     abstime->tv_sec = max_secs;
1639     abstime->tv_nsec = 0;
1640   } else {
1641     abstime->tv_sec = seconds;
1642     abstime->tv_nsec = millis * (NANOUNITS / MILLIUNITS);
1643   }
1644 }
1645 
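     // Illustrative usage (hypothetical call sites): a 500 ms relative wait and
     // an absolute deadline 500 ms from now would be converted as
     //   to_abstime(&ts, 500 * (NANOUNITS / MILLIUNITS), false); // relative, in nanos
     //   to_abstime(&ts, os::javaTimeMillis() + 500, true);      // absolute, in millis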
1646 static void to_abstime(timespec* abstime, jlong timeout, bool isAbsolute) {
1647   DEBUG_ONLY(int max_secs = MAX_SECS;)
1648 
1649   if (timeout < 0) {
1650     timeout = 0;
1651   }
1652 
1653 #ifdef SUPPORTS_CLOCK_MONOTONIC
1654 
1655   if (_use_clock_monotonic_condattr && !isAbsolute) {
1656     struct timespec now;
1657     int status = _clock_gettime(CLOCK_MONOTONIC, &now);
1658     assert_status(status == 0, status, "clock_gettime");
1659     calc_rel_time(abstime, timeout, now.tv_sec, now.tv_nsec, NANOUNITS);
1660     DEBUG_ONLY(max_secs += now.tv_sec;)
1661   } else {
1662 
1663 #else
1664 
1665   { // Match the block scope.
1666 
1667 #endif // SUPPORTS_CLOCK_MONOTONIC
1668 
1669     // Time-of-day clock is all we can reliably use.
1670     struct timeval now;
1671     int status = gettimeofday(&now, NULL);
1672     assert_status(status == 0, errno, "gettimeofday");
1673     if (isAbsolute) {
1674       unpack_abs_time(abstime, timeout, now.tv_sec);
1675     } else {
1676       calc_rel_time(abstime, timeout, now.tv_sec, now.tv_usec, MICROUNITS);
1677     }
1678     DEBUG_ONLY(max_secs += now.tv_sec;)
1679   }
1680 
1681   assert(abstime->tv_sec >= 0, "tv_sec < 0");
1682   assert(abstime->tv_sec <= max_secs, "tv_sec > max_secs");
1683   assert(abstime->tv_nsec >= 0, "tv_nsec < 0");
1684   assert(abstime->tv_nsec < NANOUNITS, "tv_nsec >= NANOUNITS");
1685 }
1686 
1687 // PlatformEvent
1688 //
1689 // Assumption:
1690 //    Only one parker can exist on an event, which is why we allocate
1691 //    them per-thread. Multiple unparkers can coexist.
1692 //
1693 // _event serves as a restricted-range semaphore.
1694 //   -1 : thread is blocked, i.e. there is a waiter
1695 //    0 : neutral: thread is running or ready,
1696 //        could have been signaled after a wait started
1697 //    1 : signaled - thread is running or ready
1698 //
1699 //    Having three states allows for some detection of bad usage - see
1700 //    comments on unpark().
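     //
     // Illustrative sequences (hypothetical threads T1, the owner, and T2):
     //   permit available first:  T2: unpark()  _event 0 -> 1
     //                            T1: park()    _event 1 -> 0, returns immediately
     //   blocking case:           T1: park()    _event 0 -> -1, blocks on _cond
     //                            T2: unpark()  _event -1 -> 1, signals _cond
     //                            T1: wakes, resets _event to 0 and returns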
1701 
1702 void os::PlatformEvent::park() {       // AKA "down()"
1703   // Transitions for _event:
1704   //   -1 => -1 : illegal
1705   //    1 =>  0 : pass - return immediately
1706   //    0 => -1 : block; then set _event to 0 before returning
1707 
1708   // Invariant: Only the thread associated with the PlatformEvent
1709   // may call park().
1710   assert(_nParked == 0, "invariant");
1711 
1712   int v;
1713 
1714   // atomically decrement _event
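       // (In this codebase Atomic::cmpxchg takes (exchange_value, dest, compare_value)
       // and returns the value previously found at dest, so the loop retries
       // until the decrement is installed without interference.)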
1715   for (;;) {
1716     v = _event;
1717     if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
1718   }
1719   guarantee(v >= 0, "invariant");
1720 
1721   if (v == 0) { // Do this the hard way by blocking ...
1722     int status = pthread_mutex_lock(_mutex);
1723     assert_status(status == 0, status, "mutex_lock");
1724     guarantee(_nParked == 0, "invariant");
1725     ++_nParked;
1726     while (_event < 0) {
1727       // OS-level "spurious wakeups" are ignored
1728       status = pthread_cond_wait(_cond, _mutex);
1729       assert_status(status == 0, status, "cond_wait");
1730     }
1731     --_nParked;
1732 
1733     _event = 0;
1734     status = pthread_mutex_unlock(_mutex);
1735     assert_status(status == 0, status, "mutex_unlock");
1736     // Paranoia to ensure our locked and lock-free paths interact
1737     // correctly with each other.
1738     OrderAccess::fence();
1739   }
1740   guarantee(_event >= 0, "invariant");
1741 }
1742 
1743 int os::PlatformEvent::park(jlong millis) {
1744   // Transitions for _event:
1745   //   -1 => -1 : illegal
1746   //    1 =>  0 : pass - return immediately
1747   //    0 => -1 : block; then set _event to 0 before returning
1748 
1749   // Invariant: Only the thread associated with the Event/PlatformEvent
1750   // may call park().
1751   assert(_nParked == 0, "invariant");
1752 
1753   int v;
1754   // atomically decrement _event
1755   for (;;) {
1756     v = _event;
1757     if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
1758   }
1759   guarantee(v >= 0, "invariant");
1760 
1761   if (v == 0) { // Do this the hard way by blocking ...
1762     struct timespec abst;
1763     to_abstime(&abst, millis * (NANOUNITS / MILLIUNITS), false);
1764 
1765     int ret = OS_TIMEOUT;
1766     int status = pthread_mutex_lock(_mutex);
1767     assert_status(status == 0, status, "mutex_lock");
1768     guarantee(_nParked == 0, "invariant");
1769     ++_nParked;
1770 
1771     while (_event < 0) {
1772       status = pthread_cond_timedwait(_cond, _mutex, &abst);
1773       assert_status(status == 0 || status == ETIMEDOUT,
1774                     status, "cond_timedwait");
1775       // OS-level "spurious wakeups" are ignored unless the archaic
1776       // FilterSpuriousWakeups flag is set to false. That flag should be obsoleted.
1777       if (!FilterSpuriousWakeups) break;
1778       if (status == ETIMEDOUT) break;
1779     }
1780     --_nParked;
1781 
1782     if (_event >= 0) {
1783       ret = OS_OK;
1784     }
1785 
1786     _event = 0;
1787     status = pthread_mutex_unlock(_mutex);
1788     assert_status(status == 0, status, "mutex_unlock");
1789     // Paranoia to ensure our locked and lock-free paths interact
1790     // correctly with each other.
1791     OrderAccess::fence();
1792     return ret;
1793   }
1794   return OS_OK;
1795 }
1796 
1797 void os::PlatformEvent::unpark() {
1798   // Transitions for _event:
1799   //    0 => 1 : just return
1800   //    1 => 1 : just return
1801   //   -1 => either 0 or 1; must signal target thread
1802   //         That is, we can safely transition _event from -1 to either
1803   //         0 or 1.
1804   // See also: "Semaphores in Plan 9" by Mullender & Cox
1805   //
1806   // Note: Forcing a transition from "-1" to "1" on an unpark() means
1807   // that it will take two back-to-back park() calls for the owning
1808   // thread to block. This has the benefit of forcing a spurious return
1809   // from the first park() call after an unpark() call which will help
1810   // shake out uses of park() and unpark() without checking state conditions
1811   // properly. This spurious return doesn't manifest itself in any user code
1812   // but only in the correctly written condition checking loops of ObjectMonitor,
1813   // Mutex/Monitor, Thread::muxAcquire and os::sleep.
1814 
1815   if (Atomic::xchg(1, &_event) >= 0) return;
1816 
1817   int status = pthread_mutex_lock(_mutex);
1818   assert_status(status == 0, status, "mutex_lock");
1819   int anyWaiters = _nParked;
1820   assert(anyWaiters == 0 || anyWaiters == 1, "invariant");
1821   status = pthread_mutex_unlock(_mutex);
1822   assert_status(status == 0, status, "mutex_unlock");
1823 
1824   // Note that we signal() *after* dropping the lock for "immortal" Events.
1825   // This is safe and avoids a common class of futile wakeups.  In rare
1826   // circumstances this can cause a thread to return prematurely from
1827   // cond_{timed}wait() but the spurious wakeup is benign and the victim
1828   // will simply re-test the condition and re-park itself.
1829   // This provides particular benefit if the underlying platform does not
1830   // provide wait morphing.
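       // ("Wait morphing" here refers to the ability of an implementation to
       // move a signalled waiter directly from the condition variable's queue
       // to the mutex's queue, so it does not wake up only to block again on
       // the mutex.)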
1831 
1832   if (anyWaiters != 0) {
1833     status = pthread_cond_signal(_cond);
1834     assert_status(status == 0, status, "cond_signal");
1835   }
1836 }
1837 
1838 // JSR166 support
1839 
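     // The PlatformParker keeps two condition variables: the REL_INDEX one is
     // initialized with _condAttr (and thus may be bound to CLOCK_MONOTONIC when
     // pthread_condattr_setclock is usable) for relative waits, while the
     // ABS_INDEX one uses the default attributes, i.e. the time-of-day clock,
     // which is what an absolute deadline is expressed against.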
1840 os::PlatformParker::PlatformParker() {
1841   int status;
1842   status = pthread_cond_init(&_cond[REL_INDEX], _condAttr);
1843   assert_status(status == 0, status, "cond_init rel");
1844   status = pthread_cond_init(&_cond[ABS_INDEX], NULL);
1845   assert_status(status == 0, status, "cond_init abs");
1846   status = pthread_mutex_init(_mutex, _mutexAttr);
1847   assert_status(status == 0, status, "mutex_init");
1848   _cur_index = -1; // mark as unused
1849 }
1850 
1851 // Parker::park decrements count if > 0, else does a condvar wait.  Unpark
1852 // sets count to 1 and signals condvar.  Only one thread ever waits
1853 // on the condvar. Contention seen when trying to park implies that someone
1854 // is unparking you, so don't wait. And spurious returns are fine, so there
1855 // is no need to track notifications.
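     // A sketch of the typical java.util.concurrent usage (assuming the standard
     // LockSupport mapping): LockSupport.park() ends up in Parker::park(false, 0),
     // LockSupport.parkNanos(n) in Parker::park(false, n), and
     // LockSupport.parkUntil(deadlineMillis) in Parker::park(true, deadlineMillis);
     // LockSupport.unpark(t) calls Parker::unpark() on that thread's parker.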
1856 
1857 void Parker::park(bool isAbsolute, jlong time) {
1858 
1859   // Optional fast-path check:
1860   // Return immediately if a permit is available.
1861   // We depend on Atomic::xchg() having full barrier semantics
1862   // since we are doing a lock-free update to _counter.
1863   if (Atomic::xchg(0, &_counter) > 0) return;
1864 
1865   Thread* thread = Thread::current();
1866   assert(thread->is_Java_thread(), "Must be JavaThread");
1867   JavaThread *jt = (JavaThread *)thread;
1868 
1869   // Optional optimization -- avoid state transitions if there's
1870   // an interrupt pending.
1871   if (Thread::is_interrupted(thread, false)) {
1872     return;
1873   }
1874 
1875   // Next, demultiplex/decode time arguments
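       // The encoding matches the Unsafe.park contract: if isAbsolute the time
       // is a deadline in milliseconds since the epoch, otherwise it is a
       // relative timeout in nanoseconds (0 meaning "wait indefinitely").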
1876   struct timespec absTime;
1877   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
1878     return;
1879   }
1880   if (time > 0) {
1881     to_abstime(&absTime, time, isAbsolute);
1882   }
1883 
1884   // Enter safepoint region
1885   // Beware of deadlocks such as 6317397.
1886   // The per-thread Parker:: mutex is a classic leaf-lock.
1887   // In particular a thread must never block on the Threads_lock while
1888   // holding the Parker:: mutex.  If safepoints are pending, both the
1889   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
1890   ThreadBlockInVM tbivm(jt);
1891 
1892   // Don't wait if cannot get lock since interference arises from
1893   // unparking. Also re-check interrupt before trying wait.
1894   if (Thread::is_interrupted(thread, false) ||
1895       pthread_mutex_trylock(_mutex) != 0) {
1896     return;
1897   }
1898 
1899   int status;
1900   if (_counter > 0)  { // no wait needed
1901     _counter = 0;
1902     status = pthread_mutex_unlock(_mutex);
1903     assert_status(status == 0, status, "invariant");
1904     // Paranoia to ensure our locked and lock-free paths interact
1905     // correctly with each other and Java-level accesses.
1906     OrderAccess::fence();
1907     return;
1908   }
1909 
1910   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
1911   jt->set_suspend_equivalent();
1912   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1913 
1914   assert(_cur_index == -1, "invariant");
1915   if (time == 0) {
1916     _cur_index = REL_INDEX; // arbitrary choice when not timed
1917     status = pthread_cond_wait(&_cond[_cur_index], _mutex);
1918     assert_status(status == 0, status, "cond_wait");
1919   }
1920   else {
1921     _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
1922     status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
1923     assert_status(status == 0 || status == ETIMEDOUT,
1924                   status, "cond_timedwait");
1925   }
1926   _cur_index = -1;
1927 
1928   _counter = 0;
1929   status = pthread_mutex_unlock(_mutex);
1930   assert_status(status == 0, status, "invariant");
1931   // Paranoia to ensure our locked and lock-free paths interact
1932   // correctly with each other and Java-level accesses.
1933   OrderAccess::fence();
1934 
1935   // If externally suspended while waiting, re-suspend
1936   if (jt->handle_special_suspend_equivalent_condition()) {
1937     jt->java_suspend_self();
1938   }
1939 }
1940 
1941 void Parker::unpark() {
1942   int status = pthread_mutex_lock(_mutex);
1943   assert_status(status == 0, status, "invariant");
1944   const int s = _counter;
1945   _counter = 1;
1946   // must capture correct index before unlocking
1947   int index = _cur_index;
1948   status = pthread_mutex_unlock(_mutex);
1949   assert_status(status == 0, status, "invariant");
1950 
1951   // Note that we signal() *after* dropping the lock for "immortal" Events.
1952   // This is safe and avoids a common class of futile wakeups.  In rare
1953   // circumstances this can cause a thread to return prematurely from
1954   // cond_{timed}wait() but the spurious wakeup is benign and the victim
1955   // will simply re-test the condition and re-park itself.
1956   // This provides particular benefit if the underlying platform does not
1957   // provide wait morphing.
1958 
1959   if (s < 1 && index != -1) {
1960     // thread is definitely parked
1961     status = pthread_cond_signal(&_cond[index]);
1962     assert_status(status == 0, status, "invariant");
1963   }
1964 }
1965 
1966 
1967 #endif // !SOLARIS