1 /*
   2  * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "jvm.h"
  26 #include "logging/log.hpp"
  27 #include "memory/allocation.inline.hpp"
  28 #include "os_posix.inline.hpp"
  29 #include "utilities/globalDefinitions.hpp"
  30 #include "runtime/frame.inline.hpp"
  31 #include "runtime/interfaceSupport.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "utilities/align.hpp"
  34 #include "utilities/formatBuffer.hpp"
  35 #include "utilities/macros.hpp"
  36 #include "utilities/vmError.hpp"
  37 
  38 #include <dirent.h>
  39 #include <dlfcn.h>
  40 #include <grp.h>
  41 #include <pwd.h>
  42 #include <pthread.h>
  43 #include <signal.h>
  44 #include <sys/mman.h>
  45 #include <sys/resource.h>
  46 #include <sys/utsname.h>
  47 #include <time.h>
  48 #include <unistd.h>
  49 
// Todo: provide an os::get_max_process_id() or similar. The maximum number of
// processes may have been configured and can be read more accurately from the
// proc filesystem etc.
  52 #ifndef MAX_PID
  53 #define MAX_PID INT_MAX
  54 #endif
#define IS_VALID_PID(p) ((p) > 0 && (p) < MAX_PID)
  56 
  57 #define ROOT_UID 0
  58 
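// Some BSD-derived platforms (e.g. older macOS) only define MAP_ANON, so map it
// to MAP_ANONYMOUS here.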
  59 #ifndef MAP_ANONYMOUS
  60   #define MAP_ANONYMOUS MAP_ANON
  61 #endif
  62 
  63 #define check_with_errno(check_type, cond, msg)                             \
  64   do {                                                                      \
  65     int err = errno;                                                        \
  66     check_type(cond, "%s; error='%s' (errno=%s)", msg, os::strerror(err),   \
  67                os::errno_name(err));                                        \
  } while (false)
  69 
  70 #define assert_with_errno(cond, msg)    check_with_errno(assert, cond, msg)
  71 #define guarantee_with_errno(cond, msg) check_with_errno(guarantee, cond, msg)
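
// Example usage (illustrative; 'fd' is an arbitrary open file descriptor):
//
//   int ret = ::close(fd);
//   assert_with_errno(ret == 0, "close failed");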
  72 
// Check the core dump limit and report the possible location of the core file.
  74 void os::check_dump_limit(char* buffer, size_t bufferSize) {
  75   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
  76     jio_snprintf(buffer, bufferSize, "CreateCoredumpOnCrash is disabled from command line");
  77     VMError::record_coredump_status(buffer, false);
  78     return;
  79   }
  80 
  81   int n;
  82   struct rlimit rlim;
  83   bool success;
  84 
  85   char core_path[PATH_MAX];
  86   n = get_core_path(core_path, PATH_MAX);
  87 
  88   if (n <= 0) {
  89     jio_snprintf(buffer, bufferSize, "core.%d (may not exist)", current_process_id());
  90     success = true;
  91 #ifdef LINUX
  92   } else if (core_path[0] == '"') { // redirect to user process
  93     jio_snprintf(buffer, bufferSize, "Core dumps may be processed with %s", core_path);
  94     success = true;
  95 #endif
  96   } else if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
  97     jio_snprintf(buffer, bufferSize, "%s (may not exist)", core_path);
  98     success = true;
  99   } else {
 100     switch(rlim.rlim_cur) {
 101       case RLIM_INFINITY:
 102         jio_snprintf(buffer, bufferSize, "%s", core_path);
 103         success = true;
 104         break;
 105       case 0:
 106         jio_snprintf(buffer, bufferSize, "Core dumps have been disabled. To enable core dumping, try \"ulimit -c unlimited\" before starting Java again");
 107         success = false;
 108         break;
 109       default:
 110         jio_snprintf(buffer, bufferSize, "%s (max size " UINT64_FORMAT " kB). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", core_path, uint64_t(rlim.rlim_cur) / 1024);
 111         success = true;
 112         break;
 113     }
 114   }
 115 
 116   VMError::record_coredump_status(buffer, success);
 117 }
 118 
 119 int os::get_native_stack(address* stack, int frames, int toSkip) {
 120   int frame_idx = 0;
 121   int num_of_frames;  // number of frames captured
 122   frame fr = os::current_frame();
 123   while (fr.pc() && frame_idx < frames) {
 124     if (toSkip > 0) {
 125       toSkip --;
 126     } else {
 127       stack[frame_idx ++] = fr.pc();
 128     }
    // Stop walking when the frame cannot be trusted any further: no frame pointer,
    // a code-blob frame, no sender pc, or the first C frame.
    if (fr.fp() == NULL || fr.cb() != NULL ||
        fr.sender_pc() == NULL || os::is_first_C_frame(&fr)) break;

    fr = os::get_sender_for_C_frame(&fr);
 137   }
 138   num_of_frames = frame_idx;
 139   for (; frame_idx < frames; frame_idx ++) {
 140     stack[frame_idx] = NULL;
 141   }
 142 
 143   return num_of_frames;
 144 }
 145 
 146 
 147 bool os::unsetenv(const char* name) {
 148   assert(name != NULL, "Null pointer");
 149   return (::unsetenv(name) == 0);
 150 }
 151 
 152 int os::get_last_error() {
 153   return errno;
 154 }
 155 
 156 size_t os::lasterror(char *buf, size_t len) {
 157   if (errno == 0)  return 0;
 158 
 159   const char *s = os::strerror(errno);
 160   size_t n = ::strlen(s);
 161   if (n >= len) {
 162     n = len - 1;
 163   }
 164   ::strncpy(buf, s, n);
 165   buf[n] = '\0';
 166   return n;
 167 }
 168 
 169 bool os::is_debugger_attached() {
 170   // not implemented
 171   return false;
 172 }
 173 
 174 void os::wait_for_keypress_at_exit(void) {
 175   // don't do anything on posix platforms
 176   return;
 177 }
 178 
 179 int os::create_file_for_heap(const char* dir) {
 180 
 181   const char name_template[] = "/jvmheap.XXXXXX";
 182 
 183   size_t fullname_len = strlen(dir) + strlen(name_template);
 184   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
 185   if (fullname == NULL) {
 186     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
 187     return -1;
 188   }
 189   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
 190   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
 191 
 192   os::native_path(fullname);
 193 
  // Create a new file. mkstemp() creates the file with owner read/write
  // permissions only (mode 0600 on POSIX.1-2008 conforming systems), so no
  // explicit file mode or umask handling is needed here.
  int fd = mkstemp(fullname);
 199 
 200   if (fd < 0) {
 201     warning("Could not create file for heap with template %s", fullname);
 202     os::free(fullname);
 203     return -1;
 204   }
 205 
 206   // delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted.
 207   int ret = unlink(fullname);
 208   assert_with_errno(ret == 0, "unlink returned error");
 209 
 210   os::free(fullname);
 211   return fd;
 212 }
 213 
 214 static char* reserve_mmapped_memory(size_t bytes, char* requested_addr) {
 215   char * addr;
 216   int flags = MAP_PRIVATE NOT_AIX( | MAP_NORESERVE ) | MAP_ANONYMOUS;
 217   if (requested_addr != NULL) {
 218     assert((uintptr_t)requested_addr % os::vm_page_size() == 0, "Requested address should be aligned to OS page size");
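    // Note: MAP_FIXED silently replaces any existing mapping in the requested
    // range, so callers must ensure the target range is free.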
 219     flags |= MAP_FIXED;
 220   }
 221 
 222   // Map reserved/uncommitted pages PROT_NONE so we fail early if we
 223   // touch an uncommitted page. Otherwise, the read/write might
 224   // succeed if we have enough swap space to back the physical page.
 225   addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
 226                        flags, -1, 0);
 227 
 228   if (addr != MAP_FAILED) {
 229     MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC);
 230     return addr;
 231   }
 232   return NULL;
 233 }
 234 
 235 static int util_posix_fallocate(int fd, off_t offset, off_t len) {
 236 #ifdef __APPLE__
 237   fstore_t store = { F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, len };
  // First we try to get a contiguous chunk of disk space
 239   int ret = fcntl(fd, F_PREALLOCATE, &store);
 240   if (ret == -1) {
    // Maybe we are too fragmented, try to allocate a non-contiguous range
 242     store.fst_flags = F_ALLOCATEALL;
 243     ret = fcntl(fd, F_PREALLOCATE, &store);
 244   }
  if (ret != -1) {
 246     return ftruncate(fd, len);
 247   }
 248   return -1;
 249 #else
 250   return posix_fallocate(fd, offset, len);
 251 #endif
 252 }
 253 
 254 // Map the given address range to the provided file descriptor.
 255 char* os::map_memory_to_file(char* base, size_t size, int fd) {
 256   assert(fd != -1, "File descriptor is not valid");
 257 
 258   // allocate space for the file
 259   int ret = util_posix_fallocate(fd, 0, (off_t)size);
 260   if (ret != 0) {
 261     vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory. error(%d)", ret));
 262     return NULL;
 263   }
 264 
 265   int prot = PROT_READ | PROT_WRITE;
 266   int flags = MAP_SHARED;
 267   if (base != NULL) {
 268     flags |= MAP_FIXED;
 269   }
 270   char* addr = (char*)mmap(base, size, prot, flags, fd, 0);
 271 
 272   if (addr == MAP_FAILED) {
 273     warning("Failed mmap to file. (%s)", os::strerror(errno));
 274     return NULL;
 275   }
 276   if (base != NULL && addr != base) {
 277     if (!os::release_memory(addr, size)) {
 278       warning("Could not release memory on unsuccessful file mapping");
 279     }
 280     return NULL;
 281   }
 282   return addr;
 283 }
 284 
 285 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
 286   assert(fd != -1, "File descriptor is not valid");
 287   assert(base != NULL, "Base cannot be NULL");
 288 
 289   return map_memory_to_file(base, size, fd);
 290 }
 291 
 292 // Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
 293 // so on posix, unmap the section at the start and at the end of the chunk that we mapped
 294 // rather than unmapping and remapping the whole chunk to get requested alignment.
 295 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
 296   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
 297       "Alignment must be a multiple of allocation granularity (page size)");
 298   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
 299 
 300   size_t extra_size = size + alignment;
 301   assert(extra_size >= size, "overflow, size is too large to allow alignment");
 302 
 303   char* extra_base;
 304   if (file_desc != -1) {
    // For file mapping, we do not call os::reserve_memory(extra_size, NULL, alignment, file_desc) because
    // we need to deal with shrinking of the file space later when we release extra memory after alignment.
    // We also cannot call os::reserve_memory() with file_desc set to -1 because on AIX we might get SHM memory.
    // So we call a helper function here to reserve the memory for us. After we have an aligned base,
    // we will replace the anonymous mapping with a file mapping.
 310     extra_base = reserve_mmapped_memory(extra_size, NULL);
 311     if (extra_base != NULL) {
 312       MemTracker::record_virtual_memory_reserve((address)extra_base, extra_size, CALLER_PC);
 313     }
 314   } else {
 315     extra_base = os::reserve_memory(extra_size, NULL, alignment);
 316   }
 317 
 318   if (extra_base == NULL) {
 319     return NULL;
 320   }
 321 
 322   // Do manual alignment
 323   char* aligned_base = align_up(extra_base, alignment);
 324 
 325   // [  |                                       |  ]
 326   // ^ extra_base
 327   //    ^ extra_base + begin_offset == aligned_base
 328   //     extra_base + begin_offset + size       ^
 329   //                       extra_base + extra_size ^
 330   // |<>| == begin_offset
 331   //                              end_offset == |<>|
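  // Example (illustrative): with extra_base == 0x7f0000001000, alignment == 0x200000
  // and size == 0x400000, aligned_base == 0x7f0000200000, begin_offset == 0x1ff000
  // and end_offset == alignment - begin_offset == 0x1000.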
 332   size_t begin_offset = aligned_base - extra_base;
 333   size_t end_offset = (extra_base + extra_size) - (aligned_base + size);
 334 
 335   if (begin_offset > 0) {
 336       os::release_memory(extra_base, begin_offset);
 337   }
 338 
 339   if (end_offset > 0) {
 340       os::release_memory(extra_base + begin_offset + size, end_offset);
 341   }
 342 
 343   if (file_desc != -1) {
 344     // After we have an aligned address, we can replace anonymous mapping with file mapping
 345     if (replace_existing_mapping_with_file_mapping(aligned_base, size, file_desc) == NULL) {
 346       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
 347     }
 348     MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC);
 349   }
 350   return aligned_base;
 351 }
 352 
 353 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
 354   // All supported POSIX platforms provide C99 semantics.
 355   int result = ::vsnprintf(buf, len, fmt, args);
 356   // If an encoding error occurred (result < 0) then it's not clear
 357   // whether the buffer is NUL terminated, so ensure it is.
 358   if ((result < 0) && (len > 0)) {
 359     buf[len - 1] = '\0';
 360   }
 361   return result;
 362 }
 363 
 364 int os::get_fileno(FILE* fp) {
 365   return NOT_AIX(::)fileno(fp);
 366 }
 367 
 368 struct tm* os::gmtime_pd(const time_t* clock, struct tm*  res) {
 369   return gmtime_r(clock, res);
 370 }
 371 
 372 void os::Posix::print_load_average(outputStream* st) {
 373   st->print("load average:");
 374   double loadavg[3];
 375   os::loadavg(loadavg, 3);
 376   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
 377   st->cr();
 378 }
 379 
 380 void os::Posix::print_rlimit_info(outputStream* st) {
 381   st->print("rlimit:");
 382   struct rlimit rlim;
 383 
 384   st->print(" STACK ");
 385   getrlimit(RLIMIT_STACK, &rlim);
 386   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 387   else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024);
 388 
 389   st->print(", CORE ");
 390   getrlimit(RLIMIT_CORE, &rlim);
 391   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 392   else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024);
 393 
  // RLIMIT_NPROC is not available on Solaris; AIX reports the limit via sysconf(_SC_CHILD_MAX) instead.
 395 #if defined(AIX)
 396   st->print(", NPROC ");
 397   st->print("%d", sysconf(_SC_CHILD_MAX));
 398 #elif !defined(SOLARIS)
 399   st->print(", NPROC ");
 400   getrlimit(RLIMIT_NPROC, &rlim);
 401   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 402   else st->print(UINT64_FORMAT, uint64_t(rlim.rlim_cur));
 403 #endif
 404 
 405   st->print(", NOFILE ");
 406   getrlimit(RLIMIT_NOFILE, &rlim);
 407   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 408   else st->print(UINT64_FORMAT, uint64_t(rlim.rlim_cur));
 409 
 410   st->print(", AS ");
 411   getrlimit(RLIMIT_AS, &rlim);
 412   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 413   else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024);
 414 
 415   st->print(", DATA ");
 416   getrlimit(RLIMIT_DATA, &rlim);
 417   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 418   else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024);
 419 
 420   st->print(", FSIZE ");
 421   getrlimit(RLIMIT_FSIZE, &rlim);
 422   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
 423   else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024);
 424 
 425   st->cr();
 426 }
 427 
 428 void os::Posix::print_uname_info(outputStream* st) {
 429   // kernel
 430   st->print("uname:");
 431   struct utsname name;
 432   uname(&name);
 433   st->print("%s ", name.sysname);
 434 #ifdef ASSERT
 435   st->print("%s ", name.nodename);
 436 #endif
 437   st->print("%s ", name.release);
 438   st->print("%s ", name.version);
 439   st->print("%s", name.machine);
 440   st->cr();
 441 }
 442 
 443 void os::Posix::print_umask(outputStream* st, mode_t umsk) {
 444   st->print((umsk & S_IRUSR) ? "r" : "-");
 445   st->print((umsk & S_IWUSR) ? "w" : "-");
 446   st->print((umsk & S_IXUSR) ? "x" : "-");
 447   st->print((umsk & S_IRGRP) ? "r" : "-");
 448   st->print((umsk & S_IWGRP) ? "w" : "-");
 449   st->print((umsk & S_IXGRP) ? "x" : "-");
 450   st->print((umsk & S_IROTH) ? "r" : "-");
 451   st->print((umsk & S_IWOTH) ? "w" : "-");
 452   st->print((umsk & S_IXOTH) ? "x" : "-");
 453 }
 454 
 455 void os::Posix::print_user_info(outputStream* st) {
 456   unsigned id = (unsigned) ::getuid();
 457   st->print("uid  : %u ", id);
 458   id = (unsigned) ::geteuid();
 459   st->print("euid : %u ", id);
 460   id = (unsigned) ::getgid();
 461   st->print("gid  : %u ", id);
 462   id = (unsigned) ::getegid();
 463   st->print_cr("egid : %u", id);
 464   st->cr();
 465 
 466   mode_t umsk = ::umask(0);
 467   ::umask(umsk);
 468   st->print("umask: %04o (", (unsigned) umsk);
 469   print_umask(st, umsk);
 470   st->print_cr(")");
 471   st->cr();
 472 }
 473 
 474 
 475 bool os::get_host_name(char* buf, size_t buflen) {
 476   struct utsname name;
 477   uname(&name);
 478   jio_snprintf(buf, buflen, "%s", name.nodename);
 479   return true;
 480 }
 481 
 482 bool os::has_allocatable_memory_limit(julong* limit) {
 483   struct rlimit rlim;
 484   int getrlimit_res = getrlimit(RLIMIT_AS, &rlim);
 485   // if there was an error when calling getrlimit, assume that there is no limitation
 486   // on virtual memory.
 487   bool result;
 488   if ((getrlimit_res != 0) || (rlim.rlim_cur == RLIM_INFINITY)) {
 489     result = false;
 490   } else {
 491     *limit = (julong)rlim.rlim_cur;
 492     result = true;
 493   }
 494 #ifdef _LP64
 495   return result;
 496 #else
  // Arbitrary virtual space limit for 32-bit Unices, found by testing. If
  // getrlimit above returned a limit, bound it with this value. Otherwise
  // use this value directly.
 500   const julong max_virtual_limit = (julong)3800*M;
 501   if (result) {
 502     *limit = MIN2(*limit, max_virtual_limit);
 503   } else {
 504     *limit = max_virtual_limit;
 505   }
 506 
 507   // bound by actually allocatable memory. The algorithm uses two bounds, an
 508   // upper and a lower limit. The upper limit is the current highest amount of
 509   // memory that could not be allocated, the lower limit is the current highest
 510   // amount of memory that could be allocated.
 511   // The algorithm iteratively refines the result by halving the difference
 512   // between these limits, updating either the upper limit (if that value could
  // not be allocated) or the lower limit (if that value could be allocated)
 514   // until the difference between these limits is "small".
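  // Example (illustrative): starting with lower_limit == 1M and upper_limit == 3800M,
  // a successful allocation of 1900M moves the lower limit up to 1900M, a failed
  // allocation of 2850M moves the upper limit down to 2850M, and so on.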
 515 
 516   // the minimum amount of memory we care about allocating.
 517   const julong min_allocation_size = M;
 518 
 519   julong upper_limit = *limit;
 520 
 521   // first check a few trivial cases
 522   if (is_allocatable(upper_limit) || (upper_limit <= min_allocation_size)) {
 523     *limit = upper_limit;
 524   } else if (!is_allocatable(min_allocation_size)) {
 525     // we found that not even min_allocation_size is allocatable. Return it
 526     // anyway. There is no point to search for a better value any more.
 527     *limit = min_allocation_size;
 528   } else {
 529     // perform the binary search.
 530     julong lower_limit = min_allocation_size;
 531     while ((upper_limit - lower_limit) > min_allocation_size) {
 532       julong temp_limit = ((upper_limit - lower_limit) / 2) + lower_limit;
 533       temp_limit = align_down(temp_limit, min_allocation_size);
 534       if (is_allocatable(temp_limit)) {
 535         lower_limit = temp_limit;
 536       } else {
 537         upper_limit = temp_limit;
 538       }
 539     }
 540     *limit = lower_limit;
 541   }
 542   return true;
 543 #endif
 544 }
 545 
 546 const char* os::get_current_directory(char *buf, size_t buflen) {
 547   return getcwd(buf, buflen);
 548 }
 549 
 550 FILE* os::open(int fd, const char* mode) {
 551   return ::fdopen(fd, mode);
 552 }
 553 
 554 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
 555   return ::pread(fd, buf, nBytes, offset);
 556 }
 557 
 558 void os::flockfile(FILE* fp) {
 559   ::flockfile(fp);
 560 }
 561 
 562 void os::funlockfile(FILE* fp) {
 563   ::funlockfile(fp);
 564 }
 565 
 566 DIR* os::opendir(const char* dirname) {
 567   assert(dirname != NULL, "just checking");
 568   return ::opendir(dirname);
 569 }
 570 
 571 struct dirent* os::readdir(DIR* dirp) {
 572   assert(dirp != NULL, "just checking");
 573   return ::readdir(dirp);
 574 }
 575 
 576 int os::closedir(DIR *dirp) {
 577   assert(dirp != NULL, "just checking");
 578   return ::closedir(dirp);
 579 }
 580 
// Builds a platform-dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked-in agents.
 583 // Parameters:
 584 //            sym_name: Symbol in library we are looking for
 585 //            lib_name: Name of library to look in, NULL for shared libs.
 586 //            is_absolute_path == true if lib_name is absolute path to agent
 587 //                                     such as "/a/b/libL.so"
 588 //            == false if only the base name of the library is passed in
 589 //               such as "L"
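// Example (illustrative): on Linux, where JNI_LIB_PREFIX is "lib" and
// JNI_LIB_SUFFIX is ".so", sym_name "Agent_OnLoad" with lib_name "/a/b/libL.so"
// and is_absolute_path == true yields "Agent_OnLoad_L".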
 590 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
 591                                     bool is_absolute_path) {
 592   char *agent_entry_name;
 593   size_t len;
 594   size_t name_len;
 595   size_t prefix_len = strlen(JNI_LIB_PREFIX);
 596   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
 597   const char *start;
 598 
 599   if (lib_name != NULL) {
 600     name_len = strlen(lib_name);
 601     if (is_absolute_path) {
 602       // Need to strip path, prefix and suffix
 603       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
 604         lib_name = ++start;
 605       }
 606       if (strlen(lib_name) <= (prefix_len + suffix_len)) {
 607         return NULL;
 608       }
 609       lib_name += prefix_len;
 610       name_len = strlen(lib_name) - suffix_len;
 611     }
 612   }
 613   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
 614   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
 615   if (agent_entry_name == NULL) {
 616     return NULL;
 617   }
 618   strcpy(agent_entry_name, sym_name);
 619   if (lib_name != NULL) {
 620     strcat(agent_entry_name, "_");
 621     strncat(agent_entry_name, lib_name, name_len);
 622   }
 623   return agent_entry_name;
 624 }
 625 
 626 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
 627   assert(thread == Thread::current(),  "thread consistency check");
 628 
 629   ParkEvent * const slp = thread->_SleepEvent ;
 630   slp->reset() ;
 631   OrderAccess::fence() ;
 632 
 633   if (interruptible) {
 634     jlong prevtime = javaTimeNanos();
 635 
 636     for (;;) {
 637       if (os::is_interrupted(thread, true)) {
 638         return OS_INTRPT;
 639       }
 640 
 641       jlong newtime = javaTimeNanos();
 642 
 643       if (newtime - prevtime < 0) {
 644         // time moving backwards, should only happen if no monotonic clock
 645         // not a guarantee() because JVM should not abort on kernel/glibc bugs
 646         assert(!os::supports_monotonic_clock(), "unexpected time moving backwards detected in os::sleep(interruptible)");
 647       } else {
 648         millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
 649       }
 650 
 651       if (millis <= 0) {
 652         return OS_OK;
 653       }
 654 
 655       prevtime = newtime;
 656 
 657       {
 658         assert(thread->is_Java_thread(), "sanity check");
 659         JavaThread *jt = (JavaThread *) thread;
 660         ThreadBlockInVM tbivm(jt);
 661         OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
 662 
 663         jt->set_suspend_equivalent();
 664         // cleared by handle_special_suspend_equivalent_condition() or
 665         // java_suspend_self() via check_and_wait_while_suspended()
 666 
 667         slp->park(millis);
 668 
 669         // were we externally suspended while we were waiting?
 670         jt->check_and_wait_while_suspended();
 671       }
 672     }
 673   } else {
 674     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
 675     jlong prevtime = javaTimeNanos();
 676 
 677     for (;;) {
 678       // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
 679       // the 1st iteration ...
 680       jlong newtime = javaTimeNanos();
 681 
 682       if (newtime - prevtime < 0) {
 683         // time moving backwards, should only happen if no monotonic clock
 684         // not a guarantee() because JVM should not abort on kernel/glibc bugs
 685         assert(!os::supports_monotonic_clock(), "unexpected time moving backwards detected on os::sleep(!interruptible)");
 686       } else {
 687         millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
 688       }
 689 
 690       if (millis <= 0) break ;
 691 
 692       prevtime = newtime;
 693       slp->park(millis);
 694     }
 695     return OS_OK ;
 696   }
 697 }
 698 
 699 void os::naked_short_nanosleep(jlong ns) {
 700   struct timespec req;
  assert(ns > -1 && ns < NANOUNITS, "Uninterruptible sleep, short time use only");
 702   req.tv_sec = 0;
 703   req.tv_nsec = ns;
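  // The return value of ::nanosleep() is ignored here; if a signal interrupts the
  // sleep, the call may return early, which is acceptable for this short sleep.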
 704   ::nanosleep(&req, NULL);
 705   return;
 706 }
 707 
 708 void os::naked_short_sleep(jlong ms) {
  assert(ms < MILLIUNITS, "Uninterruptible sleep, short time use only");
 710   os::naked_short_nanosleep(ms * (NANOUNITS / MILLIUNITS));
 711   return;
 712 }
 713 
 714 ////////////////////////////////////////////////////////////////////////////////
 715 // interrupt support
 716 
 717 void os::interrupt(Thread* thread) {
 718   debug_only(Thread::check_for_dangling_thread_pointer(thread);)
 719 
 720   OSThread* osthread = thread->osthread();
 721 
 722   if (!osthread->interrupted()) {
 723     osthread->set_interrupted(true);
 724     // More than one thread can get here with the same value of osthread,
 725     // resulting in multiple notifications.  We do, however, want the store
 726     // to interrupted() to be visible to other threads before we execute unpark().
 727     OrderAccess::fence();
 728     ParkEvent * const slp = thread->_SleepEvent ;
 729     if (slp != NULL) slp->unpark() ;
 730   }
 731 
  // For JSR166. Unpark even if interrupt status was already set.
 733   if (thread->is_Java_thread())
 734     ((JavaThread*)thread)->parker()->unpark();
 735 
 736   ParkEvent * ev = thread->_ParkEvent ;
 737   if (ev != NULL) ev->unpark() ;
 738 }
 739 
 740 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
 741   debug_only(Thread::check_for_dangling_thread_pointer(thread);)
 742 
 743   OSThread* osthread = thread->osthread();
 744 
 745   bool interrupted = osthread->interrupted();
 746 
 747   // NOTE that since there is no "lock" around the interrupt and
 748   // is_interrupted operations, there is the possibility that the
 749   // interrupted flag (in osThread) will be "false" but that the
 750   // low-level events will be in the signaled state. This is
 751   // intentional. The effect of this is that Object.wait() and
 752   // LockSupport.park() will appear to have a spurious wakeup, which
 753   // is allowed and not harmful, and the possibility is so rare that
 754   // it is not worth the added complexity to add yet another lock.
 755   // For the sleep event an explicit reset is performed on entry
 756   // to os::sleep, so there is no early return. It has also been
 757   // recommended not to put the interrupted flag into the "event"
 758   // structure because it hides the issue.
 759   if (interrupted && clear_interrupted) {
 760     osthread->set_interrupted(false);
 761     // consider thread->_SleepEvent->reset() ... optional optimization
 762   }
 763 
 764   return interrupted;
 765 }
 766 
 767 
 768 
 769 static const struct {
 770   int sig; const char* name;
 771 }
 772  g_signal_info[] =
 773   {
 774   {  SIGABRT,     "SIGABRT" },
 775 #ifdef SIGAIO
 776   {  SIGAIO,      "SIGAIO" },
 777 #endif
 778   {  SIGALRM,     "SIGALRM" },
 779 #ifdef SIGALRM1
 780   {  SIGALRM1,    "SIGALRM1" },
 781 #endif
 782   {  SIGBUS,      "SIGBUS" },
 783 #ifdef SIGCANCEL
 784   {  SIGCANCEL,   "SIGCANCEL" },
 785 #endif
 786   {  SIGCHLD,     "SIGCHLD" },
 787 #ifdef SIGCLD
 788   {  SIGCLD,      "SIGCLD" },
 789 #endif
 790   {  SIGCONT,     "SIGCONT" },
 791 #ifdef SIGCPUFAIL
 792   {  SIGCPUFAIL,  "SIGCPUFAIL" },
 793 #endif
 794 #ifdef SIGDANGER
 795   {  SIGDANGER,   "SIGDANGER" },
 796 #endif
 797 #ifdef SIGDIL
 798   {  SIGDIL,      "SIGDIL" },
 799 #endif
 800 #ifdef SIGEMT
 801   {  SIGEMT,      "SIGEMT" },
 802 #endif
 803   {  SIGFPE,      "SIGFPE" },
 804 #ifdef SIGFREEZE
 805   {  SIGFREEZE,   "SIGFREEZE" },
 806 #endif
 807 #ifdef SIGGFAULT
 808   {  SIGGFAULT,   "SIGGFAULT" },
 809 #endif
 810 #ifdef SIGGRANT
 811   {  SIGGRANT,    "SIGGRANT" },
 812 #endif
 813   {  SIGHUP,      "SIGHUP" },
 814   {  SIGILL,      "SIGILL" },
 815   {  SIGINT,      "SIGINT" },
 816 #ifdef SIGIO
 817   {  SIGIO,       "SIGIO" },
 818 #endif
 819 #ifdef SIGIOINT
 820   {  SIGIOINT,    "SIGIOINT" },
 821 #endif
 822 #ifdef SIGIOT
 823 // SIGIOT is there for BSD compatibility, but on most Unices just a
 824 // synonym for SIGABRT. The result should be "SIGABRT", not
 825 // "SIGIOT".
 826 #if (SIGIOT != SIGABRT )
 827   {  SIGIOT,      "SIGIOT" },
 828 #endif
 829 #endif
 830 #ifdef SIGKAP
 831   {  SIGKAP,      "SIGKAP" },
 832 #endif
 833   {  SIGKILL,     "SIGKILL" },
 834 #ifdef SIGLOST
 835   {  SIGLOST,     "SIGLOST" },
 836 #endif
 837 #ifdef SIGLWP
 838   {  SIGLWP,      "SIGLWP" },
 839 #endif
 840 #ifdef SIGLWPTIMER
 841   {  SIGLWPTIMER, "SIGLWPTIMER" },
 842 #endif
 843 #ifdef SIGMIGRATE
 844   {  SIGMIGRATE,  "SIGMIGRATE" },
 845 #endif
 846 #ifdef SIGMSG
 847   {  SIGMSG,      "SIGMSG" },
 848 #endif
 849   {  SIGPIPE,     "SIGPIPE" },
 850 #ifdef SIGPOLL
 851   {  SIGPOLL,     "SIGPOLL" },
 852 #endif
 853 #ifdef SIGPRE
 854   {  SIGPRE,      "SIGPRE" },
 855 #endif
 856   {  SIGPROF,     "SIGPROF" },
 857 #ifdef SIGPTY
 858   {  SIGPTY,      "SIGPTY" },
 859 #endif
 860 #ifdef SIGPWR
 861   {  SIGPWR,      "SIGPWR" },
 862 #endif
 863   {  SIGQUIT,     "SIGQUIT" },
 864 #ifdef SIGRECONFIG
 865   {  SIGRECONFIG, "SIGRECONFIG" },
 866 #endif
 867 #ifdef SIGRECOVERY
 868   {  SIGRECOVERY, "SIGRECOVERY" },
 869 #endif
 870 #ifdef SIGRESERVE
 871   {  SIGRESERVE,  "SIGRESERVE" },
 872 #endif
 873 #ifdef SIGRETRACT
 874   {  SIGRETRACT,  "SIGRETRACT" },
 875 #endif
 876 #ifdef SIGSAK
 877   {  SIGSAK,      "SIGSAK" },
 878 #endif
 879   {  SIGSEGV,     "SIGSEGV" },
 880 #ifdef SIGSOUND
 881   {  SIGSOUND,    "SIGSOUND" },
 882 #endif
 883 #ifdef SIGSTKFLT
 884   {  SIGSTKFLT,    "SIGSTKFLT" },
 885 #endif
 886   {  SIGSTOP,     "SIGSTOP" },
 887   {  SIGSYS,      "SIGSYS" },
 888 #ifdef SIGSYSERROR
 889   {  SIGSYSERROR, "SIGSYSERROR" },
 890 #endif
 891 #ifdef SIGTALRM
 892   {  SIGTALRM,    "SIGTALRM" },
 893 #endif
 894   {  SIGTERM,     "SIGTERM" },
 895 #ifdef SIGTHAW
 896   {  SIGTHAW,     "SIGTHAW" },
 897 #endif
 898   {  SIGTRAP,     "SIGTRAP" },
 899 #ifdef SIGTSTP
 900   {  SIGTSTP,     "SIGTSTP" },
 901 #endif
 902   {  SIGTTIN,     "SIGTTIN" },
 903   {  SIGTTOU,     "SIGTTOU" },
 904 #ifdef SIGURG
 905   {  SIGURG,      "SIGURG" },
 906 #endif
 907   {  SIGUSR1,     "SIGUSR1" },
 908   {  SIGUSR2,     "SIGUSR2" },
 909 #ifdef SIGVIRT
 910   {  SIGVIRT,     "SIGVIRT" },
 911 #endif
 912   {  SIGVTALRM,   "SIGVTALRM" },
 913 #ifdef SIGWAITING
 914   {  SIGWAITING,  "SIGWAITING" },
 915 #endif
 916 #ifdef SIGWINCH
 917   {  SIGWINCH,    "SIGWINCH" },
 918 #endif
 919 #ifdef SIGWINDOW
 920   {  SIGWINDOW,   "SIGWINDOW" },
 921 #endif
 922   {  SIGXCPU,     "SIGXCPU" },
 923   {  SIGXFSZ,     "SIGXFSZ" },
 924 #ifdef SIGXRES
 925   {  SIGXRES,     "SIGXRES" },
 926 #endif
 927   { -1, NULL }
 928 };
 929 
// Writes the signal name into the provided buffer and returns the buffer.
// For valid but unknown signals "UNKNOWN" is written, for invalid signal
// numbers "INVALID".
 931 const char* os::Posix::get_signal_name(int sig, char* out, size_t outlen) {
 932 
 933   const char* ret = NULL;
 934 
 935 #ifdef SIGRTMIN
 936   if (sig >= SIGRTMIN && sig <= SIGRTMAX) {
 937     if (sig == SIGRTMIN) {
 938       ret = "SIGRTMIN";
 939     } else if (sig == SIGRTMAX) {
 940       ret = "SIGRTMAX";
 941     } else {
 942       jio_snprintf(out, outlen, "SIGRTMIN+%d", sig - SIGRTMIN);
 943       return out;
 944     }
 945   }
 946 #endif
 947 
 948   if (sig > 0) {
 949     for (int idx = 0; g_signal_info[idx].sig != -1; idx ++) {
 950       if (g_signal_info[idx].sig == sig) {
 951         ret = g_signal_info[idx].name;
 952         break;
 953       }
 954     }
 955   }
 956 
 957   if (!ret) {
 958     if (!is_valid_signal(sig)) {
 959       ret = "INVALID";
 960     } else {
 961       ret = "UNKNOWN";
 962     }
 963   }
 964 
 965   if (out && outlen > 0) {
 966     strncpy(out, ret, outlen);
 967     out[outlen - 1] = '\0';
 968   }
 969   return out;
 970 }
 971 
 972 int os::Posix::get_signal_number(const char* signal_name) {
 973   char tmp[30];
 974   const char* s = signal_name;
 975   if (s[0] != 'S' || s[1] != 'I' || s[2] != 'G') {
 976     jio_snprintf(tmp, sizeof(tmp), "SIG%s", signal_name);
 977     s = tmp;
 978   }
 979   for (int idx = 0; g_signal_info[idx].sig != -1; idx ++) {
 980     if (strcmp(g_signal_info[idx].name, s) == 0) {
 981       return g_signal_info[idx].sig;
 982     }
 983   }
 984   return -1;
 985 }
 986 
 987 int os::get_signal_number(const char* signal_name) {
 988   return os::Posix::get_signal_number(signal_name);
 989 }
 990 
 991 // Returns true if signal number is valid.
 992 bool os::Posix::is_valid_signal(int sig) {
  // macOS is not really POSIX compliant here: sigaddset does not return
  // an error for invalid signal numbers. However, macOS does not
  // support real time signals and simply seems to have just 33
  // signals with no holes in the signal range.
 997 #ifdef __APPLE__
 998   return sig >= 1 && sig < NSIG;
 999 #else
1000   // Use sigaddset to check for signal validity.
1001   sigset_t set;
1002   sigemptyset(&set);
1003   if (sigaddset(&set, sig) == -1 && errno == EINVAL) {
1004     return false;
1005   }
1006   return true;
1007 #endif
1008 }
1009 
1010 bool os::Posix::is_sig_ignored(int sig) {
1011   struct sigaction oact;
1012   sigaction(sig, (struct sigaction*)NULL, &oact);
1013   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
1014                                  : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
1015   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
1016     return true;
1017   } else {
1018     return false;
1019   }
1020 }
1021 
1022 // Returns:
1023 // NULL for an invalid signal number
1024 // "SIG<num>" for a valid but unknown signal number
1025 // signal name otherwise.
1026 const char* os::exception_name(int sig, char* buf, size_t size) {
1027   if (!os::Posix::is_valid_signal(sig)) {
1028     return NULL;
1029   }
1030   const char* const name = os::Posix::get_signal_name(sig, buf, size);
1031   if (strcmp(name, "UNKNOWN") == 0) {
1032     jio_snprintf(buf, size, "SIG%d", sig);
1033   }
1034   return buf;
1035 }
1036 
1037 #define NUM_IMPORTANT_SIGS 32
1038 // Returns one-line short description of a signal set in a user provided buffer.
1039 const char* os::Posix::describe_signal_set_short(const sigset_t* set, char* buffer, size_t buf_size) {
1040   assert(buf_size == (NUM_IMPORTANT_SIGS + 1), "wrong buffer size");
1041   // Note: for shortness, just print out the first 32. That should
1042   // cover most of the useful ones, apart from realtime signals.
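  // Example (illustrative): a set containing only SIGINT (signal 2) is rendered
  // as "01000000000000000000000000000000".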
1043   for (int sig = 1; sig <= NUM_IMPORTANT_SIGS; sig++) {
1044     const int rc = sigismember(set, sig);
1045     if (rc == -1 && errno == EINVAL) {
1046       buffer[sig-1] = '?';
1047     } else {
1048       buffer[sig-1] = rc == 0 ? '0' : '1';
1049     }
1050   }
1051   buffer[NUM_IMPORTANT_SIGS] = 0;
1052   return buffer;
1053 }
1054 
1055 // Prints one-line description of a signal set.
1056 void os::Posix::print_signal_set_short(outputStream* st, const sigset_t* set) {
1057   char buf[NUM_IMPORTANT_SIGS + 1];
1058   os::Posix::describe_signal_set_short(set, buf, sizeof(buf));
1059   st->print("%s", buf);
1060 }
1061 
1062 // Writes one-line description of a combination of sigaction.sa_flags into a user
1063 // provided buffer. Returns that buffer.
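// Example (illustrative): flags == (SA_RESTART | SA_SIGINFO) produces
// "SA_RESTART|SA_SIGINFO"; flags == 0 produces "none".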
1064 const char* os::Posix::describe_sa_flags(int flags, char* buffer, size_t size) {
1065   char* p = buffer;
1066   size_t remaining = size;
1067   bool first = true;
1068   int idx = 0;
1069 
1070   assert(buffer, "invalid argument");
1071 
1072   if (size == 0) {
1073     return buffer;
1074   }
1075 
1076   strncpy(buffer, "none", size);
1077 
1078   const struct {
    // NB: i is an unsigned int here because SA_RESETHAND is 0x80000000
    // on some systems, which is implicitly unsigned. Assigning it to an
    // int field would be an overflow in unsigned-to-signed conversion.
1083     unsigned int i;
1084     const char* s;
1085   } flaginfo [] = {
1086     { SA_NOCLDSTOP, "SA_NOCLDSTOP" },
1087     { SA_ONSTACK,   "SA_ONSTACK"   },
1088     { SA_RESETHAND, "SA_RESETHAND" },
1089     { SA_RESTART,   "SA_RESTART"   },
1090     { SA_SIGINFO,   "SA_SIGINFO"   },
1091     { SA_NOCLDWAIT, "SA_NOCLDWAIT" },
1092     { SA_NODEFER,   "SA_NODEFER"   },
1093 #ifdef AIX
1094     { SA_ONSTACK,   "SA_ONSTACK"   },
1095     { SA_OLDSTYLE,  "SA_OLDSTYLE"  },
1096 #endif
1097     { 0, NULL }
1098   };
1099 
1100   for (idx = 0; flaginfo[idx].s && remaining > 1; idx++) {
1101     if (flags & flaginfo[idx].i) {
1102       if (first) {
1103         jio_snprintf(p, remaining, "%s", flaginfo[idx].s);
1104         first = false;
1105       } else {
1106         jio_snprintf(p, remaining, "|%s", flaginfo[idx].s);
1107       }
1108       const size_t len = strlen(p);
1109       p += len;
1110       remaining -= len;
1111     }
1112   }
1113 
1114   buffer[size - 1] = '\0';
1115 
1116   return buffer;
1117 }
1118 
1119 // Prints one-line description of a combination of sigaction.sa_flags.
1120 void os::Posix::print_sa_flags(outputStream* st, int flags) {
1121   char buffer[0x100];
1122   os::Posix::describe_sa_flags(flags, buffer, sizeof(buffer));
1123   st->print("%s", buffer);
1124 }
1125 
1126 // Helper function for os::Posix::print_siginfo_...():
1127 // return a textual description for signal code.
1128 struct enum_sigcode_desc_t {
1129   const char* s_name;
1130   const char* s_desc;
1131 };
1132 
1133 static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t* out) {
1134 
1135   const struct {
1136     int sig; int code; const char* s_code; const char* s_desc;
1137   } t1 [] = {
1138     { SIGILL,  ILL_ILLOPC,   "ILL_ILLOPC",   "Illegal opcode." },
1139     { SIGILL,  ILL_ILLOPN,   "ILL_ILLOPN",   "Illegal operand." },
1140     { SIGILL,  ILL_ILLADR,   "ILL_ILLADR",   "Illegal addressing mode." },
1141     { SIGILL,  ILL_ILLTRP,   "ILL_ILLTRP",   "Illegal trap." },
1142     { SIGILL,  ILL_PRVOPC,   "ILL_PRVOPC",   "Privileged opcode." },
1143     { SIGILL,  ILL_PRVREG,   "ILL_PRVREG",   "Privileged register." },
1144     { SIGILL,  ILL_COPROC,   "ILL_COPROC",   "Coprocessor error." },
1145     { SIGILL,  ILL_BADSTK,   "ILL_BADSTK",   "Internal stack error." },
1146 #if defined(IA64) && defined(LINUX)
1147     { SIGILL,  ILL_BADIADDR, "ILL_BADIADDR", "Unimplemented instruction address" },
1148     { SIGILL,  ILL_BREAK,    "ILL_BREAK",    "Application Break instruction" },
1149 #endif
1150     { SIGFPE,  FPE_INTDIV,   "FPE_INTDIV",   "Integer divide by zero." },
1151     { SIGFPE,  FPE_INTOVF,   "FPE_INTOVF",   "Integer overflow." },
1152     { SIGFPE,  FPE_FLTDIV,   "FPE_FLTDIV",   "Floating-point divide by zero." },
1153     { SIGFPE,  FPE_FLTOVF,   "FPE_FLTOVF",   "Floating-point overflow." },
1154     { SIGFPE,  FPE_FLTUND,   "FPE_FLTUND",   "Floating-point underflow." },
1155     { SIGFPE,  FPE_FLTRES,   "FPE_FLTRES",   "Floating-point inexact result." },
1156     { SIGFPE,  FPE_FLTINV,   "FPE_FLTINV",   "Invalid floating-point operation." },
1157     { SIGFPE,  FPE_FLTSUB,   "FPE_FLTSUB",   "Subscript out of range." },
1158     { SIGSEGV, SEGV_MAPERR,  "SEGV_MAPERR",  "Address not mapped to object." },
1159     { SIGSEGV, SEGV_ACCERR,  "SEGV_ACCERR",  "Invalid permissions for mapped object." },
1160 #ifdef AIX
1161     // no explanation found what keyerr would be
1162     { SIGSEGV, SEGV_KEYERR,  "SEGV_KEYERR",  "key error" },
1163 #endif
1164 #if defined(IA64) && !defined(AIX)
1165     { SIGSEGV, SEGV_PSTKOVF, "SEGV_PSTKOVF", "Paragraph stack overflow" },
1166 #endif
1167 #if defined(__sparc) && defined(SOLARIS)
1168 // define Solaris Sparc M7 ADI SEGV signals
1169 #if !defined(SEGV_ACCADI)
1170 #define SEGV_ACCADI 3
1171 #endif
1172     { SIGSEGV, SEGV_ACCADI,  "SEGV_ACCADI",  "ADI not enabled for mapped object." },
1173 #if !defined(SEGV_ACCDERR)
1174 #define SEGV_ACCDERR 4
1175 #endif
1176     { SIGSEGV, SEGV_ACCDERR, "SEGV_ACCDERR", "ADI disrupting exception." },
1177 #if !defined(SEGV_ACCPERR)
1178 #define SEGV_ACCPERR 5
1179 #endif
1180     { SIGSEGV, SEGV_ACCPERR, "SEGV_ACCPERR", "ADI precise exception." },
1181 #endif // defined(__sparc) && defined(SOLARIS)
1182     { SIGBUS,  BUS_ADRALN,   "BUS_ADRALN",   "Invalid address alignment." },
1183     { SIGBUS,  BUS_ADRERR,   "BUS_ADRERR",   "Nonexistent physical address." },
1184     { SIGBUS,  BUS_OBJERR,   "BUS_OBJERR",   "Object-specific hardware error." },
1185     { SIGTRAP, TRAP_BRKPT,   "TRAP_BRKPT",   "Process breakpoint." },
1186     { SIGTRAP, TRAP_TRACE,   "TRAP_TRACE",   "Process trace trap." },
1187     { SIGCHLD, CLD_EXITED,   "CLD_EXITED",   "Child has exited." },
1188     { SIGCHLD, CLD_KILLED,   "CLD_KILLED",   "Child has terminated abnormally and did not create a core file." },
1189     { SIGCHLD, CLD_DUMPED,   "CLD_DUMPED",   "Child has terminated abnormally and created a core file." },
1190     { SIGCHLD, CLD_TRAPPED,  "CLD_TRAPPED",  "Traced child has trapped." },
1191     { SIGCHLD, CLD_STOPPED,  "CLD_STOPPED",  "Child has stopped." },
1192     { SIGCHLD, CLD_CONTINUED,"CLD_CONTINUED","Stopped child has continued." },
1193 #ifdef SIGPOLL
1194     { SIGPOLL, POLL_OUT,     "POLL_OUT",     "Output buffers available." },
1195     { SIGPOLL, POLL_MSG,     "POLL_MSG",     "Input message available." },
1196     { SIGPOLL, POLL_ERR,     "POLL_ERR",     "I/O error." },
1197     { SIGPOLL, POLL_PRI,     "POLL_PRI",     "High priority input available." },
    { SIGPOLL, POLL_HUP,     "POLL_HUP",     "Device disconnected." },
1199 #endif
1200     { -1, -1, NULL, NULL }
1201   };
1202 
1203   // Codes valid in any signal context.
1204   const struct {
1205     int code; const char* s_code; const char* s_desc;
1206   } t2 [] = {
1207     { SI_USER,      "SI_USER",     "Signal sent by kill()." },
    { SI_QUEUE,     "SI_QUEUE",    "Signal sent by sigqueue()." },
1209     { SI_TIMER,     "SI_TIMER",    "Signal generated by expiration of a timer set by timer_settime()." },
1210     { SI_ASYNCIO,   "SI_ASYNCIO",  "Signal generated by completion of an asynchronous I/O request." },
1211     { SI_MESGQ,     "SI_MESGQ",    "Signal generated by arrival of a message on an empty message queue." },
1212     // Linux specific
1213 #ifdef SI_TKILL
1214     { SI_TKILL,     "SI_TKILL",    "Signal sent by tkill (pthread_kill)" },
1215 #endif
1216 #ifdef SI_DETHREAD
1217     { SI_DETHREAD,  "SI_DETHREAD", "Signal sent by execve() killing subsidiary threads" },
1218 #endif
1219 #ifdef SI_KERNEL
1220     { SI_KERNEL,    "SI_KERNEL",   "Signal sent by kernel." },
1221 #endif
1222 #ifdef SI_SIGIO
1223     { SI_SIGIO,     "SI_SIGIO",    "Signal sent by queued SIGIO" },
1224 #endif
1225 
1226 #ifdef AIX
1227     { SI_UNDEFINED, "SI_UNDEFINED","siginfo contains partial information" },
1228     { SI_EMPTY,     "SI_EMPTY",    "siginfo contains no useful information" },
1229 #endif
1230 
1231 #ifdef __sun
1232     { SI_NOINFO,    "SI_NOINFO",   "No signal information" },
1233     { SI_RCTL,      "SI_RCTL",     "kernel generated signal via rctl action" },
1234     { SI_LWP,       "SI_LWP",      "Signal sent via lwp_kill" },
1235 #endif
1236 
1237     { -1, NULL, NULL }
1238   };
1239 
1240   const char* s_code = NULL;
1241   const char* s_desc = NULL;
1242 
1243   for (int i = 0; t1[i].sig != -1; i ++) {
1244     if (t1[i].sig == si->si_signo && t1[i].code == si->si_code) {
1245       s_code = t1[i].s_code;
1246       s_desc = t1[i].s_desc;
1247       break;
1248     }
1249   }
1250 
1251   if (s_code == NULL) {
1252     for (int i = 0; t2[i].s_code != NULL; i ++) {
1253       if (t2[i].code == si->si_code) {
1254         s_code = t2[i].s_code;
1255         s_desc = t2[i].s_desc;
1256       }
1257     }
1258   }
1259 
1260   if (s_code == NULL) {
1261     out->s_name = "unknown";
1262     out->s_desc = "unknown";
1263     return false;
1264   }
1265 
1266   out->s_name = s_code;
1267   out->s_desc = s_desc;
1268 
1269   return true;
1270 }
1271 
1272 void os::print_siginfo(outputStream* os, const void* si0) {
1273 
1274   const siginfo_t* const si = (const siginfo_t*) si0;
1275 
1276   char buf[20];
1277   os->print("siginfo:");
1278 
1279   if (!si) {
1280     os->print(" <null>");
1281     return;
1282   }
1283 
1284   const int sig = si->si_signo;
1285 
1286   os->print(" si_signo: %d (%s)", sig, os::Posix::get_signal_name(sig, buf, sizeof(buf)));
1287 
1288   enum_sigcode_desc_t ed;
1289   get_signal_code_description(si, &ed);
1290   os->print(", si_code: %d (%s)", si->si_code, ed.s_name);
1291 
1292   if (si->si_errno) {
1293     os->print(", si_errno: %d", si->si_errno);
1294   }
1295 
1296   // Output additional information depending on the signal code.
1297 
1298   // Note: Many implementations lump si_addr, si_pid, si_uid etc. together as unions,
1299   // so it depends on the context which member to use. For synchronous error signals,
1300   // we print si_addr, unless the signal was sent by another process or thread, in
1301   // which case we print out pid or tid of the sender.
1302   if (si->si_code == SI_USER || si->si_code == SI_QUEUE) {
1303     const pid_t pid = si->si_pid;
1304     os->print(", si_pid: %ld", (long) pid);
1305     if (IS_VALID_PID(pid)) {
1306       const pid_t me = getpid();
1307       if (me == pid) {
1308         os->print(" (current process)");
1309       }
1310     } else {
1311       os->print(" (invalid)");
1312     }
1313     os->print(", si_uid: %ld", (long) si->si_uid);
1314     if (sig == SIGCHLD) {
1315       os->print(", si_status: %d", si->si_status);
1316     }
1317   } else if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
1318              sig == SIGTRAP || sig == SIGFPE) {
1319     os->print(", si_addr: " PTR_FORMAT, p2i(si->si_addr));
1320 #ifdef SIGPOLL
1321   } else if (sig == SIGPOLL) {
1322     os->print(", si_band: %ld", si->si_band);
1323 #endif
1324   }
1325 
1326 }
1327 
1328 int os::Posix::unblock_thread_signal_mask(const sigset_t *set) {
1329   return pthread_sigmask(SIG_UNBLOCK, set, NULL);
1330 }
1331 
1332 address os::Posix::ucontext_get_pc(const ucontext_t* ctx) {
1333 #if defined(AIX)
1334    return Aix::ucontext_get_pc(ctx);
1335 #elif defined(BSD)
1336    return Bsd::ucontext_get_pc(ctx);
1337 #elif defined(LINUX)
1338    return Linux::ucontext_get_pc(ctx);
1339 #elif defined(SOLARIS)
1340    return Solaris::ucontext_get_pc(ctx);
1341 #else
1342    VMError::report_and_die("unimplemented ucontext_get_pc");
1343 #endif
1344 }
1345 
1346 void os::Posix::ucontext_set_pc(ucontext_t* ctx, address pc) {
1347 #if defined(AIX)
1348    Aix::ucontext_set_pc(ctx, pc);
1349 #elif defined(BSD)
1350    Bsd::ucontext_set_pc(ctx, pc);
1351 #elif defined(LINUX)
1352    Linux::ucontext_set_pc(ctx, pc);
1353 #elif defined(SOLARIS)
1354    Solaris::ucontext_set_pc(ctx, pc);
1355 #else
   VMError::report_and_die("unimplemented ucontext_set_pc");
1357 #endif
1358 }
1359 
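// Writes a one-line, human-readable summary of the given pthread_attr_t into
// 'buf' and returns 'buf'. Example output (illustrative):
//   "stacksize: 1024k, guardsize: 4k, detached"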
1360 char* os::Posix::describe_pthread_attr(char* buf, size_t buflen, const pthread_attr_t* attr) {
1361   size_t stack_size = 0;
1362   size_t guard_size = 0;
1363   int detachstate = 0;
1364   pthread_attr_getstacksize(attr, &stack_size);
1365   pthread_attr_getguardsize(attr, &guard_size);
1366   // Work around linux NPTL implementation error, see also os::create_thread() in os_linux.cpp.
1367   LINUX_ONLY(stack_size -= guard_size);
1368   pthread_attr_getdetachstate(attr, &detachstate);
1369   jio_snprintf(buf, buflen, "stacksize: " SIZE_FORMAT "k, guardsize: " SIZE_FORMAT "k, %s",
1370     stack_size / 1024, guard_size / 1024,
1371     (detachstate == PTHREAD_CREATE_DETACHED ? "detached" : "joinable"));
1372   return buf;
1373 }
1374 
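// Canonicalizes 'filename' into the caller-provided buffer 'outbuf' of size
// 'outbuflen' and returns 'outbuf'. On error, returns NULL and sets errno
// (e.g. ENAMETOOLONG if the resolved path does not fit into the buffer).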
1375 char* os::Posix::realpath(const char* filename, char* outbuf, size_t outbuflen) {
1376 
1377   if (filename == NULL || outbuf == NULL || outbuflen < 1) {
1378     assert(false, "os::Posix::realpath: invalid arguments.");
1379     errno = EINVAL;
1380     return NULL;
1381   }
1382 
1383   char* result = NULL;
1384 
1385   // This assumes platform realpath() is implemented according to POSIX.1-2008.
  // POSIX.1-2008 allows specifying NULL for the output buffer, in which case the
  // output buffer is dynamically allocated and must be ::free()'d by the caller.
1388   char* p = ::realpath(filename, NULL);
1389   if (p != NULL) {
1390     if (strlen(p) < outbuflen) {
1391       strcpy(outbuf, p);
1392       result = outbuf;
1393     } else {
1394       errno = ENAMETOOLONG;
1395     }
1396     ::free(p); // *not* os::free
1397   } else {
    // Fallback for platforms struggling with modern POSIX standards (AIX 5.3, 6.1). If realpath
    // returns EINVAL, this may indicate that realpath is not POSIX.1-2008 compatible and
    // that it complains about the NULL we handed down as the user buffer.
    // In this case, use the user-provided buffer but at least check whether realpath caused
    // a memory overwrite.
1403     if (errno == EINVAL) {
1404       outbuf[outbuflen - 1] = '\0';
1405       p = ::realpath(filename, outbuf);
1406       if (p != NULL) {
1407         guarantee(outbuf[outbuflen - 1] == '\0', "realpath buffer overwrite detected.");
1408         result = p;
1409       }
1410     }
1411   }
1412   return result;
1413 
1414 }
1415 
1416 int os::stat(const char *path, struct stat *sbuf) {
1417   return ::stat(path, sbuf);
1418 }
1419 
1420 char * os::native_path(char *path) {
1421   return path;
1422 }
1423 
1424 // Check minimum allowable stack sizes for thread creation and to initialize
1425 // the java system classes, including StackOverflowError - depends on page
1426 // size.
1427 // The space needed for frames during startup is platform dependent. It
1428 // depends on word size, platform calling conventions, C frame layout and
1429 // interpreter/C1/C2 design decisions. Therefore this is given in a
1430 // platform (os/cpu) dependent constant.
// To this, space for guard mechanisms is added, which depends on the
// page size, which in turn depends on the concrete system the VM is
// running on. Space for libc guard pages is not included in this size.
1434 jint os::Posix::set_minimum_stack_sizes() {
1435   size_t os_min_stack_allowed = SOLARIS_ONLY(thr_min_stack()) NOT_SOLARIS(PTHREAD_STACK_MIN);
1436 
1437   _java_thread_min_stack_allowed = _java_thread_min_stack_allowed +
1438                                    JavaThread::stack_guard_zone_size() +
1439                                    JavaThread::stack_shadow_zone_size();
1440 
1441   _java_thread_min_stack_allowed = align_up(_java_thread_min_stack_allowed, vm_page_size());
1442   _java_thread_min_stack_allowed = MAX2(_java_thread_min_stack_allowed, os_min_stack_allowed);
1443 
1444   size_t stack_size_in_bytes = ThreadStackSize * K;
1445   if (stack_size_in_bytes != 0 &&
1446       stack_size_in_bytes < _java_thread_min_stack_allowed) {
1447     // The '-Xss' and '-XX:ThreadStackSize=N' options both set
1448     // ThreadStackSize so we go with "Java thread stack size" instead
1449     // of "ThreadStackSize" to be more friendly.
1450     tty->print_cr("\nThe Java thread stack size specified is too small. "
1451                   "Specify at least " SIZE_FORMAT "k",
1452                   _java_thread_min_stack_allowed / K);
1453     return JNI_ERR;
1454   }
1455 
1456   // Make the stack size a multiple of the page size so that
1457   // the yellow/red zones can be guarded.
1458   JavaThread::set_stack_size_at_create(align_up(stack_size_in_bytes, vm_page_size()));
1459 
1460   // Reminder: a compiler thread is a Java thread.
1461   _compiler_thread_min_stack_allowed = _compiler_thread_min_stack_allowed +
1462                                        JavaThread::stack_guard_zone_size() +
1463                                        JavaThread::stack_shadow_zone_size();
1464 
1465   _compiler_thread_min_stack_allowed = align_up(_compiler_thread_min_stack_allowed, vm_page_size());
1466   _compiler_thread_min_stack_allowed = MAX2(_compiler_thread_min_stack_allowed, os_min_stack_allowed);
1467 
1468   stack_size_in_bytes = CompilerThreadStackSize * K;
1469   if (stack_size_in_bytes != 0 &&
1470       stack_size_in_bytes < _compiler_thread_min_stack_allowed) {
1471     tty->print_cr("\nThe CompilerThreadStackSize specified is too small. "
1472                   "Specify at least " SIZE_FORMAT "k",
1473                   _compiler_thread_min_stack_allowed / K);
1474     return JNI_ERR;
1475   }
1476 
1477   _vm_internal_thread_min_stack_allowed = align_up(_vm_internal_thread_min_stack_allowed, vm_page_size());
1478   _vm_internal_thread_min_stack_allowed = MAX2(_vm_internal_thread_min_stack_allowed, os_min_stack_allowed);
1479 
1480   stack_size_in_bytes = VMThreadStackSize * K;
1481   if (stack_size_in_bytes != 0 &&
1482       stack_size_in_bytes < _vm_internal_thread_min_stack_allowed) {
1483     tty->print_cr("\nThe VMThreadStackSize specified is too small. "
1484                   "Specify at least " SIZE_FORMAT "k",
1485                   _vm_internal_thread_min_stack_allowed / K);
1486     return JNI_ERR;
1487   }
1488   return JNI_OK;
1489 }
1490 
// Called when creating the thread. The minimum stack sizes have already been calculated.
1492 size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) {
1493   size_t stack_size;
1494   if (req_stack_size == 0) {
1495     stack_size = default_stack_size(thr_type);
1496   } else {
1497     stack_size = req_stack_size;
1498   }
1499 
1500   switch (thr_type) {
1501   case os::java_thread:
    // Java threads use ThreadStackSize, whose default value can be
    // changed with the flag -Xss.
1504     if (req_stack_size == 0 && JavaThread::stack_size_at_create() > 0) {
1505       // no requested size and we have a more specific default value
1506       stack_size = JavaThread::stack_size_at_create();
1507     }
1508     stack_size = MAX2(stack_size,
1509                       _java_thread_min_stack_allowed);
1510     break;
1511   case os::compiler_thread:
1512     if (req_stack_size == 0 && CompilerThreadStackSize > 0) {
1513       // no requested size and we have a more specific default value
1514       stack_size = (size_t)(CompilerThreadStackSize * K);
1515     }
1516     stack_size = MAX2(stack_size,
1517                       _compiler_thread_min_stack_allowed);
1518     break;
1519   case os::vm_thread:
1520   case os::pgc_thread:
1521   case os::cgc_thread:
1522   case os::watcher_thread:
1523   default:  // presume the unknown thr_type is a VM internal
1524     if (req_stack_size == 0 && VMThreadStackSize > 0) {
1525       // no requested size and we have a more specific default value
1526       stack_size = (size_t)(VMThreadStackSize * K);
1527     }
1528 
1529     stack_size = MAX2(stack_size,
1530                       _vm_internal_thread_min_stack_allowed);
1531     break;
1532   }
1533 
1534   // pthread_attr_setstacksize() may require that the size be rounded up to the OS page size.
1535   // Be careful not to round up to 0. Align down in that case.
1536   if (stack_size <= SIZE_MAX - vm_page_size()) {
1537     stack_size = align_up(stack_size, vm_page_size());
1538   } else {
1539     stack_size = align_down(stack_size, vm_page_size());
1540   }
1541 
1542   return stack_size;
1543 }
1544 
1545 bool os::Posix::is_root(uid_t uid) {
1546   return ROOT_UID == uid;
1547 }
1548 
1549 bool os::Posix::matches_effective_uid_or_root(uid_t uid) {
1550   return is_root(uid) || geteuid() == uid;
1551 }
1552 
1553 bool os::Posix::matches_effective_uid_and_gid_or_root(uid_t uid, gid_t gid) {
1554   return is_root(uid) || (geteuid() == uid && getegid() == gid);
1555 }
1556 
1557 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
1558 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
1559 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
1560 
1561 os::ThreadCrashProtection::ThreadCrashProtection() {
1562 }
1563 
1564 /*
1565  * See the caveats for this class in os_posix.hpp.
1566  * Protects the callback call so that a raised SIGSEGV / SIGBUS jumps back
1567  * into this method and returns false. If neither signal is raised, returns true.
1568  * The callback is supposed to provide the method that should be protected.
1569  */
1570 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
1571   sigset_t saved_sig_mask;
1572 
1573   Thread::muxAcquire(&_crash_mux, "CrashProtection");
1574 
1575   _protected_thread = Thread::current_or_null();
1576   assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");
1577 
1578   // we cannot rely on sigsetjmp/siglongjmp to save/restore the signal mask
1579   // since on at least some systems (OS X) siglongjmp will restore the mask
1580   // for the process, not the thread
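       // Passing a NULL "set" just queries the current mask (the "how" argument
       // is ignored), giving us the mask to restore after a siglongjmp.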
1581   pthread_sigmask(0, NULL, &saved_sig_mask);
1582   if (sigsetjmp(_jmpbuf, 0) == 0) {
1583     // make sure we can see in the signal handler that we have crash protection
1584     // installed
1585     _crash_protection = this;
1586     cb.call();
1587     // and clear the crash protection
1588     _crash_protection = NULL;
1589     _protected_thread = NULL;
1590     Thread::muxRelease(&_crash_mux);
1591     return true;
1592   }
1593   // this happens when we siglongjmp() back
1594   pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL);
1595   _crash_protection = NULL;
1596   _protected_thread = NULL;
1597   Thread::muxRelease(&_crash_mux);
1598   return false;
1599 }
1600 
1601 void os::ThreadCrashProtection::restore() {
1602   assert(_crash_protection != NULL, "must have crash protection");
1603   siglongjmp(_jmpbuf, 1);
1604 }
1605 
1606 void os::ThreadCrashProtection::check_crash_protection(int sig,
1607     Thread* thread) {
1608 
1609   if (thread != NULL &&
1610       thread == _protected_thread &&
1611       _crash_protection != NULL) {
1612 
1613     if (sig == SIGSEGV || sig == SIGBUS) {
1614       _crash_protection->restore();
1615     }
1616   }
1617 }
1618 
1619 // Shared clock/time and other supporting routines for pthread_mutex/cond
1620 // initialization. This is enabled on Solaris but only some of the clock/time
1621 // functionality is actually used there.
1622 
1623 // Shared condattr object for use with relative timed-waits. Will be associated
1624 // with CLOCK_MONOTONIC if available to avoid issues with time-of-day changes,
1625 // but otherwise whatever default is used by the platform - generally the
1626 // time-of-day clock.
1627 static pthread_condattr_t _condAttr[1];
1628 
1629 // Shared mutexattr to explicitly set the type to PTHREAD_MUTEX_NORMAL as not
1630 // all systems (e.g. FreeBSD) map the default to "normal".
1631 static pthread_mutexattr_t _mutexAttr[1];
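     // Note: these are declared as one-element arrays so that the names decay
     // to pointers and can be passed directly to the pthread_*_init calls.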
1632 
1633 // common basic initialization that is always supported
1634 static void pthread_init_common(void) {
1635   int status;
1636   if ((status = pthread_condattr_init(_condAttr)) != 0) {
1637     fatal("pthread_condattr_init: %s", os::strerror(status));
1638   }
1639   if ((status = pthread_mutexattr_init(_mutexAttr)) != 0) {
1640     fatal("pthread_mutexattr_init: %s", os::strerror(status));
1641   }
1642   if ((status = pthread_mutexattr_settype(_mutexAttr, PTHREAD_MUTEX_NORMAL)) != 0) {
1643     fatal("pthread_mutexattr_settype: %s", os::strerror(status));
1644   }
1645 }
1646 
1647 // Not all POSIX types and APIs are available on all notionally "POSIX"
1648 // platforms. If we have build-time support then we will check for actual
1649 // runtime support via dlopen/dlsym lookup. This allows for running on an
1650 // older OS version compared to the build platform. But if there is no
1651 // build-time support then there cannot be any runtime support, as we do not
1652 // know what the runtime types would be (for example clockid_t might be an
1653 // int or int64_t).
1654 //
1655 #ifdef SUPPORTS_CLOCK_MONOTONIC
1656 
1657 // This means we have clockid_t, clock_gettime et al and CLOCK_MONOTONIC
1658 
1659 int (*os::Posix::_clock_gettime)(clockid_t, struct timespec *) = NULL;
1660 int (*os::Posix::_clock_getres)(clockid_t, struct timespec *) = NULL;
1661 
1662 static int (*_pthread_condattr_setclock)(pthread_condattr_t *, clockid_t) = NULL;
1663 
1664 static bool _use_clock_monotonic_condattr = false;
1665 
1666 // Determine which POSIX APIs are present and do the appropriate
1667 // configuration.
1668 void os::Posix::init(void) {
1669 
1670   // NOTE: no logging available when this is called. Put logging
1671   // statements in init_2().
1672 
1673   // 1. Check for CLOCK_MONOTONIC support.
1674 
1675   void* handle = NULL;
1676 
1677   // On Linux we need librt; on other OSes these functions are found in
1678   // the regular libc.
1679 #ifdef NEEDS_LIBRT
1680   // We do the dlopens in this particular order due to a bug in the Linux
1681   // dynamic loader (see 6348968) that leads to a crash on exit.
1682   handle = dlopen("librt.so.1", RTLD_LAZY);
1683   if (handle == NULL) {
1684     handle = dlopen("librt.so", RTLD_LAZY);
1685   }
1686 #endif
1687 
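       // If librt was not needed or could not be loaded, fall back to looking
       // the symbols up among the objects already loaded into the process.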
1688   if (handle == NULL) {
1689     handle = RTLD_DEFAULT;
1690   }
1691 
1692   int (*clock_getres_func)(clockid_t, struct timespec*) =
1693     (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
1694   int (*clock_gettime_func)(clockid_t, struct timespec*) =
1695     (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime");
1696   if (clock_getres_func != NULL && clock_gettime_func != NULL) {
1697     // We assume that if both clock_gettime and clock_getres support
1698     // CLOCK_MONOTONIC then the OS provides a true high-res monotonic clock.
1699     struct timespec res;
1700     struct timespec tp;
1701     if (clock_getres_func(CLOCK_MONOTONIC, &res) == 0 &&
1702         clock_gettime_func(CLOCK_MONOTONIC, &tp) == 0) {
1703       // Yes, monotonic clock is supported.
1704       _clock_gettime = clock_gettime_func;
1705       _clock_getres = clock_getres_func;
1706     } else {
1707 #ifdef NEEDS_LIBRT
1708       // Close librt if there is no monotonic clock.
1709       if (handle != RTLD_DEFAULT) {
1710         dlclose(handle);
1711       }
1712 #endif
1713     }
1714   }
1715 
1716   // 2. Check for pthread_condattr_setclock support.
1717 
1718   // libpthread is already loaded.
1719   int (*condattr_setclock_func)(pthread_condattr_t*, clockid_t) =
1720     (int (*)(pthread_condattr_t*, clockid_t))dlsym(RTLD_DEFAULT,
1721                                                    "pthread_condattr_setclock");
1722   if (condattr_setclock_func != NULL) {
1723     _pthread_condattr_setclock = condattr_setclock_func;
1724   }
1725 
1726   // Now do general initialization.
1727 
1728   pthread_init_common();
1729 
1730 #ifndef SOLARIS
1731   int status;
1732   if (_pthread_condattr_setclock != NULL && _clock_gettime != NULL) {
1733     if ((status = _pthread_condattr_setclock(_condAttr, CLOCK_MONOTONIC)) != 0) {
1734       if (status == EINVAL) {
1735         _use_clock_monotonic_condattr = false;
1736         warning("Unable to use monotonic clock with relative timed-waits" \
1737                 " - changes to the time-of-day clock may have adverse affects");
1738       } else {
1739         fatal("pthread_condattr_setclock: %s", os::strerror(status));
1740       }
1741     } else {
1742       _use_clock_monotonic_condattr = true;
1743     }
1744   }
1745 #endif // !SOLARIS
1746 
1747 }
1748 
1749 void os::Posix::init_2(void) {
1750 #ifndef SOLARIS
1751   log_info(os)("Use of CLOCK_MONOTONIC is%s supported",
1752                (_clock_gettime != NULL ? "" : " not"));
1753   log_info(os)("Use of pthread_condattr_setclock is%s supported",
1754                (_pthread_condattr_setclock != NULL ? "" : " not"));
1755   log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with %s",
1756                _use_clock_monotonic_condattr ? "CLOCK_MONOTONIC" : "the default clock");
1757 #endif // !SOLARIS
1758 }
1759 
1760 #else // !SUPPORTS_CLOCK_MONOTONIC
1761 
1762 void os::Posix::init(void) {
1763   pthread_init_common();
1764 }
1765 
1766 void os::Posix::init_2(void) {
1767 #ifndef SOLARIS
1768   log_info(os)("Use of CLOCK_MONOTONIC is not supported");
1769   log_info(os)("Use of pthread_condattr_setclock is not supported");
1770   log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with the default clock");
1771 #endif // !SOLARIS
1772 }
1773 
1774 #endif // SUPPORTS_CLOCK_MONOTONIC
1775 
1776 // Utility to convert the given timeout to an absolute timespec
1777 // (based on the appropriate clock) for use with pthread_cond_timedwait()
1778 // and sem_timedwait().
1779 // The clock queried here must be the clock used to manage the
1780 // timeout of the condition variable or semaphore.
1781 //
1782 // The passed in timeout value is either a relative time in nanoseconds
1783 // or an absolute time in milliseconds. A relative timeout will be
1784 // associated with CLOCK_MONOTONIC if available, unless the real-time clock
1785 // is explicitly requested; otherwise, or if absolute,
1786 // the default time-of-day clock will be used.
1787 
1788 // Because the given time is a 64-bit value and the time_t used in the
1789 // timespec is sometimes a signed 32-bit value, we have to watch for
1790 // overflow if times far in the future are given. Further, on Solaris
1791 // versions prior to 10 there is a restriction (see cond_timedwait) that the
1792 // specified number of seconds, in abstime, is less than current_time + 100000000.
1793 // As it will be over 20 years before "now + 100000000" overflows, we can
1794 // ignore overflow and just impose a hard limit on seconds using the value
1795 // of "now + 100000000". This places a limit on the timeout of about 3.17
1796 // years from "now".
1797 //
1798 #define MAX_SECS 100000000
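     // (100000000 seconds / 86400 / 365.25 is roughly 3.17 years.)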
1799 
1800 // Calculate a new absolute time that is "timeout" nanoseconds from "now".
1801 // "unit" indicates the unit of "now_part_sec" (may be nanos or micros depending
1802 // on which clock API is being used).
1803 static void calc_rel_time(timespec* abstime, jlong timeout, jlong now_sec,
1804                           jlong now_part_sec, jlong unit) {
1805   time_t max_secs = now_sec + MAX_SECS;
1806 
1807   jlong seconds = timeout / NANOUNITS;
1808   timeout %= NANOUNITS; // remaining nanos
1809 
1810   if (seconds >= MAX_SECS) {
1811     // More seconds than we can add, so pin to max_secs.
1812     abstime->tv_sec = max_secs;
1813     abstime->tv_nsec = 0;
1814   } else {
1815     abstime->tv_sec = now_sec  + seconds;
1816     long nanos = (now_part_sec * (NANOUNITS / unit)) + timeout;
1817     if (nanos >= NANOUNITS) { // overflow
1818       abstime->tv_sec += 1;
1819       nanos -= NANOUNITS;
1820     }
1821     abstime->tv_nsec = nanos;
1822   }
1823 }
1824 
1825 // Unpack the given deadline in milliseconds since the epoch, into the given timespec.
1826 // The current time in seconds is also passed in to enforce an upper bound as discussed above.
1827 // This is only used in the gettimeofday path; absolute deadlines always use the time-of-day clock.
1828 static void unpack_abs_time(timespec* abstime, jlong deadline, jlong now_sec) {
1829   time_t max_secs = now_sec + MAX_SECS;
1830 
1831   jlong seconds = deadline / MILLIUNITS;
1832   jlong millis = deadline % MILLIUNITS;
1833 
1834   if (seconds >= max_secs) {
1835     // Absolute seconds exceeds allowed max, so pin to max_secs.
1836     abstime->tv_sec = max_secs;
1837     abstime->tv_nsec = 0;
1838   } else {
1839     abstime->tv_sec = seconds;
1840     abstime->tv_nsec = millis * (NANOUNITS / MILLIUNITS);
1841   }
1842 }
1843 
1844 static jlong millis_to_nanos(jlong millis) {
1845   // We have to watch for overflow when converting millis to nanos,
1846   // but if millis is that large then we will end up limiting to
1847   // MAX_SECS anyway, so just do that here.
1848   if (millis / MILLIUNITS > MAX_SECS) {
1849     millis = jlong(MAX_SECS) * MILLIUNITS;
1850   }
1851   return millis * (NANOUNITS / MILLIUNITS);
1852 }
1853 
1854 static void to_abstime(timespec* abstime, jlong timeout,
1855                        bool isAbsolute, bool isRealtime) {
1856   DEBUG_ONLY(int max_secs = MAX_SECS;)
1857 
1858   if (timeout < 0) {
1859     timeout = 0;
1860   }
1861 
1862 #ifdef SUPPORTS_CLOCK_MONOTONIC
1863 
1864   clockid_t clock = CLOCK_MONOTONIC;
1865   // need to ensure we have a runtime check for clock_gettime support
1866   if (!isAbsolute && os::Posix::supports_monotonic_clock()) {
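         // A relative wait normally uses CLOCK_MONOTONIC, but falls back to
         // CLOCK_REALTIME if the condvar is not associated with the monotonic
         // clock or if the caller explicitly asked for the real-time clock.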
1867     if (!_use_clock_monotonic_condattr || isRealtime) {
1868       clock = CLOCK_REALTIME;
1869     }
1870     struct timespec now;
1871     int status = os::Posix::clock_gettime(clock, &now);
1872     assert_status(status == 0, status, "clock_gettime");
1873     calc_rel_time(abstime, timeout, now.tv_sec, now.tv_nsec, NANOUNITS);
1874     DEBUG_ONLY(max_secs += now.tv_sec;)
1875   } else {
1876 
1877 #else
1878 
1879   { // Match the block scope.
1880 
1881 #endif // SUPPORTS_CLOCK_MONOTONIC
1882 
1883     // Time-of-day clock is all we can reliably use.
1884     struct timeval now;
1885     int status = gettimeofday(&now, NULL);
1886     assert_status(status == 0, errno, "gettimeofday");
1887     if (isAbsolute) {
1888       unpack_abs_time(abstime, timeout, now.tv_sec);
1889     } else {
1890       calc_rel_time(abstime, timeout, now.tv_sec, now.tv_usec, MICROUNITS);
1891     }
1892     DEBUG_ONLY(max_secs += now.tv_sec;)
1893   }
1894 
1895   assert(abstime->tv_sec >= 0, "tv_sec < 0");
1896   assert(abstime->tv_sec <= max_secs, "tv_sec > max_secs");
1897   assert(abstime->tv_nsec >= 0, "tv_nsec < 0");
1898   assert(abstime->tv_nsec < NANOUNITS, "tv_nsec >= NANOUNITS");
1899 }
1900 
1901 // Create an absolute time 'millis' milliseconds in the future, using the
1902 // real-time (time-of-day) clock. Used by PosixSemaphore.
1903 void os::Posix::to_RTC_abstime(timespec* abstime, int64_t millis) {
1904   to_abstime(abstime, millis_to_nanos(millis),
1905              false /* not absolute */,
1906              true  /* use real-time clock */);
1907 }
1908 
1909 // Shared pthread_mutex/cond based PlatformEvent implementation.
1910 // Not currently usable by Solaris.
1911 
1912 #ifndef SOLARIS
1913 
1914 // PlatformEvent
1915 //
1916 // Assumption:
1917 //    Only one parker can exist on an event, which is why we allocate
1918 //    them per-thread. Multiple unparkers can coexist.
1919 //
1920 // _event serves as a restricted-range semaphore.
1921 //   -1 : thread is blocked, i.e. there is a waiter
1922 //    0 : neutral: thread is running or ready,
1923 //        could have been signaled after a wait started
1924 //    1 : signaled - thread is running or ready
1925 //
1926 //    Having three states allows for some detection of bad usage - see
1927 //    comments on unpark().
1928 
1929 os::PlatformEvent::PlatformEvent() {
1930   int status = pthread_cond_init(_cond, _condAttr);
1931   assert_status(status == 0, status, "cond_init");
1932   status = pthread_mutex_init(_mutex, _mutexAttr);
1933   assert_status(status == 0, status, "mutex_init");
1934   _event   = 0;
1935   _nParked = 0;
1936 }
1937 
1938 void os::PlatformEvent::park() {       // AKA "down()"
1939   // Transitions for _event:
1940   //   -1 => -1 : illegal
1941   //    1 =>  0 : pass - return immediately
1942   //    0 => -1 : block; then set _event to 0 before returning
1943 
1944   // Invariant: Only the thread associated with the PlatformEvent
1945   // may call park().
1946   assert(_nParked == 0, "invariant");
1947 
1948   int v;
1949 
1950   // atomically decrement _event
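       // (Atomic::cmpxchg returns the prior value of _event; we retry if some
       // other thread changed _event between our read and the update.)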
1951   for (;;) {
1952     v = _event;
1953     if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
1954   }
1955   guarantee(v >= 0, "invariant");
1956 
1957   if (v == 0) { // Do this the hard way by blocking ...
1958     int status = pthread_mutex_lock(_mutex);
1959     assert_status(status == 0, status, "mutex_lock");
1960     guarantee(_nParked == 0, "invariant");
1961     ++_nParked;
1962     while (_event < 0) {
1963       // OS-level "spurious wakeups" are ignored
1964       status = pthread_cond_wait(_cond, _mutex);
1965       assert_status(status == 0, status, "cond_wait");
1966     }
1967     --_nParked;
1968 
1969     _event = 0;
1970     status = pthread_mutex_unlock(_mutex);
1971     assert_status(status == 0, status, "mutex_unlock");
1972     // Paranoia to ensure our locked and lock-free paths interact
1973     // correctly with each other.
1974     OrderAccess::fence();
1975   }
1976   guarantee(_event >= 0, "invariant");
1977 }
1978 
1979 int os::PlatformEvent::park(jlong millis) {
1980   // Transitions for _event:
1981   //   -1 => -1 : illegal
1982   //    1 =>  0 : pass - return immediately
1983   //    0 => -1 : block; then set _event to 0 before returning
1984 
1985   // Invariant: Only the thread associated with the Event/PlatformEvent
1986   // may call park().
1987   assert(_nParked == 0, "invariant");
1988 
1989   int v;
1990   // atomically decrement _event
1991   for (;;) {
1992     v = _event;
1993     if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
1994   }
1995   guarantee(v >= 0, "invariant");
1996 
1997   if (v == 0) { // Do this the hard way by blocking ...
1998     struct timespec abst;
1999     to_abstime(&abst, millis_to_nanos(millis), false, false);
2000 
2001     int ret = OS_TIMEOUT;
2002     int status = pthread_mutex_lock(_mutex);
2003     assert_status(status == 0, status, "mutex_lock");
2004     guarantee(_nParked == 0, "invariant");
2005     ++_nParked;
2006 
2007     while (_event < 0) {
2008       status = pthread_cond_timedwait(_cond, _mutex, &abst);
2009       assert_status(status == 0 || status == ETIMEDOUT,
2010                     status, "cond_timedwait");
2011       // OS-level "spurious wakeups" are ignored unless the archaic
2012       // FilterSpuriousWakeups is set false. That flag should be obsoleted.
2013       if (!FilterSpuriousWakeups) break;
2014       if (status == ETIMEDOUT) break;
2015     }
2016     --_nParked;
2017 
2018     if (_event >= 0) {
2019       ret = OS_OK;
2020     }
2021 
2022     _event = 0;
2023     status = pthread_mutex_unlock(_mutex);
2024     assert_status(status == 0, status, "mutex_unlock");
2025     // Paranoia to ensure our locked and lock-free paths interact
2026     // correctly with each other.
2027     OrderAccess::fence();
2028     return ret;
2029   }
2030   return OS_OK;
2031 }
2032 
2033 void os::PlatformEvent::unpark() {
2034   // Transitions for _event:
2035   //    0 => 1 : just return
2036   //    1 => 1 : just return
2037   //   -1 => either 0 or 1; must signal target thread
2038   //         That is, we can safely transition _event from -1 to either
2039   //         0 or 1.
2040   // See also: "Semaphores in Plan 9" by Mullender & Cox
2041   //
2042   // Note: Forcing a transition from "-1" to "1" on an unpark() means
2043   // that it will take two back-to-back park() calls for the owning
2044   // thread to block. This has the benefit of forcing a spurious return
2045   // from the first park() call after an unpark() call, which will help
2046   // shake out uses of park() and unpark() that do not check state conditions
2047   // properly. This spurious return doesn't manifest itself in any user code
2048   // but only in the correctly written condition-checking loops of ObjectMonitor,
2049   // Mutex/Monitor, Thread::muxAcquire and os::sleep.
2050 
2051   if (Atomic::xchg(1, &_event) >= 0) return;
2052 
2053   int status = pthread_mutex_lock(_mutex);
2054   assert_status(status == 0, status, "mutex_lock");
2055   int anyWaiters = _nParked;
2056   assert(anyWaiters == 0 || anyWaiters == 1, "invariant");
2057   status = pthread_mutex_unlock(_mutex);
2058   assert_status(status == 0, status, "mutex_unlock");
2059 
2060   // Note that we signal() *after* dropping the lock for "immortal" Events.
2061   // This is safe and avoids a common class of futile wakeups.  In rare
2062   // circumstances this can cause a thread to return prematurely from
2063   // cond_{timed}wait() but the spurious wakeup is benign and the victim
2064   // will simply re-test the condition and re-park itself.
2065   // This provides particular benefit if the underlying platform does not
2066   // provide wait morphing.
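       // ("Wait morphing" transfers a signalled waiter directly to the mutex's
       // queue rather than waking it only to have it block on the mutex.)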
2067 
2068   if (anyWaiters != 0) {
2069     status = pthread_cond_signal(_cond);
2070     assert_status(status == 0, status, "cond_signal");
2071   }
2072 }
2073 
2074 // JSR166 support
2075 
2076 os::PlatformParker::PlatformParker() {
2077   int status;
2078   status = pthread_cond_init(&_cond[REL_INDEX], _condAttr);
2079   assert_status(status == 0, status, "cond_init rel");
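       // The condvar used for absolute timed-waits deliberately uses the default
       // attributes, and is therefore associated with the time-of-day clock.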
2080   status = pthread_cond_init(&_cond[ABS_INDEX], NULL);
2081   assert_status(status == 0, status, "cond_init abs");
2082   status = pthread_mutex_init(_mutex, _mutexAttr);
2083   assert_status(status == 0, status, "mutex_init");
2084   _cur_index = -1; // mark as unused
2085 }
2086 
2087 // Parker::park decrements count if > 0, else does a condvar wait.  Unpark
2088 // sets count to 1 and signals condvar.  Only one thread ever waits
2089 // on the condvar. Contention seen when trying to park implies that someone
2090 // is unparking you, so don't wait. And spurious returns are fine, so there
2091 // is no need to track notifications.
2092 
2093 void Parker::park(bool isAbsolute, jlong time) {
2094 
2095   // Optional fast-path check:
2096   // Return immediately if a permit is available.
2097   // We depend on Atomic::xchg() having full barrier semantics
2098   // since we are doing a lock-free update to _counter.
2099   if (Atomic::xchg(0, &_counter) > 0) return;
2100 
2101   Thread* thread = Thread::current();
2102   assert(thread->is_Java_thread(), "Must be JavaThread");
2103   JavaThread *jt = (JavaThread *)thread;
2104 
2105   // Optional optimization -- avoid state transitions if there's
2106   // an interrupt pending.
2107   if (Thread::is_interrupted(thread, false)) {
2108     return;
2109   }
2110 
2111   // Next, demultiplex/decode time arguments
2112   struct timespec absTime;
2113   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
2114     return;
2115   }
2116   if (time > 0) {
2117     to_abstime(&absTime, time, isAbsolute, false);
2118   }
2119 
2120   // Enter safepoint region
2121   // Beware of deadlocks such as 6317397.
2122   // The per-thread Parker:: mutex is a classic leaf-lock.
2123   // In particular a thread must never block on the Threads_lock while
2124   // holding the Parker:: mutex.  If safepoints are pending, both the
2125   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
2126   ThreadBlockInVM tbivm(jt);
2127 
2128   // Don't wait if we cannot get the lock, since interference arises from
2129   // unparking. Also re-check the interrupt before trying to wait.
2130   if (Thread::is_interrupted(thread, false) ||
2131       pthread_mutex_trylock(_mutex) != 0) {
2132     return;
2133   }
2134 
2135   int status;
2136   if (_counter > 0)  { // no wait needed
2137     _counter = 0;
2138     status = pthread_mutex_unlock(_mutex);
2139     assert_status(status == 0, status, "invariant");
2140     // Paranoia to ensure our locked and lock-free paths interact
2141     // correctly with each other and Java-level accesses.
2142     OrderAccess::fence();
2143     return;
2144   }
2145 
2146   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
2147   jt->set_suspend_equivalent();
2148   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2149 
2150   assert(_cur_index == -1, "invariant");
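       // Record which condvar we block on so unpark() can signal the right one.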
2151   if (time == 0) {
2152     _cur_index = REL_INDEX; // arbitrary choice when not timed
2153     status = pthread_cond_wait(&_cond[_cur_index], _mutex);
2154     assert_status(status == 0, status, "cond_wait");
2155   }
2156   else {
2157     _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
2158     status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
2159     assert_status(status == 0 || status == ETIMEDOUT,
2160                   status, "cond_timedwait");
2161   }
2162   _cur_index = -1;
2163 
2164   _counter = 0;
2165   status = pthread_mutex_unlock(_mutex);
2166   assert_status(status == 0, status, "invariant");
2167   // Paranoia to ensure our locked and lock-free paths interact
2168   // correctly with each other and Java-level accesses.
2169   OrderAccess::fence();
2170 
2171   // If externally suspended while waiting, re-suspend
2172   if (jt->handle_special_suspend_equivalent_condition()) {
2173     jt->java_suspend_self();
2174   }
2175 }
2176 
2177 void Parker::unpark() {
2178   int status = pthread_mutex_lock(_mutex);
2179   assert_status(status == 0, status, "invariant");
2180   const int s = _counter;
2181   _counter = 1;
2182   // must capture correct index before unlocking
2183   int index = _cur_index;
2184   status = pthread_mutex_unlock(_mutex);
2185   assert_status(status == 0, status, "invariant");
2186 
2187   // Note that we signal() *after* dropping the lock for "immortal" Events.
2188   // This is safe and avoids a common class of futile wakeups.  In rare
2189   // circumstances this can cause a thread to return prematurely from
2190   // cond_{timed}wait() but the spurious wakeup is benign and the victim
2191   // will simply re-test the condition and re-park itself.
2192   // This provides particular benefit if the underlying platform does not
2193   // provide wait morphing.
2194 
2195   if (s < 1 && index != -1) {
2196     // thread is definitely parked
2197     status = pthread_cond_signal(&_cond[index]);
2198     assert_status(status == 0, status, "invariant");
2199   }
2200 }
2201 
2202 // Platform Monitor implementation
2203 
2204 os::PlatformMonitor::PlatformMonitor() {
2205   int status = pthread_cond_init(&_cond, _condAttr);
2206   assert_status(status == 0, status, "cond_init");
2207   status = pthread_mutex_init(&_mutex, _mutexAttr);
2208   assert_status(status == 0, status, "mutex_init");
2209 }
2210 
2211 os::PlatformMonitor::~PlatformMonitor() {
2212   int status = pthread_cond_destroy(&_cond);
2213   assert_status(status == 0, status, "cond_destroy");
2214   status = pthread_mutex_destroy(&_mutex);
2215   assert_status(status == 0, status, "mutex_destroy");
2216 }
2217 
2218 // Must already be locked
2219 int os::PlatformMonitor::wait(jlong millis) {
2220   assert(millis >= 0, "negative timeout");
2221   if (millis > 0) {
2222     struct timespec abst;
2223     // millis_to_nanos() clamps the conversion to MAX_SECS worth of
2224     // milliseconds, so overflow does not need to be re-checked here.
2225     to_abstime(&abst, millis_to_nanos(millis), false, false);
2230 
2231     int ret = OS_TIMEOUT;
2232     int status = pthread_cond_timedwait(&_cond, &_mutex, &abst);
2233     assert_status(status == 0 || status == ETIMEDOUT,
2234                   status, "cond_timedwait");
2235     if (status == 0) {
2236       ret = OS_OK;
2237     }
2238     return ret;
2239   } else {
2240     int status = pthread_cond_wait(&_cond, &_mutex);
2241     assert_status(status == 0, status, "cond_wait");
2242     return OS_OK;
2243   }
2244 }
2245 
2246 #endif // !SOLARIS