/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2015 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// According to the AIX OS doc #pragma alloca must be used
// with C++ compiler before referencing the function alloca()
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "libperfstat_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
// getrusage() is prepared to handle the associated failure.
#ifndef RUSAGE_THREAD
#define RUSAGE_THREAD   (1)               /* only the calling thread */
#endif

// PPC port
static const uintx Use64KPagesThreshold       = 1*M;
static const uintx MaxExpectedDataSegmentSize = SIZE_4G*2;

// Add missing declarations (should be in procinfo.h but isn't until AIX 6.1).
#if !defined(_AIXVERSION_610)
extern "C" {
  int getthrds64(pid_t ProcessIdentifier,
                 struct thrdentry64* ThreadBuffer,
                 int ThreadSize,
                 tid64_t* IndexPointer,
                 int Count);
}
#endif

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
// The semantics in this file are thus that codeptr_t is a *real code ptr*.
// This means that any function taking codeptr_t as arguments will assume
// a real codeptr and won't handle function descriptors (e.g. getFuncName),
// whereas functions taking address as args will deal with function
// descriptors (e.g. os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;

// Typedefs for stackslots, stack pointers, pointers to op codes.
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;

// Excerpts from systemcfg.h definitions newer than AIX 5.3.
#ifndef PV_7
#define PV_7 0x200000          /* Power PC 7 */
#define PV_7_Compat 0x208000   /* Power PC 7 */
#endif
#ifndef PV_8
#define PV_8 0x300000          /* Power PC 8 */
#define PV_8_Compat 0x308000   /* Power PC 8 */
#endif

#define trcVerbose(fmt, ...) { /* PPC port */  \
  if (Verbose) { \
    fprintf(stderr, fmt, ##__VA_ARGS__); \
    fputc('\n', stderr); fflush(stderr); \
  } \
}
#define trc(fmt, ...)        /* PPC port */
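
// Note: trcVerbose() prints to stderr only when the Verbose flag is set,
// whereas trc() is compiled out entirely on this port. For example,
// trcVerbose("probing %d page sizes", n) is a no-op in a default VM.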

#define ERRBYE(s) { \
    trcVerbose(s); \
    return -1; \
}

// Query dimensions of the stack of the calling thread.
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

// function to check a given stack pointer against given stack limits
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
  if (((uintptr_t)sp) & 0x7) {
    return false;
  }
  if (sp > stack_base) {
    return false;
  }
  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
    return false;
  }
  return true;
}

// Returns true if 'p' is a valid code pointer.
inline bool is_valid_codepointer(codeptr_t p) {
  if (!p) {
    return false;
  }
  if (((uintptr_t)p) & 0x3) {
    return false;
  }
  if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
    return false;
  }
  return true;
}

// Macro to check a given stack pointer against given stack limits and to die if test fails.
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// Macro to check the current stack pointer against given stack limits.
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
  address sp; \
  sp = os::current_stack_pointer(); \
  CHECK_STACK_PTR(sp, stack_base, stack_size); \
}

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;
pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;
int       os::Aix::_on_pase = -1;
int       os::Aix::_os_version = -1;
int       os::Aix::_stack_page_size = -1;
int       os::Aix::_xpg_sus_mode = -1;
int       os::Aix::_extshm = -1;
int       os::Aix::_logical_cpus = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static int      g_multipage_error  = -1;   // error analysis for multipage initialization
static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static pid_t    _initial_pid       = 0;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;

// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
// therefore should not be defined in AIX class.
//
// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
// latter two (16M "large" and 16G "huge" pages, respectively) require
// special setup and are normally not available.
//
// AIX supports multiple page sizes per process, for:
//  - Stack (of the primordial thread, so not relevant for us)
//  - Data - data, bss, heap, for us also pthread stacks
//  - Text - text code
//  - shared memory
//
// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
// and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
//
// For shared memory, page size can be set dynamically via
// shmctl(). Different shared memory regions can have different page
// sizes.
//
// More information can be found at the IBM info center:
//   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
//
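// Example (illustrative, not taken from this file): starting the VM with
//   LDR_CNTRL=DATAPSIZE=64K@TEXTPSIZE=64K@SHMPSIZE=64K ...
// switches the default data, text and shared memory page sizes to 64K
// ('@' separates LDR_CNTRL suboptions).
//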
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};

// We must not accidentally allocate memory close to the BRK - even if
// that would work - because then we prevent the BRK segment from
// growing which may result in a malloc OOM even though there is
// enough memory. The problem only arises if we shmat() or mmap() at
// a specific wish address, e.g. to place the heap in a
// compressed-oops-friendly way.
static bool is_close_to_brk(address a) {
  address a1 = (address) sbrk(0);
  if (a >= a1 && a < (a1 + MaxExpectedDataSegmentSize)) {
    return true;
  }
  return false;
}
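
// A minimal usage sketch (hypothetical caller): before attaching memory at a
// fixed address, a reservation routine would check
//   if (is_close_to_brk(requested_addr)) { /* refuse; attach anywhere instead */ }
// leaving the data segment room to grow by up to MaxExpectedDataSegmentSize.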

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return 0xFFFFFFFFFFFFFFFFLL;
  }
}

julong os::physical_memory() {
  return Aix::physical_memory();
}

// Return true if the process runs with special privileges, i.e. if the real
// and effective user or group ids differ (e.g. setuid/setgid binaries).

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function, emulates disclaim64 using multiple 32bit disclaims
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
  const unsigned int maxDisclaimSize = 0x40000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);

  char* p = addr;

  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i ++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}
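
// For example, my_disclaim64(addr, 2560*M) issues two full 1GB (0x40000000)
// disclaims followed by one 512MB remainder disclaim.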

// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif


// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
    assert(false, "vmgetinfo failed to retrieve page size");
    return SIZE_4K;
  }

}

// Returns the kernel thread id of the currently running thread.
pid_t os::Aix::gettid() {
  return (pid_t) thread_self();
}

void os::Aix::initialize_system_info() {

  // Get the number of online(logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case SIZE_4K : return "4K";
    case SIZE_64K: return "64K";
    case SIZE_16M: return "16M";
    case SIZE_16G: return "16G";
    case -1:       return "not set";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Probe OS for multipage support.
// Will fill the global g_multipage_support structure.
// Must be called before calling os::large_page_init().
static void query_multipage_support() {

  guarantee(g_multipage_support.pagesize == -1,
            "do not call twice");

  g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");

  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // Default data page size is defined either by linker options (-bdatapsize)
  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // default should be 4K.
  {
    void* p = ::malloc(SIZE_16M);
    g_multipage_support.datapsize = os::Aix::query_pagesize(p);
    ::free(p);
  }

  // Query default shm page size (LDR_CNTRL SHMPSIZE).
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // Before querying the stack page size, make sure we are not running as primordial
  // thread (because primordial thread's stack may have different page size than
  // pthread thread stacks). Running a VM on the primordial thread won't work for a
  // number of reasons so we may just as well guarantee it here.
  guarantee0(!os::Aix::is_primordial_thread());

  // Query pthread stack page size.
  {
    int dummy = 0;
    g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
  }

  // Query default text page size (LDR_CNTRL TEXTPSIZE).
  /* PPC port: so far unused.
  {
    address any_function =
      (address) resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
    g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
  }
  */

  // Now probe for support of 64K pages and 16M pages.

  // Before OS/400 V6R1, there is no support for pages other than 4K.
  if (os::Aix::on_pase_V5R4_or_older()) {
    Unimplemented();
    goto query_multipage_support_end;
  }

  // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      trc("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
      trc("disabling multipage support.\n");
      g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
    for (int i = 0; i < num_psizes; i ++) {
      trcVerbose(" %s ", describe_pagesize(sizes[i]));
    }

    // Can we use 64K, 16M pages?
    for (int i = 0; i < num_psizes; i ++) {
      const size_t pagesize = sizes[i];
      if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
        continue;
      }
      bool can_use = false;
      trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
      const int shmid = ::shmget(IPC_PRIVATE, pagesize,
        IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee0(shmid != -1); // Should always work.
      // Try to set pagesize.
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = pagesize;
      if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
        const int en = errno;
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        // PPC port trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
        // PPC port  MiscUtils::describe_errno(en));
      } else {
        // Attach and double check page size.
        void* p = ::shmat(shmid, NULL, 0);
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        guarantee0(p != (void*) -1); // Should always work.
        const size_t real_pagesize = os::Aix::query_pagesize(p);
        if (real_pagesize != pagesize) {
          trcVerbose("real page size (0x%llX) differs.", real_pagesize);
        } else {
          can_use = true;
        }
        ::shmdt(p);
      }
      trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
      if (pagesize == SIZE_64K) {
        g_multipage_support.can_use_64K_pages = can_use;
      } else if (pagesize == SIZE_16M) {
        g_multipage_support.can_use_16M_pages = can_use;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s\n",
      describe_pagesize(g_multipage_support.pagesize));
  trcVerbose("Data page size (C-Heap, bss, etc): %s\n",
      describe_pagesize(g_multipage_support.datapsize));
  trcVerbose("Text page size: %s\n",
      describe_pagesize(g_multipage_support.textpsize));
  trcVerbose("Thread stack page size (pthread): %s\n",
      describe_pagesize(g_multipage_support.pthr_stack_pagesize));
  trcVerbose("Default shared memory page size: %s\n",
      describe_pagesize(g_multipage_support.shmpsize));
  trcVerbose("Can use 64K pages dynamically with shared memory: %s\n",
      (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
  trcVerbose("Can use 16M pages dynamically with shared memory: %s\n",
      (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
  trcVerbose("Multipage error details: %d\n",
      g_multipage_support.error);

  // sanity checks
  assert0(g_multipage_support.pagesize == SIZE_4K);
  assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
  // PPC port: so far unused.assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
  assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
  assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);

} // end query_multipage_support()

void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
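    // For example (hypothetical layout), with
    //   buf = "/opt/jdk/jre/lib/ppc64/server/libjvm.so"
    // the steps below yield dll_dir = "/opt/jdk/jre/lib/ppc64" and
    // java_home = "/opt/jdk/jre".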
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On AIX we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
    return true;
  } else {
    return false;
  }
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow debugger in
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// retrieve memory information.
// Returns false if something went wrong;
// content of pmi undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {

    Unimplemented();
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo

// Retrieve global cpu information.
// Returns false if something went wrong;
// the content of pci is undefined in this case.
bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
  assert(pci, "get_cpuinfo: invalid parameter");
  memset(pci, 0, sizeof(cpuinfo_t));

  perfstat_cpu_total_t psct;
  memset (&psct, '\0', sizeof(psct));

  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
    assert(0, "perfstat_cpu_total() failed");
    return false;
  }

  // global cpu information
  strcpy (pci->description, psct.description);
  pci->processorHZ = psct.processorHZ;
  pci->ncpus = psct.ncpus;
  os::Aix::_logical_cpus = psct.ncpus;
  for (int i = 0; i < 3; i++) {
    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
  }

  // get the processor version from _system_configuration
  switch (_system_configuration.version) {
  case PV_8:
    strcpy(pci->version, "Power PC 8");
    break;
  case PV_7:
    strcpy(pci->version, "Power PC 7");
    break;
  case PV_6_1:
    strcpy(pci->version, "Power PC 6 DD1.x");
    break;
  case PV_6:
    strcpy(pci->version, "Power PC 6");
    break;
  case PV_5:
    strcpy(pci->version, "Power PC 5");
    break;
  case PV_5_2:
    strcpy(pci->version, "Power PC 5_2");
    break;
  case PV_5_3:
    strcpy(pci->version, "Power PC 5_3");
    break;
  case PV_5_Compat:
    strcpy(pci->version, "PV_5_Compat");
    break;
  case PV_6_Compat:
    strcpy(pci->version, "PV_6_Compat");
    break;
  case PV_7_Compat:
    strcpy(pci->version, "PV_7_Compat");
    break;
  case PV_8_Compat:
    strcpy(pci->version, "PV_8_Compat");
    break;
  default:
    strcpy(pci->version, "unknown");
  }

  return true;

} //end os::Aix::get_cpuinfo

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Aix::libpthread_init() {
  return;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is kernel thread id (similar to Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level start routine
  thread->run();

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (os::Aix::supports_variable_stack_size()) {
    if (stack_size == 0) {
      stack_size = os::Aix::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } // else let thread_create() pick the default value (96 K on AIX)

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  pthread_attr_destroy(&attr);

  if (ret == 0) {
    // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
  } else {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "thr_continue failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// thread local storage

int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, NULL);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}

// Note: This is currently not used by VM, as we don't destroy TLS key
// on VM exit.
void os::free_thread_local_storage(int index) {
  int rslt = pthread_key_delete((pthread_key_t)index);
  assert(rslt == 0, "invalid index");
}

void os::thread_local_storage_at_put(int index, void* value) {
  int rslt = pthread_setspecific((pthread_key_t)index, value);
  assert(rslt == 0, "pthread_setspecific failed");
}

extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }

double os::elapsedVTime() {
  struct rusage usage;
  int retval = getrusage(RUSAGE_THREAD, &usage);
  if (retval == 0) {
    return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
  } else {
    // better than nothing, but not much
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  seconds = jlong(time.tv_sec);
  nanos = jlong(time.tv_usec) * 1000;
}


// We need to manually declare mread_real_time,
// because IBM didn't provide a prototype in time.h.
// (they probably only ever tested in C, not C++)
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {
    Unimplemented();
    return 0;
  } else {
    // On AIX use the precision of the processor's real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonically increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}

char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core, void* siginfo, void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() { return (intx)pthread_self(); }

int os::current_process_id() {

  // This implementation returns a unique pid, the pid of the
  // launcher thread that starts the vm 'process'.

  // Under POSIX, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre NPTL behaviour.

  // if you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in osThread_linux.hpp file

  return (int)(_initial_pid ? _initial_pid : getpid());
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
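
// For example (hypothetical arguments), dll_build_name(buf, buflen, "/usr/lib", "net")
// produces "/usr/lib/libnet.so"; a colon-separated pname is split and each
// element is probed with file_exists() until a match is found.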

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
  if (lib) {
    if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
      return true;
    } else {
      return false;
    }
  } else {
    lib = LoadedLibraries::find_for_data_address(addr);
    if (lib) {
      if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
        return true;
      } else {
        return false;
      }
    } else {
      return false;
    }
  }
}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
//   it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
//   code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
//   NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
  if (lib) {
    // It's a real code pointer.
    return p;
  } else {
    lib = LoadedLibraries::find_for_data_address(p);
    if (lib) {
      // pointer to data segment, potential function descriptor
      address code_entry = (address)(((FunctionDescriptor*)p)->entry());
      if (LoadedLibraries::find_for_text_address(code_entry)) {
        // It's a function descriptor.
        return code_entry;
      }
    }
  }
  return NULL;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
  return Decoder::decode(addr, buf, buflen, offset, demangle);
}

static int getModuleName(codeptr_t pc,                    // [in] program counter
                         char* p_name, size_t namelen,    // [out] optional: module short name
                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
                         ) {

  // initialize output parameters
  if (p_name && namelen > 0) {
    *p_name = '\0';
  }
  if (p_errmsg && errmsglen > 0) {
    *p_errmsg = '\0';
  }

  const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);
  if (lib) {
    if (p_name && namelen > 0) {
      sprintf(p_name, "%.*s", (int) namelen, lib->get_shortname());
1478     }
1479     return 0;
1480   }
1481 
1482   trcVerbose("pc outside any module");
1483 
1484   return -1;
1485 }
1486 
1487 bool os::dll_address_to_library_name(address addr, char* buf,
1488                                      int buflen, int* offset) {
1489   if (offset) {
1490     *offset = -1;
1491   }
1492   // Buf is not optional, but offset is optional.
1493   assert(buf != NULL, "sanity check");
1494   buf[0] = '\0';
1495 
1496   // Resolve function ptr literals first.
1497   addr = resolve_function_descriptor_to_code_pointer(addr);
1498   if (!addr) {
1499     return false;
1500   }
1501 
1502   if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
1503     return true;
1504   }
1505   return false;
1506 }
1507 
1508 // Loads .dll/.so and in case of error it checks if .dll/.so was built
1509 // for the same architecture as Hotspot is running on.
1510 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1511 
1512   if (ebuf && ebuflen > 0) {
1513     ebuf[0] = '\0';
1514     ebuf[ebuflen - 1] = '\0';
1515   }
1516 
  if (!filename || strlen(filename) == 0) {
    if (ebuf && ebuflen > 0) {
      ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
    }
    return NULL;
  }
1521 
  // RTLD_LAZY is currently not implemented on AIX: the library is loaded
  // immediately, together with all its dependencies.
  void* result = ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Reload the dll cache. Don't do this from within a signal handler.
    LoadedLibraries::reload();
1527     return result;
1528   } else {
1529     // error analysis when dlopen fails
1530     const char* const error_report = ::dlerror();
1531     if (error_report && ebuf && ebuflen > 0) {
1532       snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1533                filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1534     }
1535   }
1536   return NULL;
1537 }
1538 
1539 void* os::dll_lookup(void* handle, const char* name) {
1540   void* res = dlsym(handle, name);
1541   return res;
1542 }
1543 
1544 void* os::get_default_process_handle() {
1545   return (void*)::dlopen(NULL, RTLD_LAZY);
1546 }
1547 
1548 void os::print_dll_info(outputStream *st) {
1549   st->print_cr("Dynamic libraries:");
1550   LoadedLibraries::print(st);
1551 }
1552 
1553 void os::print_os_info(outputStream* st) {
1554   st->print("OS:");
1555 
1556   st->print("uname:");
1557   struct utsname name;
1558   uname(&name);
  st->print("%s ", name.sysname);
  st->print("%s ", name.nodename);
  st->print("%s ", name.release);
  st->print("%s ", name.version);
  st->print("%s", name.machine);
1564   st->cr();
1565 
1566   // rlimit
1567   st->print("rlimit:");
1568   struct rlimit rlim;
1569 
1570   st->print(" STACK ");
1571   getrlimit(RLIMIT_STACK, &rlim);
1572   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1573   else st->print("%uk", rlim.rlim_cur >> 10);
1574 
1575   st->print(", CORE ");
1576   getrlimit(RLIMIT_CORE, &rlim);
1577   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1578   else st->print("%uk", rlim.rlim_cur >> 10);
1579 
1580   st->print(", NPROC ");
1581   st->print("%d", sysconf(_SC_CHILD_MAX));
1582 
1583   st->print(", NOFILE ");
1584   getrlimit(RLIMIT_NOFILE, &rlim);
1585   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1586   else st->print("%d", rlim.rlim_cur);
1587 
1588   st->print(", AS ");
1589   getrlimit(RLIMIT_AS, &rlim);
1590   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1591   else st->print("%uk", rlim.rlim_cur >> 10);
1592 
1593   // Print limits on DATA, because it limits the C-heap.
1594   st->print(", DATA ");
1595   getrlimit(RLIMIT_DATA, &rlim);
1596   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1597   else st->print("%uk", rlim.rlim_cur >> 10);
1598   st->cr();
1599 
1600   // load average
1601   st->print("load average:");
  double loadavg[3] = {-1.0, -1.0, -1.0};
1603   os::loadavg(loadavg, 3);
1604   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1605   st->cr();
1606 }
1607 
1608 void os::print_memory_info(outputStream* st) {
1609 
1610   st->print_cr("Memory:");
1611 
1612   st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
1613   st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
1614   st->print_cr("  Default shared memory page size:        %s",
1615     describe_pagesize(g_multipage_support.shmpsize));
1616   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1617     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1618   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1619     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1620   if (g_multipage_error != 0) {
1621     st->print_cr("  multipage error: %d", g_multipage_error);
1622   }
1623 
1624   // print out LDR_CNTRL because it affects the default page sizes
1625   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1626   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1627 
1628   const char* const extshm = ::getenv("EXTSHM");
1629   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
  if (extshm && ((strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0))) {
1631     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1632   }
1633 
1634   // Call os::Aix::get_meminfo() to retrieve memory statistics.
1635   os::Aix::meminfo_t mi;
1636   if (os::Aix::get_meminfo(&mi)) {
1637     char buffer[256];
1638     if (os::Aix::on_aix()) {
1639       jio_snprintf(buffer, sizeof(buffer),
1640                    "  physical total : %llu\n"
1641                    "  physical free  : %llu\n"
1642                    "  swap total     : %llu\n"
1643                    "  swap free      : %llu\n",
1644                    mi.real_total,
1645                    mi.real_free,
1646                    mi.pgsp_total,
1647                    mi.pgsp_free);
1648     } else {
1649       Unimplemented();
1650     }
1651     st->print_raw(buffer);
1652   } else {
1653     st->print_cr("  (no more information available)");
1654   }
1655 }
1656 
1657 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1658   // cpu
1659   st->print("CPU:");
1660   st->print("total %d", os::processor_count());
1661   // It's not safe to query number of active processors after crash
1662   // st->print("(active %d)", os::active_processor_count());
1663   st->print(" %s", VM_Version::cpu_features());
1664   st->cr();
1665 }
1666 
1667 void os::print_siginfo(outputStream* st, void* siginfo) {
1668   // Use common posix version.
1669   os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
1670   st->cr();
1671 }
1672 
1673 static void print_signal_handler(outputStream* st, int sig,
1674                                  char* buf, size_t buflen);
1675 
1676 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1677   st->print_cr("Signal Handlers:");
1678   print_signal_handler(st, SIGSEGV, buf, buflen);
1679   print_signal_handler(st, SIGBUS , buf, buflen);
1680   print_signal_handler(st, SIGFPE , buf, buflen);
1681   print_signal_handler(st, SIGPIPE, buf, buflen);
1682   print_signal_handler(st, SIGXFSZ, buf, buflen);
1683   print_signal_handler(st, SIGILL , buf, buflen);
1684   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
1685   print_signal_handler(st, SR_signum, buf, buflen);
1686   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1687   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1688   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1689   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1690   print_signal_handler(st, SIGTRAP, buf, buflen);
1691   print_signal_handler(st, SIGDANGER, buf, buflen);
1692 }
1693 
1694 static char saved_jvm_path[MAXPATHLEN] = {0};
1695 
1696 // Find the full path to the current module, libjvm.so.
1697 void os::jvm_path(char *buf, jint buflen) {
1698   // Error checking.
1699   if (buflen < MAXPATHLEN) {
1700     assert(false, "must use a large-enough buffer");
1701     buf[0] = '\0';
1702     return;
1703   }
1704   // Lazy resolve the path to current module.
1705   if (saved_jvm_path[0] != 0) {
1706     strcpy(buf, saved_jvm_path);
1707     return;
1708   }
1709 
1710   Dl_info dlinfo;
1711   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1712   assert(ret != 0, "cannot locate libjvm");
1713   char* rp = realpath((char *)dlinfo.dli_fname, buf);
1714   assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1715 
1716   strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
1717   saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
1718 }
1719 
1720 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1721   // no prefix required, not even "_"
1722 }
1723 
1724 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1725   // no suffix required
1726 }
1727 
1728 ////////////////////////////////////////////////////////////////////////////////
1729 // sun.misc.Signal support
1730 
1731 static volatile jint sigint_count = 0;
1732 
1733 static void
1734 UserHandler(int sig, void *siginfo, void *context) {
1735   // 4511530 - sem_post is serialized and handled by the manager thread. When
1736   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1737   // don't want to flood the manager thread with sem_post requests.
1738   if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1739     return;
1740 
1741   // Ctrl-C is pressed during error reporting, likely because the error
1742   // handler fails to abort. Let VM die immediately.
1743   if (sig == SIGINT && is_error_reported()) {
1744     os::die();
1745   }
1746 
1747   os::signal_notify(sig);
1748 }
1749 
1750 void* os::user_handler() {
1751   return CAST_FROM_FN_PTR(void*, UserHandler);
1752 }
1753 
1754 extern "C" {
1755   typedef void (*sa_handler_t)(int);
1756   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1757 }
1758 
1759 void* os::signal(int signal_number, void* handler) {
1760   struct sigaction sigAct, oldSigAct;
1761 
1762   sigfillset(&(sigAct.sa_mask));
1763 
1764   // Do not block out synchronous signals in the signal handler.
1765   // Blocking synchronous signals only makes sense if you can really
1766   // be sure that those signals won't happen during signal handling,
1767   // when the blocking applies. Normal signal handlers are lean and
1768   // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGV, SIGILL or SIGBUS may and do happen.
  // On AIX/PASE there was a case where a SIGSEGV happened, followed
1771   // by a SIGILL, which was blocked due to the signal mask. The process
1772   // just hung forever. Better to crash from a secondary signal than to hang.
1773   sigdelset(&(sigAct.sa_mask), SIGSEGV);
1774   sigdelset(&(sigAct.sa_mask), SIGBUS);
1775   sigdelset(&(sigAct.sa_mask), SIGILL);
1776   sigdelset(&(sigAct.sa_mask), SIGFPE);
1777   sigdelset(&(sigAct.sa_mask), SIGTRAP);
1778 
1779   sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1780 
1781   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1782 
1783   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1784     // -1 means registration failed
1785     return (void *)-1;
1786   }
1787 
1788   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1789 }
1790 
1791 void os::signal_raise(int signal_number) {
1792   ::raise(signal_number);
1793 }
1794 
1795 //
1796 // The following code is moved from os.cpp for making this
1797 // code platform specific, which it is by its very nature.
1798 //
1799 
1800 // Will be modified when max signal is changed to be dynamic
1801 int os::sigexitnum_pd() {
1802   return NSIG;
1803 }
1804 
1805 // a counter for each possible signal value
1806 static volatile jint pending_signals[NSIG+1] = { 0 };
1807 
// POSIX semaphore used for signal handshaking.
1809 static sem_t sig_sem;
1810 
1811 void os::signal_init_pd() {
1812   // Initialize signal structures
1813   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1814 
1815   // Initialize signal semaphore
1816   int rc = ::sem_init(&sig_sem, 0, 0);
1817   guarantee(rc != -1, "sem_init failed");
1818 }
1819 
1820 void os::signal_notify(int sig) {
1821   Atomic::inc(&pending_signals[sig]);
1822   ::sem_post(&sig_sem);
1823 }
1824 
1825 static int check_pending_signals(bool wait) {
1826   Atomic::store(0, &sigint_count);
1827   for (;;) {
1828     for (int i = 0; i < NSIG + 1; i++) {
1829       jint n = pending_signals[i];
1830       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1831         return i;
1832       }
1833     }
1834     if (!wait) {
1835       return -1;
1836     }
1837     JavaThread *thread = JavaThread::current();
1838     ThreadBlockInVM tbivm(thread);
1839 
1840     bool threadIsSuspended;
1841     do {
1842       thread->set_suspend_equivalent();
1843       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1844 
1845       ::sem_wait(&sig_sem);
1846 
1847       // were we externally suspended while we were waiting?
1848       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1849       if (threadIsSuspended) {
1850         //
1851         // The semaphore has been incremented, but while we were waiting
1852         // another thread suspended us. We don't want to continue running
1853         // while suspended because that would surprise the thread that
1854         // suspended us.
1855         //
1856         ::sem_post(&sig_sem);
1857 
1858         thread->java_suspend_self();
1859       }
1860     } while (threadIsSuspended);
1861   }
1862 }
1863 
1864 int os::signal_lookup() {
1865   return check_pending_signals(false);
1866 }
1867 
1868 int os::signal_wait() {
1869   return check_pending_signals(true);
1870 }
1871 
1872 ////////////////////////////////////////////////////////////////////////////////
1873 // Virtual Memory
1874 
1875 // We need to keep small simple bookkeeping for os::reserve_memory and friends.
1876 
1877 #define VMEM_MAPPED  1
1878 #define VMEM_SHMATED 2
1879 
1880 struct vmembk_t {
1881   int type;         // 1 - mmap, 2 - shmat
1882   char* addr;
1883   size_t size;      // Real size, may be larger than usersize.
1884   size_t pagesize;  // page size of area
1885   vmembk_t* next;
1886 
1887   bool contains_addr(char* p) const {
1888     return p >= addr && p < (addr + size);
1889   }
1890 
1891   bool contains_range(char* p, size_t s) const {
1892     return contains_addr(p) && contains_addr(p + s - 1);
1893   }
1894 
1895   void print_on(outputStream* os) const {
1896     os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1897       " bytes, %d %s pages), %s",
1898       addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1899       (type == VMEM_SHMATED ? "shmat" : "mmap")
1900     );
1901   }
1902 
  // Check that range is a sub range of memory block (or equal to memory block);
  // also check that range is fully page aligned to the page size of the block.
1905   void assert_is_valid_subrange(char* p, size_t s) const {
1906     if (!contains_range(p, s)) {
1907       fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1908               "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
1909               p, p + s - 1, addr, addr + size - 1);
1910       guarantee0(false);
1911     }
1912     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1913       fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1914               " aligned to pagesize (%s)\n", p, p + s);
1915       guarantee0(false);
1916     }
1917   }
1918 };
1919 
1920 static struct {
1921   vmembk_t* first;
1922   MiscUtils::CritSect cs;
1923 } vmem;
1924 
1925 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1926   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1927   assert0(p);
1928   if (p) {
1929     MiscUtils::AutoCritSect lck(&vmem.cs);
1930     p->addr = addr; p->size = size;
1931     p->pagesize = pagesize;
1932     p->type = type;
1933     p->next = vmem.first;
1934     vmem.first = p;
1935   }
1936 }
1937 
1938 static vmembk_t* vmembk_find(char* addr) {
1939   MiscUtils::AutoCritSect lck(&vmem.cs);
1940   for (vmembk_t* p = vmem.first; p; p = p->next) {
1941     if (p->addr <= addr && (p->addr + p->size) > addr) {
1942       return p;
1943     }
1944   }
1945   return NULL;
1946 }
1947 
1948 static void vmembk_remove(vmembk_t* p0) {
1949   MiscUtils::AutoCritSect lck(&vmem.cs);
1950   assert0(p0);
1951   assert0(vmem.first); // List should not be empty.
1952   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1953     if (*pp == p0) {
1954       *pp = p0->next;
1955       ::free(p0);
1956       return;
1957     }
1958   }
1959   assert0(false); // Not found?
1960 }
1961 
1962 static void vmembk_print_on(outputStream* os) {
1963   MiscUtils::AutoCritSect lck(&vmem.cs);
1964   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1965     vmi->print_on(os);
1966     os->cr();
1967   }
1968 }
1969 
1970 // Reserve and attach a section of System V memory.
1971 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1972 // address. Failing that, it will attach the memory anywhere.
1973 // If <requested_addr> is NULL, function will attach the memory anywhere.
1974 //
// <alignment_hint> is ignored by this function. It is very probable, however, that the
// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
// Should this not be enough, we can put more work into it.
1978 static char* reserve_shmated_memory (
1979   size_t bytes,
1980   char* requested_addr,
1981   size_t alignment_hint) {
1982 
1983   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1984     PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1985     bytes, requested_addr, alignment_hint);
1986 
1987   // Either give me wish address or wish alignment but not both.
1988   assert0(!(requested_addr != NULL && alignment_hint != 0));
1989 
1990   // We must prevent anyone from attaching too close to the
1991   // BRK because that may cause malloc OOM.
1992   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1993     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1994       "Will attach anywhere.", requested_addr);
1995     // Act like the OS refused to attach there.
1996     requested_addr = NULL;
1997   }
1998 
1999   // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
2000   // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
2001   if (os::Aix::on_pase_V5R4_or_older()) {
2002     ShouldNotReachHere();
2003   }
2004 
2005   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
2006   const size_t size = align_size_up(bytes, SIZE_64K);
2007 
2008   // Reserve the shared segment.
2009   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
2010   if (shmid == -1) {
2011     trc("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
2012     return NULL;
2013   }
2014 
2015   // Important note:
2016   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
2017   // We must right after attaching it remove it from the system. System V shm segments are global and
2018   // survive the process.
2019   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
2020 
2021   struct shmid_ds shmbuf;
2022   memset(&shmbuf, 0, sizeof(shmbuf));
2023   shmbuf.shm_pagesize = SIZE_64K;
2024   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
2025     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
2026                size / SIZE_64K, errno);
2027     // I want to know if this ever happens.
2028     assert(false, "failed to set page size for shmat");
2029   }
2030 
2031   // Now attach the shared segment.
2032   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
2033   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
2034   // were not a segment boundary.
2035   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
2036   const int errno_shmat = errno;
2037 
2038   // (A) Right after shmat and before handing shmat errors delete the shm segment.
2039   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2040     trc("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2041     assert(false, "failed to remove shared memory segment!");
2042   }
2043 
2044   // Handle shmat error. If we failed to attach, just return.
2045   if (addr == (char*)-1) {
2046     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
2047     return NULL;
2048   }
2049 
2050   // Just for info: query the real page size. In case setting the page size did not
  // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
2052   const size_t real_pagesize = os::Aix::query_pagesize(addr);
2053   if (real_pagesize != shmbuf.shm_pagesize) {
2054     trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
2055   }
2056 
2057   if (addr) {
2058     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
2059       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
2060   } else {
2061     if (requested_addr != NULL) {
2062       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
2063     } else {
2064       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
2065     }
2066   }
2067 
2068   // book-keeping
2069   vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
2070   assert0(is_aligned_to(addr, os::vm_page_size()));
2071 
2072   return addr;
2073 }
2074 
2075 static bool release_shmated_memory(char* addr, size_t size) {
2076 
2077   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2078     addr, addr + size - 1);
2079 
2080   bool rc = false;
2081 
2082   // TODO: is there a way to verify shm size without doing bookkeeping?
2083   if (::shmdt(addr) != 0) {
2084     trcVerbose("error (%d).", errno);
2085   } else {
2086     trcVerbose("ok.");
2087     rc = true;
2088   }
2089   return rc;
2090 }
2091 
2092 static bool uncommit_shmated_memory(char* addr, size_t size) {
2093   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2094     addr, addr + size - 1);
2095 
2096   const bool rc = my_disclaim64(addr, size);
2097 
2098   if (!rc) {
2099     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2100     return false;
2101   }
2102   return true;
2103 }
2104 
2105 // Reserve memory via mmap.
2106 // If <requested_addr> is given, an attempt is made to attach at the given address.
2107 // Failing that, memory is allocated at any address.
2108 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2109 // allocate at an address aligned with the given alignment. Failing that, memory
2110 // is aligned anywhere.
2111 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2112   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2113     "alignment_hint " UINTX_FORMAT "...",
2114     bytes, requested_addr, alignment_hint);
2115 
2116   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
  if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size())) {
2118     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2119     return NULL;
2120   }
2121 
2122   // We must prevent anyone from attaching too close to the
2123   // BRK because that may cause malloc OOM.
2124   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2125     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2126       "Will attach anywhere.", requested_addr);
2127     // Act like the OS refused to attach there.
2128     requested_addr = NULL;
2129   }
2130 
2131   // Specify one or the other but not both.
2132   assert0(!(requested_addr != NULL && alignment_hint > 0));
2133 
2134   // In 64K mode, we claim the global page size (os::vm_page_size())
2135   // is 64K. This is one of the few points where that illusion may
2136   // break, because mmap() will always return memory aligned to 4K. So
2137   // we must ensure we only ever return memory aligned to 64k.
2138   if (alignment_hint) {
2139     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2140   } else {
2141     alignment_hint = os::vm_page_size();
2142   }
2143 
2144   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2145   const size_t size = align_size_up(bytes, os::vm_page_size());
2146 
2147   // alignment: Allocate memory large enough to include an aligned range of the right size and
2148   // cut off the leading and trailing waste pages.
2149   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2150   const size_t extra_size = size + alignment_hint;
2151 
2152   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2153   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2154   int flags = MAP_ANONYMOUS | MAP_SHARED;
2155 
2156   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2157   // it means if wishaddress is given but MAP_FIXED is not set.
2158   //
2159   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2160   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2161   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2162   // get clobbered.
2163   if (requested_addr != NULL) {
2164     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2165       flags |= MAP_FIXED;
2166     }
2167   }
2168 
2169   char* addr = (char*)::mmap(requested_addr, extra_size,
2170       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2171 
2172   if (addr == MAP_FAILED) {
2173     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2174     return NULL;
2175   }
2176 
2177   // Handle alignment.
2178   char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2179   const size_t waste_pre = addr_aligned - addr;
2180   char* const addr_aligned_end = addr_aligned + size;
2181   const size_t waste_post = extra_size - waste_pre - size;
2182   if (waste_pre > 0) {
2183     ::munmap(addr, waste_pre);
2184   }
2185   if (waste_post > 0) {
2186     ::munmap(addr_aligned_end, waste_post);
2187   }
2188   addr = addr_aligned;
2189 
2190   if (addr) {
2191     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2192       addr, addr + bytes, bytes);
2193   } else {
2194     if (requested_addr != NULL) {
2195       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2196     } else {
2197       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2198     }
2199   }
2200 
2201   // bookkeeping
2202   vmembk_add(addr, size, SIZE_4K, VMEM_MAPPED);
2203 
2204   // Test alignment, see above.
2205   assert0(is_aligned_to(addr, os::vm_page_size()));
2206 
2207   return addr;
2208 }
2209 
2210 static bool release_mmaped_memory(char* addr, size_t size) {
2211   assert0(is_aligned_to(addr, os::vm_page_size()));
2212   assert0(is_aligned_to(size, os::vm_page_size()));
2213 
2214   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2215     addr, addr + size - 1);
2216   bool rc = false;
2217 
2218   if (::munmap(addr, size) != 0) {
2219     trcVerbose("failed (%d)\n", errno);
2220     rc = false;
2221   } else {
2222     trcVerbose("ok.");
2223     rc = true;
2224   }
2225 
2226   return rc;
2227 }
2228 
2229 static bool uncommit_mmaped_memory(char* addr, size_t size) {
2230 
2231   assert0(is_aligned_to(addr, os::vm_page_size()));
2232   assert0(is_aligned_to(size, os::vm_page_size()));
2233 
2234   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2235     addr, addr + size - 1);
2236   bool rc = false;
2237 
2238   // Uncommit mmap memory with msync MS_INVALIDATE.
2239   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2240     trcVerbose("failed (%d)\n", errno);
2241     rc = false;
2242   } else {
2243     trcVerbose("ok.");
2244     rc = true;
2245   }
2246 
2247   return rc;
2248 }
2249 
2250 // End: shared memory bookkeeping
2251 ////////////////////////////////////////////////////////////////////////////////////////////////////
2252 
2253 int os::vm_page_size() {
2254   // Seems redundant as all get out.
2255   assert(os::Aix::page_size() != -1, "must call os::init");
2256   return os::Aix::page_size();
2257 }
2258 
2259 // Aix allocates memory by pages.
2260 int os::vm_allocation_granularity() {
2261   assert(os::Aix::page_size() != -1, "must call os::init");
2262   return os::Aix::page_size();
2263 }
2264 
2265 #ifdef PRODUCT
2266 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2267                                     int err) {
2268   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2269           ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2270           strerror(err), err);
2271 }
2272 #endif
2273 
2274 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2275                                   const char* mesg) {
2276   assert(mesg != NULL, "mesg must be specified");
2277   if (!pd_commit_memory(addr, size, exec)) {
2278     // Add extra info in product mode for vm_exit_out_of_memory():
2279     PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2280     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
2281   }
2282 }
2283 
2284 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2285 
2286   assert0(is_aligned_to(addr, os::vm_page_size()));
2287   assert0(is_aligned_to(size, os::vm_page_size()));
2288 
2289   vmembk_t* const vmi = vmembk_find(addr);
2290   assert0(vmi);
2291   vmi->assert_is_valid_subrange(addr, size);
2292 
2293   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2294 
2295   return true;
2296 }
2297 
2298 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2299   return pd_commit_memory(addr, size, exec);
2300 }
2301 
2302 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2303                                   size_t alignment_hint, bool exec,
2304                                   const char* mesg) {
2305   // Alignment_hint is ignored on this OS.
2306   pd_commit_memory_or_exit(addr, size, exec, mesg);
2307 }
2308 
2309 bool os::pd_uncommit_memory(char* addr, size_t size) {
2310   assert0(is_aligned_to(addr, os::vm_page_size()));
2311   assert0(is_aligned_to(size, os::vm_page_size()));
2312 
2313   // Dynamically do different things for mmap/shmat.
2314   const vmembk_t* const vmi = vmembk_find(addr);
2315   assert0(vmi);
2316   vmi->assert_is_valid_subrange(addr, size);
2317 
2318   if (vmi->type == VMEM_SHMATED) {
2319     return uncommit_shmated_memory(addr, size);
2320   } else {
2321     return uncommit_mmaped_memory(addr, size);
2322   }
2323 }
2324 
2325 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2326   // Do not call this; no need to commit stack pages on AIX.
2327   ShouldNotReachHere();
2328   return true;
2329 }
2330 
2331 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2332   // Do not call this; no need to commit stack pages on AIX.
2333   ShouldNotReachHere();
2334   return true;
2335 }
2336 
2337 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2338 }
2339 
2340 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2341 }
2342 
2343 void os::numa_make_global(char *addr, size_t bytes) {
2344 }
2345 
2346 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2347 }
2348 
2349 bool os::numa_topology_changed() {
2350   return false;
2351 }
2352 
2353 size_t os::numa_get_groups_num() {
2354   return 1;
2355 }
2356 
2357 int os::numa_get_group_id() {
2358   return 0;
2359 }
2360 
2361 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2362   if (size > 0) {
2363     ids[0] = 0;
2364     return 1;
2365   }
2366   return 0;
2367 }
2368 
2369 bool os::get_page_info(char *start, page_info* info) {
2370   return false;
2371 }
2372 
2373 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2374   return end;
2375 }
2376 
// Reserves memory, either mmap'ed or shmat'ed, depending on page size mode
// and allocation size. A wish address is not supported here (it asserts in
// debug builds and is ignored otherwise); use os::attempt_reserve_memory_at()
// for that.
2379 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2380 
  // All other Unices do a mmap(MAP_FIXED) if the addr is given,
  // thereby clobbering old mappings at that place. That is probably
  // not intended, never used, and almost certainly an error were it
  // ever used this way (to try attaching at a specified address
  // without clobbering old mappings, an alternate API exists:
  // os::attempt_reserve_memory_at()).
  // Instead of mimicking the dangerous coding of the other platforms, here I
  // just ignore the requested address (release) or assert (debug).
2389   assert0(requested_addr == NULL);
2390 
2391   // Always round to os::vm_page_size(), which may be larger than 4K.
2392   bytes = align_size_up(bytes, os::vm_page_size());
2393   const size_t alignment_hint0 =
2394     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2395 
2396   // In 4K mode always use mmap.
2397   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2398   if (os::vm_page_size() == SIZE_4K) {
2399     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2400   } else {
2401     if (bytes >= Use64KPagesThreshold) {
2402       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2403     } else {
2404       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2405     }
2406   }
2407 }
2408 
2409 bool os::pd_release_memory(char* addr, size_t size) {
2410 
2411   // Dynamically do different things for mmap/shmat.
2412   vmembk_t* const vmi = vmembk_find(addr);
2413   assert0(vmi);
2414 
2415   // Always round to os::vm_page_size(), which may be larger than 4K.
2416   size = align_size_up(size, os::vm_page_size());
2417   addr = (char *)align_ptr_up(addr, os::vm_page_size());
2418 
2419   bool rc = false;
2420   bool remove_bookkeeping = false;
2421   if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use the memory anymore (but it still
    //   occupies page table space).
2427     vmi->assert_is_valid_subrange(addr, size);
2428     if (addr == vmi->addr && size == vmi->size) {
2429       rc = release_shmated_memory(addr, size);
2430       remove_bookkeeping = true;
2431     } else {
2432       rc = uncommit_shmated_memory(addr, size);
2433     }
2434   } else {
2435     // User may unmap partial regions but region has to be fully contained.
2436 #ifdef ASSERT
2437     vmi->assert_is_valid_subrange(addr, size);
2438 #endif
2439     rc = release_mmaped_memory(addr, size);
2440     remove_bookkeeping = true;
2441   }
2442 
2443   // update bookkeeping
2444   if (rc && remove_bookkeeping) {
2445     vmembk_remove(vmi);
2446   }
2447 
2448   return rc;
2449 }
2450 
2451 static bool checked_mprotect(char* addr, size_t size, int prot) {
2452 
2453   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2454   // not tell me if protection failed when trying to protect an un-protectable range.
2455   //
  // This means if the memory was allocated using shmget/shmat, protection won't work
2457   // but mprotect will still return 0:
2458   //
2459   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2460 
  bool rc = (::mprotect(addr, size, prot) == 0);
2462 
2463   if (!rc) {
2464     const char* const s_errno = strerror(errno);
2465     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2466     return false;
2467   }
2468 
2469   // mprotect success check
2470   //
2471   // Mprotect said it changed the protection but can I believe it?
2472   //
2473   // To be sure I need to check the protection afterwards. Try to
2474   // read from protected memory and check whether that causes a segfault.
2475   //
2476   if (!os::Aix::xpg_sus_mode()) {
2477 
2478     if (CanUseSafeFetch32()) {
2479 
      const bool read_protected =
        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
         SafeFetch32((int*)addr, 0x76543210) == 0x76543210);
2483 
2484       if (prot & PROT_READ) {
2485         rc = !read_protected;
2486       } else {
2487         rc = read_protected;
2488       }
2489     }
2490   }
2491   if (!rc) {
2492     assert(false, "mprotect failed.");
2493   }
2494   return rc;
2495 }
2496 
2497 // Set protections specified
2498 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2499   unsigned int p = 0;
2500   switch (prot) {
2501   case MEM_PROT_NONE: p = PROT_NONE; break;
2502   case MEM_PROT_READ: p = PROT_READ; break;
2503   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2504   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2505   default:
2506     ShouldNotReachHere();
2507   }
2508   // is_committed is unused.
2509   return checked_mprotect(addr, size, p);
2510 }
2511 
2512 bool os::guard_memory(char* addr, size_t size) {
2513   return checked_mprotect(addr, size, PROT_NONE);
2514 }
2515 
2516 bool os::unguard_memory(char* addr, size_t size) {
2517   return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2518 }
2519 
2520 // Large page support
2521 
2522 static size_t _large_page_size = 0;
2523 
2524 // Enable large page support if OS allows that.
2525 void os::large_page_init() {
2526   return; // Nothing to do. See query_multipage_support and friends.
2527 }
2528 
2529 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2530   // "exec" is passed in but not used. Creating the shared image for
2531   // the code cache doesn't have an SHM_X executable permission to check.
2532   Unimplemented();
2533   return 0;
2534 }
2535 
2536 bool os::release_memory_special(char* base, size_t bytes) {
2537   // Detaching the SHM segment will also delete it, see reserve_memory_special().
2538   Unimplemented();
2539   return false;
2540 }
2541 
2542 size_t os::large_page_size() {
2543   return _large_page_size;
2544 }
2545 
2546 bool os::can_commit_large_page_memory() {
2547   // Does not matter, we do not support huge pages.
2548   return false;
2549 }
2550 
2551 bool os::can_execute_large_page_memory() {
2552   // Does not matter, we do not support huge pages.
2553   return false;
2554 }
2555 
2556 // Reserve memory at an arbitrary address, only if that area is
2557 // available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {

  // Always round to os::vm_page_size(), which may be larger than 4K.
  bytes = align_size_up(bytes, os::vm_page_size());

  // In 4K mode always use mmap.
  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
  if (os::vm_page_size() == SIZE_4K) {
    return reserve_mmaped_memory(bytes, requested_addr, 0);
  } else {
    if (bytes >= Use64KPagesThreshold) {
      return reserve_shmated_memory(bytes, requested_addr, 0);
    } else {
      return reserve_mmaped_memory(bytes, requested_addr, 0);
    }
  }
}
2578 
2579 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2580   return ::read(fd, buf, nBytes);
2581 }
2582 
2583 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2584   return ::pread(fd, buf, nBytes, offset);
2585 }
2586 
2587 void os::naked_short_sleep(jlong ms) {
2588   struct timespec req;
2589 
  assert(ms < 1000, "Uninterruptible sleep, short time use only");
2591   req.tv_sec = 0;
2592   if (ms > 0) {
2593     req.tv_nsec = (ms % 1000) * 1000000;
2594   }
2595   else {
2596     req.tv_nsec = 1;
2597   }
2598 
2599   nanosleep(&req, NULL);
2600 
2601   return;
2602 }
2603 
2604 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2605 void os::infinite_sleep() {
2606   while (true) {    // sleep forever ...
2607     ::sleep(100);   // ... 100 seconds at a time
2608   }
2609 }
2610 
2611 // Used to convert frequent JVM_Yield() to nops
2612 bool os::dont_yield() {
2613   return DontYieldALot;
2614 }
2615 
2616 void os::naked_yield() {
2617   sched_yield();
2618 }
2619 
2620 ////////////////////////////////////////////////////////////////////////////////
2621 // thread priority support
2622 
2623 // From AIX manpage to pthread_setschedparam
2624 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2625 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2626 //
2627 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2628 // range from 40 to 80, where 40 is the least favored priority and 80
2629 // is the most favored."
2630 //
2631 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2632 // scheduling there; however, this still leaves iSeries.)
2633 //
2634 // We use the same values for AIX and PASE.
2635 int os::java_to_os_priority[CriticalPriority + 1] = {
2636   54,             // 0 Entry should never be used
2637 
2638   55,             // 1 MinPriority
2639   55,             // 2
2640   56,             // 3
2641 
2642   56,             // 4
2643   57,             // 5 NormPriority
2644   57,             // 6
2645 
2646   58,             // 7
2647   58,             // 8
2648   59,             // 9 NearMaxPriority
2649 
2650   60,             // 10 MaxPriority
2651 
2652   60              // 11 CriticalPriority
2653 };
2654 
2655 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2656   if (!UseThreadPriorities) return OS_OK;
2657   pthread_t thr = thread->osthread()->pthread_id();
2658   int policy = SCHED_OTHER;
2659   struct sched_param param;
2660   param.sched_priority = newpri;
2661   int ret = pthread_setschedparam(thr, policy, &param);
2662 
2663   if (ret != 0) {
2664     trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2665         (int)thr, newpri, ret, strerror(ret));
2666   }
2667   return (ret == 0) ? OS_OK : OS_ERR;
2668 }
2669 
2670 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2671   if (!UseThreadPriorities) {
2672     *priority_ptr = java_to_os_priority[NormPriority];
2673     return OS_OK;
2674   }
2675   pthread_t thr = thread->osthread()->pthread_id();
2676   int policy = SCHED_OTHER;
2677   struct sched_param param;
2678   int ret = pthread_getschedparam(thr, &policy, &param);
2679   *priority_ptr = param.sched_priority;
2680 
2681   return (ret == 0) ? OS_OK : OS_ERR;
2682 }
2683 
2684 // Hint to the underlying OS that a task switch would not be good.
2685 // Void return because it's a hint and can fail.
2686 void os::hint_no_preempt() {}
2687 
2688 ////////////////////////////////////////////////////////////////////////////////
2689 // suspend/resume support
2690 
2691 //  the low-level signal-based suspend/resume support is a remnant from the
2692 //  old VM-suspension that used to be for java-suspension, safepoints etc,
2693 //  within hotspot. Now there is a single use-case for this:
2694 //    - calling get_thread_pc() on the VMThread by the flat-profiler task
2695 //      that runs in the watcher thread.
2696 //  The remaining code is greatly simplified from the more general suspension
2697 //  code that used to be used.
2698 //
2699 //  The protocol is quite simple:
2700 //  - suspend:
2701 //      - sends a signal to the target thread
2702 //      - polls the suspend state of the osthread using a yield loop
2703 //      - target thread signal handler (SR_handler) sets suspend state
2704 //        and blocks in sigsuspend until continued
2705 //  - resume:
2706 //      - sets target osthread state to continue
2707 //      - sends signal to end the sigsuspend loop in the SR_handler
2708 //
2709 //  Note that the SR_lock plays no role in this suspend/resume protocol.
2710 //
2711 
2712 static void resume_clear_context(OSThread *osthread) {
2713   osthread->set_ucontext(NULL);
2714   osthread->set_siginfo(NULL);
2715 }
2716 
2717 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2718   osthread->set_ucontext(context);
2719   osthread->set_siginfo(siginfo);
2720 }
2721 
2722 //
2723 // Handler function invoked when a thread's execution is suspended or
2724 // resumed. We have to be careful that only async-safe functions are
2725 // called here (Note: most pthread functions are not async safe and
2726 // should be avoided.)
2727 //
// Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal handler
// from being run. libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal.
2733 //
2734 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
2735 //
2736 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2737   // Save and restore errno to avoid confusing native code with EINTR
2738   // after sigsuspend.
2739   int old_errno = errno;
2740 
2741   Thread* thread = Thread::current();
2742   OSThread* osthread = thread->osthread();
2743   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2744 
2745   os::SuspendResume::State current = osthread->sr.state();
2746   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2747     suspend_save_context(osthread, siginfo, context);
2748 
2749     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2750     os::SuspendResume::State state = osthread->sr.suspended();
2751     if (state == os::SuspendResume::SR_SUSPENDED) {
2752       sigset_t suspend_set;  // signals for sigsuspend()
2753 
2754       // get current set of blocked signals and unblock resume signal
2755       pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2756       sigdelset(&suspend_set, SR_signum);
2757 
2758       // wait here until we are resumed
2759       while (1) {
2760         sigsuspend(&suspend_set);
2761 
2762         os::SuspendResume::State result = osthread->sr.running();
2763         if (result == os::SuspendResume::SR_RUNNING) {
2764           break;
2765         }
2766       }
2767 
2768     } else if (state == os::SuspendResume::SR_RUNNING) {
2769       // request was cancelled, continue
2770     } else {
2771       ShouldNotReachHere();
2772     }
2773 
2774     resume_clear_context(osthread);
2775   } else if (current == os::SuspendResume::SR_RUNNING) {
2776     // request was cancelled, continue
2777   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2778     // ignore
2779   } else {
2780     ShouldNotReachHere();
2781   }
2782 
2783   errno = old_errno;
2784 }
2785 
2786 static int SR_initialize() {
2787   struct sigaction act;
2788   char *s;
2789   // Get signal number to use for suspend/resume
2790   if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
2791     int sig = ::strtol(s, 0, 10);
    if (sig > 0 && sig < NSIG) {
2793       SR_signum = sig;
2794     }
2795   }
2796 
2797   assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
2798         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
2799 
2800   sigemptyset(&SR_sigset);
2801   sigaddset(&SR_sigset, SR_signum);
2802 
2803   // Set up signal handler for suspend/resume.
2804   act.sa_flags = SA_RESTART|SA_SIGINFO;
2805   act.sa_handler = (void (*)(int)) SR_handler;
2806 
2807   // SR_signum is blocked by default.
  // 4528190 - We also need to block the pthread restart signal (32 on all
  // supported Linux platforms). Note that LinuxThreads needs to block
  // this signal for all threads to work properly. So we don't have
  // to use a hard-coded signal number when setting up the mask.
2812   pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
2813 
2814   if (sigaction(SR_signum, &act, 0) == -1) {
2815     return -1;
2816   }
2817 
2818   // Save signal flag
2819   os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
2820   return 0;
2821 }
2822 
2823 static int SR_finalize() {
2824   return 0;
2825 }
2826 
2827 static int sr_notify(OSThread* osthread) {
2828   int status = pthread_kill(osthread->pthread_id(), SR_signum);
2829   assert_status(status == 0, status, "pthread_kill");
2830   return status;
2831 }
2832 
2833 // "Randomly" selected value for how long we want to spin
2834 // before bailing out on suspending a thread, also how often
2835 // we send a signal to a thread we want to resume
2836 static const int RANDOMLY_LARGE_INTEGER = 1000000;
2837 static const int RANDOMLY_LARGE_INTEGER2 = 100;
2838 
// Returns true on success and false on error - really, an error is fatal,
// but this seems to be the normal response to library errors.
2841 static bool do_suspend(OSThread* osthread) {
2842   assert(osthread->sr.is_running(), "thread should be running");
2843   // mark as suspended and send signal
2844 
2845   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
2846     // failed to switch, state wasn't running?
2847     ShouldNotReachHere();
2848     return false;
2849   }
2850 
2851   if (sr_notify(osthread) != 0) {
2852     // try to cancel, switch to running
2853 
2854     os::SuspendResume::State result = osthread->sr.cancel_suspend();
2855     if (result == os::SuspendResume::SR_RUNNING) {
2856       // cancelled
2857       return false;
2858     } else if (result == os::SuspendResume::SR_SUSPENDED) {
2859       // somehow managed to suspend
2860       return true;
2861     } else {
2862       ShouldNotReachHere();
2863       return false;
2864     }
2865   }
2866 
2867   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
2868 
2869   for (int n = 0; !osthread->sr.is_suspended(); n++) {
2870     for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
2871       os::naked_yield();
2872     }
2873 
2874     // timeout, try to cancel the request
2875     if (n >= RANDOMLY_LARGE_INTEGER) {
2876       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
2877       if (cancelled == os::SuspendResume::SR_RUNNING) {
2878         return false;
2879       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
2880         return true;
2881       } else {
2882         ShouldNotReachHere();
2883         return false;
2884       }
2885     }
2886   }
2887 
2888   guarantee(osthread->sr.is_suspended(), "Must be suspended");
2889   return true;
2890 }
2891 
2892 static void do_resume(OSThread* osthread) {
2893   //assert(osthread->sr.is_suspended(), "thread should be suspended");
2894 
2895   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
2896     // failed to switch to WAKEUP_REQUEST
2897     ShouldNotReachHere();
2898     return;
2899   }
2900 
2901   while (!osthread->sr.is_running()) {
2902     if (sr_notify(osthread) == 0) {
2903       for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
2904         for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
2905           os::naked_yield();
2906         }
2907       }
2908     } else {
2909       ShouldNotReachHere();
2910     }
2911   }
2912 
2913   guarantee(osthread->sr.is_running(), "Must be running!");
2914 }
2915 
2916 ///////////////////////////////////////////////////////////////////////////////////
2917 // signal handling (except suspend/resume)
2918 
2919 // This routine may be used by user applications as a "hook" to catch signals.
2920 // The user-defined signal handler must pass unrecognized signals to this
2921 // routine, and if it returns true (non-zero), then the signal handler must
// return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero); instead it will execute a VM panic
// routine that kills the process.
2925 //
2926 // If this routine returns false, it is OK to call it again. This allows
2927 // the user-defined signal handler to perform checks either before or after
2928 // the VM performs its own checks. Naturally, the user code would be making
2929 // a serious error if it tried to handle an exception (such as a null check
2930 // or breakpoint) that the VM was generating for its own correct operation.
2931 //
2932 // This routine may recognize any of the following kinds of signals:
2933 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2934 // It should be consulted by handlers for any of those signals.
2935 //
2936 // The caller of this routine must pass in the three arguments supplied
2937 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
2938 // field of the structure passed to sigaction(). This routine assumes that
2939 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2940 //
2941 // Note that the VM will print warnings if it detects conflicting signal
2942 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2943 //
2944 extern "C" JNIEXPORT int
2945 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2946 
// Set thread signal mask (for some reason on AIX sigthreadmask() seems to
// be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same).
2950 bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
2951   const int rc = ::pthread_sigmask(how, set, oset);
  // Return value semantics differ slightly in the error case:
  // pthread_sigmask returns the error number, while sigthreadmask returns -1
  // and sets the global errno (so pthread_sigmask is more thread-safe for
  // error handling). But success is always 0.
  return rc == 0;
2957 }
2958 
2959 // Function to unblock all signals which are, according
2960 // to POSIX, typical program error signals. If they happen while being blocked,
2961 // they typically will bring down the process immediately.
2962 bool unblock_program_error_signals() {
2963   sigset_t set;
2964   ::sigemptyset(&set);
2965   ::sigaddset(&set, SIGILL);
2966   ::sigaddset(&set, SIGBUS);
2967   ::sigaddset(&set, SIGFPE);
2968   ::sigaddset(&set, SIGSEGV);
2969   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2970 }
2971 
2972 // Renamed from 'signalHandler' to avoid collision with other shared libs.
2973 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2974   assert(info != NULL && uc != NULL, "it must be old kernel");
2975 
2976   // Never leave program error signals blocked;
2977   // on all our platforms they would bring down the process immediately when
2978   // getting raised while being blocked.
2979   unblock_program_error_signals();
2980 
2981   JVM_handle_aix_signal(sig, info, uc, true);
2982 }
2983 
2984 // This boolean allows users to forward their own non-matching signals
2985 // to JVM_handle_aix_signal, harmlessly.
2986 bool os::Aix::signal_handlers_are_installed = false;
2987 
2988 // For signal-chaining
2989 struct sigaction os::Aix::sigact[MAXSIGNUM];
2990 unsigned int os::Aix::sigs = 0;
2991 bool os::Aix::libjsig_is_loaded = false;
2992 typedef struct sigaction *(*get_signal_t)(int);
2993 get_signal_t os::Aix::get_signal_action = NULL;
2994 
2995 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2996   struct sigaction *actp = NULL;
2997 
2998   if (libjsig_is_loaded) {
2999     // Retrieve the old signal handler from libjsig
3000     actp = (*get_signal_action)(sig);
3001   }
3002   if (actp == NULL) {
3003     // Retrieve the preinstalled signal handler from jvm
3004     actp = get_preinstalled_handler(sig);
3005   }
3006 
3007   return actp;
3008 }
3009 
3010 static bool call_chained_handler(struct sigaction *actp, int sig,
3011                                  siginfo_t *siginfo, void *context) {
3012   // Call the old signal handler
3013   if (actp->sa_handler == SIG_DFL) {
3014     // It's more reasonable to let jvm treat it as an unexpected exception
3015     // instead of taking the default action.
3016     return false;
3017   } else if (actp->sa_handler != SIG_IGN) {
3018     if ((actp->sa_flags & SA_NODEFER) == 0) {
      // Automatically block the signal.
3020       sigaddset(&(actp->sa_mask), sig);
3021     }
3022 
3023     sa_handler_t hand = NULL;
3024     sa_sigaction_t sa = NULL;
3025     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3026     // retrieve the chained handler
3027     if (siginfo_flag_set) {
3028       sa = actp->sa_sigaction;
3029     } else {
3030       hand = actp->sa_handler;
3031     }
3032 
3033     if ((actp->sa_flags & SA_RESETHAND) != 0) {
3034       actp->sa_handler = SIG_DFL;
3035     }
3036 
3037     // try to honor the signal mask
3038     sigset_t oset;
3039     pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3040 
3041     // call into the chained handler
3042     if (siginfo_flag_set) {
3043       (*sa)(sig, siginfo, context);
3044     } else {
3045       (*hand)(sig);
3046     }
3047 
3048     // restore the signal mask
3049     pthread_sigmask(SIG_SETMASK, &oset, 0);
3050   }
3051   // Tell jvm's signal handler the signal is taken care of.
3052   return true;
3053 }
3054 
3055 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3056   bool chained = false;
3057   // signal-chaining
3058   if (UseSignalChaining) {
3059     struct sigaction *actp = get_chained_signal_action(sig);
3060     if (actp != NULL) {
3061       chained = call_chained_handler(actp, sig, siginfo, context);
3062     }
3063   }
3064   return chained;
3065 }
3066 
3067 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3068   if ((((unsigned int)1 << sig) & sigs) != 0) {
3069     return &sigact[sig];
3070   }
3071   return NULL;
3072 }
3073 
3074 void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3075   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3076   sigact[sig] = oldAct;
3077   sigs |= (unsigned int)1 << sig;
3078 }
3079 
3080 // for diagnostic
3081 int os::Aix::sigflags[MAXSIGNUM];
3082 
3083 int os::Aix::get_our_sigflags(int sig) {
3084   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3085   return sigflags[sig];
3086 }
3087 
3088 void os::Aix::set_our_sigflags(int sig, int flags) {
3089   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3090   sigflags[sig] = flags;
3091 }
3092 
3093 void os::Aix::set_signal_handler(int sig, bool set_installed) {
3094   // Check for overwrite.
3095   struct sigaction oldAct;
3096   sigaction(sig, (struct sigaction*)NULL, &oldAct);
3097 
3098   void* oldhand = oldAct.sa_sigaction
3099     ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3100     : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3101   // Renamed 'signalHandler' to avoid collision with other shared libs.
3102   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3103       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3104       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3105     if (AllowUserSignalHandlers || !set_installed) {
3106       // Do not overwrite; user takes responsibility to forward to us.
3107       return;
3108     } else if (UseSignalChaining) {
3109       // save the old handler in jvm
3110       save_preinstalled_handler(sig, oldAct);
3111       // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
3113     } else {
3114       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
3115                     "%#lx for signal %d.", (long)oldhand, sig));
3116     }
3117   }
3118 
3119   struct sigaction sigAct;
3120   sigfillset(&(sigAct.sa_mask));
3121   if (!set_installed) {
3122     sigAct.sa_handler = SIG_DFL;
3123     sigAct.sa_flags = SA_RESTART;
3124   } else {
3125     // Renamed 'signalHandler' to avoid collision with other shared libs.
3126     sigAct.sa_sigaction = javaSignalHandler;
3127     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3128   }
  // Save the flags we set, so they can be checked later.
3130   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3131   sigflags[sig] = sigAct.sa_flags;
3132 
3133   int ret = sigaction(sig, &sigAct, &oldAct);
3134   assert(ret == 0, "check");
3135 
3136   void* oldhand2 = oldAct.sa_sigaction
3137                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3138                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3139   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3140 }
3141 
// Install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
3144 void os::Aix::install_signal_handlers() {
3145   if (!signal_handlers_are_installed) {
3146     signal_handlers_are_installed = true;
3147 
3148     // signal-chaining
3149     typedef void (*signal_setting_t)();
3150     signal_setting_t begin_signal_setting = NULL;
3151     signal_setting_t end_signal_setting = NULL;
3152     begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3153                              dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3154     if (begin_signal_setting != NULL) {
3155       end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3156                              dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3157       get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3158                             dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3159       libjsig_is_loaded = true;
3160       assert(UseSignalChaining, "should enable signal-chaining");
3161     }
3162     if (libjsig_is_loaded) {
      // Tell libjsig the VM is setting signal handlers.
3164       (*begin_signal_setting)();
3165     }
3166 
3167     set_signal_handler(SIGSEGV, true);
3168     set_signal_handler(SIGPIPE, true);
3169     set_signal_handler(SIGBUS, true);
3170     set_signal_handler(SIGILL, true);
3171     set_signal_handler(SIGFPE, true);
3172     set_signal_handler(SIGTRAP, true);
3173     set_signal_handler(SIGXFSZ, true);
3174     set_signal_handler(SIGDANGER, true);
3175 
3176     if (libjsig_is_loaded) {
      // Tell libjsig the VM has finished setting signal handlers.
3178       (*end_signal_setting)();
3179     }
3180 
    // We do not activate the signal checker if libjsig is in place, since we
    // trust ourselves, and if a user signal handler is installed all bets are
    // off anyway.
    // Log that signal checking is off only if -verbose:jni is specified.
3184     if (CheckJNICalls) {
3185       if (libjsig_is_loaded) {
3186         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3187         check_signals = false;
3188       }
3189       if (AllowUserSignalHandlers) {
3190         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3191         check_signals = false;
3192       }
3193       // Need to initialize check_signal_done.
3194       ::sigemptyset(&check_signal_done);
3195     }
3196   }
3197 }
3198 
3199 static const char* get_signal_handler_name(address handler,
3200                                            char* buf, int buflen) {
3201   int offset;
3202   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3203   if (found) {
3204     // skip directory names
3205     const char *p1, *p2;
3206     p1 = buf;
3207     size_t len = strlen(os::file_separator());
3208     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3209     // The way os::dll_address_to_library_name is implemented on Aix
3210     // right now, it always returns -1 for the offset which is not
3211     // terribly informative.
3212     // Will fix that. For now, omit the offset.
3213     jio_snprintf(buf, buflen, "%s", p1);
3214   } else {
3215     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3216   }
3217   return buf;
3218 }
3219 
3220 static void print_signal_handler(outputStream* st, int sig,
3221                                  char* buf, size_t buflen) {
3222   struct sigaction sa;
3223   sigaction(sig, NULL, &sa);
3224 
3225   st->print("%s: ", os::exception_name(sig, buf, buflen));
3226 
3227   address handler = (sa.sa_flags & SA_SIGINFO)
3228     ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3229     : CAST_FROM_FN_PTR(address, sa.sa_handler);
3230 
3231   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3232     st->print("SIG_DFL");
3233   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3234     st->print("SIG_IGN");
3235   } else {
3236     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3237   }
3238 
3239   // Print readable mask.
3240   st->print(", sa_mask[0]=");
3241   os::Posix::print_signal_set_short(st, &sa.sa_mask);
3242 
3243   address rh = VMError::get_resetted_sighandler(sig);
  // Maybe the handler was reset by VMError?
3245   if (rh != NULL) {
3246     handler = rh;
3247     sa.sa_flags = VMError::get_resetted_sigflags(sig);
3248   }
3249 
3250   // Print textual representation of sa_flags.
3251   st->print(", sa_flags=");
3252   os::Posix::print_sa_flags(st, sa.sa_flags);
3253 
3254   // Check: is it our handler?
3255   if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3256       handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
3257     // It is our signal handler.
3258     // Check for flags, reset system-used one!
3259     if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
3260       st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
3261                 os::Aix::get_our_sigflags(sig));
3262     }
3263   }
3264   st->cr();
3265 }
3266 
3267 #define DO_SIGNAL_CHECK(sig) \
3268   if (!sigismember(&check_signal_done, sig)) \
3269     os::Aix::check_signal_handler(sig)
3270 
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI. We can add any further periodic checks here.
3273 
3274 void os::run_periodic_checks() {
3275 
3276   if (check_signals == false) return;
3277 
  // If SEGV or BUS are overridden, that could prevent generation of the
  // hs*.log file in the event of a crash. Debugging such a case can be
  // very challenging, so we check the following for good measure:
3282   DO_SIGNAL_CHECK(SIGSEGV);
3283   DO_SIGNAL_CHECK(SIGILL);
3284   DO_SIGNAL_CHECK(SIGFPE);
3285   DO_SIGNAL_CHECK(SIGBUS);
3286   DO_SIGNAL_CHECK(SIGPIPE);
3287   DO_SIGNAL_CHECK(SIGXFSZ);
3288   if (UseSIGTRAP) {
3289     DO_SIGNAL_CHECK(SIGTRAP);
3290   }
3291   DO_SIGNAL_CHECK(SIGDANGER);
3292 
  // ReduceSignalUsage allows the user to override these handlers;
  // see the comments at the very top of this file and in jvm_aix.h.
3295   if (!ReduceSignalUsage) {
3296     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3297     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3298     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3299     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3300   }
3301 
3302   DO_SIGNAL_CHECK(SR_signum);
3303   DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
3304 }
3305 
3306 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3307 
3308 static os_sigaction_t os_sigaction = NULL;
3309 
3310 void os::Aix::check_signal_handler(int sig) {
3311   char buf[O_BUFLEN];
3312   address jvmHandler = NULL;
3313 
3314   struct sigaction act;
3315   if (os_sigaction == NULL) {
3316     // only trust the default sigaction, in case it has been interposed
3317     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3318     if (os_sigaction == NULL) return;
3319   }
3320 
3321   os_sigaction(sig, (struct sigaction*)NULL, &act);
3322 
3323   address thisHandler = (act.sa_flags & SA_SIGINFO)
3324     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3325     : CAST_FROM_FN_PTR(address, act.sa_handler);
3326 
3327   switch(sig) {
3328   case SIGSEGV:
3329   case SIGBUS:
3330   case SIGFPE:
3331   case SIGPIPE:
3332   case SIGILL:
3333   case SIGXFSZ:
3334     // Renamed 'signalHandler' to avoid collision with other shared libs.
3335     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3336     break;
3337 
3338   case SHUTDOWN1_SIGNAL:
3339   case SHUTDOWN2_SIGNAL:
3340   case SHUTDOWN3_SIGNAL:
3341   case BREAK_SIGNAL:
3342     jvmHandler = (address)user_handler();
3343     break;
3344 
3345   case INTERRUPT_SIGNAL:
3346     jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
3347     break;
3348 
3349   default:
3350     if (sig == SR_signum) {
3351       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3352     } else {
3353       return;
3354     }
3355     break;
3356   }
3357 
3358   if (thisHandler != jvmHandler) {
3359     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3360     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3361     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3362     // No need to check this sig any longer
3363     sigaddset(&check_signal_done, sig);
    // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned to SIG_IGN.
3365     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3366       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3367                     exception_name(sig, buf, O_BUFLEN));
3368     }
3369   } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3370     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3371     tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
3372     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
3373     // No need to check this sig any longer
3374     sigaddset(&check_signal_done, sig);
3375   }
3376 
  // Dump all the signal handlers.
3378   if (sigismember(&check_signal_done, sig)) {
3379     print_signal_handlers(tty, buf, O_BUFLEN);
3380   }
3381 }
3382 
3383 extern bool signal_name(int signo, char* buf, size_t len);
3384 
3385 const char* os::exception_name(int exception_code, char* buf, size_t size) {
3386   if (0 < exception_code && exception_code <= SIGRTMAX) {
3387     // signal
3388     if (!signal_name(exception_code, buf, size)) {
3389       jio_snprintf(buf, size, "SIG%d", exception_code);
3390     }
3391     return buf;
3392   } else {
3393     return NULL;
3394   }
3395 }
3396 
3397 // To install functions for atexit system call
3398 extern "C" {
3399   static void perfMemory_exit_helper() {
3400     perfMemory_exit();
3401   }
3402 }
3403 
// This is called _before_ most of the global arguments have been parsed.
3405 void os::init(void) {
  // This is basic; we want to know if that ever changes.
  // (The shared memory boundary is supposed to be 256M aligned.)
3408   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3409 
3410   // First off, we need to know whether we run on AIX or PASE, and
3411   // the OS level we run on.
3412   os::Aix::initialize_os_info();
3413 
3414   // Scan environment (SPEC1170 behaviour, etc).
3415   os::Aix::scan_environment();
3416 
3417   // Check which pages are supported by AIX.
3418   query_multipage_support();
3419 
3420   // Act like we only have one page size by eliminating corner cases which
3421   // we did not support very well anyway.
3422   // We have two input conditions:
3423   // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3424   //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3425   //    setting.
3426   //    Data segment page size is important for us because it defines the thread stack page
3427   //    size, which is needed for guard page handling, stack banging etc.
3428   // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3429   //    and should be allocated with 64k pages.
3430   //
3431   // So, we do the following:
3432   // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
3433   // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
3434   // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
3435   // 64k          no              --- AIX 5.2 ? ---
3436   // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)
3437 
  // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if stack page size is 64k you cannot pretend it's 4k).
3440 
3441   if (g_multipage_support.datapsize == SIZE_4K) {
3442     // datapsize = 4K. Data segment, thread stacks are 4K paged.
3443     if (g_multipage_support.can_use_64K_pages) {
3444       // .. but we are able to use 64K pages dynamically.
3445       // This would be typical for java launchers which are not linked
3446       // with datapsize=64K (like, any other launcher but our own).
3447       //
3448       // In this case it would be smart to allocate the java heap with 64K
3449       // to get the performance benefit, and to fake 64k pages for the
3450       // data segment (when dealing with thread stacks).
3451       //
3452       // However, leave a possibility to downgrade to 4K, using
3453       // -XX:-Use64KPages.
3454       if (Use64KPages) {
3455         trcVerbose("64K page mode (faked for data segment)");
3456         Aix::_page_size = SIZE_64K;
3457       } else {
3458         trcVerbose("4K page mode (Use64KPages=off)");
3459         Aix::_page_size = SIZE_4K;
3460       }
3461     } else {
3462       // .. and not able to allocate 64k pages dynamically. Here, just
3463       // fall back to 4K paged mode and use mmap for everything.
3464       trcVerbose("4K page mode");
3465       Aix::_page_size = SIZE_4K;
3466       FLAG_SET_ERGO(bool, Use64KPages, false);
3467     }
3468   } else {
3469     // datapsize = 64k. Data segment, thread stacks are 64k paged.
3470     //   This normally means that we can allocate 64k pages dynamically.
3471     //   (There is one special case where this may be false: EXTSHM=on.
3472     //    but we decided to not support that mode).
3473     assert0(g_multipage_support.can_use_64K_pages);
3474     Aix::_page_size = SIZE_64K;
3475     trcVerbose("64K page mode");
3476     FLAG_SET_ERGO(bool, Use64KPages, true);
3477   }
3478 
  // Hard-wire the stack page size to the base page size; if that works, we can
  // remove the separate stack page size altogether.
3481   Aix::_stack_page_size = Aix::_page_size;
3482 
3483   // For now UseLargePages is just ignored.
3484   FLAG_SET_ERGO(bool, UseLargePages, false);
3485   _page_sizes[0] = 0;
3486   _large_page_size = -1;
3487 
3488   // debug trace
3489   trcVerbose("os::vm_page_size %s\n", describe_pagesize(os::vm_page_size()));
3490 
3491   // Next, we need to initialize libo4 and libperfstat libraries.
3492   if (os::Aix::on_pase()) {
3493     os::Aix::initialize_libo4();
3494   } else {
3495     os::Aix::initialize_libperfstat();
3496   }
3497 
3498   // Reset the perfstat information provided by ODM.
3499   if (os::Aix::on_aix()) {
3500     libperfstat::perfstat_reset();
3501   }
3502 
  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
3505   os::Aix::initialize_system_info();
3506 
3507   _initial_pid = getpid();
3508 
3509   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3510 
3511   init_random(1234567);
3512 
3513   ThreadCritical::initialize();
3514 
  // Aix::_main_thread points to the primordial thread.
3516   Aix::_main_thread = pthread_self();
3517 
3518   initial_time_count = os::elapsed_counter();
3519 
3520   // If the pagesize of the VM is greater than 8K determine the appropriate
3521   // number of initial guard pages. The user can change this with the
3522   // command line arguments, if needed.
3523   if (vm_page_size() > (int)Aix::vm_default_page_size()) {
3524     StackYellowPages = 1;
3525     StackRedPages = 1;
3526     StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
3527   }
3528 }
3529 
3530 // This is called _after_ the global arguments have been parsed.
3531 jint os::init_2(void) {
3532 
3533   trcVerbose("processor count: %d", os::_processor_count);
3534   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3535 
3536   // Initially build up the loaded dll map.
3537   LoadedLibraries::reload();
3538 
3539   const int page_size = Aix::page_size();
3540   const int map_size = page_size;
3541 
3542   address map_address = (address) MAP_FAILED;
3543   const int prot  = PROT_READ;
3544   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3545 
3546   // Use optimized addresses for the polling page,
3547   // e.g. map it to a special 32-bit address.
3548   if (OptimizePollingPageLocation) {
3549     // architecture-specific list of address wishes:
3550     address address_wishes[] = {
3551       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3552       // PPC64: all address wishes are non-negative 32 bit values where
      // the lower 16 bits are all zero. We can load these addresses
      // with a single ppc_lis instruction.
3555       (address) 0x30000000, (address) 0x31000000,
3556       (address) 0x32000000, (address) 0x33000000,
3557       (address) 0x40000000, (address) 0x41000000,
3558       (address) 0x42000000, (address) 0x43000000,
3559       (address) 0x50000000, (address) 0x51000000,
3560       (address) 0x52000000, (address) 0x53000000,
3561       (address) 0x60000000, (address) 0x61000000,
3562       (address) 0x62000000, (address) 0x63000000
3563     };
3564     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3565 
3566     // iterate over the list of address wishes:
3567     for (int i=0; i<address_wishes_length; i++) {
3568       // Try to map with current address wish.
3569       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3570       // fail if the address is already mapped.
3571       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3572                                      map_size, prot,
3573                                      flags | MAP_FIXED,
3574                                      -1, 0);
3575       if (Verbose) {
3576         fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3577                 address_wishes[i], map_address + (ssize_t)page_size);
3578       }
3579 
3580       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3581         // Map succeeded and map_address is at wished address, exit loop.
3582         break;
3583       }
3584 
3585       if (map_address != (address) MAP_FAILED) {
3586         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3587         ::munmap(map_address, map_size);
3588         map_address = (address) MAP_FAILED;
3589       }
3590       // Map failed, continue loop.
3591     }
3592   } // end OptimizePollingPageLocation
3593 
3594   if (map_address == (address) MAP_FAILED) {
3595     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3596   }
3597   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3598   os::set_polling_page(map_address);
3599 
3600   if (!UseMembar) {
3601     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    guarantee(mem_serialize_page != (address)MAP_FAILED, "mmap failed for memory serialize page");
3603     os::set_memory_serialize_page(mem_serialize_page);
3604 
3605 #ifndef PRODUCT
3606     if (Verbose && PrintMiscellaneous) {
3607       tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3608     }
3609 #endif
3610   }
3611 
3612   // initialize suspend/resume support - must do this before signal_sets_init()
3613   if (SR_initialize() != 0) {
3614     perror("SR_initialize failed");
3615     return JNI_ERR;
3616   }
3617 
3618   Aix::signal_sets_init();
3619   Aix::install_signal_handlers();
3620 
3621   // Check minimum allowable stack size for thread creation and to initialize
3622   // the java system classes, including StackOverflowError - depends on page
3623   // size. Add a page for compiler2 recursion in main thread.
3624   // Add in 2*BytesPerWord times page size to account for VM stack during
3625   // class initialization depending on 32 or 64 bit VM.
3626   os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3627             (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
3628                      (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
3629 
3630   os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
3631 
3632   size_t threadStackSizeInBytes = ThreadStackSize * K;
3633   if (threadStackSizeInBytes != 0 &&
3634       threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3635     tty->print_cr("\nThe stack size specified is too small, "
3636                   "Specify at least %dk",
3637                   os::Aix::min_stack_allowed / K);
3638     return JNI_ERR;
3639   }
3640 
3641   // Make the stack size a multiple of the page size so that
3642   // the yellow/red zones can be guarded.
3643   // Note that this can be 0, if no default stacksize was set.
3644   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3645 
3646   Aix::libpthread_init();
3647 
3648   if (MaxFDLimit) {
    // Set the number of file descriptors to max. Print out an error
    // if getrlimit/setrlimit fails, but continue regardless.
3651     struct rlimit nbr_files;
3652     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3653     if (status != 0) {
3654       if (PrintMiscellaneous && (Verbose || WizardMode))
3655         perror("os::init_2 getrlimit failed");
3656     } else {
3657       nbr_files.rlim_cur = nbr_files.rlim_max;
3658       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3659       if (status != 0) {
3660         if (PrintMiscellaneous && (Verbose || WizardMode))
3661           perror("os::init_2 setrlimit failed");
3662       }
3663     }
3664   }
3665 
3666   if (PerfAllowAtExitRegistration) {
3667     // Only register atexit functions if PerfAllowAtExitRegistration is set.
3668     // Atexit functions can be delayed until process exit time, which
3669     // can be problematic for embedded VM situations. Embedded VMs should
3670     // call DestroyJavaVM() to assure that VM resources are released.
3671 
3672     // Note: perfMemory_exit_helper atexit function may be removed in
3673     // the future if the appropriate cleanup code can be added to the
3674     // VM_Exit VMOperation's doit method.
3675     if (atexit(perfMemory_exit_helper) != 0) {
3676       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3677     }
3678   }
3679 
3680   return JNI_OK;
3681 }
3682 
3683 // Mark the polling page as unreadable
3684 void os::make_polling_page_unreadable(void) {
3685   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3686     fatal("Could not disable polling page");
3687   }
3688 };
3689 
3690 // Mark the polling page as readable
3691 void os::make_polling_page_readable(void) {
3692   // Changed according to os_linux.cpp.
3693   if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3694     fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
3695   }
3696 };
3697 
3698 int os::active_processor_count() {
3699   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3700   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3701   return online_cpus;
3702 }
3703 
3704 void os::set_native_thread_name(const char *name) {
3705   // Not yet implemented.
3706   return;
3707 }
3708 
3709 bool os::distribute_processes(uint length, uint* distribution) {
3710   // Not yet implemented.
3711   return false;
3712 }
3713 
3714 bool os::bind_to_processor(uint processor_id) {
3715   // Not yet implemented.
3716   return false;
3717 }
3718 
3719 void os::SuspendedThreadTask::internal_do_task() {
3720   if (do_suspend(_thread->osthread())) {
3721     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3722     do_task(context);
3723     do_resume(_thread->osthread());
3724   }
3725 }
3726 
3727 class PcFetcher : public os::SuspendedThreadTask {
3728 public:
3729   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3730   ExtendedPC result();
3731 protected:
3732   void do_task(const os::SuspendedThreadTaskContext& context);
3733 private:
3734   ExtendedPC _epc;
3735 };
3736 
3737 ExtendedPC PcFetcher::result() {
3738   guarantee(is_done(), "task is not done yet.");
3739   return _epc;
3740 }
3741 
3742 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3743   Thread* thread = context.thread();
3744   OSThread* osthread = thread->osthread();
3745   if (osthread->ucontext() != NULL) {
3746     _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
3747   } else {
3748     // NULL context is unexpected, double-check this is the VMThread.
3749     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3750   }
3751 }
3752 
3753 // Suspends the target using the signal mechanism and then grabs the PC before
3754 // resuming the target. Used by the flat-profiler only
3755 ExtendedPC os::get_thread_pc(Thread* thread) {
3756   // Make sure that it is called by the watcher for the VMThread.
3757   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3758   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3759 
3760   PcFetcher fetcher(thread);
3761   fetcher.run();
3762   return fetcher.result();
3763 }
3764 
3765 ////////////////////////////////////////////////////////////////////////////////
3766 // debug support
3767 
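// Returns x if x and y lie on the same VM page; otherwise returns the address
// in y's page closest to x (one past the last byte of the page if x > y, the
// page base if x < y).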
3768 static address same_page(address x, address y) {
3769   intptr_t page_bits = -os::vm_page_size();
3770   if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
3771     return x;
3772   else if (x > y)
3773     return (address)(intptr_t(y) | ~page_bits) + 1;
3774   else
3775     return (address)(intptr_t(y) & page_bits);
3776 }
3777 
3778 bool os::find(address addr, outputStream* st) {
3779 
3780   st->print(PTR_FORMAT ": ", addr);
3781 
3782   const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
3783   if (lib) {
3784     lib->print(st);
3785     return true;
3786   } else {
3787     lib = LoadedLibraries::find_for_data_address(addr);
3788     if (lib) {
3789       lib->print(st);
3790       return true;
3791     } else {
3792       st->print_cr("(outside any module)");
3793     }
3794   }
3795 
3796   return false;
3797 }
3798 
3799 ////////////////////////////////////////////////////////////////////////////////
3800 // misc
3801 
3802 // This does not do anything on Aix. This is basically a hook for being
3803 // able to use structured exception handling (thread-local exception filters)
3804 // on, e.g., Win32.
3805 void
3806 os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
3807                          JavaCallArguments* args, Thread* thread) {
3808   f(value, method, args, thread);
3809 }
3810 
3811 void os::print_statistics() {
3812 }
3813 
3814 int os::message_box(const char* title, const char* message) {
3815   int i;
3816   fdStream err(defaultStream::error_fd());
3817   for (i = 0; i < 78; i++) err.print_raw("=");
3818   err.cr();
3819   err.print_raw_cr(title);
3820   for (i = 0; i < 78; i++) err.print_raw("-");
3821   err.cr();
3822   err.print_raw_cr(message);
3823   for (i = 0; i < 78; i++) err.print_raw("=");
3824   err.cr();
3825 
3826   char buf[16];
3827   // Prevent process from exiting upon "read error" without consuming all CPU
3828   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3829 
3830   return buf[0] == 'y' || buf[0] == 'Y';
3831 }
3832 
3833 int os::stat(const char *path, struct stat *sbuf) {
3834   char pathbuf[MAX_PATH];
3835   if (strlen(path) > MAX_PATH - 1) {
3836     errno = ENAMETOOLONG;
3837     return -1;
3838   }
3839   os::native_path(strcpy(pathbuf, path));
3840   return ::stat(pathbuf, sbuf);
3841 }
3842 
3843 bool os::check_heap(bool force) {
3844   return true;
3845 }
3846 
3847 // Is a (classpath) directory empty?
3848 bool os::dir_is_empty(const char* path) {
3849   DIR *dir = NULL;
3850   struct dirent *ptr;
3851 
3852   dir = opendir(path);
3853   if (dir == NULL) return true;
3854 
  // Scan the directory.
  bool result = true;
3858   while (result && (ptr = ::readdir(dir)) != NULL) {
3859     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3860       result = false;
3861     }
3862   }
3863   closedir(dir);
3864   return result;
3865 }
3866 
3867 // This code originates from JDK's sysOpen and open64_w
3868 // from src/solaris/hpi/src/system_md.c
3869 
3870 int os::open(const char *path, int oflag, int mode) {
3871 
3872   if (strlen(path) > MAX_PATH - 1) {
3873     errno = ENAMETOOLONG;
3874     return -1;
3875   }
3876   int fd;
3877 
3878   fd = ::open64(path, oflag, mode);
3879   if (fd == -1) return -1;
3880 
3881   // If the open succeeded, the file might still be a directory.
3882   {
3883     struct stat64 buf64;
3884     int ret = ::fstat64(fd, &buf64);
3885     int st_mode = buf64.st_mode;
3886 
3887     if (ret != -1) {
3888       if ((st_mode & S_IFMT) == S_IFDIR) {
3889         errno = EISDIR;
3890         ::close(fd);
3891         return -1;
3892       }
3893     } else {
3894       ::close(fd);
3895       return -1;
3896     }
3897   }
3898 
3899   // All file descriptors that are opened in the JVM and not
3900   // specifically destined for a subprocess should have the
3901   // close-on-exec flag set. If we don't set it, then careless 3rd
3902   // party native code might fork and exec without closing all
3903   // appropriate file descriptors (e.g. as we do in closeDescriptors in
3904   // UNIXProcess.c), and this in turn might:
3905   //
3906   // - cause end-of-file to fail to be detected on some file
3907   //   descriptors, resulting in mysterious hangs, or
3908   //
3909   // - might cause an fopen in the subprocess to fail on a system
3910   //   suffering from bug 1085341.
3911   //
3912   // (Yes, the default setting of the close-on-exec flag is a Unix
3913   // design flaw.)
3914   //
3915   // See:
3916   // 1085341: 32-bit stdio routines should support file descriptors >255
3917   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
3918   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
3919 #ifdef FD_CLOEXEC
3920   {
3921     int flags = ::fcntl(fd, F_GETFD);
3922     if (flags != -1)
3923       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
3924   }
3925 #endif
3926 
3927   return fd;
3928 }
3929 
3930 // create binary file, rewriting existing file if required
3931 int os::create_binary_file(const char* path, bool rewrite_existing) {
3932   int oflags = O_WRONLY | O_CREAT;
3933   if (!rewrite_existing) {
3934     oflags |= O_EXCL;
3935   }
3936   return ::open64(path, oflags, S_IREAD | S_IWRITE);
3937 }
3938 
3939 // return current position of file pointer
3940 jlong os::current_file_offset(int fd) {
3941   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3942 }
3943 
3944 // move file pointer to the specified offset
3945 jlong os::seek_to_file_offset(int fd, jlong offset) {
3946   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3947 }
3948 
3949 // This code originates from JDK's sysAvailable
3950 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3951 
3952 int os::available(int fd, jlong *bytes) {
3953   jlong cur, end;
3954   int mode;
3955   struct stat64 buf64;
3956 
3957   if (::fstat64(fd, &buf64) >= 0) {
3958     mode = buf64.st_mode;
3959     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
3960       // XXX: is the following call interruptible? If so, this might
3961       // need to go through the INTERRUPT_IO() wrapper as for other
3962       // blocking, interruptible calls in this file.
3963       int n;
3964       if (::ioctl(fd, FIONREAD, &n) >= 0) {
3965         *bytes = n;
3966         return 1;
3967       }
3968     }
3969   }
3970   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
3971     return 0;
3972   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
3973     return 0;
3974   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
3975     return 0;
3976   }
3977   *bytes = end - cur;
3978   return 1;
3979 }
3980 
3981 // Map a block of memory.
3982 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3983                         char *addr, size_t bytes, bool read_only,
3984                         bool allow_exec) {
3985   int prot;
3986   int flags = MAP_PRIVATE;
3987 
3988   if (read_only) {
3989     prot = PROT_READ;
3990     flags = MAP_SHARED;
3991   } else {
3992     prot = PROT_READ | PROT_WRITE;
3993     flags = MAP_PRIVATE;
3994   }
3995 
3996   if (allow_exec) {
3997     prot |= PROT_EXEC;
3998   }
3999 
4000   if (addr != NULL) {
4001     flags |= MAP_FIXED;
4002   }
4003 
4004   // Allow anonymous mappings if 'fd' is -1.
4005   if (fd == -1) {
4006     flags |= MAP_ANONYMOUS;
4007   }
4008 
4009   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
4010                                      fd, file_offset);
4011   if (mapped_address == MAP_FAILED) {
4012     return NULL;
4013   }
4014   return mapped_address;
4015 }
4016 
4017 // Remap a block of memory.
4018 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4019                           char *addr, size_t bytes, bool read_only,
4020                           bool allow_exec) {
4021   // same as map_memory() on this OS
4022   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4023                         allow_exec);
4024 }
4025 
4026 // Unmap a block of memory.
4027 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4028   return munmap(addr, bytes) == 0;
4029 }
4030 
4031 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4032 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4033 // of a thread.
4034 //
4035 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4036 // the fast estimate available on the platform.
4037 
4038 jlong os::current_thread_cpu_time() {
4039   // return user + sys since the cost is the same
4040   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
4041   assert(n >= 0, "negative CPU time");
4042   return n;
4043 }
4044 
4045 jlong os::thread_cpu_time(Thread* thread) {
4046   // consistent with what current_thread_cpu_time() returns
4047   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4048   assert(n >= 0, "negative CPU time");
4049   return n;
4050 }
4051 
4052 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4053   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4054   assert(n >= 0, "negative CPU time");
4055   return n;
4056 }
4057 
4058 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4059   bool error = false;
4060 
4061   jlong sys_time = 0;
4062   jlong user_time = 0;
4063 
4064   // Reimplemented using getthrds64().
4065   //
4066   // Works like this:
4067   // For the thread in question, get the kernel thread id. Then get the
4068   // kernel thread statistics using that id.
4069   //
  // Of course this only works when no m:n pthread scheduling is used,
  // i.e. when there is a 1:1 relationship between user and kernel threads.
  // On AIX, see the AIXTHREAD_SCOPE variable.
4073 
4074   pthread_t pthtid = thread->osthread()->pthread_id();
4075 
4076   // retrieve kernel thread id for the pthread:
4077   tid64_t tid = 0;
4078   struct __pthrdsinfo pinfo;
  // I just love those otherworldly IBM APIs which force me to hand down
  // dummy buffers for stuff I don't care for...
4081   char dummy[1];
4082   int dummy_size = sizeof(dummy);
4083   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4084                           dummy, &dummy_size) == 0) {
4085     tid = pinfo.__pi_tid;
4086   } else {
4087     tty->print_cr("pthread_getthrds_np failed.");
4088     error = true;
4089   }
4090 
4091   // retrieve kernel timing info for that kernel thread
4092   if (!error) {
4093     struct thrdentry64 thrdentry;
4094     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
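      // Convert the rusage seconds/microseconds pairs into nanoseconds.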
4095       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4096       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4097     } else {
4098       tty->print_cr("pthread_getthrds_np failed.");
4099       error = true;
4100     }
4101   }
4102 
4103   if (p_sys_time) {
4104     *p_sys_time = sys_time;
4105   }
4106 
4107   if (p_user_time) {
4108     *p_user_time = user_time;
4109   }
4110 
4111   if (error) {
4112     return false;
4113   }
4114 
4115   return true;
4116 }
4117 
4118 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4119   jlong sys_time;
4120   jlong user_time;
4121 
4122   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4123     return -1;
4124   }
4125 
4126   return user_sys_cpu_time ? sys_time + user_time : user_time;
4127 }
4128 
4129 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4130   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4131   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4132   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4133   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4134 }
4135 
4136 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4137   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4138   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4139   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4140   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4141 }
4142 
4143 bool os::is_thread_cpu_time_supported() {
4144   return true;
4145 }
4146 
4147 // System loadavg support. Returns -1 if load average cannot be obtained.
4148 // For now just return the system wide load average (no processor sets).
4149 int os::loadavg(double values[], int nelem) {
4150 
4151   // Implemented using libperfstat on AIX.
4152 
4153   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4154   guarantee(values, "argument error");
4155 
4156   if (os::Aix::on_pase()) {
4157     Unimplemented();
4158     return -1;
4159   } else {
4160     // AIX: use libperfstat
4161     //
4162     // See also:
4163     // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4164     // /usr/include/libperfstat.h:
4165 
    // Use get_cpuinfo, which is already AIX-version independent.
4167     os::Aix::cpuinfo_t ci;
4168     if (os::Aix::get_cpuinfo(&ci)) {
4169       for (int i = 0; i < nelem; i++) {
4170         values[i] = ci.loadavg[i];
4171       }
4172     } else {
4173       return -1;
4174     }
4175     return nelem;
4176   }
4177 }
4178 
4179 void os::pause() {
4180   char filename[MAX_PATH];
4181   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4182     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4183   } else {
4184     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4185   }
4186 
4187   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4188   if (fd != -1) {
4189     struct stat buf;
4190     ::close(fd);
4191     while (::stat(filename, &buf) == 0) {
4192       (void)::poll(NULL, 0, 100);
4193     }
4194   } else {
4195     jio_fprintf(stderr,
4196       "Could not open pause file '%s', continuing immediately.\n", filename);
4197   }
4198 }
4199 
bool os::Aix::is_primordial_thread() {
  return pthread_self() == (pthread_t)1;
}
4207 
// OS recognition (PASE/AIX, OS level). Call this before calling any of
// the static functions Aix::on_pase() or Aix::os_version().
4210 void os::Aix::initialize_os_info() {
4211 
4212   assert(_on_pase == -1 && _os_version == -1, "already called.");
4213 
4214   struct utsname uts;
4215   memset(&uts, 0, sizeof(uts));
4216   strcpy(uts.sysname, "?");
4217   if (::uname(&uts) == -1) {
4218     trc("uname failed (%d)", errno);
4219     guarantee(0, "Could not determine whether we run on AIX or PASE");
4220   } else {
4221     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4222                "node \"%s\" machine \"%s\"\n",
4223                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4224     const int major = atoi(uts.version);
4225     assert(major > 0, "invalid OS version");
4226     const int minor = atoi(uts.release);
4227     assert(minor > 0, "invalid OS release");
4228     _os_version = (major << 8) | minor;
4229     if (strcmp(uts.sysname, "OS400") == 0) {
4230       Unimplemented();
4231     } else if (strcmp(uts.sysname, "AIX") == 0) {
4232       // We run on AIX. We do not support versions older than AIX 5.3.
4233       _on_pase = 0;
4234       if (_os_version < 0x0503) {
4235         trc("AIX release older than AIX 5.3 not supported.");
4236         assert(false, "AIX release too old.");
4237       } else {
4238         trcVerbose("We run on AIX %d.%d\n", major, minor);
4239       }
4240     } else {
4241       assert(false, "unknown OS");
4242     }
4243   }
4244 
4245   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4246 } // end: os::Aix::initialize_os_info()
4247 
// Scan environment for important settings which might affect the VM.
// Trace out settings. Warn about invalid settings and/or correct them.
//
// Must run after os::Aix::initialize_os_info().
4252 void os::Aix::scan_environment() {
4253 
4254   char* p;
4255   int rc;
4256 
  // Warn explicitly if EXTSHM=ON is used. That switch changes how
  // System V shared memory behaves. One effect is that the page size of
  // shared memory cannot be changed dynamically, effectively preventing
  // large pages from working.
4261   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4262   // recommendation is (in OSS notes) to switch it off.
4263   p = ::getenv("EXTSHM");
4264   if (Verbose) {
4265     fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4266   }
4267   if (p && strcasecmp(p, "ON") == 0) {
4268     fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4269     _extshm = 1;
4270   } else {
4271     _extshm = 0;
4272   }
4273 
4274   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4275   // Not tested, not supported.
4276   //
4277   // Note that it might be worth the trouble to test and to require it, if only to
4278   // get useful return codes for mprotect.
4279   //
  // Note: Setting XPG_SUS_ENV in the process is too late; it must be set
  // earlier (before exec()? before loading libjvm? ...).
4282   p = ::getenv("XPG_SUS_ENV");
4283   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4284   if (p && strcmp(p, "ON") == 0) {
4285     _xpg_sus_mode = 1;
4286     trc("Unsupported setting: XPG_SUS_ENV=ON");
4287     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4288     // clobber address ranges. If we ever want to support that, we have to do some
4289     // testing first.
4290     guarantee(false, "XPG_SUS_ENV=ON not supported");
4291   } else {
4292     _xpg_sus_mode = 0;
4293   }
4294 
4295   // Switch off AIX internal (pthread) guard pages. This has
4296   // immediate effect for any pthread_create calls which follow.
4297   p = ::getenv("AIXTHREAD_GUARDPAGES");
4298   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4299   rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4300   guarantee(rc == 0, "");
4301 
4302 } // end: os::Aix::scan_environment()
4303 
4304 // PASE: initialize the libo4 library (AS400 PASE porting library).
4305 void os::Aix::initialize_libo4() {
4306   Unimplemented();
4307 }
4308 
// AIX: initialize the libperfstat library (we load this dynamically
// because it is only available on AIX).
4311 void os::Aix::initialize_libperfstat() {
4312 
4313   assert(os::Aix::on_aix(), "AIX only");
4314 
4315   if (!libperfstat::init()) {
4316     trc("libperfstat initialization failed.");
4317     assert(false, "libperfstat initialization failed");
4318   } else {
4319     if (Verbose) {
4320       fprintf(stderr, "libperfstat initialized.\n");
4321     }
4322   }
4323 } // end: os::Aix::initialize_libperfstat
4324 
4325 /////////////////////////////////////////////////////////////////////////////
4326 // thread stack
4327 
4328 // Function to query the current stack size using pthread_getthrds_np.
4329 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
  // This only works when invoked on a pthread. As we agreed not to use
  // primordial threads anyway, we guarantee that here.
4332   guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4333 
4334   // Information about this api can be found (a) in the pthread.h header and
4335   // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4336   //
  // Using this API to find out the current stack is somewhat undefined.
  // But after a lot of tries and after asking IBM about it, I concluded that it
  // is safe enough for cases where the pthread library creates the stacks. For
  // cases where we create our own stack and pass it to pthread_create, it seems
  // not to work (the returned stack size in that case is 0).
4342 
4343   pthread_t tid = pthread_self();
4344   struct __pthrdsinfo pinfo;
  char dummy[1]; // We only need this to satisfy the API and avoid an error return.
4346   int dummy_size = sizeof(dummy);
4347 
4348   memset(&pinfo, 0, sizeof(pinfo));
4349 
4350   const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4351                                      sizeof(pinfo), dummy, &dummy_size);
4352 
4353   if (rc != 0) {
4354     assert0(false);
4355     trcVerbose("pthread_getthrds_np failed (%d)", rc);
4356     return false;
4357   }
4358   guarantee0(pinfo.__pi_stackend);
4359 
4360   // The following can happen when invoking pthread_getthrds_np on a pthread running
4361   // on a user provided stack (when handing down a stack to pthread create, see
4362   // pthread_attr_setstackaddr).
4363   // Not sure what to do here - I feel inclined to forbid this use case completely.
4364   guarantee0(pinfo.__pi_stacksize);
4365 
4366   // Note: the pthread stack on AIX seems to look like this:
4367   //
4368   // ---------------------   real base ? at page border ?
4369   //
4370   //     pthread internal data, like ~2K, see also
4371   //     http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/thread_supp_tun_params.htm
4372   //
4373   // ---------------------   __pi_stackend - not page aligned, (xxxxF890)
4374   //
4375   //     stack
4376   //      ....
4377   //
4378   //     stack
4379   //
4380   // ---------------------   __pi_stackend  - __pi_stacksize
4381   //
4382   //     padding due to AIX guard pages (?) see AIXTHREAD_GUARDPAGES
4383   // ---------------------   __pi_stackaddr  (page aligned if AIXTHREAD_GUARDPAGES > 0)
4384   //
4385   //   AIX guard pages (?)
4386   //
4387 
4388   // So, the safe thing to do is to use the area from __pi_stackend to __pi_stackaddr;
4389   // __pi_stackend however is almost never page aligned.
4390   //
4391 
4392   if (p_stack_base) {
4393     (*p_stack_base) = (address) (pinfo.__pi_stackend);
4394   }
4395 
4396   if (p_stack_size) {
4397     (*p_stack_size) = pinfo.__pi_stackend - pinfo.__pi_stackaddr;
4398   }
4399 
4400   return true;
4401 }
4402 
4403 // Get the current stack base from the OS (actually, the pthread library).
4404 address os::current_stack_base() {
4405   address p;
4406   query_stack_dimensions(&p, 0);
4407   return p;
4408 }
4409 
4410 // Get the current stack size from the OS (actually, the pthread library).
4411 size_t os::current_stack_size() {
4412   size_t s;
4413   query_stack_dimensions(0, &s);
4414   return s;
4415 }
4416 
4417 // Refer to the comments in os_solaris.cpp park-unpark.
4418 //
4419 // Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4420 // hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4421 // For specifics regarding the bug see GLIBC BUGID 261237 :
4422 //    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4423 // Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4424 // will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4425 // is used. (The simple C test-case provided in the GLIBC bug report manifests the
// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4427 // and monitorenter when we're using 1-0 locking. All those operations may result in
4428 // calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4429 // of libpthread avoids the problem, but isn't practical.
4430 //
4431 // Possible remedies:
4432 //
4433 // 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
4434 //      This is palliative and probabilistic, however. If the thread is preempted
4435 //      between the call to compute_abstime() and pthread_cond_timedwait(), more
4436 //      than the minimum period may have passed, and the abstime may be stale (in the
//      past) resulting in a hang. Using this technique reduces the odds of a hang
4438 //      but the JVM is still vulnerable, particularly on heavily loaded systems.
4439 //
4440 // 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4441 //      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
4442 //      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4443 //      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
4444 //      thread.
4445 //
4446 // 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
4447 //      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
4448 //      a timeout request to the chron thread and then blocking via pthread_cond_wait().
4449 //      This also works well. In fact it avoids kernel-level scalability impediments
4450 //      on certain platforms that don't handle lots of active pthread_cond_timedwait()
4451 //      timers in a graceful fashion.
4452 //
4453 // 4.   When the abstime value is in the past it appears that control returns
4454 //      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4455 //      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
4456 //      can avoid the problem by reinitializing the condvar -- by cond_destroy()
4457 //      followed by cond_init() -- after all calls to pthread_cond_timedwait().
4458 //      It may be possible to avoid reinitialization by checking the return
4459 //      value from pthread_cond_timedwait(). In addition to reinitializing the
4460 //      condvar we must establish the invariant that cond_signal() is only called
4461 //      within critical sections protected by the adjunct mutex. This prevents
4462 //      cond_signal() from "seeing" a condvar that's in the midst of being
4463 //      reinitialized or that is corrupt. Sadly, this invariant obviates the
4464 //      desirable signal-after-unlock optimization that avoids futile context switching.
4465 //
//      I'm also concerned that some versions of NPTL might allocate an auxiliary
4467 //      structure when a condvar is used or initialized. cond_destroy() would
4468 //      release the helper structure. Our reinitialize-after-timedwait fix
4469 //      put excessive stress on malloc/free and locks protecting the c-heap.
4470 //
4471 // We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
4472 // It may be possible to refine (4) by checking the kernel and NPTL versions
4473 // and only enabling the work-around for vulnerable environments.
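//
// A minimal sketch of what remedy (4) amounts to (illustrative only; the
// gated application of this pattern appears in Parker::park() below):
//
//   int status = pthread_cond_timedwait(cond, mutex, &abst);
//   if (status != 0 && WorkAroundNPTLTimedWaitHang) {
//     pthread_cond_destroy(cond);     // discard the possibly-corrupt condvar
//     pthread_cond_init(cond, NULL);  // reinitialize it, still holding mutex
//   }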
4474 
4475 // utility to compute the abstime argument to timedwait:
4476 // millis is the relative timeout time
4477 // abstime will be the absolute timeout time
4478 // TODO: replace compute_abstime() with unpackTime()
4479 
4480 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4481   if (millis < 0) millis = 0;
4482   struct timeval now;
4483   int status = gettimeofday(&now, NULL);
4484   assert(status == 0, "gettimeofday");
4485   jlong seconds = millis / 1000;
4486   millis %= 1000;
4487   if (seconds > 50000000) { // see man cond_timedwait(3T)
4488     seconds = 50000000;
4489   }
4490   abstime->tv_sec = now.tv_sec  + seconds;
4491   long       usec = now.tv_usec + millis * 1000;
4492   if (usec >= 1000000) {
4493     abstime->tv_sec += 1;
4494     usec -= 1000000;
4495   }
4496   abstime->tv_nsec = usec * 1000;
4497   return abstime;
4498 }
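// Worked example (illustrative): for millis == 1500 with now == { S, 900000 }
// we get seconds == 1 and a 500 ms remainder, so usec == 900000 + 500000 ==
// 1400000 >= 1000000; after the carry, abstime is { S + 2, 400000000 }.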
4499 
4500 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4501 // Conceptually TryPark() should be equivalent to park(0).
4502 
4503 int os::PlatformEvent::TryPark() {
4504   for (;;) {
4505     const int v = _Event;
4506     guarantee ((v == 0) || (v == 1), "invariant");
4507     if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4508   }
4509 }
4510 
4511 void os::PlatformEvent::park() {       // AKA "down()"
4512   // Invariant: Only the thread associated with the Event/PlatformEvent
4513   // may call park().
4514   // TODO: assert that _Assoc != NULL or _Assoc == Self
4515   int v;
4516   for (;;) {
4517     v = _Event;
4518     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4519   }
4520   guarantee (v >= 0, "invariant");
4521   if (v == 0) {
4522     // Do this the hard way by blocking ...
4523     int status = pthread_mutex_lock(_mutex);
4524     assert_status(status == 0, status, "mutex_lock");
4525     guarantee (_nParked == 0, "invariant");
4526     ++ _nParked;
4527     while (_Event < 0) {
4528       status = pthread_cond_wait(_cond, _mutex);
4529       assert_status(status == 0, status, "cond_wait");
4530     }
4531     -- _nParked;
4532 
4533     // In theory we could move the ST of 0 into _Event past the unlock(),
4534     // but then we'd need a MEMBAR after the ST.
4535     _Event = 0;
4536     status = pthread_mutex_unlock(_mutex);
4537     assert_status(status == 0, status, "mutex_unlock");
4538   }
4539   guarantee (_Event >= 0, "invariant");
4540 }
4541 
4542 int os::PlatformEvent::park(jlong millis) {
4543   guarantee (_nParked == 0, "invariant");
4544 
4545   int v;
4546   for (;;) {
4547     v = _Event;
4548     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4549   }
4550   guarantee (v >= 0, "invariant");
4551   if (v != 0) return OS_OK;
4552 
4553   // We do this the hard way, by blocking the thread.
4554   // Consider enforcing a minimum timeout value.
4555   struct timespec abst;
4556   compute_abstime(&abst, millis);
4557 
4558   int ret = OS_TIMEOUT;
4559   int status = pthread_mutex_lock(_mutex);
4560   assert_status(status == 0, status, "mutex_lock");
4561   guarantee (_nParked == 0, "invariant");
4562   ++_nParked;
4563 
4564   // Object.wait(timo) will return because of
4565   // (a) notification
4566   // (b) timeout
4567   // (c) thread.interrupt
4568   //
4569   // Thread.interrupt and Object.notify{All} both call Event::set.
4570   // That is, we treat thread.interrupt as a special case of notification.
4571   // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4572   // We assume all ETIME returns are valid.
4573   //
4574   // TODO: properly differentiate simultaneous notify+interrupt.
4575   // In that case, we should propagate the notify to another waiter.
4576 
4577   while (_Event < 0) {
4578     status = pthread_cond_timedwait(_cond, _mutex, &abst);
4579     assert_status(status == 0 || status == ETIMEDOUT,
4580                   status, "cond_timedwait");
4581     if (!FilterSpuriousWakeups) break;         // previous semantics
4582     if (status == ETIMEDOUT) break;
4583     // We consume and ignore EINTR and spurious wakeups.
4584   }
4585   --_nParked;
4586   if (_Event >= 0) {
4587      ret = OS_OK;
4588   }
4589   _Event = 0;
4590   status = pthread_mutex_unlock(_mutex);
4591   assert_status(status == 0, status, "mutex_unlock");
4592   assert (_nParked == 0, "invariant");
4593   return ret;
4594 }
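// Usage sketch (illustrative; "ev" stands for some PlatformEvent*): callers
// typically distinguish the two return values, e.g.
//
//   if (ev->park(250) == OS_TIMEOUT) { /* the 250 ms timeout expired */ }
//   else                             { /* unparked (set) before the timeout */ }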
4595 
4596 void os::PlatformEvent::unpark() {
4597   int v, AnyWaiters;
4598   for (;;) {
4599     v = _Event;
4600     if (v > 0) {
4601       // The LD of _Event could have been reordered or be satisfied
4602       // by a read-aside from this processor's write buffer.
4603       // To avoid problems execute a barrier and then
4604       // ratify the value.
4605       OrderAccess::fence();
4606       if (_Event == v) return;
4607       continue;
4608     }
4609     if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
4610   }
4611   if (v < 0) {
4612     // Wait for the thread associated with the event to vacate
4613     int status = pthread_mutex_lock(_mutex);
4614     assert_status(status == 0, status, "mutex_lock");
4615     AnyWaiters = _nParked;
4616 
4617     if (AnyWaiters != 0) {
4618       // Note that we signal while still holding the mutex: remedy (4)
4619       // above requires that cond_signal() only be called within the
4620       // critical section protected by the adjunct mutex.
4621       status = pthread_cond_signal(_cond);
4622       assert_status(status == 0, status, "cond_signal");
4623     }
4624     status = pthread_mutex_unlock(_mutex);
4625     assert_status(status == 0, status, "mutex_unlock");
4626   }
4627 
4628   // Signaling after dropping the lock would avoid a common class of futile
4629   // wakeups, but that optimization is unavailable here (see remedy (4)
4630   // above): cond_signal() must be called while holding the adjunct mutex.
4631   // A spurious wakeup from cond_{timed}wait() remains benign either way:
4632   // the victim simply re-tests the condition and re-parks itself.
4633 }
4634 
4635 
4636 // JSR166
4637 // -------------------------------------------------------
4638 
4639 //
4640 // The solaris and linux implementations of park/unpark are fairly
4641 // conservative for now, but can be improved. They currently use a
4642 // mutex/condvar pair, plus a count.
4643 // Park decrements count if > 0, else does a condvar wait. Unpark
4644 // sets count to 1 and signals condvar. Only one thread ever waits
4645 // on the condvar. Contention seen when trying to park implies that someone
4646 // is unparking you, so don't wait. And spurious returns are fine, so there
4647 // is no need to track notifications.
4648 //
4649 
4650 #define MAX_SECS 100000000
4651 //
4652 // This code is common to linux and solaris and will be moved to a
4653 // common place in dolphin.
4654 //
4655 // The passed in time value is either a relative time in nanoseconds
4656 // or an absolute time in milliseconds. Either way it has to be unpacked
4657 // into suitable seconds and nanoseconds components and stored in the
4658 // given timespec structure.
4659 // Because the given time is a 64-bit value and the time_t used in the
4660 // timespec is only a signed 32-bit value (except on 64-bit Linux), we have
4661 // to watch for overflow if times far in the future are given. Further, on
4662 // Solaris versions prior to 10 there is a restriction (see cond_timedwait)
4663 // that the specified number of seconds, in abstime, be less than
4664 // current_time + 100,000,000. As it will be 28 years before "now +
4665 // 100000000" overflows, we can ignore that and just impose a hard limit on
4666 // seconds using the value of "now + 100,000,000". This places a limit on
4667 // the timeout of about 3.17 years from "now".
4668 //
4669 
4670 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
4671   assert (time > 0, "unpackTime");
4672 
4673   struct timeval now;
4674   int status = gettimeofday(&now, NULL);
4675   assert(status == 0, "gettimeofday");
4676 
4677   time_t max_secs = now.tv_sec + MAX_SECS;
4678 
4679   if (isAbsolute) {
4680     jlong secs = time / 1000;
4681     if (secs > max_secs) {
4682       absTime->tv_sec = max_secs;
4683     }
4684     else {
4685       absTime->tv_sec = secs;
4686     }
4687     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4688   }
4689   else {
4690     jlong secs = time / NANOSECS_PER_SEC;
4691     if (secs >= MAX_SECS) {
4692       absTime->tv_sec = max_secs;
4693       absTime->tv_nsec = 0;
4694     }
4695     else {
4696       absTime->tv_sec = now.tv_sec + secs;
4697       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4698       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4699         absTime->tv_nsec -= NANOSECS_PER_SEC;
4700         ++absTime->tv_sec; // note: this must be <= max_secs
4701       }
4702     }
4703   }
4704   assert(absTime->tv_sec >= 0, "tv_sec < 0");
4705   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4706   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4707   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4708 }
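// Worked example (illustrative): a relative timeout of 2500000000 ns gives
// secs == 2, so absTime becomes { now.tv_sec + 2, 500000000 + now.tv_usec *
// 1000 }, with a carry into tv_sec if tv_nsec reaches one second. An
// absolute time of 1000 ms past the epoch simply yields { 1, 0 }.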
4709 
4710 void Parker::park(bool isAbsolute, jlong time) {
4711   // Optional fast-path check:
4712   // Return immediately if a permit is available.
4713   if (_counter > 0) {
4714     _counter = 0;
4715     OrderAccess::fence();
4716     return;
4717   }
4718 
4719   Thread* thread = Thread::current();
4720   assert(thread->is_Java_thread(), "Must be JavaThread");
4721   JavaThread *jt = (JavaThread *)thread;
4722 
4723   // Optional optimization -- avoid state transitions if there's an interrupt pending.
4724   // Check interrupt before trying to wait
4725   if (Thread::is_interrupted(thread, false)) {
4726     return;
4727   }
4728 
4729   // Next, demultiplex/decode time arguments
4730   timespec absTime;
4731   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
4732     return;
4733   }
4734   if (time > 0) {
4735     unpackTime(&absTime, isAbsolute, time);
4736   }
4737 
4738   // Enter safepoint region
4739   // Beware of deadlocks such as 6317397.
4740   // The per-thread Parker:: mutex is a classic leaf-lock.
4741   // In particular a thread must never block on the Threads_lock while
4742   // holding the Parker:: mutex. If safepoints are pending, both the
4743   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
4744   ThreadBlockInVM tbivm(jt);
4745 
4746   // Don't wait if we cannot get the lock, since interference arises from
4747   // unblocking. Also, check for a pending interrupt before trying to wait.
4748   if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4749     return;
4750   }
4751 
4752   int status;
4753   if (_counter > 0) { // no wait needed
4754     _counter = 0;
4755     status = pthread_mutex_unlock(_mutex);
4756     assert (status == 0, "invariant");
4757     OrderAccess::fence();
4758     return;
4759   }
4760 
4761 #ifdef ASSERT
4762   // Don't catch signals while blocked; let the running threads have the signals.
4763   // (This allows a debugger to break into the running thread.)
4764   sigset_t oldsigs;
4765   sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
4766   pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4767 #endif
4768 
4769   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4770   jt->set_suspend_equivalent();
4771   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4772 
4773   if (time == 0) {
4774     status = pthread_cond_wait (_cond, _mutex);
4775   } else {
4776     status = pthread_cond_timedwait (_cond, _mutex, &absTime);
4777     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4778       pthread_cond_destroy (_cond);
4779       pthread_cond_init    (_cond, NULL);
4780     }
4781   }
4782   assert_status(status == 0 || status == EINTR ||
4783                 status == ETIME || status == ETIMEDOUT,
4784                 status, "cond_timedwait");
4785 
4786 #ifdef ASSERT
4787   pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4788 #endif
4789 
4790   _counter = 0;
4791   status = pthread_mutex_unlock(_mutex);
4792   assert_status(status == 0, status, "invariant");
4793   // If externally suspended while waiting, re-suspend
4794   if (jt->handle_special_suspend_equivalent_condition()) {
4795     jt->java_suspend_self();
4796   }
4797 
4798   OrderAccess::fence();
4799 }
4800 
4801 void Parker::unpark() {
4802   int s, status;
4803   status = pthread_mutex_lock(_mutex);
4804   assert (status == 0, "invariant");
4805   s = _counter;
4806   _counter = 1;
4807   if (s < 1) {
4808     if (WorkAroundNPTLTimedWaitHang) {
4809       status = pthread_cond_signal (_cond);
4810       assert (status == 0, "invariant");
4811       status = pthread_mutex_unlock(_mutex);
4812       assert (status == 0, "invariant");
4813     } else {
4814       status = pthread_mutex_unlock(_mutex);
4815       assert (status == 0, "invariant");
4816       status = pthread_cond_signal (_cond);
4817       assert (status == 0, "invariant");
4818     }
4819   } else {
4820     status = pthread_mutex_unlock(_mutex);
4821     assert (status == 0, "invariant");
4822   }
4823 }
4824 
4825 extern char** environ;
4826 
4827 // Run the specified command in a separate process. Return its exit value,
4828 // or -1 on failure (e.g. can't fork a new process).
4829 // Unlike system(), this function can be called from a signal handler. It
4830 // doesn't block SIGINT et al.
4831 int os::fork_and_exec(char* cmd) {
4832   char* argv[4] = { (char*) "sh", (char*) "-c", cmd, NULL };
4833 
4834   pid_t pid = fork();
4835 
4836   if (pid < 0) {
4837     // fork failed
4838     return -1;
4839 
4840   } else if (pid == 0) {
4841     // child process
4842 
4843     // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4844     execve("/usr/bin/sh", argv, environ);
4845 
4846     // execve failed
4847     _exit(-1);
4848 
4849   } else {
4850     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4851     // care about the actual exit code, for now.
4852 
4853     int status;
4854 
4855     // Wait for the child process to exit. This returns immediately if
4856     // the child has already exited.
4857     while (waitpid(pid, &status, 0) < 0) {
4858       switch (errno) {
4859         case ECHILD: return 0;
4860         case EINTR: break;
4861         default: return -1;
4862       }
4863     }
4864 
4865     if (WIFEXITED(status)) {
4866       // The child exited normally; get its exit code.
4867       return WEXITSTATUS(status);
4868     } else if (WIFSIGNALED(status)) {
4869       // The child exited because of a signal.
4870       // The best value to return is 0x80 + signal number,
4871       // because that is what all Unix shells do, and because
4872       // it allows callers to distinguish between process exit and
4873       // process death by signal.
4874       return 0x80 + WTERMSIG(status);
4875     } else {
4876       // Unknown exit code; pass it through.
4877       return status;
4878     }
4879   }
4880   return -1;
4881 }
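// Usage sketch (illustrative): run a shell command and interpret the result
// per the conventions above:
//
//   int rc = os::fork_and_exec((char*) "ls /tmp > /dev/null");
//   if (rc == -1)        { /* fork or exec failed */ }
//   else if (rc >= 0x80) { /* child died on signal (rc - 0x80) */ }
//   else                 { /* normal exit, code rc */ }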
4882 
4883 // is_headless_jre()
4884 //
4885 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
4886 // in order to report if we are running in a headless jre.
4887 //
4888 // Since JDK8, xawt/libmawt.so has been moved into the same directory
4889 // as libawt.so and renamed libawt_xawt.so.
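//
// For example, with libjvm.so located at <jre>/lib/<arch>/server/libjvm.so
// (illustrative layout), the two candidate paths checked below are:
//   <jre>/lib/<arch>/xawt/libmawt.so   (pre-JDK8 location)
//   <jre>/lib/<arch>/libawt_xawt.so    (JDK8+ location)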
4890 bool os::is_headless_jre() {
4891   struct stat statbuf;
4892   char buf[MAXPATHLEN];
4893   char libmawtpath[MAXPATHLEN];
4894   const char *xawtstr = "/xawt/libmawt.so";
4895   const char *new_xawtstr = "/libawt_xawt.so";
4896 
4897   char *p;
4898 
4899   // Get path to libjvm.so
4900   os::jvm_path(buf, sizeof(buf));
4901 
4902   // Get rid of libjvm.so
4903   p = strrchr(buf, '/');
4904   if (p == NULL) return false;
4905   else *p = '\0';
4906 
4907   // Get rid of client or server
4908   p = strrchr(buf, '/');
4909   if (p == NULL) return false;
4910   else *p = '\0';
4911 
4912   // check xawt/libmawt.so
4913   strcpy(libmawtpath, buf);
4914   strcat(libmawtpath, xawtstr);
4915   if (::stat(libmawtpath, &statbuf) == 0) return false;
4916 
4917   // check libawt_xawt.so
4918   strcpy(libmawtpath, buf);
4919   strcat(libmawtpath, new_xawtstr);
4920   if (::stat(libmawtpath, &statbuf) == 0) return false;
4921 
4922   return true;
4923 }
4924 
4925 // Get the default path to the core file
4926 // Returns the length of the string
4927 int os::get_core_path(char* buffer, size_t bufferSize) {
4928   const char* p = get_current_directory(buffer, bufferSize);
4929 
4930   if (p == NULL) {
4931     assert(p != NULL, "failed to get current directory");
4932     return 0;
4933   }
4934 
4935   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4936                p, current_process_id());
4937 
4938   return strlen(buffer);
4939 }
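// Example result (illustrative): for a VM with pid 4711 running in
// /home/user, the buffer would contain "/home/user/core or core.4711".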
4940 
4941 #ifndef PRODUCT
4942 void TestReserveMemorySpecial_test() {
4943   // No tests available for this platform
4944 }
4945 #endif