1 /*
   2  * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright 2012, 2015 SAP AG. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 // According to the AIX OS doc #pragma alloca must be used
  27 // with C++ compiler before referencing the function alloca()
  28 #pragma alloca
  29 
  30 // no precompiled headers
  31 #include "classfile/classLoader.hpp"
  32 #include "classfile/systemDictionary.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "jvm_aix.h"
  39 #include "libperfstat_aix.hpp"
  40 #include "loadlib_aix.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "memory/filemap.hpp"
  43 #include "mutex_aix.inline.hpp"
  44 #include "oops/oop.inline.hpp"
  45 #include "os_aix.inline.hpp"
  46 #include "os_share_aix.hpp"
  47 #include "porting_aix.hpp"
  48 #include "prims/jniFastGetField.hpp"
  49 #include "prims/jvm.h"
  50 #include "prims/jvm_misc.hpp"
  51 #include "runtime/arguments.hpp"
  52 #include "runtime/atomic.inline.hpp"
  53 #include "runtime/extendedPC.hpp"
  54 #include "runtime/globals.hpp"
  55 #include "runtime/interfaceSupport.hpp"
  56 #include "runtime/java.hpp"
  57 #include "runtime/javaCalls.hpp"
  58 #include "runtime/mutexLocker.hpp"
  59 #include "runtime/objectMonitor.hpp"
  60 #include "runtime/orderAccess.inline.hpp"
  61 #include "runtime/os.hpp"
  62 #include "runtime/osThread.hpp"
  63 #include "runtime/perfMemory.hpp"
  64 #include "runtime/sharedRuntime.hpp"
  65 #include "runtime/statSampler.hpp"
  66 #include "runtime/stubRoutines.hpp"
  67 #include "runtime/thread.inline.hpp"
  68 #include "runtime/threadCritical.hpp"
  69 #include "runtime/timer.hpp"
  70 #include "runtime/vm_version.hpp"
  71 #include "services/attachListener.hpp"
  72 #include "services/runtimeService.hpp"
  73 #include "utilities/decoder.hpp"
  74 #include "utilities/defaultStream.hpp"
  75 #include "utilities/events.hpp"
  76 #include "utilities/growableArray.hpp"
  77 #include "utilities/vmError.hpp"
  78 
  79 // put OS-includes here (sorted alphabetically)
  80 #include <errno.h>
  81 #include <fcntl.h>
  82 #include <inttypes.h>
  83 #include <poll.h>
  84 #include <procinfo.h>
  85 #include <pthread.h>
  86 #include <pwd.h>
  87 #include <semaphore.h>
  88 #include <signal.h>
  89 #include <stdint.h>
  90 #include <stdio.h>
  91 #include <string.h>
  92 #include <unistd.h>
  93 #include <sys/ioctl.h>
  94 #include <sys/ipc.h>
  95 #include <sys/mman.h>
  96 #include <sys/resource.h>
  97 #include <sys/select.h>
  98 #include <sys/shm.h>
  99 #include <sys/socket.h>
 100 #include <sys/stat.h>
 101 #include <sys/sysinfo.h>
 102 #include <sys/systemcfg.h>
 103 #include <sys/time.h>
 104 #include <sys/times.h>
 105 #include <sys/types.h>
 106 #include <sys/utsname.h>
 107 #include <sys/vminfo.h>
 108 #include <sys/wait.h>
 109 
 110 // If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
 111 // getrusage() is prepared to handle the associated failure.
 112 #ifndef RUSAGE_THREAD
 113 #define RUSAGE_THREAD   (1)               /* only the calling thread */
 114 #endif
 115 
 116 // PPC port
 117 static const uintx Use64KPagesThreshold       = 1*M;
 118 static const uintx MaxExpectedDataSegmentSize = SIZE_4G*2;
 119 
 120 // Add missing declarations (should be in procinfo.h but isn't until AIX 6.1).
 121 #if !defined(_AIXVERSION_610)
 122 extern "C" {
 123   int getthrds64(pid_t ProcessIdentifier,
 124                  struct thrdentry64* ThreadBuffer,
 125                  int ThreadSize,
 126                  tid64_t* IndexPointer,
 127                  int Count);
 128 }
 129 #endif
 130 
 131 #define MAX_PATH (2 * K)
 132 
 133 // for timer info max values which include all bits
 134 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 135 // for multipage initialization error analysis (in 'g_multipage_error')
 136 #define ERROR_MP_OS_TOO_OLD                          100
 137 #define ERROR_MP_EXTSHM_ACTIVE                       101
 138 #define ERROR_MP_VMGETINFO_FAILED                    102
 139 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 140 
 141 // The semantics in this file are thus that codeptr_t is a *real code ptr*.
 142 // This means that any function taking codeptr_t as arguments will assume
 143 // a real codeptr and won't handle function descriptors (eg getFuncName),
 144 // whereas functions taking address as args will deal with function
 145 // descriptors (eg os::dll_address_to_library_name).
 146 typedef unsigned int* codeptr_t;
 147 
 148 // Typedefs for stackslots, stack pointers, pointers to op codes.
 149 typedef unsigned long stackslot_t;
 150 typedef stackslot_t* stackptr_t;
 151 
 152 // Excerpts from systemcfg.h definitions newer than AIX 5.3.
 153 #ifndef PV_7
 154 #define PV_7 0x200000          /* Power PC 7 */
 155 #define PV_7_Compat 0x208000   /* Power PC 7 */
 156 #endif
 157 #ifndef PV_8
 158 #define PV_8 0x300000          /* Power PC 8 */
 159 #define PV_8_Compat 0x308000   /* Power PC 8 */
 160 #endif
 161 
 162 #define trcVerbose(fmt, ...) { /* PPC port */  \
 163   if (Verbose) { \
 164     fprintf(stderr, fmt, ##__VA_ARGS__); \
 165     fputc('\n', stderr); fflush(stderr); \
 166   } \
 167 }
 168 #define trc(fmt, ...)        /* PPC port */
 169 
 170 #define ERRBYE(s) { \
 171     trcVerbose(s); \
 172     return -1; \
 173 }
 174 
 175 // Query dimensions of the stack of the calling thread.
 176 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
 177 
 178 // function to check a given stack pointer against given stack limits
 179 inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
 180   if (((uintptr_t)sp) & 0x7) {
 181     return false;
 182   }
 183   if (sp > stack_base) {
 184     return false;
 185   }
 186   if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
 187     return false;
 188   }
 189   return true;
 190 }
 191 
 192 // returns true if function is a valid codepointer
 193 inline bool is_valid_codepointer(codeptr_t p) {
 194   if (!p) {
 195     return false;
 196   }
 197   if (((uintptr_t)p) & 0x3) {
 198     return false;
 199   }
 200   if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
 201     return false;
 202   }
 203   return true;
 204 }
 205 
 206 // Macro to check a given stack pointer against given stack limits and to die if test fails.
 207 #define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
 208     guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
 209 }
 210 
 211 // Macro to check the current stack pointer against given stacklimits.
 212 #define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
 213   address sp; \
 214   sp = os::current_stack_pointer(); \
 215   CHECK_STACK_PTR(sp, stack_base, stack_size); \
 216 }
 217 
 218 ////////////////////////////////////////////////////////////////////////////////
 219 // global variables (for a description see os_aix.hpp)
 220 
// NOTE: the -1 initializers below presumably mean "not yet determined"
// (values are filled in during VM initialization) - see os_aix.hpp.
julong    os::Aix::_physical_memory = 0;          // total real memory in bytes; set in initialize_system_info()
pthread_t os::Aix::_main_thread = ((pthread_t)0); // pthread id of the main thread
int       os::Aix::_page_size = -1;               // base page size
int       os::Aix::_on_pase = -1;                 // 1 if running on OS/400 (PASE), 0 if plain AIX
int       os::Aix::_os_version = -1;              // OS version/release
int       os::Aix::_stack_page_size = -1;         // page size of thread stacks
int       os::Aix::_xpg_sus_mode = -1;            // XPG_SUS_ENV mode flag
int       os::Aix::_extshm = -1;                  // EXTSHM setting
int       os::Aix::_logical_cpus = -1;            // number of logical cpus; set in get_cpuinfo()
 230 
 231 ////////////////////////////////////////////////////////////////////////////////
 232 // local variables
 233 
static int      g_multipage_error  = -1;   // error analysis for multipage initialization
static jlong    initial_time_count = 0;    // time counter at VM start - TODO confirm where it is set
static int      clock_tics_per_sec = 100;  // clock ticks per second; default 100, presumably refined during init
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true; // enables the periodic signal checks - confirm toggling site
static pid_t    _initial_pid       = 0;    // pid recorded at initialization - confirm where it is set
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;                    // signal set used for suspend/resume handling
static pthread_mutex_t dl_mutex;              // Used to protect dlsym() calls.
 243 
 244 // This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
 246 // therefore should not be defined in AIX class.
 247 //
 248 // AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
 249 // latter two (16M "large" resp. 16G "huge" pages) require special
 250 // setup and are normally not available.
 251 //
 252 // AIX supports multiple page sizes per process, for:
 253 //  - Stack (of the primordial thread, so not relevant for us)
 254 //  - Data - data, bss, heap, for us also pthread stacks
 255 //  - Text - text code
 256 //  - shared memory
 257 //
 258 // Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
 259 // and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
 260 //
 261 // For shared memory, page size can be set dynamically via
 262 // shmctl(). Different shared memory regions can have different page
 263 // sizes.
 264 //
// More information can be found at the IBM info center:
 266 //   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
 267 //
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,                // pagesize (not yet queried)
  (size_t) -1,                // datapsize
  (size_t) -1,                // shmpsize
  (size_t) -1,                // pthr_stack_pagesize
  (size_t) -1,                // textpsize
  false, false,               // can_use_64K_pages, can_use_16M_pages
  0                           // error
};
 286 
 287 // We must not accidentally allocate memory close to the BRK - even if
 288 // that would work - because then we prevent the BRK segment from
 289 // growing which may result in a malloc OOM even though there is
 290 // enough memory. The problem only arises if we shmat() or mmap() at
 291 // a specific wish address, e.g. to place the heap in a
 292 // compressed-oops-friendly way.
 293 static bool is_close_to_brk(address a) {
 294   address a1 = (address) sbrk(0);
 295   if (a >= a1 && a < (a1 + MaxExpectedDataSegmentSize)) {
 296     return true;
 297   }
 298   return false;
 299 }
 300 
// Amount of currently free real memory, in bytes (platform-independent entry point).
julong os::available_memory() {
  return Aix::available_memory();
}
 304 
 305 julong os::Aix::available_memory() {
 306   os::Aix::meminfo_t mi;
 307   if (os::Aix::get_meminfo(&mi)) {
 308     return mi.real_free;
 309   } else {
 310     return 0xFFFFFFFFFFFFFFFFLL;
 311   }
 312 }
 313 
// Total physical memory in bytes (platform-independent entry point).
julong os::physical_memory() {
  return Aix::physical_memory();
}
 317 
 318 ////////////////////////////////////////////////////////////////////////////////
 319 // environment support
 320 
 321 bool os::getenv(const char* name, char* buf, int len) {
 322   const char* val = ::getenv(name);
 323   if (val != NULL && strlen(val) < (size_t)len) {
 324     strcpy(buf, val);
 325     return true;
 326   }
 327   if (len > 0) buf[0] = 0;  // return a null string
 328   return false;
 329 }
 330 
 331 // Return true if user is running as root.
 332 
 333 bool os::have_special_privileges() {
 334   static bool init = false;
 335   static bool privileges = false;
 336   if (!init) {
 337     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 338     init = true;
 339   }
 340   return privileges;
 341 }
 342 
 343 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 344 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 345 static bool my_disclaim64(char* addr, size_t size) {
 346 
 347   if (size == 0) {
 348     return true;
 349   }
 350 
 351   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 352   const unsigned int maxDisclaimSize = 0x40000000;
 353 
 354   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 355   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 356 
 357   char* p = addr;
 358 
 359   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
 360     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 361       trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 362       return false;
 363     }
 364     p += maxDisclaimSize;
 365   }
 366 
 367   if (lastDisclaimSize > 0) {
 368     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 369       trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 370       return false;
 371     }
 372   }
 373 
 374   return true;
 375 }
 376 
// Cpu architecture string. AIX runs only on PPC; any other target is a
// build-time error.
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif
 385 
 386 
 387 // Given an address, returns the size of the page backing that address.
 388 size_t os::Aix::query_pagesize(void* addr) {
 389 
 390   vm_page_info pi;
 391   pi.addr = (uint64_t)addr;
 392   if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
 393     return pi.pagesize;
 394   } else {
 395     fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
 396     assert(false, "vmgetinfo failed to retrieve page size");
 397     return SIZE_4K;
 398   }
 399 
 400 }
 401 
// Returns the kernel thread id of the currently running thread.
pid_t os::Aix::gettid() {
  // thread_self() is the AIX call returning the caller's kernel thread id.
  return (pid_t) thread_self();
}
 406 
 407 void os::Aix::initialize_system_info() {
 408 
 409   // Get the number of online(logical) cpus instead of configured.
 410   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
 411   assert(_processor_count > 0, "_processor_count must be > 0");
 412 
 413   // Retrieve total physical storage.
 414   os::Aix::meminfo_t mi;
 415   if (!os::Aix::get_meminfo(&mi)) {
 416     fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
 417     assert(false, "os::Aix::get_meminfo failed.");
 418   }
 419   _physical_memory = (julong) mi.real_total;
 420 }
 421 
 422 // Helper function for tracing page sizes.
 423 static const char* describe_pagesize(size_t pagesize) {
 424   switch (pagesize) {
 425     case SIZE_4K : return "4K";
 426     case SIZE_64K: return "64K";
 427     case SIZE_16M: return "16M";
 428     case SIZE_16G: return "16G";
 429     case -1:       return "not set";
 430     default:
 431       assert(false, "surprise");
 432       return "??";
 433   }
 434 }
 435 
 436 // Probe OS for multipage support.
 437 // Will fill the global g_multipage_support structure.
 438 // Must be called before calling os::large_page_init().
 439 static void query_multipage_support() {
 440 
 441   guarantee(g_multipage_support.pagesize == -1,
 442             "do not call twice");
 443 
 444   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
 445 
 446   // This really would surprise me.
 447   assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");
 448 
 449   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 450   // Default data page size is defined either by linker options (-bdatapsize)
 451   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 452   // default should be 4K.
 453   {
 454     void* p = ::malloc(SIZE_16M);
 455     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
 456     ::free(p);
 457   }
 458 
 459   // Query default shm page size (LDR_CNTRL SHMPSIZE).
 460   {
 461     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 462     guarantee(shmid != -1, "shmget failed");
 463     void* p = ::shmat(shmid, NULL, 0);
 464     ::shmctl(shmid, IPC_RMID, NULL);
 465     guarantee(p != (void*) -1, "shmat failed");
 466     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
 467     ::shmdt(p);
 468   }
 469 
 470   // Before querying the stack page size, make sure we are not running as primordial
 471   // thread (because primordial thread's stack may have different page size than
 472   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 473   // number of reasons so we may just as well guarantee it here.
 474   guarantee0(!os::Aix::is_primordial_thread());
 475 
 476   // Query pthread stack page size.
 477   {
 478     int dummy = 0;
 479     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
 480   }
 481 
 482   // Query default text page size (LDR_CNTRL TEXTPSIZE).
 483   /* PPC port: so far unused.
 484   {
 485     address any_function =
 486       (address) resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
 487     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
 488   }
 489   */
 490 
 491   // Now probe for support of 64K pages and 16M pages.
 492 
 493   // Before OS/400 V6R1, there is no support for pages other than 4K.
 494   if (os::Aix::on_pase_V5R4_or_older()) {
 495     Unimplemented();
 496     goto query_multipage_support_end;
 497   }
 498 
 499   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
 500   {
 501     const int MAX_PAGE_SIZES = 4;
 502     psize_t sizes[MAX_PAGE_SIZES];
 503     const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 504     if (num_psizes == -1) {
 505       trc("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
 506       trc("disabling multipage support.\n");
 507       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
 508       goto query_multipage_support_end;
 509     }
 510     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 511     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 512     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
 513     for (int i = 0; i < num_psizes; i ++) {
 514       trcVerbose(" %s ", describe_pagesize(sizes[i]));
 515     }
 516 
 517     // Can we use 64K, 16M pages?
 518     for (int i = 0; i < num_psizes; i ++) {
 519       const size_t pagesize = sizes[i];
 520       if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
 521         continue;
 522       }
 523       bool can_use = false;
 524       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
 525       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
 526         IPC_CREAT | S_IRUSR | S_IWUSR);
 527       guarantee0(shmid != -1); // Should always work.
 528       // Try to set pagesize.
 529       struct shmid_ds shm_buf = { 0 };
 530       shm_buf.shm_pagesize = pagesize;
 531       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
 532         const int en = errno;
 533         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 534         // PPC port trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
 535         // PPC port  MiscUtils::describe_errno(en));
 536       } else {
 537         // Attach and double check pageisze.
 538         void* p = ::shmat(shmid, NULL, 0);
 539         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 540         guarantee0(p != (void*) -1); // Should always work.
 541         const size_t real_pagesize = os::Aix::query_pagesize(p);
 542         if (real_pagesize != pagesize) {
 543           trcVerbose("real page size (0x%llX) differs.", real_pagesize);
 544         } else {
 545           can_use = true;
 546         }
 547         ::shmdt(p);
 548       }
 549       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
 550       if (pagesize == SIZE_64K) {
 551         g_multipage_support.can_use_64K_pages = can_use;
 552       } else if (pagesize == SIZE_16M) {
 553         g_multipage_support.can_use_16M_pages = can_use;
 554       }
 555     }
 556 
 557   } // end: check which pages can be used for shared memory
 558 
 559 query_multipage_support_end:
 560 
 561   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s\n",
 562       describe_pagesize(g_multipage_support.pagesize));
 563   trcVerbose("Data page size (C-Heap, bss, etc): %s\n",
 564       describe_pagesize(g_multipage_support.datapsize));
 565   trcVerbose("Text page size: %s\n",
 566       describe_pagesize(g_multipage_support.textpsize));
 567   trcVerbose("Thread stack page size (pthread): %s\n",
 568       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
 569   trcVerbose("Default shared memory page size: %s\n",
 570       describe_pagesize(g_multipage_support.shmpsize));
 571   trcVerbose("Can use 64K pages dynamically with shared meory: %s\n",
 572       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
 573   trcVerbose("Can use 16M pages dynamically with shared memory: %s\n",
 574       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
 575   trcVerbose("Multipage error details: %d\n",
 576       g_multipage_support.error);
 577 
 578   // sanity checks
 579   assert0(g_multipage_support.pagesize == SIZE_4K);
 580   assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
 581   // PPC port: so far unused.assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
 582   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
 583   assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);
 584 
 585 } // end os::Aix::query_multipage_support()
 586 
// Sets up installation-layout-dependent system properties: dll_dir,
// java_home, boot class path, library path and extension dirs.
// Derives the paths from the location of libjvm.so via os::jvm_path().
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    // The path is pruned in place by truncating at successive '/' characters.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  // buf still holds <java_home> here; append the extensions subdirectory.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}
 653 
 654 ////////////////////////////////////////////////////////////////////////////////
 655 // breakpoint support
 656 
// Triggers a programmatic breakpoint via the platform BREAKPOINT mechanism.
void os::breakpoint() {
  BREAKPOINT;
}
 660 
// Empty C-linkage function; exists only as a convenient symbol on which
// to set a debugger breakpoint.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
 664 
 665 ////////////////////////////////////////////////////////////////////////////////
 666 // signal support
 667 
debug_only(static bool signal_sets_initialized = false);
// Signal sets populated by signal_sets_init() below.
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
 670 
 671 bool os::Aix::is_sig_ignored(int sig) {
 672   struct sigaction oact;
 673   sigaction(sig, (struct sigaction*)NULL, &oact);
 674   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
 675     : CAST_FROM_FN_PTR(void*, oact.sa_handler);
 676   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
 677     return true;
 678   } else {
 679     return false;
 680   }
 681 }
 682 
void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  // Hardware/VM-internal signals plus SIGDANGER (AIX low-paging-space
  // notification) and SR_signum (suspend/resume, see its definition above).
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  // Shutdown signals are only claimed when their current disposition is
  // not SIG_IGN (i.e. the embedder has not deliberately ignored them).
  if (!ReduceSignalUsage) {
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}
 729 
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Returns the set built by signal_sets_init(); must not be called before it.
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
 736 
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
// Returns the set built by signal_sets_init(); must not be called before it.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
 743 
// These are signals that are blocked during cond_wait to allow debugger in
// Returns the set built by signal_sets_init(); must not be called before it.
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}
 749 
 750 void os::Aix::hotspot_sigmask(Thread* thread) {
 751 
 752   //Save caller's signal mask before setting VM signal mask
 753   sigset_t caller_sigmask;
 754   pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);
 755 
 756   OSThread* osthread = thread->osthread();
 757   osthread->set_caller_sigmask(caller_sigmask);
 758 
 759   pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);
 760 
 761   if (!ReduceSignalUsage) {
 762     if (thread->is_VM_thread()) {
 763       // Only the VM thread handles BREAK_SIGNAL ...
 764       pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
 765     } else {
 766       // ... all other threads block BREAK_SIGNAL
 767       pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
 768     }
 769   }
 770 }
 771 
// retrieve memory information.
// Returns false if something went wrong;
// content of pmi undefined in this case.
// On plain AIX the (dynamically loaded) perfstat library is queried;
// on PASE (OS/400) this is not implemented and always fails.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {

    Unimplemented();
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    // We requested exactly one record; anything else is unexpected.
    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    // perfstat reports in 4 KB pages; convert everything to bytes.
    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo
 826 
 827 // Retrieve global cpu information.
 828 // Returns false if something went wrong;
 829 // the content of pci is undefined in this case.
bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
  assert(pci, "get_cpuinfo: invalid parameter");
  memset(pci, 0, sizeof(cpuinfo_t));

  perfstat_cpu_total_t psct;
  memset (&psct, '\0', sizeof(psct));

  // NULL name + desired_number==1 requests the single global cpu record.
  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
    assert(0, "perfstat_cpu_total() failed");
    return false;
  }

  // global cpu information
  strcpy (pci->description, psct.description);
  pci->processorHZ = psct.processorHZ;
  pci->ncpus = psct.ncpus;
  os::Aix::_logical_cpus = psct.ncpus;
  // loadavg[] comes as fixed-point with SBITS fractional bits; scale to double.
  for (int i = 0; i < 3; i++) {
    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
  }

  // get the processor version from _system_configuration
  switch (_system_configuration.version) {
  case PV_8:
    strcpy(pci->version, "Power PC 8");
    break;
  case PV_7:
    strcpy(pci->version, "Power PC 7");
    break;
  case PV_6_1:
    strcpy(pci->version, "Power PC 6 DD1.x");
    break;
  case PV_6:
    strcpy(pci->version, "Power PC 6");
    break;
  case PV_5:
    strcpy(pci->version, "Power PC 5");
    break;
  case PV_5_2:
    strcpy(pci->version, "Power PC 5_2");
    break;
  case PV_5_3:
    strcpy(pci->version, "Power PC 5_3");
    break;
  case PV_5_Compat:
    strcpy(pci->version, "PV_5_Compat");
    break;
  case PV_6_Compat:
    strcpy(pci->version, "PV_6_Compat");
    break;
  case PV_7_Compat:
    strcpy(pci->version, "PV_7_Compat");
    break;
  case PV_8_Compat:
    strcpy(pci->version, "PV_8_Compat");
    break;
  default:
    // Unrecognized (likely newer) processor version constant.
    strcpy(pci->version, "unknown");
  }

  return true;

} //end os::Aix::get_cpuinfo
 894 
 895 //////////////////////////////////////////////////////////////////////////////
 896 // detecting pthread library
 897 
 898 void os::Aix::libpthread_init() {
 899   return;
 900 }
 901 
 902 //////////////////////////////////////////////////////////////////////////////
 903 // create new thread
 904 
 905 // Thread start routine for all newly created threads
// Thread start routine for all newly created threads.
// Runs on the new pthread: records stack geometry, perturbs the stack
// pointer to spread cache-line usage, publishes the Thread in TLS,
// sets up signals and FPU state, then enters Thread::run().
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  // alloca() shifts this frame's stack pointer by 0..7 * 128 bytes;
  // the allocation is intentionally unused (see #pragma alloca at top of file).
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  // Make the Thread reachable via thread-local storage before running user code.
  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is kernel thread id (similar to Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level start routine
  thread->run();

  return 0;
}
 951 
// Create a new OS thread for 'thread'. The pthread is created suspended
// (PTHREAD_CREATE_SUSPENDED_NP) and later released by os::pd_start_thread().
// Returns false (with no OSThread attached) if allocation or
// pthread_create fails.
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  // Detached: the VM never pthread_join()s its threads.
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (os::Aix::supports_variable_stack_size()) {
    if (stack_size == 0) {
      stack_size = os::Aix::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    // Never go below the platform minimum stack size.
    stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } //else let thread_create() pick the default value (96 K on AIX)

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  // The attribute object is no longer needed once the thread exists.
  pthread_attr_destroy(&attr);

  if (ret == 0) {
    // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
  } else {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}
1039 
1040 /////////////////////////////////////////////////////////////////////////////
1041 // attach existing thread
1042 
1043 // bootstrap the main thread
1044 bool os::create_main_thread(JavaThread* thread) {
1045   assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
1046   return create_attached_thread(thread);
1047 }
1048 
// Attach an externally created (already running) thread to the VM:
// allocate and wire up its OSThread, initialize FPU state, NUMA group
// and signal mask. Returns false only if the OSThread allocation fails.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  // Bind the thread to its NUMA locality group, if NUMA is enabled.
  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}
1089 
1090 void os::pd_start_thread(Thread* thread) {
1091   int status = pthread_continue_np(thread->osthread()->pthread_id());
1092   assert(status == 0, "thr_continue failed");
1093 }
1094 
1095 // Free OS resources related to the OSThread
1096 void os::free_thread(OSThread* osthread) {
1097   assert(osthread != NULL, "osthread not set");
1098 
1099   if (Thread::current()->osthread() == osthread) {
1100     // Restore caller's signal mask
1101     sigset_t sigmask = osthread->caller_sigmask();
1102     pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
1103    }
1104 
1105   delete osthread;
1106 }
1107 
1108 //////////////////////////////////////////////////////////////////////////////
1109 // thread local storage
1110 
1111 int os::allocate_thread_local_storage() {
1112   pthread_key_t key;
1113   int rslt = pthread_key_create(&key, NULL);
1114   assert(rslt == 0, "cannot allocate thread local storage");
1115   return (int)key;
1116 }
1117 
1118 // Note: This is currently not used by VM, as we don't destroy TLS key
1119 // on VM exit.
1120 void os::free_thread_local_storage(int index) {
1121   int rslt = pthread_key_delete((pthread_key_t)index);
1122   assert(rslt == 0, "invalid index");
1123 }
1124 
1125 void os::thread_local_storage_at_put(int index, void* value) {
1126   int rslt = pthread_setspecific((pthread_key_t)index, value);
1127   assert(rslt == 0, "pthread_setspecific failed");
1128 }
1129 
1130 extern "C" Thread* get_thread() {
1131   return ThreadLocalStorage::thread();
1132 }
1133 
1134 ////////////////////////////////////////////////////////////////////////////////
1135 // time support
1136 
1137 // Time since start-up in seconds to a fine granularity.
1138 // Used by VMSelfDestructTimer and the MemProfiler.
1139 double os::elapsedTime() {
1140   return (double)(os::elapsed_counter()) * 0.000001;
1141 }
1142 
1143 jlong os::elapsed_counter() {
1144   timeval time;
1145   int status = gettimeofday(&time, NULL);
1146   return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
1147 }
1148 
1149 jlong os::elapsed_frequency() {
1150   return (1000 * 1000);
1151 }
1152 
1153 bool os::supports_vtime() { return true; }
1154 bool os::enable_vtime()   { return false; }
1155 bool os::vtime_enabled()  { return false; }
1156 
1157 double os::elapsedVTime() {
1158   struct rusage usage;
1159   int retval = getrusage(RUSAGE_THREAD, &usage);
1160   if (retval == 0) {
1161     return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
1162   } else {
1163     // better than nothing, but not much
1164     return elapsedTime();
1165   }
1166 }
1167 
1168 jlong os::javaTimeMillis() {
1169   timeval time;
1170   int status = gettimeofday(&time, NULL);
1171   assert(status != -1, "aix error at gettimeofday()");
1172   return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1173 }
1174 
1175 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1176   timeval time;
1177   int status = gettimeofday(&time, NULL);
1178   assert(status != -1, "aix error at gettimeofday()");
1179   seconds = jlong(time.tv_sec);
1180   nanos = jlong(time.tv_usec) * 1000;
1181 }
1182 
1183 
1184 // We need to manually declare mread_real_time,
1185 // because IBM didn't provide a prototype in time.h.
1186 // (they probably only ever tested in C, not C++)
1187 extern "C"
1188 int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
1189 
// Monotonic nanosecond clock based on the POWER time base registers.
jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {
    // PASE (OS/400) path is not implemented yet.
    Unimplemented();
    return 0;
  } else {
    // On AIX use the precision of processors real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After convertion we have following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonic increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      // Raw time-base value: convert to seconds/nanoseconds in place.
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}
1213 
1214 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1215   info_ptr->max_value = ALL_64_BITS;
1216   // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
1217   info_ptr->may_skip_backward = false;
1218   info_ptr->may_skip_forward = false;
1219   info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
1220 }
1221 
1222 // Return the real, user, and system times in seconds from an
1223 // arbitrary fixed point in the past.
1224 bool os::getTimesSecs(double* process_real_time,
1225                       double* process_user_time,
1226                       double* process_system_time) {
1227   struct tms ticks;
1228   clock_t real_ticks = times(&ticks);
1229 
1230   if (real_ticks == (clock_t) (-1)) {
1231     return false;
1232   } else {
1233     double ticks_per_second = (double) clock_tics_per_sec;
1234     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1235     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1236     *process_real_time = ((double) real_ticks) / ticks_per_second;
1237 
1238     return true;
1239   }
1240 }
1241 
1242 char * os::local_time_string(char *buf, size_t buflen) {
1243   struct tm t;
1244   time_t long_time;
1245   time(&long_time);
1246   localtime_r(&long_time, &t);
1247   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1248                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1249                t.tm_hour, t.tm_min, t.tm_sec);
1250   return buf;
1251 }
1252 
1253 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
1254   return localtime_r(clock, res);
1255 }
1256 
1257 ////////////////////////////////////////////////////////////////////////////////
1258 // runtime exit support
1259 
// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
// The steps below run in a fixed order: release shared resources first,
// flush logs, then run the user-registered abort hook last.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
1280 
// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
// Runs the shutdown sequence, then either ::abort()s (to produce a core
// file when dump_core is set) or exits with status 1.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    // In debug builds, announce which thread is aborting before dumping.
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}
1300 
1301 // Die immediately, no exit hook, no abort hook, no cleanup.
1302 void os::die() {
1303   ::abort();
1304 }
1305 
1306 // This method is a copy of JDK's sysGetLastErrorString
1307 // from src/solaris/hpi/src/system_md.c
1308 
1309 size_t os::lasterror(char *buf, size_t len) {
1310   if (errno == 0) return 0;
1311 
1312   const char *s = ::strerror(errno);
1313   size_t n = ::strlen(s);
1314   if (n >= len) {
1315     n = len - 1;
1316   }
1317   ::strncpy(buf, s, n);
1318   buf[n] = '\0';
1319   return n;
1320 }
1321 
1322 intx os::current_thread_id() { return (intx)pthread_self(); }
1323 
int os::current_process_id() {

  // This implementation returns a unique pid, the pid of the
  // launcher thread that starts the vm 'process'.
  // _initial_pid is set elsewhere in this file; when it is 0 the plain
  // getpid() result is returned instead.

  // NOTE(review): the remarks below about NPTL, gettid() and
  // osThread_linux.hpp were inherited from the Linux port and may not
  // apply verbatim on AIX - confirm before relying on them.

  // Under POSIX, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre NPTL behaviour.

  // if you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in osThread_linux.hpp file

  return (int)(_initial_pid ? _initial_pid : getpid());
}
1339 
1340 // DLL functions
1341 
1342 const char* os::dll_file_extension() { return ".so"; }
1343 
1344 // This must be hard coded because it's the system's temporary
1345 // directory not the java application's temp directory, ala java.io.tmpdir.
1346 const char* os::get_temp_directory() { return "/tmp"; }
1347 
1348 static bool file_exists(const char* filename) {
1349   struct stat statbuf;
1350   if (filename == NULL || strlen(filename) == 0) {
1351     return false;
1352   }
1353   return os::stat(filename, &statbuf) == 0;
1354 }
1355 
// Build a shared-library file name ("lib<fname>.so") into 'buffer'.
// If 'pname' is a path-separator-delimited list, the first directory
// containing the file wins. Returns false on buffer overflow or if no
// candidate in the path list exists.
bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  // (+10 covers the literal "lib", ".so", '/' and terminator with slack.)
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    // No directory given: bare library name.
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // Path list: probe each element until the file exists.
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage allocated by split_path()
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    // Single directory: no existence check, mirror the pnamelen==0 case.
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
1400 
1401 // Check if addr is inside libjvm.so.
1402 bool os::address_is_in_vm(address addr) {
1403 
1404   // Input could be a real pc or a function pointer literal. The latter
1405   // would be a function descriptor residing in the data segment of a module.
1406 
1407   const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
1408   if (lib) {
1409     if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
1410       return true;
1411     } else {
1412       return false;
1413     }
1414   } else {
1415     lib = LoadedLibraries::find_for_data_address(addr);
1416     if (lib) {
1417       if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
1418         return true;
1419       } else {
1420         return false;
1421       }
1422     } else {
1423       return false;
1424     }
1425   }
1426 }
1427 
1428 // Resolve an AIX function descriptor literal to a code pointer.
1429 // If the input is a valid code pointer to a text segment of a loaded module,
1430 //   it is returned unchanged.
1431 // If the input is a valid AIX function descriptor, it is resolved to the
1432 //   code entry point.
1433 // If the input is neither a valid function descriptor nor a valid code pointer,
1434 //   NULL is returned.
1435 static address resolve_function_descriptor_to_code_pointer(address p) {
1436 
1437   const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
1438   if (lib) {
1439     // its a real code pointer
1440     return p;
1441   } else {
1442     lib = LoadedLibraries::find_for_data_address(p);
1443     if (lib) {
1444       // pointer to data segment, potential function descriptor
1445       address code_entry = (address)(((FunctionDescriptor*)p)->entry());
1446       if (LoadedLibraries::find_for_text_address(code_entry)) {
1447         // Its a function descriptor
1448         return code_entry;
1449       }
1450     }
1451   }
1452   return NULL;
1453 }
1454 
1455 bool os::dll_address_to_function_name(address addr, char *buf,
1456                                       int buflen, int *offset) {
1457   if (offset) {
1458     *offset = -1;
1459   }
1460   // Buf is not optional, but offset is optional.
1461   assert(buf != NULL, "sanity check");
1462   buf[0] = '\0';
1463 
1464   // Resolve function ptr literals first.
1465   addr = resolve_function_descriptor_to_code_pointer(addr);
1466   if (!addr) {
1467     return false;
1468   }
1469 
1470   // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
1471   return Decoder::decode(addr, buf, buflen, offset);
1472 }
1473 
1474 static int getModuleName(codeptr_t pc,                    // [in] program counter
1475                          char* p_name, size_t namelen,    // [out] optional: function name
1476                          char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
1477                          ) {
1478 
1479   // initialize output parameters
1480   if (p_name && namelen > 0) {
1481     *p_name = '\0';
1482   }
1483   if (p_errmsg && errmsglen > 0) {
1484     *p_errmsg = '\0';
1485   }
1486 
1487   const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);
1488   if (lib) {
1489     if (p_name && namelen > 0) {
1490       sprintf(p_name, "%.*s", namelen, lib->get_shortname());
1491     }
1492     return 0;
1493   }
1494 
1495   trcVerbose("pc outside any module");
1496 
1497   return -1;
1498 }
1499 
1500 bool os::dll_address_to_library_name(address addr, char* buf,
1501                                      int buflen, int* offset) {
1502   if (offset) {
1503     *offset = -1;
1504   }
1505   // Buf is not optional, but offset is optional.
1506   assert(buf != NULL, "sanity check");
1507   buf[0] = '\0';
1508 
1509   // Resolve function ptr literals first.
1510   addr = resolve_function_descriptor_to_code_pointer(addr);
1511   if (!addr) {
1512     return false;
1513   }
1514 
1515   if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
1516     return true;
1517   }
1518   return false;
1519 }
1520 
1521 // Loads .dll/.so and in case of error it checks if .dll/.so was built
1522 // for the same architecture as Hotspot is running on.
1523 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1524 
1525   if (ebuf && ebuflen > 0) {
1526     ebuf[0] = '\0';
1527     ebuf[ebuflen - 1] = '\0';
1528   }
1529 
1530   if (!filename || strlen(filename) == 0) {
1531     ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1532     return NULL;
1533   }
1534 
1535   // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1536   void * result= ::dlopen(filename, RTLD_LAZY);
1537   if (result != NULL) {
1538     // Reload dll cache. Don't do this in signal handling.
1539     LoadedLibraries::reload();
1540     return result;
1541   } else {
1542     // error analysis when dlopen fails
1543     const char* const error_report = ::dlerror();
1544     if (error_report && ebuf && ebuflen > 0) {
1545       snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1546                filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1547     }
1548   }
1549   return NULL;
1550 }
1551 
1552 // Glibc-2.0 libdl is not MT safe. If you are building with any glibc,
1553 // chances are you might want to run the generated bits against glibc-2.0
1554 // libdl.so, so always use locking for any version of glibc.
1555 void* os::dll_lookup(void* handle, const char* name) {
1556   pthread_mutex_lock(&dl_mutex);
1557   void* res = dlsym(handle, name);
1558   pthread_mutex_unlock(&dl_mutex);
1559   return res;
1560 }
1561 
1562 void* os::get_default_process_handle() {
1563   return (void*)::dlopen(NULL, RTLD_LAZY);
1564 }
1565 
1566 void os::print_dll_info(outputStream *st) {
1567   st->print_cr("Dynamic libraries:");
1568   LoadedLibraries::print(st);
1569 }
1570 
1571 void os::print_os_info(outputStream* st) {
1572   st->print("OS:");
1573 
1574   st->print("uname:");
1575   struct utsname name;
1576   uname(&name);
1577   st->print(name.sysname); st->print(" ");
1578   st->print(name.nodename); st->print(" ");
1579   st->print(name.release); st->print(" ");
1580   st->print(name.version); st->print(" ");
1581   st->print(name.machine);
1582   st->cr();
1583 
1584   // rlimit
1585   st->print("rlimit:");
1586   struct rlimit rlim;
1587 
1588   st->print(" STACK ");
1589   getrlimit(RLIMIT_STACK, &rlim);
1590   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1591   else st->print("%uk", rlim.rlim_cur >> 10);
1592 
1593   st->print(", CORE ");
1594   getrlimit(RLIMIT_CORE, &rlim);
1595   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1596   else st->print("%uk", rlim.rlim_cur >> 10);
1597 
1598   st->print(", NPROC ");
1599   st->print("%d", sysconf(_SC_CHILD_MAX));
1600 
1601   st->print(", NOFILE ");
1602   getrlimit(RLIMIT_NOFILE, &rlim);
1603   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1604   else st->print("%d", rlim.rlim_cur);
1605 
1606   st->print(", AS ");
1607   getrlimit(RLIMIT_AS, &rlim);
1608   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1609   else st->print("%uk", rlim.rlim_cur >> 10);
1610 
1611   // Print limits on DATA, because it limits the C-heap.
1612   st->print(", DATA ");
1613   getrlimit(RLIMIT_DATA, &rlim);
1614   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1615   else st->print("%uk", rlim.rlim_cur >> 10);
1616   st->cr();
1617 
1618   // load average
1619   st->print("load average:");
1620   double loadavg[3] = {-1.L, -1.L, -1.L};
1621   os::loadavg(loadavg, 3);
1622   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1623   st->cr();
1624 }
1625 
1626 void os::print_memory_info(outputStream* st) {
1627 
1628   st->print_cr("Memory:");
1629 
1630   st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
1631   st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
1632   st->print_cr("  Default shared memory page size:        %s",
1633     describe_pagesize(g_multipage_support.shmpsize));
1634   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1635     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1636   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1637     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1638   if (g_multipage_error != 0) {
1639     st->print_cr("  multipage error: %d", g_multipage_error);
1640   }
1641 
1642   // print out LDR_CNTRL because it affects the default page sizes
1643   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1644   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1645 
1646   const char* const extshm = ::getenv("EXTSHM");
1647   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1648   if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1649     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1650   }
1651 
1652   // Call os::Aix::get_meminfo() to retrieve memory statistics.
1653   os::Aix::meminfo_t mi;
1654   if (os::Aix::get_meminfo(&mi)) {
1655     char buffer[256];
1656     if (os::Aix::on_aix()) {
1657       jio_snprintf(buffer, sizeof(buffer),
1658                    "  physical total : %llu\n"
1659                    "  physical free  : %llu\n"
1660                    "  swap total     : %llu\n"
1661                    "  swap free      : %llu\n",
1662                    mi.real_total,
1663                    mi.real_free,
1664                    mi.pgsp_total,
1665                    mi.pgsp_free);
1666     } else {
1667       Unimplemented();
1668     }
1669     st->print_raw(buffer);
1670   } else {
1671     st->print_cr("  (no more information available)");
1672   }
1673 }
1674 
1675 void os::pd_print_cpu_info(outputStream* st) {
1676   // cpu
1677   st->print("CPU:");
1678   st->print("total %d", os::processor_count());
1679   // It's not safe to query number of active processors after crash
1680   // st->print("(active %d)", os::active_processor_count());
1681   st->print(" %s", VM_Version::cpu_features());
1682   st->cr();
1683 }
1684 
1685 void os::print_siginfo(outputStream* st, void* siginfo) {
1686   // Use common posix version.
1687   os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
1688   st->cr();
1689 }
1690 
1691 static void print_signal_handler(outputStream* st, int sig,
1692                                  char* buf, size_t buflen);
1693 
1694 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1695   st->print_cr("Signal Handlers:");
1696   print_signal_handler(st, SIGSEGV, buf, buflen);
1697   print_signal_handler(st, SIGBUS , buf, buflen);
1698   print_signal_handler(st, SIGFPE , buf, buflen);
1699   print_signal_handler(st, SIGPIPE, buf, buflen);
1700   print_signal_handler(st, SIGXFSZ, buf, buflen);
1701   print_signal_handler(st, SIGILL , buf, buflen);
1702   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
1703   print_signal_handler(st, SR_signum, buf, buflen);
1704   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1705   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1706   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1707   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1708   print_signal_handler(st, SIGTRAP, buf, buflen);
1709   print_signal_handler(st, SIGDANGER, buf, buflen);
1710 }
1711 
1712 static char saved_jvm_path[MAXPATHLEN] = {0};
1713 
1714 // Find the full path to the current module, libjvm.so.
1715 void os::jvm_path(char *buf, jint buflen) {
1716   // Error checking.
1717   if (buflen < MAXPATHLEN) {
1718     assert(false, "must use a large-enough buffer");
1719     buf[0] = '\0';
1720     return;
1721   }
1722   // Lazy resolve the path to current module.
1723   if (saved_jvm_path[0] != 0) {
1724     strcpy(buf, saved_jvm_path);
1725     return;
1726   }
1727 
1728   Dl_info dlinfo;
1729   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1730   assert(ret != 0, "cannot locate libjvm");
1731   char* rp = realpath((char *)dlinfo.dli_fname, buf);
1732   assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1733 
1734   strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
1735   saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
1736 }
1737 
// Print the platform's JNI symbol-name prefix. On AIX native JNI symbols
// carry no prefix (not even "_"), so this intentionally prints nothing.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}
1741 
// Print the platform's JNI symbol-name suffix. AIX uses none, so this
// intentionally prints nothing.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
1745 
1746 ////////////////////////////////////////////////////////////////////////////////
1747 // sun.misc.Signal support
1748 
1749 static volatile jint sigint_count = 0;
1750 
1751 static void
1752 UserHandler(int sig, void *siginfo, void *context) {
1753   // 4511530 - sem_post is serialized and handled by the manager thread. When
1754   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1755   // don't want to flood the manager thread with sem_post requests.
1756   if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1757     return;
1758 
1759   // Ctrl-C is pressed during error reporting, likely because the error
1760   // handler fails to abort. Let VM die immediately.
1761   if (sig == SIGINT && is_error_reported()) {
1762     os::die();
1763   }
1764 
1765   os::signal_notify(sig);
1766 }
1767 
// Return the address of UserHandler as an opaque pointer, so callers can
// compare an installed handler against the VM's own user-signal handler.
void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}
1771 
1772 extern "C" {
1773   typedef void (*sa_handler_t)(int);
1774   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1775 }
1776 
1777 void* os::signal(int signal_number, void* handler) {
1778   struct sigaction sigAct, oldSigAct;
1779 
1780   sigfillset(&(sigAct.sa_mask));
1781 
1782   // Do not block out synchronous signals in the signal handler.
1783   // Blocking synchronous signals only makes sense if you can really
1784   // be sure that those signals won't happen during signal handling,
1785   // when the blocking applies. Normal signal handlers are lean and
1786   // do not cause signals. But our signal handlers tend to be "risky"
1787   // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
1788   // On AIX, PASE there was a case where a SIGSEGV happened, followed
1789   // by a SIGILL, which was blocked due to the signal mask. The process
1790   // just hung forever. Better to crash from a secondary signal than to hang.
1791   sigdelset(&(sigAct.sa_mask), SIGSEGV);
1792   sigdelset(&(sigAct.sa_mask), SIGBUS);
1793   sigdelset(&(sigAct.sa_mask), SIGILL);
1794   sigdelset(&(sigAct.sa_mask), SIGFPE);
1795   sigdelset(&(sigAct.sa_mask), SIGTRAP);
1796 
1797   sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1798 
1799   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1800 
1801   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1802     // -1 means registration failed
1803     return (void *)-1;
1804   }
1805 
1806   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1807 }
1808 
// Raise the given signal in the current process (thin wrapper over raise(3)).
void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}
1812 
1813 //
1814 // The following code is moved from os.cpp for making this
1815 // code platform specific, which it is by its very nature.
1816 //
1817 
1818 // Will be modified when max signal is changed to be dynamic
// Return the platform's highest supported signal number; used to size the
// signal-dispatch tables.
// Will be modified when max signal is changed to be dynamic
int os::sigexitnum_pd() {
  return NSIG;
}
1822 
1823 // a counter for each possible signal value
1824 static volatile jint pending_signals[NSIG+1] = { 0 };
1825 
// POSIX hand-shaking semaphore used to pass signals to the dispatcher thread.
1827 static sem_t sig_sem;
1828 
1829 void os::signal_init_pd() {
1830   // Initialize signal structures
1831   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1832 
1833   // Initialize signal semaphore
1834   int rc = ::sem_init(&sig_sem, 0, 0);
1835   guarantee(rc != -1, "sem_init failed");
1836 }
1837 
// Record one occurrence of 'sig' and wake the signal-dispatcher thread.
// Async-signal context: only the atomic increment and sem_post are used.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  ::sem_post(&sig_sem);
}
1842 
// Scan the pending-signals table and return the number of a signal that has
// occurred, atomically decrementing its counter. If 'wait' is false and no
// signal is pending, returns -1. If 'wait' is true, blocks on sig_sem until
// a signal arrives, cooperating with the external-suspend protocol so a
// suspended thread does not keep running.
static int check_pending_signals(bool wait) {
  // Reset the Ctrl-C throttle (see UserHandler) now that we are draining.
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // Claim one occurrence of signal i only if the CAS proves nobody
      // raced us for the same decrement.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      ::sem_wait(&sig_sem);

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Re-post so the wakeup is not lost, then self-suspend.
        ::sem_post(&sig_sem);

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
1881 
// Non-blocking poll for a pending user signal; -1 if none is pending.
int os::signal_lookup() {
  return check_pending_signals(false);
}
1885 
// Block until a user signal is pending and return its number.
int os::signal_wait() {
  return check_pending_signals(true);
}
1889 
1890 ////////////////////////////////////////////////////////////////////////////////
1891 // Virtual Memory
1892 
1893 // We need to keep small simple bookkeeping for os::reserve_memory and friends.
1894 
1895 #define VMEM_MAPPED  1
1896 #define VMEM_SHMATED 2
1897 
// Bookkeeping record for one virtual memory reservation made by
// os::reserve_memory and friends. Needed because releasing/uncommitting
// must use the matching API (munmap/msync vs shmdt/disclaim64) for the
// mechanism the range was reserved with.
struct vmembk_t {
  int type;         // 1 - mmap, 2 - shmat
  char* addr;       // start of reservation
  size_t size;      // Real size, may be larger than usersize.
  size_t pagesize;  // page size of area
  vmembk_t* next;   // singly-linked list, head is vmem.first

  // True if p lies inside [addr, addr + size).
  bool contains_addr(char* p) const {
    return p >= addr && p < (addr + size);
  }

  // True if the whole range [p, p + s) lies inside this block.
  bool contains_range(char* p, size_t s) const {
    return contains_addr(p) && contains_addr(p + s - 1);
  }

  void print_on(outputStream* os) const {
    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
      " bytes, %d %s pages), %s",
      addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
      (type == VMEM_SHMATED ? "shmat" : "mmap")
    );
  }

  // Check that range is a sub range of memory block (or equal to memory block);
  // also check that range is fully page aligned to the page size of the block.
  // Note: fails hard (guarantee0) in all build flavors, not just debug.
  void assert_is_valid_subrange(char* p, size_t s) const {
    if (!contains_range(p, s)) {
      fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
              "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
              p, p + s - 1, addr, addr + size - 1);
      guarantee0(false);
    }
    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
      fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
              " aligned to pagesize (%s)\n", p, p + s);
      guarantee0(false);
    }
  }
};
1937 
// Global list of all live reservations, protected by its own critical section.
static struct {
  vmembk_t* first;       // head of the singly-linked bookkeeping list
  MiscUtils::CritSect cs; // guards all list traversal and mutation
} vmem;
1942 
1943 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1944   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1945   assert0(p);
1946   if (p) {
1947     MiscUtils::AutoCritSect lck(&vmem.cs);
1948     p->addr = addr; p->size = size;
1949     p->pagesize = pagesize;
1950     p->type = type;
1951     p->next = vmem.first;
1952     vmem.first = p;
1953   }
1954 }
1955 
1956 static vmembk_t* vmembk_find(char* addr) {
1957   MiscUtils::AutoCritSect lck(&vmem.cs);
1958   for (vmembk_t* p = vmem.first; p; p = p->next) {
1959     if (p->addr <= addr && (p->addr + p->size) > addr) {
1960       return p;
1961     }
1962   }
1963   return NULL;
1964 }
1965 
1966 static void vmembk_remove(vmembk_t* p0) {
1967   MiscUtils::AutoCritSect lck(&vmem.cs);
1968   assert0(p0);
1969   assert0(vmem.first); // List should not be empty.
1970   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1971     if (*pp == p0) {
1972       *pp = p0->next;
1973       ::free(p0);
1974       return;
1975     }
1976   }
1977   assert0(false); // Not found?
1978 }
1979 
1980 static void vmembk_print_on(outputStream* os) {
1981   MiscUtils::AutoCritSect lck(&vmem.cs);
1982   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1983     vmi->print_on(os);
1984     os->cr();
1985   }
1986 }
1987 
// Reserve and attach a section of System V memory.
// If <requested_addr> is not NULL, function will attempt to attach the memory at the given
// address. Failing that, it will attach the memory anywhere.
// If <requested_addr> is NULL, function will attach the memory anywhere.
//
// <alignment_hint> is being ignored by this function. It is very probable however that the
// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
// Should this be not enough, we can put more work into it.
//
// Returns the attach address on success, NULL on failure. On success the
// range is entered into the vmem bookkeeping list (VMEM_SHMATED).
static char* reserve_shmated_memory (
  size_t bytes,
  char* requested_addr,
  size_t alignment_hint) {

  trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
    PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
    bytes, requested_addr, alignment_hint);

  // Either give me wish address or wish alignment but not both.
  assert0(!(requested_addr != NULL && alignment_hint != 0));

  // We must prevent anyone from attaching too close to the
  // BRK because that may cause malloc OOM.
  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
      "Will attach anywhere.", requested_addr);
    // Act like the OS refused to attach there.
    requested_addr = NULL;
  }

  // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
  // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
  if (os::Aix::on_pase_V5R4_or_older()) {
    ShouldNotReachHere();
  }

  // Align size of shm up to 64K to avoid errors if we later try to change the page size.
  const size_t size = align_size_up(bytes, SIZE_64K);

  // Reserve the shared segment.
  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
  if (shmid == -1) {
    trc("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
    return NULL;
  }

  // Important note:
  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
  // We must right after attaching it remove it from the system. System V shm segments are global and
  // survive the process.
  // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).

  // Request 64K pages for the segment before attaching.
  struct shmid_ds shmbuf;
  memset(&shmbuf, 0, sizeof(shmbuf));
  shmbuf.shm_pagesize = SIZE_64K;
  if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
    trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
               size / SIZE_64K, errno);
    // I want to know if this ever happens.
    assert(false, "failed to set page size for shmat");
  }

  // Now attach the shared segment.
  // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
  // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
  // were not a segment boundary.
  char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
  const int errno_shmat = errno;

  // (A) Right after shmat and before handing shmat errors delete the shm segment.
  // (Marking for removal is safe: the segment stays usable until detached.)
  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
    trc("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
    assert(false, "failed to remove shared memory segment!");
  }

  // Handle shmat error. If we failed to attach, just return.
  if (addr == (char*)-1) {
    trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
    return NULL;
  }

  // Just for info: query the real page size. In case setting the page size did not
  // work (see above), the system may have given us something other then 4K (LDR_CNTRL).
  const size_t real_pagesize = os::Aix::query_pagesize(addr);
  if (real_pagesize != shmbuf.shm_pagesize) {
    trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
  }

  if (addr) {
    trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
      addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
  } else {
    if (requested_addr != NULL) {
      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
    } else {
      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
    }
  }

  // book-keeping
  vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
  assert0(is_aligned_to(addr, os::vm_page_size()));

  return addr;
}
2092 
2093 static bool release_shmated_memory(char* addr, size_t size) {
2094 
2095   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2096     addr, addr + size - 1);
2097 
2098   bool rc = false;
2099 
2100   // TODO: is there a way to verify shm size without doing bookkeeping?
2101   if (::shmdt(addr) != 0) {
2102     trcVerbose("error (%d).", errno);
2103   } else {
2104     trcVerbose("ok.");
2105     rc = true;
2106   }
2107   return rc;
2108 }
2109 
2110 static bool uncommit_shmated_memory(char* addr, size_t size) {
2111   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2112     addr, addr + size - 1);
2113 
2114   const bool rc = my_disclaim64(addr, size);
2115 
2116   if (!rc) {
2117     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2118     return false;
2119   }
2120   return true;
2121 }
2122 
2123 // Reserve memory via mmap.
2124 // If <requested_addr> is given, an attempt is made to attach at the given address.
2125 // Failing that, memory is allocated at any address.
2126 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2127 // allocate at an address aligned with the given alignment. Failing that, memory
2128 // is aligned anywhere.
2129 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2130   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2131     "alignment_hint " UINTX_FORMAT "...",
2132     bytes, requested_addr, alignment_hint);
2133 
2134   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2135   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
2136     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2137     return NULL;
2138   }
2139 
2140   // We must prevent anyone from attaching too close to the
2141   // BRK because that may cause malloc OOM.
2142   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2143     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2144       "Will attach anywhere.", requested_addr);
2145     // Act like the OS refused to attach there.
2146     requested_addr = NULL;
2147   }
2148 
2149   // Specify one or the other but not both.
2150   assert0(!(requested_addr != NULL && alignment_hint > 0));
2151 
2152   // In 64K mode, we claim the global page size (os::vm_page_size())
2153   // is 64K. This is one of the few points where that illusion may
2154   // break, because mmap() will always return memory aligned to 4K. So
2155   // we must ensure we only ever return memory aligned to 64k.
2156   if (alignment_hint) {
2157     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2158   } else {
2159     alignment_hint = os::vm_page_size();
2160   }
2161 
2162   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2163   const size_t size = align_size_up(bytes, os::vm_page_size());
2164 
2165   // alignment: Allocate memory large enough to include an aligned range of the right size and
2166   // cut off the leading and trailing waste pages.
2167   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2168   const size_t extra_size = size + alignment_hint;
2169 
2170   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2171   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2172   int flags = MAP_ANONYMOUS | MAP_SHARED;
2173 
2174   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2175   // it means if wishaddress is given but MAP_FIXED is not set.
2176   //
2177   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2178   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2179   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2180   // get clobbered.
2181   if (requested_addr != NULL) {
2182     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2183       flags |= MAP_FIXED;
2184     }
2185   }
2186 
2187   char* addr = (char*)::mmap(requested_addr, extra_size,
2188       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2189 
2190   if (addr == MAP_FAILED) {
2191     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2192     return NULL;
2193   }
2194 
2195   // Handle alignment.
2196   char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2197   const size_t waste_pre = addr_aligned - addr;
2198   char* const addr_aligned_end = addr_aligned + size;
2199   const size_t waste_post = extra_size - waste_pre - size;
2200   if (waste_pre > 0) {
2201     ::munmap(addr, waste_pre);
2202   }
2203   if (waste_post > 0) {
2204     ::munmap(addr_aligned_end, waste_post);
2205   }
2206   addr = addr_aligned;
2207 
2208   if (addr) {
2209     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2210       addr, addr + bytes, bytes);
2211   } else {
2212     if (requested_addr != NULL) {
2213       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2214     } else {
2215       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2216     }
2217   }
2218 
2219   // bookkeeping
2220   vmembk_add(addr, size, SIZE_4K, VMEM_MAPPED);
2221 
2222   // Test alignment, see above.
2223   assert0(is_aligned_to(addr, os::vm_page_size()));
2224 
2225   return addr;
2226 }
2227 
2228 static bool release_mmaped_memory(char* addr, size_t size) {
2229   assert0(is_aligned_to(addr, os::vm_page_size()));
2230   assert0(is_aligned_to(size, os::vm_page_size()));
2231 
2232   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2233     addr, addr + size - 1);
2234   bool rc = false;
2235 
2236   if (::munmap(addr, size) != 0) {
2237     trcVerbose("failed (%d)\n", errno);
2238     rc = false;
2239   } else {
2240     trcVerbose("ok.");
2241     rc = true;
2242   }
2243 
2244   return rc;
2245 }
2246 
2247 static bool uncommit_mmaped_memory(char* addr, size_t size) {
2248 
2249   assert0(is_aligned_to(addr, os::vm_page_size()));
2250   assert0(is_aligned_to(size, os::vm_page_size()));
2251 
2252   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2253     addr, addr + size - 1);
2254   bool rc = false;
2255 
2256   // Uncommit mmap memory with msync MS_INVALIDATE.
2257   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2258     trcVerbose("failed (%d)\n", errno);
2259     rc = false;
2260   } else {
2261     trcVerbose("ok.");
2262     rc = true;
2263   }
2264 
2265   return rc;
2266 }
2267 
2268 // End: shared memory bookkeeping
2269 ////////////////////////////////////////////////////////////////////////////////////////////////////
2270 
// Return the global VM page size (4K or 64K on AIX, chosen at startup).
int os::vm_page_size() {
  // Seems redundant as all get out.
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}
2276 
2277 // Aix allocates memory by pages.
// Aix allocates memory by pages, so allocation granularity equals page size.
int os::vm_allocation_granularity() {
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}
2282 
#ifdef PRODUCT
// Emit a warning with errno details when commit_memory fails. Only compiled
// in product builds; used via PRODUCT_ONLY in pd_commit_memory_or_exit to
// add context before vm_exit_out_of_memory.
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
          strerror(err), err);
}
#endif
2291 
2292 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2293                                   const char* mesg) {
2294   assert(mesg != NULL, "mesg must be specified");
2295   if (!pd_commit_memory(addr, size, exec)) {
2296     // Add extra info in product mode for vm_exit_out_of_memory():
2297     PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2298     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
2299   }
2300 }
2301 
// "Commit" a sub-range of a previously reserved block. On AIX reserved
// memory is already accessible, so this only validates the range against
// the bookkeeping and always succeeds.
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {

  assert0(is_aligned_to(addr, os::vm_page_size()));
  assert0(is_aligned_to(size, os::vm_page_size()));

  vmembk_t* const vmi = vmembk_find(addr);
  assert0(vmi);
  vmi->assert_is_valid_subrange(addr, size);

  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);

  return true;
}
2315 
// Alignment-hint variant; the hint is irrelevant on AIX, so delegate.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
  return pd_commit_memory(addr, size, exec);
}
2319 
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // Alignment_hint is ignored on this OS.
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
2326 
2327 bool os::pd_uncommit_memory(char* addr, size_t size) {
2328   assert0(is_aligned_to(addr, os::vm_page_size()));
2329   assert0(is_aligned_to(size, os::vm_page_size()));
2330 
2331   // Dynamically do different things for mmap/shmat.
2332   const vmembk_t* const vmi = vmembk_find(addr);
2333   assert0(vmi);
2334   vmi->assert_is_valid_subrange(addr, size);
2335 
2336   if (vmi->type == VMEM_SHMATED) {
2337     return uncommit_shmated_memory(addr, size);
2338   } else {
2339     return uncommit_mmaped_memory(addr, size);
2340   }
2341 }
2342 
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
2348 
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
2354 
// No-op on AIX: page-size realignment hints are not supported.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2357 
// No-op on AIX: freeing-back hints are not supported.
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2360 
// NUMA is not supported on this platform; intentionally a no-op.
void os::numa_make_global(char *addr, size_t bytes) {
}
2363 
// NUMA is not supported on this platform; intentionally a no-op.
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}
2366 
// NUMA is not supported; the (trivial) topology never changes.
bool os::numa_topology_changed() {
  return false;
}
2370 
// NUMA is not supported; report a single locality group.
size_t os::numa_get_groups_num() {
  return 1;
}
2374 
// NUMA is not supported; all threads belong to group 0.
int os::numa_get_group_id() {
  return 0;
}
2378 
2379 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2380   if (size > 0) {
2381     ids[0] = 0;
2382     return 1;
2383   }
2384   return 0;
2385 }
2386 
// Page-location queries are not supported on AIX; always reports failure.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}
2390 
// Page scanning is not supported on AIX; report the whole range as scanned
// by returning 'end'.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
2394 
2395 // Reserves and attaches a shared memory segment.
2396 // Will assert if a wish address is given and could not be obtained.
2397 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2398 
2399   // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2400   // thereby clobbering old mappings at that place. That is probably
2401   // not intended, never used and almost certainly an error were it
2402   // ever be used this way (to try attaching at a specified address
2403   // without clobbering old mappings an alternate API exists,
2404   // os::attempt_reserve_memory_at()).
2405   // Instead of mimicking the dangerous coding of the other platforms, here I
2406   // just ignore the request address (release) or assert(debug).
2407   assert0(requested_addr == NULL);
2408 
2409   // Always round to os::vm_page_size(), which may be larger than 4K.
2410   bytes = align_size_up(bytes, os::vm_page_size());
2411   const size_t alignment_hint0 =
2412     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2413 
2414   // In 4K mode always use mmap.
2415   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2416   if (os::vm_page_size() == SIZE_4K) {
2417     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2418   } else {
2419     if (bytes >= Use64KPagesThreshold) {
2420       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2421     } else {
2422       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2423     }
2424   }
2425 }
2426 
// Release a previously reserved range (whole or partial), dispatching on
// how it was reserved. The bookkeeping entry is removed only when the
// underlying reservation is actually gone.
bool os::pd_release_memory(char* addr, size_t size) {

  // Dynamically do different things for mmap/shmat.
  vmembk_t* const vmi = vmembk_find(addr);
  assert0(vmi);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  size = align_size_up(size, os::vm_page_size());
  addr = (char *)align_ptr_up(addr, os::vm_page_size());

  bool rc = false;
  bool remove_bookkeeping = false;
  if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use memory anymore (bust still page
    //   table space).
    vmi->assert_is_valid_subrange(addr, size);
    if (addr == vmi->addr && size == vmi->size) {
      rc = release_shmated_memory(addr, size);
      remove_bookkeeping = true;
    } else {
      rc = uncommit_shmated_memory(addr, size);
    }
  } else {
    // User may unmap partial regions but region has to be fully contained.
    // NOTE(review): the range check here is compiled out in product builds,
    // unlike the shmat branch above where it always runs — confirm whether
    // this asymmetry is intentional.
#ifdef ASSERT
    vmi->assert_is_valid_subrange(addr, size);
#endif
    rc = release_mmaped_memory(addr, size);
    remove_bookkeeping = true;
  }

  // update bookkeeping
  if (rc && remove_bookkeeping) {
    vmembk_remove(vmi);
  }

  return rc;
}
2468 
2469 static bool checked_mprotect(char* addr, size_t size, int prot) {
2470 
2471   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2472   // not tell me if protection failed when trying to protect an un-protectable range.
2473   //
2474   // This means if the memory was allocated using shmget/shmat, protection wont work
2475   // but mprotect will still return 0:
2476   //
2477   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2478 
2479   bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2480 
2481   if (!rc) {
2482     const char* const s_errno = strerror(errno);
2483     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2484     return false;
2485   }
2486 
2487   // mprotect success check
2488   //
2489   // Mprotect said it changed the protection but can I believe it?
2490   //
2491   // To be sure I need to check the protection afterwards. Try to
2492   // read from protected memory and check whether that causes a segfault.
2493   //
2494   if (!os::Aix::xpg_sus_mode()) {
2495 
2496     if (StubRoutines::SafeFetch32_stub()) {
2497 
2498       const bool read_protected =
2499         (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2500          SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2501 
2502       if (prot & PROT_READ) {
2503         rc = !read_protected;
2504       } else {
2505         rc = read_protected;
2506       }
2507     }
2508   }
2509   if (!rc) {
2510     assert(false, "mprotect failed.");
2511   }
2512   return rc;
2513 }
2514 
2515 // Set protections specified
// Set protections specified
// Map the VM's ProtType to POSIX PROT_* bits and apply them via
// checked_mprotect. Returns true iff the protection is in place.
bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return checked_mprotect(addr, size, p);
}
2529 
// Make the range inaccessible (guard pages). Returns true on success.
bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}
2533 
// Restore full access (rwx) to a previously guarded range.
bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}
2537 
2538 // Large page support
2539 
2540 static size_t _large_page_size = 0;
2541 
2542 // Enable large page support if OS allows that.
// Enable large page support if OS allows that.
void os::large_page_init() {
  return; // Nothing to do. See query_multipage_support and friends.
}
2546 
// Explicit huge-page reservation is not supported on AIX (page sizes are
// handled transparently by the reserve paths above); must not be called.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  // "exec" is passed in but not used. Creating the shared image for
  // the code cache doesn't have an SHM_X executable permission to check.
  Unimplemented();
  return 0;
}
2553 
2554 bool os::release_memory_special(char* base, size_t bytes) {
2555   // Detaching the SHM segment will also delete it, see reserve_memory_special().
2556   Unimplemented();
2557   return false;
2558 }
2559 
size_t os::large_page_size() {
  // NOTE(review): _large_page_size is never set in this file; presumably
  // it stays 0 because huge pages are unsupported here - confirm.
  return _large_page_size;
}
2563 
bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2568 
bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2573 
2574 // Reserve memory at an arbitrary address, only if that area is
2575 // available (and not reserved for something else).
2576 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2577   char* addr = NULL;
2578 
2579   // Always round to os::vm_page_size(), which may be larger than 4K.
2580   bytes = align_size_up(bytes, os::vm_page_size());
2581 
2582   // In 4K mode always use mmap.
2583   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2584   if (os::vm_page_size() == SIZE_4K) {
2585     return reserve_mmaped_memory(bytes, requested_addr, 0);
2586   } else {
2587     if (bytes >= Use64KPagesThreshold) {
2588       return reserve_shmated_memory(bytes, requested_addr, 0);
2589     } else {
2590       return reserve_mmaped_memory(bytes, requested_addr, 0);
2591     }
2592   }
2593 
2594   return addr;
2595 }
2596 
2597 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2598   return ::read(fd, buf, nBytes);
2599 }
2600 
2601 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2602   return ::pread(fd, buf, nBytes, offset);
2603 }
2604 
2605 void os::naked_short_sleep(jlong ms) {
2606   struct timespec req;
2607 
2608   assert(ms < 1000, "Un-interruptable sleep, short time use only");
2609   req.tv_sec = 0;
2610   if (ms > 0) {
2611     req.tv_nsec = (ms % 1000) * 1000000;
2612   }
2613   else {
2614     req.tv_nsec = 1;
2615   }
2616 
2617   nanosleep(&req, NULL);
2618 
2619   return;
2620 }
2621 
2622 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2623 void os::infinite_sleep() {
2624   while (true) {    // sleep forever ...
2625     ::sleep(100);   // ... 100 seconds at a time
2626   }
2627 }
2628 
2629 // Used to convert frequent JVM_Yield() to nops
bool os::dont_yield() {
  // Controlled by the DontYieldALot flag; when true, JVM_Yield() becomes a nop.
  return DontYieldALot;
}
2633 
void os::naked_yield() {
  // Raw processor yield with no VM state transition.
  sched_yield();
}
2637 
2638 ////////////////////////////////////////////////////////////////////////////////
2639 // thread priority support
2640 
2641 // From AIX manpage to pthread_setschedparam
2642 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2643 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2644 //
2645 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2646 // range from 40 to 80, where 40 is the least favored priority and 80
2647 // is the most favored."
2648 //
2649 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2650 // scheduling there; however, this still leaves iSeries.)
2651 //
2652 // We use the same values for AIX and PASE.
// Map of Java priorities (index) to native SCHED_OTHER priorities, kept
// within the documented 40..80 range (40 = least favored, 80 = most favored).
int os::java_to_os_priority[CriticalPriority + 1] = {
  54,             // 0 Entry should never be used

  55,             // 1 MinPriority
  55,             // 2
  56,             // 3

  56,             // 4
  57,             // 5 NormPriority
  57,             // 6

  58,             // 7
  58,             // 8
  59,             // 9 NearMaxPriority

  60,             // 10 MaxPriority

  60              // 11 CriticalPriority
};
2672 
2673 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2674   if (!UseThreadPriorities) return OS_OK;
2675   pthread_t thr = thread->osthread()->pthread_id();
2676   int policy = SCHED_OTHER;
2677   struct sched_param param;
2678   param.sched_priority = newpri;
2679   int ret = pthread_setschedparam(thr, policy, &param);
2680 
2681   if (ret != 0) {
2682     trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2683         (int)thr, newpri, ret, strerror(ret));
2684   }
2685   return (ret == 0) ? OS_OK : OS_ERR;
2686 }
2687 
2688 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2689   if (!UseThreadPriorities) {
2690     *priority_ptr = java_to_os_priority[NormPriority];
2691     return OS_OK;
2692   }
2693   pthread_t thr = thread->osthread()->pthread_id();
2694   int policy = SCHED_OTHER;
2695   struct sched_param param;
2696   int ret = pthread_getschedparam(thr, &policy, &param);
2697   *priority_ptr = param.sched_priority;
2698 
2699   return (ret == 0) ? OS_OK : OS_ERR;
2700 }
2701 
2702 // Hint to the underlying OS that a task switch would not be good.
2703 // Void return because it's a hint and can fail.
void os::hint_no_preempt() {} // Nothing to do on AIX.
2705 
2706 ////////////////////////////////////////////////////////////////////////////////
2707 // suspend/resume support
2708 
2709 //  the low-level signal-based suspend/resume support is a remnant from the
2710 //  old VM-suspension that used to be for java-suspension, safepoints etc,
2711 //  within hotspot. Now there is a single use-case for this:
2712 //    - calling get_thread_pc() on the VMThread by the flat-profiler task
2713 //      that runs in the watcher thread.
2714 //  The remaining code is greatly simplified from the more general suspension
2715 //  code that used to be used.
2716 //
2717 //  The protocol is quite simple:
2718 //  - suspend:
2719 //      - sends a signal to the target thread
2720 //      - polls the suspend state of the osthread using a yield loop
2721 //      - target thread signal handler (SR_handler) sets suspend state
2722 //        and blocks in sigsuspend until continued
2723 //  - resume:
2724 //      - sets target osthread state to continue
2725 //      - sends signal to end the sigsuspend loop in the SR_handler
2726 //
2727 //  Note that the SR_lock plays no role in this suspend/resume protocol.
2728 //
2729 
2730 static void resume_clear_context(OSThread *osthread) {
2731   osthread->set_ucontext(NULL);
2732   osthread->set_siginfo(NULL);
2733 }
2734 
2735 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2736   osthread->set_ucontext(context);
2737   osthread->set_siginfo(siginfo);
2738 }
2739 
2740 //
2741 // Handler function invoked when a thread's execution is suspended or
2742 // resumed. We have to be careful that only async-safe functions are
2743 // called here (Note: most pthread functions are not async safe and
2744 // should be avoided.)
2745 //
2746 // Note: sigwait() is a more natural fit than sigsuspend() from an
2747 // interface point of view, but sigwait() prevents the signal hander
2748 // from being run. libpthread would get very confused by not having
2749 // its signal handlers run and prevents sigwait()'s use with the
2750 // mutex granting granting signal.
2751 //
2752 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
2753 //
// Signal handler for SR_signum. Drives the suspend/resume state machine for
// the receiving thread: on a suspend request it publishes the interrupted
// context and parks in sigsuspend() until resumed. Runs in signal context,
// so only async-signal-safe calls are allowed here.
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current();
  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    // Publish this thread's context so the suspender can sample it
    // (e.g. get_thread_pc()).
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        // Loop until the state machine actually transitions to RUNNING;
        // sigsuspend may return before the resume request arrives.
        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    ShouldNotReachHere();
  }

  errno = old_errno;
}
2803 
2804 static int SR_initialize() {
2805   struct sigaction act;
2806   char *s;
2807   // Get signal number to use for suspend/resume
2808   if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
2809     int sig = ::strtol(s, 0, 10);
2810     if (sig > 0 || sig < NSIG) {
2811       SR_signum = sig;
2812     }
2813   }
2814 
2815   assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
2816         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
2817 
2818   sigemptyset(&SR_sigset);
2819   sigaddset(&SR_sigset, SR_signum);
2820 
2821   // Set up signal handler for suspend/resume.
2822   act.sa_flags = SA_RESTART|SA_SIGINFO;
2823   act.sa_handler = (void (*)(int)) SR_handler;
2824 
2825   // SR_signum is blocked by default.
2826   // 4528190 - We also need to block pthread restart signal (32 on all
2827   // supported Linux platforms). Note that LinuxThreads need to block
2828   // this signal for all threads to work properly. So we don't have
2829   // to use hard-coded signal number when setting up the mask.
2830   pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
2831 
2832   if (sigaction(SR_signum, &act, 0) == -1) {
2833     return -1;
2834   }
2835 
2836   // Save signal flag
2837   os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
2838   return 0;
2839 }
2840 
// Counterpart of SR_initialize; nothing to tear down, always succeeds.
static int SR_finalize() {
  return 0;
}
2844 
2845 static int sr_notify(OSThread* osthread) {
2846   int status = pthread_kill(osthread->pthread_id(), SR_signum);
2847   assert_status(status == 0, status, "pthread_kill");
2848   return status;
2849 }
2850 
2851 // "Randomly" selected value for how long we want to spin
2852 // before bailing out on suspending a thread, also how often
2853 // we send a signal to a thread we want to resume
static const int RANDOMLY_LARGE_INTEGER = 1000000;  // outer spin rounds before giving up
static const int RANDOMLY_LARGE_INTEGER2 = 100;     // yields per polling round
2856 
2857 // returns true on success and false on error - really an error is fatal
2858 // but this seems the normal response to library errors
// Suspend the target thread: switch its state to SUSPEND_REQUEST, signal it
// (SR_handler performs the actual suspension), then poll until it reports
// SUSPENDED. Returns true if the thread ended up suspended, false if the
// request was (or had to be) cancelled.
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  // Outer loop counts timeout rounds; the inner loop yields between polls
  // of the target thread's state.
  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::naked_yield();
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        // Cancel won the race: the target never suspended.
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        // Target suspended concurrently with the cancel attempt.
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
2909 
// Resume a thread previously suspended via do_suspend: switch its state to
// WAKEUP_REQUEST, then keep signalling it (ending its sigsuspend loop in
// SR_handler) until it reports RUNNING again.
static void do_resume(OSThread* osthread) {
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  // Re-send the signal each time the polling loops exhaust, in case a
  // previous delivery was missed.
  while (!osthread->sr.is_running()) {
    if (sr_notify(osthread) == 0) {
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::naked_yield();
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
2933 
2934 ///////////////////////////////////////////////////////////////////////////////////
2935 // signal handling (except suspend/resume)
2936 
2937 // This routine may be used by user applications as a "hook" to catch signals.
2938 // The user-defined signal handler must pass unrecognized signals to this
2939 // routine, and if it returns true (non-zero), then the signal handler must
2940 // return immediately. If the flag "abort_if_unrecognized" is true, then this
2941 // routine will never retun false (zero), but instead will execute a VM panic
2942 // routine kill the process.
2943 //
2944 // If this routine returns false, it is OK to call it again. This allows
2945 // the user-defined signal handler to perform checks either before or after
2946 // the VM performs its own checks. Naturally, the user code would be making
2947 // a serious error if it tried to handle an exception (such as a null check
2948 // or breakpoint) that the VM was generating for its own correct operation.
2949 //
2950 // This routine may recognize any of the following kinds of signals:
2951 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2952 // It should be consulted by handlers for any of those signals.
2953 //
2954 // The caller of this routine must pass in the three arguments supplied
2955 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
2956 // field of the structure passed to sigaction(). This routine assumes that
2957 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2958 //
2959 // Note that the VM will print warnings if it detects conflicting signal
2960 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2961 //
2962 extern "C" JNIEXPORT int
2963 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2964 
2965 // Set thread signal mask (for some reason on AIX sigthreadmask() seems
2966 // to be the thing to call; documentation is not terribly clear about whether
2967 // pthread_sigmask also works, and if it does, whether it does the same.
// Apply 'set' to the calling thread's signal mask per 'how' (SIG_BLOCK,
// SIG_UNBLOCK or SIG_SETMASK); the previous mask is stored in 'oset' if
// non-NULL. Returns true on success.
bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  const int rc = ::pthread_sigmask(how, set, oset);
  // Return value semantics differ slightly for the error case:
  // pthread_sigmask returns the error number, sigthreadmask returns -1 and
  // sets the global errno (so pthread_sigmask is more threadsafe for error
  // handling). But success is always 0.
  // Idiom fix: 'rc == 0' directly instead of '? true : false'.
  return rc == 0;
}
2976 
2977 // Function to unblock all signals which are, according
2978 // to POSIX, typical program error signals. If they happen while being blocked,
2979 // they typically will bring down the process immediately.
2980 bool unblock_program_error_signals() {
2981   sigset_t set;
2982   ::sigemptyset(&set);
2983   ::sigaddset(&set, SIGILL);
2984   ::sigaddset(&set, SIGBUS);
2985   ::sigaddset(&set, SIGFPE);
2986   ::sigaddset(&set, SIGSEGV);
2987   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2988 }
2989 
2990 // Renamed from 'signalHandler' to avoid collision with other shared libs.
2991 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2992   assert(info != NULL && uc != NULL, "it must be old kernel");
2993 
2994   // Never leave program error signals blocked;
2995   // on all our platforms they would bring down the process immediately when
2996   // getting raised while being blocked.
2997   unblock_program_error_signals();
2998 
2999   JVM_handle_aix_signal(sig, info, uc, true);
3000 }
3001 
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining
// sigact[] holds the handlers that were installed before ours; 'sigs' is a
// bitmask of which sigact[] entries are valid (see save_preinstalled_handler).
struct sigaction os::Aix::sigact[MAXSIGNUM];
unsigned int os::Aix::sigs = 0;
// True once the libjsig interposition entry points have been resolved via
// dlsym (see install_signal_handlers); get_signal_action then queries libjsig.
bool os::Aix::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Aix::get_signal_action = NULL;
3012 
3013 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
3014   struct sigaction *actp = NULL;
3015 
3016   if (libjsig_is_loaded) {
3017     // Retrieve the old signal handler from libjsig
3018     actp = (*get_signal_action)(sig);
3019   }
3020   if (actp == NULL) {
3021     // Retrieve the preinstalled signal handler from jvm
3022     actp = get_preinstalled_handler(sig);
3023   }
3024 
3025   return actp;
3026 }
3027 
// Invoke the previously-installed handler described by 'actp' for 'sig',
// honoring its sa_mask and the SA_SIGINFO, SA_NODEFER and SA_RESETHAND
// flags. Returns true if the signal is considered taken care of (handler
// called, or SIG_IGN); false for SIG_DFL so the VM treats the signal as an
// unexpected exception instead.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automaticlly block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    // SA_RESETHAND: the handler is one-shot, reset it to default.
    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}
3072 
3073 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3074   bool chained = false;
3075   // signal-chaining
3076   if (UseSignalChaining) {
3077     struct sigaction *actp = get_chained_signal_action(sig);
3078     if (actp != NULL) {
3079       chained = call_chained_handler(actp, sig, siginfo, context);
3080     }
3081   }
3082   return chained;
3083 }
3084 
3085 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3086   if ((((unsigned int)1 << sig) & sigs) != 0) {
3087     return &sigact[sig];
3088   }
3089   return NULL;
3090 }
3091 
3092 void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3093   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3094   sigact[sig] = oldAct;
3095   sigs |= (unsigned int)1 << sig;
3096 }
3097 
// for diagnostic
// Per-signal sa_flags the VM installed; compared against the live flags in
// check_signal_handler() and print_signal_handler().
int os::Aix::sigflags[MAXSIGNUM];
3100 
int os::Aix::get_our_sigflags(int sig) {
  // Returns the sa_flags the VM recorded for 'sig' (0 if never set).
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  return sigflags[sig];
}
3105 
void os::Aix::set_our_sigflags(int sig, int flags) {
  // Records the sa_flags the VM installed for 'sig', for later comparison.
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigflags[sig] = flags;
}
3110 
// Install (set_installed == true) or reset to SIG_DFL (false) the VM's
// handler for 'sig'. Respects pre-existing user handlers: with
// AllowUserSignalHandlers they are left in place, with UseSignalChaining
// they are saved for later chaining, otherwise the VM aborts.
void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  // Renamed 'signalHandler' to avoid collision with other shared libs.
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    // Renamed 'signalHandler' to avoid collision with other shared libs.
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Paranoia: re-read the handler that was just replaced and verify it is
  // still the one we inspected above.
  void* oldhand2 = oldAct.sa_sigaction
                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
3159 
3160 // install signal handlers for signals that HotSpot needs to
3161 // handle in order to support Java-level exception handling.
void os::Aix::install_signal_handlers() {
  // Idempotent: only the first call installs anything.
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    // Probe for libjsig's interposition entry points; if present, bracket
    // our sigaction() calls with begin/end so libjsig can record them.
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers
      (*begin_signal_setting)();
    }

    // Install the VM's handler for every signal it needs for Java-level
    // exception handling.
    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);
    set_signal_handler(SIGDANGER, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers.
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // Need to initialize check_signal_done.
      ::sigemptyset(&check_signal_done);
    }
  }
}
3216 
3217 static const char* get_signal_handler_name(address handler,
3218                                            char* buf, int buflen) {
3219   int offset;
3220   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3221   if (found) {
3222     // skip directory names
3223     const char *p1, *p2;
3224     p1 = buf;
3225     size_t len = strlen(os::file_separator());
3226     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3227     // The way os::dll_address_to_library_name is implemented on Aix
3228     // right now, it always returns -1 for the offset which is not
3229     // terribly informative.
3230     // Will fix that. For now, omit the offset.
3231     jio_snprintf(buf, buflen, "%s", p1);
3232   } else {
3233     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3234   }
3235   return buf;
3236 }
3237 
// Print one line describing the currently-installed handler for 'sig':
// signal name, handler identity, sa_mask, sa_flags, and a warning if the
// flags no longer match what the VM installed.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // May be, handler was resetted by VMError?
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  // Print textual representation of sa_flags.
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Check for flags, reset system-used one!
    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
    }
  }
  st->cr();
}
3284 
// Check one signal's handler, but only until a mismatch has been recorded
// for it in check_signal_done (see check_signal_handler).
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Aix::check_signal_handler(sig)
3288 
3289 // This method is a periodic task to check for misbehaving JNI applications
3290 // under CheckJNI, we can add any periodic checks here
3291 
void os::run_periodic_checks() {

  // check_signals is cleared when libjsig or user handlers are in play.
  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);
  if (UseSIGTRAP) {
    DO_SIGNAL_CHECK(SIGTRAP);
  }
  DO_SIGNAL_CHECK(SIGDANGER);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // Also verify the suspend/resume and interrupt signals.
  DO_SIGNAL_CHECK(SR_signum);
  DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
}
3323 
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// Resolved lazily in check_signal_handler() via dlsym to the default
// 'sigaction' symbol, bypassing any interposed wrapper (e.g. libjsig).
static os_sigaction_t os_sigaction = NULL;
3327 
// Compare the currently-installed handler for 'sig' against the one the VM
// expects; warn (once per signal) if either the handler or its sa_flags was
// replaced behind the VM's back, e.g. by badly-behaved JNI code.
void os::Aix::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler);

  // Determine which handler the VM expects for this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    // Renamed 'signalHandler' to avoid collision with other shared libs.
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  case INTERRUPT_SIGNAL:
    jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      // Not a signal the VM cares about.
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}
3400 
3401 extern bool signal_name(int signo, char* buf, size_t len);
3402 
3403 const char* os::exception_name(int exception_code, char* buf, size_t size) {
3404   if (0 < exception_code && exception_code <= SIGRTMAX) {
3405     // signal
3406     if (!signal_name(exception_code, buf, size)) {
3407       jio_snprintf(buf, size, "SIG%d", exception_code);
3408     }
3409     return buf;
3410   } else {
3411     return NULL;
3412   }
3413 }
3414 
// Helper registered with atexit() (see os::init_2) so that perfMemory_exit()
// runs at process exit. Must have C linkage because atexit() expects a
// plain "C" function pointer.
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3421 
// Early platform initialization: OS recognition, page size setup, basic
// system properties and the initial stack guard page configuration.
// This is called _before_ most of the global arguments have been parsed.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (Shared memory boundary is supposed to be a 256M aligned.)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc).
  os::Aix::scan_environment();

  // Check which pages are supported by AIX.
  query_multipage_support();

  // Act like we only have one page size by eliminating corner cases which
  // we did not support very well anyway.
  // We have two input conditions:
  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
  //    setting.
  //    Data segment page size is important for us because it defines the thread stack page
  //    size, which is needed for guard page handling, stack banging etc.
  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
  //    and should be allocated with 64k pages.
  //
  // So, we do the following:
  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
  // 64k          no              --- AIX 5.2 ? ---
  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)

  // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if stack page size is 64k you cannot pretend it's 4k).

  if (g_multipage_support.datapsize == SIZE_4K) {
    // datapsize = 4K. Data segment, thread stacks are 4K paged.
    if (g_multipage_support.can_use_64K_pages) {
      // .. but we are able to use 64K pages dynamically.
      // This would be typical for java launchers which are not linked
      // with datapsize=64K (like, any other launcher but our own).
      //
      // In this case it would be smart to allocate the java heap with 64K
      // to get the performance benefit, and to fake 64k pages for the
      // data segment (when dealing with thread stacks).
      //
      // However, leave a possibility to downgrade to 4K, using
      // -XX:-Use64KPages.
      if (Use64KPages) {
        trcVerbose("64K page mode (faked for data segment)");
        Aix::_page_size = SIZE_64K;
      } else {
        trcVerbose("4K page mode (Use64KPages=off)");
        Aix::_page_size = SIZE_4K;
      }
    } else {
      // .. and not able to allocate 64k pages dynamically. Here, just
      // fall back to 4K paged mode and use mmap for everything.
      trcVerbose("4K page mode");
      Aix::_page_size = SIZE_4K;
      FLAG_SET_ERGO(bool, Use64KPages, false);
    }
  } else {
    // datapsize = 64k. Data segment, thread stacks are 64k paged.
    //   This normally means that we can allocate 64k pages dynamically.
    //   (There is one special case where this may be false: EXTSHM=on.
    //    but we decided to not support that mode).
    assert0(g_multipage_support.can_use_64K_pages);
    Aix::_page_size = SIZE_64K;
    trcVerbose("64K page mode");
    FLAG_SET_ERGO(bool, Use64KPages, true);
  }

  // Short-wire stack page size to base page size; if that works, we just remove
  // that stack page size altogether.
  Aix::_stack_page_size = Aix::_page_size;

  // For now UseLargePages is just ignored.
  FLAG_SET_ERGO(bool, UseLargePages, false);
  _page_sizes[0] = 0;
  _large_page_size = -1;

  // debug trace
  trcVerbose("os::vm_page_size %s\n", describe_pagesize(os::vm_page_size()));

  // Next, we need to initialize libo4 and libperfstat libraries.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  _initial_pid = getpid();

  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  init_random(1234567);

  ThreadCritical::initialize();

  // Main_thread points to the aboriginal thread.
  Aix::_main_thread = pthread_self();

  initial_time_count = os::elapsed_counter();
  pthread_mutex_init(&dl_mutex, NULL);

  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages. The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > (int)Aix::vm_default_page_size()) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
  }
}
3548 
// Second stage of platform initialization: polling page, memory serialize
// page, signal handling, stack size limits, file descriptor limits and
// atexit hooks. This is called _after_ the global arguments have been parsed.
// Returns JNI_OK on success, JNI_ERR on failure.
jint os::init_2(void) {

  trcVerbose("processor count: %d", os::_processor_count);
  trcVerbose("physical memory: %lu", Aix::_physical_memory);

  // Initially build up the loaded dll map.
  LoadedLibraries::reload();

  const int page_size = Aix::page_size();
  const int map_size = page_size;

  address map_address = (address) MAP_FAILED;
  const int prot  = PROT_READ;
  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;

  // Use optimized addresses for the polling page,
  // e.g. map it to a special 32-bit address.
  if (OptimizePollingPageLocation) {
    // architecture-specific list of address wishes:
    address address_wishes[] = {
      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
      // PPC64: all address wishes are non-negative 32 bit values where
      // the lower 16 bits are all zero. we can load these addresses
      // with a single ppc_lis instruction.
      (address) 0x30000000, (address) 0x31000000,
      (address) 0x32000000, (address) 0x33000000,
      (address) 0x40000000, (address) 0x41000000,
      (address) 0x42000000, (address) 0x43000000,
      (address) 0x50000000, (address) 0x51000000,
      (address) 0x52000000, (address) 0x53000000,
      (address) 0x60000000, (address) 0x61000000,
      (address) 0x62000000, (address) 0x63000000
    };
    int address_wishes_length = sizeof(address_wishes)/sizeof(address);

    // iterate over the list of address wishes:
    for (int i=0; i<address_wishes_length; i++) {
      // Try to map with current address wish.
      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
      // fail if the address is already mapped.
      // Note: the wish address minus one page is passed to mmap; success is
      // detected by comparing the returned address plus one page to the wish.
      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
                                     map_size, prot,
                                     flags | MAP_FIXED,
                                     -1, 0);
      if (Verbose) {
        fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
                address_wishes[i], map_address + (ssize_t)page_size);
      }

      if (map_address + (ssize_t)page_size == address_wishes[i]) {
        // Map succeeded and map_address is at wished address, exit loop.
        break;
      }

      if (map_address != (address) MAP_FAILED) {
        // Map succeeded, but polling_page is not at wished address, unmap and continue.
        ::munmap(map_address, map_size);
        map_address = (address) MAP_FAILED;
      }
      // Map failed, continue loop.
    }
  } // end OptimizePollingPageLocation

  // Fall back to letting the OS choose the polling page address.
  if (map_address == (address) MAP_FAILED) {
    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
  }
  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
  os::set_polling_page(map_address);

  if (!UseMembar) {
    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    // NOTE(review): mmap reports failure with MAP_FAILED ((void*)-1), not NULL,
    // so this guarantee may not catch a failed mapping — confirm intent.
    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page(mem_serialize_page);

#ifndef PRODUCT
    if (Verbose && PrintMiscellaneous) {
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
    }
#endif
  }

  // initialize suspend/resume support - must do this before signal_sets_init()
  if (SR_initialize() != 0) {
    perror("SR_initialize failed");
    return JNI_ERR;
  }

  Aix::signal_sets_init();
  Aix::install_signal_handlers();

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
                     (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());

  os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());

  // Reject a user-specified -Xss value that is below the platform minimum.
  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
      threadStackSizeInBytes < os::Aix::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, "
                  "Specify at least %dk",
                  os::Aix::min_stack_allowed / K);
    return JNI_ERR;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  // Note that this can be 0, if no default stacksize was set.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));

  Aix::libpthread_init();

  if (MaxFDLimit) {
    // Set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode))
        perror("os::init_2 getrlimit failed");
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode))
          perror("os::init_2 setrlimit failed");
      }
    }
  }

  if (PerfAllowAtExitRegistration) {
    // Only register atexit functions if PerfAllowAtExitRegistration is set.
    // Atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // Note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

  return JNI_OK;
}
3701 
3702 // Mark the polling page as unreadable
3703 void os::make_polling_page_unreadable(void) {
3704   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3705     fatal("Could not disable polling page");
3706   }
3707 };
3708 
3709 // Mark the polling page as readable
3710 void os::make_polling_page_readable(void) {
3711   // Changed according to os_linux.cpp.
3712   if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3713     fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
3714   }
3715 };
3716 
3717 int os::active_processor_count() {
3718   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3719   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3720   return online_cpus;
3721 }
3722 
3723 void os::set_native_thread_name(const char *name) {
3724   // Not yet implemented.
3725   return;
3726 }
3727 
3728 bool os::distribute_processes(uint length, uint* distribution) {
3729   // Not yet implemented.
3730   return false;
3731 }
3732 
3733 bool os::bind_to_processor(uint processor_id) {
3734   // Not yet implemented.
3735   return false;
3736 }
3737 
3738 void os::SuspendedThreadTask::internal_do_task() {
3739   if (do_suspend(_thread->osthread())) {
3740     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3741     do_task(context);
3742     do_resume(_thread->osthread());
3743   }
3744 }
3745 
// Helper task that suspends a thread and captures its current program
// counter. Used by os::get_thread_pc() below.
class PcFetcher : public os::SuspendedThreadTask {
public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  // Returns the captured PC; valid only after the task has completed.
  ExtendedPC result();
protected:
  // Callback invoked while the target thread is suspended.
  void do_task(const os::SuspendedThreadTaskContext& context);
private:
  ExtendedPC _epc; // PC captured from the suspended thread's context.
};
3755 
3756 ExtendedPC PcFetcher::result() {
3757   guarantee(is_done(), "task is not done yet.");
3758   return _epc;
3759 }
3760 
3761 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3762   Thread* thread = context.thread();
3763   OSThread* osthread = thread->osthread();
3764   if (osthread->ucontext() != NULL) {
3765     _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
3766   } else {
3767     // NULL context is unexpected, double-check this is the VMThread.
3768     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3769   }
3770 }
3771 
3772 // Suspends the target using the signal mechanism and then grabs the PC before
3773 // resuming the target. Used by the flat-profiler only
3774 ExtendedPC os::get_thread_pc(Thread* thread) {
3775   // Make sure that it is called by the watcher for the VMThread.
3776   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3777   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3778 
3779   PcFetcher fetcher(thread);
3780   fetcher.run();
3781   return fetcher.result();
3782 }
3783 
// Not needed on Aix.
// int os::Aix::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) {
// }
3787 
3788 ////////////////////////////////////////////////////////////////////////////////
3789 // debug support
3790 
3791 static address same_page(address x, address y) {
3792   intptr_t page_bits = -os::vm_page_size();
3793   if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
3794     return x;
3795   else if (x > y)
3796     return (address)(intptr_t(y) | ~page_bits) + 1;
3797   else
3798     return (address)(intptr_t(y) & page_bits);
3799 }
3800 
3801 bool os::find(address addr, outputStream* st) {
3802 
3803   st->print(PTR_FORMAT ": ", addr);
3804 
3805   const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
3806   if (lib) {
3807     lib->print(st);
3808     return true;
3809   } else {
3810     lib = LoadedLibraries::find_for_data_address(addr);
3811     if (lib) {
3812       lib->print(st);
3813       return true;
3814     } else {
3815       st->print_cr("(outside any module)");
3816     }
3817   }
3818 
3819   return false;
3820 }
3821 
3822 ////////////////////////////////////////////////////////////////////////////////
3823 // misc
3824 
3825 // This does not do anything on Aix. This is basically a hook for being
3826 // able to use structured exception handling (thread-local exception filters)
3827 // on, e.g., Win32.
3828 void
3829 os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
3830                          JavaCallArguments* args, Thread* thread) {
3831   f(value, method, args, thread);
3832 }
3833 
// Nothing to print on AIX.
void os::print_statistics() {
}
3836 
3837 int os::message_box(const char* title, const char* message) {
3838   int i;
3839   fdStream err(defaultStream::error_fd());
3840   for (i = 0; i < 78; i++) err.print_raw("=");
3841   err.cr();
3842   err.print_raw_cr(title);
3843   for (i = 0; i < 78; i++) err.print_raw("-");
3844   err.cr();
3845   err.print_raw_cr(message);
3846   for (i = 0; i < 78; i++) err.print_raw("=");
3847   err.cr();
3848 
3849   char buf[16];
3850   // Prevent process from exiting upon "read error" without consuming all CPU
3851   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3852 
3853   return buf[0] == 'y' || buf[0] == 'Y';
3854 }
3855 
3856 int os::stat(const char *path, struct stat *sbuf) {
3857   char pathbuf[MAX_PATH];
3858   if (strlen(path) > MAX_PATH - 1) {
3859     errno = ENAMETOOLONG;
3860     return -1;
3861   }
3862   os::native_path(strcpy(pathbuf, path));
3863   return ::stat(pathbuf, sbuf);
3864 }
3865 
3866 bool os::check_heap(bool force) {
3867   return true;
3868 }
3869 
3870 // Is a (classpath) directory empty?
3871 bool os::dir_is_empty(const char* path) {
3872   DIR *dir = NULL;
3873   struct dirent *ptr;
3874 
3875   dir = opendir(path);
3876   if (dir == NULL) return true;
3877 
3878   /* Scan the directory */
3879   bool result = true;
3880   char buf[sizeof(struct dirent) + MAX_PATH];
3881   while (result && (ptr = ::readdir(dir)) != NULL) {
3882     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3883       result = false;
3884     }
3885   }
3886   closedir(dir);
3887   return result;
3888 }
3889 
3890 // This code originates from JDK's sysOpen and open64_w
3891 // from src/solaris/hpi/src/system_md.c
3892 
3893 int os::open(const char *path, int oflag, int mode) {
3894 
3895   if (strlen(path) > MAX_PATH - 1) {
3896     errno = ENAMETOOLONG;
3897     return -1;
3898   }
3899   int fd;
3900 
3901   fd = ::open64(path, oflag, mode);
3902   if (fd == -1) return -1;
3903 
3904   // If the open succeeded, the file might still be a directory.
3905   {
3906     struct stat64 buf64;
3907     int ret = ::fstat64(fd, &buf64);
3908     int st_mode = buf64.st_mode;
3909 
3910     if (ret != -1) {
3911       if ((st_mode & S_IFMT) == S_IFDIR) {
3912         errno = EISDIR;
3913         ::close(fd);
3914         return -1;
3915       }
3916     } else {
3917       ::close(fd);
3918       return -1;
3919     }
3920   }
3921 
3922   // All file descriptors that are opened in the JVM and not
3923   // specifically destined for a subprocess should have the
3924   // close-on-exec flag set. If we don't set it, then careless 3rd
3925   // party native code might fork and exec without closing all
3926   // appropriate file descriptors (e.g. as we do in closeDescriptors in
3927   // UNIXProcess.c), and this in turn might:
3928   //
3929   // - cause end-of-file to fail to be detected on some file
3930   //   descriptors, resulting in mysterious hangs, or
3931   //
3932   // - might cause an fopen in the subprocess to fail on a system
3933   //   suffering from bug 1085341.
3934   //
3935   // (Yes, the default setting of the close-on-exec flag is a Unix
3936   // design flaw.)
3937   //
3938   // See:
3939   // 1085341: 32-bit stdio routines should support file descriptors >255
3940   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
3941   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
3942 #ifdef FD_CLOEXEC
3943   {
3944     int flags = ::fcntl(fd, F_GETFD);
3945     if (flags != -1)
3946       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
3947   }
3948 #endif
3949 
3950   return fd;
3951 }
3952 
3953 // create binary file, rewriting existing file if required
3954 int os::create_binary_file(const char* path, bool rewrite_existing) {
3955   int oflags = O_WRONLY | O_CREAT;
3956   if (!rewrite_existing) {
3957     oflags |= O_EXCL;
3958   }
3959   return ::open64(path, oflags, S_IREAD | S_IWRITE);
3960 }
3961 
3962 // return current position of file pointer
3963 jlong os::current_file_offset(int fd) {
3964   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3965 }
3966 
3967 // move file pointer to the specified offset
3968 jlong os::seek_to_file_offset(int fd, jlong offset) {
3969   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3970 }
3971 
3972 // This code originates from JDK's sysAvailable
3973 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3974 
3975 int os::available(int fd, jlong *bytes) {
3976   jlong cur, end;
3977   int mode;
3978   struct stat64 buf64;
3979 
3980   if (::fstat64(fd, &buf64) >= 0) {
3981     mode = buf64.st_mode;
3982     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
3983       // XXX: is the following call interruptible? If so, this might
3984       // need to go through the INTERRUPT_IO() wrapper as for other
3985       // blocking, interruptible calls in this file.
3986       int n;
3987       if (::ioctl(fd, FIONREAD, &n) >= 0) {
3988         *bytes = n;
3989         return 1;
3990       }
3991     }
3992   }
3993   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
3994     return 0;
3995   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
3996     return 0;
3997   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
3998     return 0;
3999   }
4000   *bytes = end - cur;
4001   return 1;
4002 }
4003 
4004 // Map a block of memory.
4005 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4006                         char *addr, size_t bytes, bool read_only,
4007                         bool allow_exec) {
4008   int prot;
4009   int flags = MAP_PRIVATE;
4010 
4011   if (read_only) {
4012     prot = PROT_READ;
4013     flags = MAP_SHARED;
4014   } else {
4015     prot = PROT_READ | PROT_WRITE;
4016     flags = MAP_PRIVATE;
4017   }
4018 
4019   if (allow_exec) {
4020     prot |= PROT_EXEC;
4021   }
4022 
4023   if (addr != NULL) {
4024     flags |= MAP_FIXED;
4025   }
4026 
4027   // Allow anonymous mappings if 'fd' is -1.
4028   if (fd == -1) {
4029     flags |= MAP_ANONYMOUS;
4030   }
4031 
4032   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
4033                                      fd, file_offset);
4034   if (mapped_address == MAP_FAILED) {
4035     return NULL;
4036   }
4037   return mapped_address;
4038 }
4039 
4040 // Remap a block of memory.
4041 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4042                           char *addr, size_t bytes, bool read_only,
4043                           bool allow_exec) {
4044   // same as map_memory() on this OS
4045   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4046                         allow_exec);
4047 }
4048 
4049 // Unmap a block of memory.
4050 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4051   return munmap(addr, bytes) == 0;
4052 }
4053 
4054 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4055 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4056 // of a thread.
4057 //
4058 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4059 // the fast estimate available on the platform.
4060 
4061 jlong os::current_thread_cpu_time() {
4062   // return user + sys since the cost is the same
4063   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
4064   assert(n >= 0, "negative CPU time");
4065   return n;
4066 }
4067 
4068 jlong os::thread_cpu_time(Thread* thread) {
4069   // consistent with what current_thread_cpu_time() returns
4070   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4071   assert(n >= 0, "negative CPU time");
4072   return n;
4073 }
4074 
4075 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4076   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4077   assert(n >= 0, "negative CPU time");
4078   return n;
4079 }
4080 
4081 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4082   bool error = false;
4083 
4084   jlong sys_time = 0;
4085   jlong user_time = 0;
4086 
4087   // Reimplemented using getthrds64().
4088   //
4089   // Works like this:
4090   // For the thread in question, get the kernel thread id. Then get the
4091   // kernel thread statistics using that id.
4092   //
4093   // This only works of course when no pthread scheduling is used,
4094   // i.e. there is a 1:1 relationship to kernel threads.
4095   // On AIX, see AIXTHREAD_SCOPE variable.
4096 
4097   pthread_t pthtid = thread->osthread()->pthread_id();
4098 
4099   // retrieve kernel thread id for the pthread:
4100   tid64_t tid = 0;
4101   struct __pthrdsinfo pinfo;
4102   // I just love those otherworldly IBM APIs which force me to hand down
4103   // dummy buffers for stuff I dont care for...
4104   char dummy[1];
4105   int dummy_size = sizeof(dummy);
4106   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4107                           dummy, &dummy_size) == 0) {
4108     tid = pinfo.__pi_tid;
4109   } else {
4110     tty->print_cr("pthread_getthrds_np failed.");
4111     error = true;
4112   }
4113 
4114   // retrieve kernel timing info for that kernel thread
4115   if (!error) {
4116     struct thrdentry64 thrdentry;
4117     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4118       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4119       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4120     } else {
4121       tty->print_cr("pthread_getthrds_np failed.");
4122       error = true;
4123     }
4124   }
4125 
4126   if (p_sys_time) {
4127     *p_sys_time = sys_time;
4128   }
4129 
4130   if (p_user_time) {
4131     *p_user_time = user_time;
4132   }
4133 
4134   if (error) {
4135     return false;
4136   }
4137 
4138   return true;
4139 }
4140 
4141 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4142   jlong sys_time;
4143   jlong user_time;
4144 
4145   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4146     return -1;
4147   }
4148 
4149   return user_sys_cpu_time ? sys_time + user_time : user_time;
4150 }
4151 
4152 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4153   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4154   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4155   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4156   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4157 }
4158 
4159 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4160   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4161   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4162   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4163   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4164 }
4165 
4166 bool os::is_thread_cpu_time_supported() {
4167   return true;
4168 }
4169 
4170 // System loadavg support. Returns -1 if load average cannot be obtained.
4171 // For now just return the system wide load average (no processor sets).
4172 int os::loadavg(double values[], int nelem) {
4173 
4174   // Implemented using libperfstat on AIX.
4175 
4176   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4177   guarantee(values, "argument error");
4178 
4179   if (os::Aix::on_pase()) {
4180     Unimplemented();
4181     return -1;
4182   } else {
4183     // AIX: use libperfstat
4184     //
4185     // See also:
4186     // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4187     // /usr/include/libperfstat.h:
4188 
4189     // Use the already AIX version independent get_cpuinfo.
4190     os::Aix::cpuinfo_t ci;
4191     if (os::Aix::get_cpuinfo(&ci)) {
4192       for (int i = 0; i < nelem; i++) {
4193         values[i] = ci.loadavg[i];
4194       }
4195     } else {
4196       return -1;
4197     }
4198     return nelem;
4199   }
4200 }
4201 
4202 void os::pause() {
4203   char filename[MAX_PATH];
4204   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4205     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4206   } else {
4207     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4208   }
4209 
4210   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4211   if (fd != -1) {
4212     struct stat buf;
4213     ::close(fd);
4214     while (::stat(filename, &buf) == 0) {
4215       (void)::poll(NULL, 0, 100);
4216     }
4217   } else {
4218     jio_fprintf(stderr,
4219       "Could not open pause file '%s', continuing immediately.\n", filename);
4220   }
4221 }
4222 
4223 bool os::Aix::is_primordial_thread() {
4224   if (pthread_self() == (pthread_t)1) {
4225     return true;
4226   } else {
4227     return false;
4228   }
4229 }
4230 
// OS recognitions (PASE/AIX, OS level) call this before calling any
// one of Aix::on_pase(), Aix::os_version() static
// Determines via uname() whether we run on AIX or OS400 (PASE) and encodes
// the OS level as (major << 8) | minor into _os_version.
void os::Aix::initialize_os_info() {

  assert(_on_pase == -1 && _os_version == -1, "already called.");

  struct utsname uts;
  memset(&uts, 0, sizeof(uts));
  strcpy(uts.sysname, "?");
  if (::uname(&uts) == -1) {
    trc("uname failed (%d)", errno);
    guarantee(0, "Could not determine whether we run on AIX or PASE");
  } else {
    trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
               "node \"%s\" machine \"%s\"\n",
               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
    // On AIX, uname reports the major version in 'version' and the minor
    // version in 'release'.
    const int major = atoi(uts.version);
    assert(major > 0, "invalid OS version");
    const int minor = atoi(uts.release);
    assert(minor > 0, "invalid OS release");
    _os_version = (major << 8) | minor;
    if (strcmp(uts.sysname, "OS400") == 0) {
      // PASE (OS400) is not supported by this code path.
      Unimplemented();
    } else if (strcmp(uts.sysname, "AIX") == 0) {
      // We run on AIX. We do not support versions older than AIX 5.3.
      _on_pase = 0;
      if (_os_version < 0x0503) {
        trc("AIX release older than AIX 5.3 not supported.");
        assert(false, "AIX release too old.");
      } else {
        trcVerbose("We run on AIX %d.%d\n", major, minor);
      }
    } else {
      assert(false, "unknown OS");
    }
  }

  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
} // end: os::Aix::initialize_os_info()
4270 
// Scan environment for important settings which might affect the VM.
// Trace out settings. Warn about invalid settings and/or correct them.
//
// Must run after os::Aix::initialize_os_info().
4275 void os::Aix::scan_environment() {
4276 
4277   char* p;
4278   int rc;
4279 
4280   // Warn explicity if EXTSHM=ON is used. That switch changes how
4281   // System V shared memory behaves. One effect is that page size of
4282   // shared memory cannot be change dynamically, effectivly preventing
4283   // large pages from working.
4284   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4285   // recommendation is (in OSS notes) to switch it off.
4286   p = ::getenv("EXTSHM");
4287   if (Verbose) {
4288     fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4289   }
4290   if (p && strcasecmp(p, "ON") == 0) {
4291     fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4292     _extshm = 1;
4293   } else {
4294     _extshm = 0;
4295   }
4296 
4297   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4298   // Not tested, not supported.
4299   //
4300   // Note that it might be worth the trouble to test and to require it, if only to
4301   // get useful return codes for mprotect.
4302   //
4303   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4304   // exec() ? before loading the libjvm ? ....)
4305   p = ::getenv("XPG_SUS_ENV");
4306   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4307   if (p && strcmp(p, "ON") == 0) {
4308     _xpg_sus_mode = 1;
4309     trc("Unsupported setting: XPG_SUS_ENV=ON");
4310     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4311     // clobber address ranges. If we ever want to support that, we have to do some
4312     // testing first.
4313     guarantee(false, "XPG_SUS_ENV=ON not supported");
4314   } else {
4315     _xpg_sus_mode = 0;
4316   }
4317 
4318   // Switch off AIX internal (pthread) guard pages. This has
4319   // immediate effect for any pthread_create calls which follow.
4320   p = ::getenv("AIXTHREAD_GUARDPAGES");
4321   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4322   rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4323   guarantee(rc == 0, "");
4324 
4325 } // end: os::Aix::scan_environment()
4326 
4327 // PASE: initialize the libo4 library (AS400 PASE porting library).
void os::Aix::initialize_libo4() {
  // Not implemented: running on PASE (OS/400) is not supported by this port
  // (see initialize_os_info(), which also hits Unimplemented() for OS400).
  Unimplemented();
}
4331 
4332 // AIX: initialize the libperfstat library (we load this dynamically
4333 // because it is only available on AIX.
4334 void os::Aix::initialize_libperfstat() {
4335 
4336   assert(os::Aix::on_aix(), "AIX only");
4337 
4338   if (!libperfstat::init()) {
4339     trc("libperfstat initialization failed.");
4340     assert(false, "libperfstat initialization failed");
4341   } else {
4342     if (Verbose) {
4343       fprintf(stderr, "libperfstat initialized.\n");
4344     }
4345   }
4346 } // end: os::Aix::initialize_libperfstat
4347 
4348 /////////////////////////////////////////////////////////////////////////////
4349 // thread stack
4350 
4351 // Function to query the current stack size using pthread_getthrds_np.
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
  // Retrieve the stack base and/or stack size of the calling thread. Either
  // output pointer may be NULL if the caller does not need that value.
  // Returns false if the pthread_getthrds_np query failed.
  //
  // This only works when invoked on a pthread. As we agreed not to use
  // primordial threads anyway, I assert here.
  guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");

  // Information about this api can be found (a) in the pthread.h header and
  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
  //
  // The use of this API to find out the current stack is kind of undefined.
  // But after a lot of tries and asking IBM about it, I concluded that it is safe
  // enough for cases where I let the pthread library create its stacks. For cases
  // where I create an own stack and pass this to pthread_create, it seems not to
  // work (the returned stack size in that case is 0).

  pthread_t tid = pthread_self();
  struct __pthrdsinfo pinfo;
  // Buffer for the thread's register area; we only need this to satisfy the
  // api and to not get an error return.
  char dummy[1];
  int dummy_size = sizeof(dummy);

  memset(&pinfo, 0, sizeof(pinfo));

  const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
                                     sizeof(pinfo), dummy, &dummy_size);

  if (rc != 0) {
    assert0(false);
    trcVerbose("pthread_getthrds_np failed (%d)", rc);
    return false;
  }
  guarantee0(pinfo.__pi_stackend);

  // The following can happen when invoking pthread_getthrds_np on a pthread running
  // on a user provided stack (when handing down a stack to pthread_create, see
  // pthread_attr_setstackaddr).
  // Not sure what to do here - I feel inclined to forbid this use case completely.
  guarantee0(pinfo.__pi_stacksize);

  // Note: the pthread stack on AIX seems to look like this:
  //
  // ---------------------   real base ? at page border ?
  //
  //     pthread internal data, like ~2K, see also
  //     http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/thread_supp_tun_params.htm
  //
  // ---------------------   __pi_stackend - not page aligned, (xxxxF890)
  //
  //     stack
  //      ....
  //
  //     stack
  //
  // ---------------------   __pi_stackend  - __pi_stacksize
  //
  //     padding due to AIX guard pages (?) see AIXTHREAD_GUARDPAGES
  // ---------------------   __pi_stackaddr  (page aligned if AIXTHREAD_GUARDPAGES > 0)
  //
  //   AIX guard pages (?)
  //

  // So, the safe thing to do is to use the area from __pi_stackend to __pi_stackaddr;
  // __pi_stackend however is almost never page aligned.
  //

  if (p_stack_base) {
    (*p_stack_base) = (address) (pinfo.__pi_stackend);
  }

  if (p_stack_size) {
    // Deliberately excludes the pthread-internal area above __pi_stackend
    // (see the diagram above).
    (*p_stack_size) = pinfo.__pi_stackend - pinfo.__pi_stackaddr;
  }

  return true;
}
4425 
4426 // Get the current stack base from the OS (actually, the pthread library).
4427 address os::current_stack_base() {
4428   address p;
4429   query_stack_dimensions(&p, 0);
4430   return p;
4431 }
4432 
4433 // Get the current stack size from the OS (actually, the pthread library).
4434 size_t os::current_stack_size() {
4435   size_t s;
4436   query_stack_dimensions(0, &s);
4437   return s;
4438 }
4439 
4440 // Refer to the comments in os_solaris.cpp park-unpark.
4441 //
4442 // Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4443 // hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4444 // For specifics regarding the bug see GLIBC BUGID 261237 :
4445 //    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4446 // Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4447 // will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4448 // is used. (The simple C test-case provided in the GLIBC bug report manifests the
// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4450 // and monitorenter when we're using 1-0 locking. All those operations may result in
4451 // calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4452 // of libpthread avoids the problem, but isn't practical.
4453 //
4454 // Possible remedies:
4455 //
4456 // 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
4457 //      This is palliative and probabilistic, however. If the thread is preempted
4458 //      between the call to compute_abstime() and pthread_cond_timedwait(), more
4459 //      than the minimum period may have passed, and the abstime may be stale (in the
//      past) resulting in a hang. Using this technique reduces the odds of a hang
4461 //      but the JVM is still vulnerable, particularly on heavily loaded systems.
4462 //
4463 // 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4464 //      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
4465 //      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4466 //      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
4467 //      thread.
4468 //
4469 // 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
4470 //      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
4471 //      a timeout request to the chron thread and then blocking via pthread_cond_wait().
4472 //      This also works well. In fact it avoids kernel-level scalability impediments
4473 //      on certain platforms that don't handle lots of active pthread_cond_timedwait()
4474 //      timers in a graceful fashion.
4475 //
4476 // 4.   When the abstime value is in the past it appears that control returns
4477 //      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4478 //      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
4479 //      can avoid the problem by reinitializing the condvar -- by cond_destroy()
4480 //      followed by cond_init() -- after all calls to pthread_cond_timedwait().
4481 //      It may be possible to avoid reinitialization by checking the return
4482 //      value from pthread_cond_timedwait(). In addition to reinitializing the
4483 //      condvar we must establish the invariant that cond_signal() is only called
4484 //      within critical sections protected by the adjunct mutex. This prevents
4485 //      cond_signal() from "seeing" a condvar that's in the midst of being
4486 //      reinitialized or that is corrupt. Sadly, this invariant obviates the
4487 //      desirable signal-after-unlock optimization that avoids futile context switching.
4488 //
//      I'm also concerned that some versions of NPTL might allocate an auxiliary
4490 //      structure when a condvar is used or initialized. cond_destroy() would
4491 //      release the helper structure. Our reinitialize-after-timedwait fix
4492 //      put excessive stress on malloc/free and locks protecting the c-heap.
4493 //
// We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
// It may be possible to refine (4) by checking the kernel and NPTL versions
4496 // and only enabling the work-around for vulnerable environments.
4497 
4498 // utility to compute the abstime argument to timedwait:
4499 // millis is the relative timeout time
4500 // abstime will be the absolute timeout time
4501 // TODO: replace compute_abstime() with unpackTime()
4502 
4503 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4504   if (millis < 0) millis = 0;
4505   struct timeval now;
4506   int status = gettimeofday(&now, NULL);
4507   assert(status == 0, "gettimeofday");
4508   jlong seconds = millis / 1000;
4509   millis %= 1000;
4510   if (seconds > 50000000) { // see man cond_timedwait(3T)
4511     seconds = 50000000;
4512   }
4513   abstime->tv_sec = now.tv_sec  + seconds;
4514   long       usec = now.tv_usec + millis * 1000;
4515   if (usec >= 1000000) {
4516     abstime->tv_sec += 1;
4517     usec -= 1000000;
4518   }
4519   abstime->tv_nsec = usec * 1000;
4520   return abstime;
4521 }
4522 
4523 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4524 // Conceptually TryPark() should be equivalent to park(0).
4525 
int os::PlatformEvent::TryPark() {
  // Atomically swap _Event to 0 and return its previous value (0 or 1).
  for (;;) {
    const int v = _Event;
    guarantee ((v == 0) || (v == 1), "invariant");
    // Retry on CAS failure: another thread may race on _Event.
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
  }
}
4533 
4534 void os::PlatformEvent::park() {       // AKA "down()"
4535   // Invariant: Only the thread associated with the Event/PlatformEvent
4536   // may call park().
4537   // TODO: assert that _Assoc != NULL or _Assoc == Self
4538   int v;
4539   for (;;) {
4540     v = _Event;
4541     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4542   }
4543   guarantee (v >= 0, "invariant");
4544   if (v == 0) {
4545     // Do this the hard way by blocking ...
4546     int status = pthread_mutex_lock(_mutex);
4547     assert_status(status == 0, status, "mutex_lock");
4548     guarantee (_nParked == 0, "invariant");
4549     ++ _nParked;
4550     while (_Event < 0) {
4551       status = pthread_cond_wait(_cond, _mutex);
4552       assert_status(status == 0 || status == ETIMEDOUT, status, "cond_timedwait");
4553     }
4554     -- _nParked;
4555 
4556     // In theory we could move the ST of 0 into _Event past the unlock(),
4557     // but then we'd need a MEMBAR after the ST.
4558     _Event = 0;
4559     status = pthread_mutex_unlock(_mutex);
4560     assert_status(status == 0, status, "mutex_unlock");
4561   }
4562   guarantee (_Event >= 0, "invariant");
4563 }
4564 
// Park for up to 'millis' milliseconds. Returns OS_OK if the event was set
// (or a permit was already available) and OS_TIMEOUT if the wait timed out.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant");

  int v;
  // Atomically decrement _Event; if a permit was available (v > 0) we are
  // done, otherwise we block below.
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
  }
  guarantee (v >= 0, "invariant");
  if (v != 0) return OS_OK;

  // We do this the hard way, by blocking the thread.
  // Consider enforcing a minimum timeout value.
  struct timespec abst;
  compute_abstime(&abst, millis);

  int ret = OS_TIMEOUT;
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant");
  ++_nParked;

  // Object.wait(timo) will return because of
  // (a) notification
  // (b) timeout
  // (c) thread.interrupt
  //
  // Thread.interrupt and object.notify{All} both call Event::set.
  // That is, we treat thread.interrupt as a special case of notification.
  // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
  // We assume all ETIME returns are valid.
  //
  // TODO: properly differentiate simultaneous notify+interrupt.
  // In that case, we should propagate the notify to another waiter.

  while (_Event < 0) {
    status = pthread_cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break;         // previous semantics
    if (status == ETIMEDOUT) break;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked;
  // _Event >= 0 here means set() ran while we waited, i.e. a real wakeup.
  if (_Event >= 0) {
     ret = OS_OK;
  }
  _Event = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  assert (_nParked == 0, "invariant");
  return ret;
}
4618 
// Set the event (AKA "up()"): raise _Event to at most 1 and wake the
// associated thread if it is currently blocked in park().
void os::PlatformEvent::unpark() {
  int v, AnyWaiters;
  for (;;) {
    v = _Event;
    if (v > 0) {
      // The LD of _Event could have reordered or be satisfied
      // by a read-aside from this processor's write buffer.
      // To avoid problems execute a barrier and then
      // ratify the value.
      OrderAccess::fence();
      if (_Event == v) return;
      continue;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
  }
  if (v < 0) {
    // Wait for the thread associated with the event to vacate
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    AnyWaiters = _nParked;

    if (AnyWaiters != 0) {
      // NOTE(review): this implementation signals *while still holding*
      // the mutex (the unlock follows below); the previous comment here
      // claimed the signal happened after dropping the lock, which did
      // not match the code.
      status = pthread_cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
    // Mutex should be locked for pthread_cond_signal(_cond).
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }

  // Signalling *after* dropping the lock (for "immortal" Events) would be
  // safe and would avoid a common class of futile wakeups. In rare
  // circumstances a spurious wakeup can cause a thread to return prematurely
  // from cond_{timed}wait(), but that is benign: the victim simply re-tests
  // the condition and re-parks itself.
}
4657 
4658 
4659 // JSR166
4660 // -------------------------------------------------------
4661 
4662 //
4663 // The solaris and linux implementations of park/unpark are fairly
4664 // conservative for now, but can be improved. They currently use a
// mutex/condvar pair, plus a count.
4666 // Park decrements count if > 0, else does a condvar wait. Unpark
4667 // sets count to 1 and signals condvar. Only one thread ever waits
4668 // on the condvar. Contention seen when trying to park implies that someone
4669 // is unparking you, so don't wait. And spurious returns are fine, so there
4670 // is no need to track notifications.
4671 //
4672 
4673 #define MAX_SECS 100000000
4674 //
4675 // This code is common to linux and solaris and will be moved to a
4676 // common place in dolphin.
4677 //
4678 // The passed in time value is either a relative time in nanoseconds
4679 // or an absolute time in milliseconds. Either way it has to be unpacked
4680 // into suitable seconds and nanoseconds components and stored in the
4681 // given timespec structure.
4682 // Given time is a 64-bit value and the time_t used in the timespec is only
4683 // a signed-32-bit value (except on 64-bit Linux) we have to watch for
4684 // overflow if times way in the future are given. Further on Solaris versions
4685 // prior to 10 there is a restriction (see cond_timedwait) that the specified
4686 // number of seconds, in abstime, is less than current_time + 100,000,000.
4687 // As it will be 28 years before "now + 100000000" will overflow we can
4688 // ignore overflow and just impose a hard-limit on seconds using the value
4689 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
4690 // years from "now".
4691 //
4692 
4693 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
4694   assert (time > 0, "convertTime");
4695 
4696   struct timeval now;
4697   int status = gettimeofday(&now, NULL);
4698   assert(status == 0, "gettimeofday");
4699 
4700   time_t max_secs = now.tv_sec + MAX_SECS;
4701 
4702   if (isAbsolute) {
4703     jlong secs = time / 1000;
4704     if (secs > max_secs) {
4705       absTime->tv_sec = max_secs;
4706     }
4707     else {
4708       absTime->tv_sec = secs;
4709     }
4710     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4711   }
4712   else {
4713     jlong secs = time / NANOSECS_PER_SEC;
4714     if (secs >= MAX_SECS) {
4715       absTime->tv_sec = max_secs;
4716       absTime->tv_nsec = 0;
4717     }
4718     else {
4719       absTime->tv_sec = now.tv_sec + secs;
4720       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4721       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4722         absTime->tv_nsec -= NANOSECS_PER_SEC;
4723         ++absTime->tv_sec; // note: this must be <= max_secs
4724       }
4725     }
4726   }
4727   assert(absTime->tv_sec >= 0, "tv_sec < 0");
4728   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4729   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4730   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4731 }
4732 
// Park the current (Java) thread until unpark() is called, the timeout
// expires, or the thread is interrupted. Time semantics:
//   time == 0 and !isAbsolute : park indefinitely
//   isAbsolute                : 'time' is a deadline in millis since the epoch
//   !isAbsolute               : 'time' is a relative timeout in nanos
void Parker::park(bool isAbsolute, jlong time) {
  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
    _counter = 0;
    OrderAccess::fence();
    return;
  }

  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Optional optimization -- avoid state transitions if there's an interrupt pending.
  // Check interrupt before trying to wait
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // Next, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking. Also, check interrupt before trying wait.
  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;
  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert (status == 0, "invariant");
    // Fence so the permit consumption is visible before we return.
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  if (time == 0) {
    status = pthread_cond_wait (_cond, _mutex);
  } else {
    status = pthread_cond_timedwait (_cond, _mutex, &absTime);
    // Work around NPTL condvar corruption after a failed timedwait by
    // reinitializing the condvar (see the long comment block further up).
    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
      pthread_cond_destroy (_cond);
      pthread_cond_init    (_cond, NULL);
    }
  }
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif

  _counter = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");
  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }

  OrderAccess::fence();
}
4823 
4824 void Parker::unpark() {
4825   int s, status;
4826   status = pthread_mutex_lock(_mutex);
4827   assert (status == 0, "invariant");
4828   s = _counter;
4829   _counter = 1;
4830   if (s < 1) {
4831     if (WorkAroundNPTLTimedWaitHang) {
4832       status = pthread_cond_signal (_cond);
4833       assert (status == 0, "invariant");
4834       status = pthread_mutex_unlock(_mutex);
4835       assert (status == 0, "invariant");
4836     } else {
4837       status = pthread_mutex_unlock(_mutex);
4838       assert (status == 0, "invariant");
4839       status = pthread_cond_signal (_cond);
4840       assert (status == 0, "invariant");
4841     }
4842   } else {
4843     pthread_mutex_unlock(_mutex);
4844     assert (status == 0, "invariant");
4845   }
4846 }
4847 
4848 extern char** environ;
4849 
4850 // Run the specified command in a separate process. Return its exit value,
4851 // or -1 on failure (e.g. can't fork a new process).
4852 // Unlike system(), this function can be called from signal handler. It
4853 // doesn't block SIGINT et al.
4854 int os::fork_and_exec(char* cmd) {
4855   char * argv[4] = {"sh", "-c", cmd, NULL};
4856 
4857   pid_t pid = fork();
4858 
4859   if (pid < 0) {
4860     // fork failed
4861     return -1;
4862 
4863   } else if (pid == 0) {
4864     // child process
4865 
4866     // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4867     execve("/usr/bin/sh", argv, environ);
4868 
4869     // execve failed
4870     _exit(-1);
4871 
4872   } else {
4873     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4874     // care about the actual exit code, for now.
4875 
4876     int status;
4877 
4878     // Wait for the child process to exit. This returns immediately if
4879     // the child has already exited. */
4880     while (waitpid(pid, &status, 0) < 0) {
4881       switch (errno) {
4882         case ECHILD: return 0;
4883         case EINTR: break;
4884         default: return -1;
4885       }
4886     }
4887 
4888     if (WIFEXITED(status)) {
4889       // The child exited normally; get its exit code.
4890       return WEXITSTATUS(status);
4891     } else if (WIFSIGNALED(status)) {
4892       // The child exited because of a signal.
4893       // The best value to return is 0x80 + signal number,
4894       // because that is what all Unix shells do, and because
4895       // it allows callers to distinguish between process exit and
4896       // process death by signal.
4897       return 0x80 + WTERMSIG(status);
4898     } else {
4899       // Unknown exit code; pass it through.
4900       return status;
4901     }
4902   }
4903   return -1;
4904 }
4905 
4906 // is_headless_jre()
4907 //
4908 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
4909 // in order to report if we are running in a headless jre.
4910 //
4911 // Since JDK8 xawt/libmawt.so is moved into the same directory
4912 // as libawt.so, and renamed libawt_xawt.so
4913 bool os::is_headless_jre() {
4914   struct stat statbuf;
4915   char buf[MAXPATHLEN];
4916   char libmawtpath[MAXPATHLEN];
4917   const char *xawtstr = "/xawt/libmawt.so";
4918   const char *new_xawtstr = "/libawt_xawt.so";
4919 
4920   char *p;
4921 
4922   // Get path to libjvm.so
4923   os::jvm_path(buf, sizeof(buf));
4924 
4925   // Get rid of libjvm.so
4926   p = strrchr(buf, '/');
4927   if (p == NULL) return false;
4928   else *p = '\0';
4929 
4930   // Get rid of client or server
4931   p = strrchr(buf, '/');
4932   if (p == NULL) return false;
4933   else *p = '\0';
4934 
4935   // check xawt/libmawt.so
4936   strcpy(libmawtpath, buf);
4937   strcat(libmawtpath, xawtstr);
4938   if (::stat(libmawtpath, &statbuf) == 0) return false;
4939 
4940   // check libawt_xawt.so
4941   strcpy(libmawtpath, buf);
4942   strcat(libmawtpath, new_xawtstr);
4943   if (::stat(libmawtpath, &statbuf) == 0) return false;
4944 
4945   return true;
4946 }
4947 
4948 // Get the default path to the core file
4949 // Returns the length of the string
4950 int os::get_core_path(char* buffer, size_t bufferSize) {
4951   const char* p = get_current_directory(buffer, bufferSize);
4952 
4953   if (p == NULL) {
4954     assert(p != NULL, "failed to get current directory");
4955     return 0;
4956   }
4957 
4958   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4959                                                p, current_process_id());
4960 
4961   return strlen(buffer);
4962 }
4963 
#ifndef PRODUCT
// Hook for the internal VM test framework; special ("large page") memory
// reservation tests are not implemented for this platform.
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
#endif