/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2015 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// According to the AIX OS doc, #pragma alloca must be used
// with the C++ compiler before referencing the function alloca().
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "libperfstat_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
// getrusage() is prepared to handle the associated failure.
#ifndef RUSAGE_THREAD
#define RUSAGE_THREAD   (1)               /* only the calling thread */
#endif
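
// A minimal usage sketch of the fallback mentioned above (illustrative only,
// not part of this file's logic): if the running kernel does not know
// RUSAGE_THREAD, getrusage() fails with -1/EINVAL and the caller falls back
// to a coarser time source (see os::elapsedVTime() below).
//
//   struct rusage usage;
//   if (getrusage(RUSAGE_THREAD, &usage) != 0) {
//     // e.g. fall back to process-wide or wall-clock time
//   }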

// PPC port
static const uintx Use64KPagesThreshold       = 1*M;
static const uintx MaxExpectedDataSegmentSize = SIZE_4G*2;

// Add missing declarations (these should be in procinfo.h but aren't until AIX 6.1).
#if !defined(_AIXVERSION_610)
extern "C" {
  int getthrds64(pid_t ProcessIdentifier,
                 struct thrdentry64* ThreadBuffer,
                 int ThreadSize,
                 tid64_t* IndexPointer,
                 int Count);
}
#endif
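
// Hedged usage sketch (an illustration only, not used in this file):
// getthrds64() is typically called in a loop, with IndexPointer acting as a
// cursor that the kernel advances past the last thread entry returned:
//
//   struct thrdentry64 entries[16];
//   tid64_t cursor = 0;
//   int n;
//   while ((n = getthrds64(getpid(), entries, sizeof(struct thrdentry64),
//                          &cursor, 16)) > 0) {
//     // inspect entries[0 .. n-1]
//     if (n < 16) break; // fewer than requested: last chunk
//   }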

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// The semantics in this file are such that codeptr_t is a *real code pointer*.
// This means that any function taking codeptr_t as an argument will assume
// a real code pointer and won't handle function descriptors (e.g. getFuncName),
// whereas functions taking address as an argument will deal with function
// descriptors (e.g. os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;
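
// For illustration only (a sketch based on the AIX/PowerPC ABI; the
// FunctionDescriptor class used further down models the same layout): a
// function pointer living in the data segment points at a descriptor of
// three slots, and only the first slot is a codeptr_t in the above sense.
//
//   struct function_descriptor_sketch {   // hypothetical name
//     void* entry;        // the real code pointer (text segment)
//     void* toc;          // TOC anchor of the owning module
//     void* environment;  // environment pointer (unused for C/C++)
//   };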

// Typedefs for stackslots, stack pointers, pointers to op codes.
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;

// Excerpts from systemcfg.h definitions newer than AIX 5.3.
#ifndef PV_7
#define PV_7 0x200000          /* Power PC 7 */
#define PV_7_Compat 0x208000   /* Power PC 7 */
#endif
#ifndef PV_8
#define PV_8 0x300000          /* Power PC 8 */
#define PV_8_Compat 0x308000   /* Power PC 8 */
#endif

#define trcVerbose(fmt, ...) { /* PPC port */  \
  if (Verbose) { \
    fprintf(stderr, fmt, ##__VA_ARGS__); \
    fputc('\n', stderr); fflush(stderr); \
  } \
}
#define trc(fmt, ...)        /* PPC port */

#define ERRBYE(s) { \
    trcVerbose(s); \
    return -1; \
}

// Query dimensions of the stack of the calling thread.
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

// Function to check a given stack pointer against given stack limits.
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
  if (((uintptr_t)sp) & 0x7) {
    return false;
  }
  if (sp > stack_base) {
    return false;
  }
  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
    return false;
  }
  return true;
}

// Returns true if p is a valid code pointer.
inline bool is_valid_codepointer(codeptr_t p) {
  if (!p) {
    return false;
  }
  if (((uintptr_t)p) & 0x3) {
    return false;
  }
  if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
    return false;
  }
  return true;
}

// Macro to check a given stack pointer against given stack limits and to die if the test fails.
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// Macro to check the current stack pointer against given stack limits.
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
  address sp; \
  sp = os::current_stack_pointer(); \
  CHECK_STACK_PTR(sp, stack_base, stack_size); \
}

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;
pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;
int       os::Aix::_on_pase = -1;
int       os::Aix::_os_version = -1;
int       os::Aix::_stack_page_size = -1;
int       os::Aix::_xpg_sus_mode = -1;
int       os::Aix::_extshm = -1;
int       os::Aix::_logical_cpus = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static int      g_multipage_error  = -1;   // error analysis for multipage initialization
static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static pid_t    _initial_pid       = 0;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;

// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
// therefore should not be defined in the AIX class.
//
// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
// latter two (16M "large" and 16G "huge" pages) require special
// setup and are normally not available.
//
// AIX supports multiple page sizes per process, for:
//  - Stack (of the primordial thread, so not relevant for us)
//  - Data - data, bss, heap, for us also pthread stacks
//  - Text - text code
//  - shared memory
//
// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
// and via the environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
//
// For shared memory, the page size can be set dynamically via
// shmctl(). Different shared memory regions can have different page
// sizes (see the sketch after the struct below).
//
// More information can be found at the IBM info center:
//   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
//
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};
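
// Minimal sketch of the dynamic shared-memory page size request mentioned in
// the comment above (illustrative, assuming a 64K-capable system;
// query_multipage_support() below performs the full, error-checked probe):
//
//   int shmid = shmget(IPC_PRIVATE, SIZE_64K, IPC_CREAT | S_IRUSR | S_IWUSR);
//   struct shmid_ds shm_buf = { 0 };
//   shm_buf.shm_pagesize = SIZE_64K;
//   if (shmctl(shmid, SHM_PAGESIZE, &shm_buf) == 0) {
//     // the region will be backed by 64K pages once attached
//   }
//   shmctl(shmid, IPC_RMID, NULL);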

// We must not accidentally allocate memory close to the BRK - even if
// that would work - because then we prevent the BRK segment from
// growing which may result in a malloc OOM even though there is
// enough memory. The problem only arises if we shmat() or mmap() at
// a specific wish address, e.g. to place the heap in a
// compressed-oops-friendly way.
static bool is_close_to_brk(address a) {
  address a1 = (address) sbrk(0);
  if (a >= a1 && a < (a1 + MaxExpectedDataSegmentSize)) {
    return true;
  }
  return false;
}

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return 0xFFFFFFFFFFFFFFFFLL;
  }
}

julong os::physical_memory() {
  return Aix::physical_memory();
}

// Return true if user is running as root.

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function, emulates disclaim64 using multiple 32bit disclaims
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
  const unsigned int maxDisclaimSize = 0x40000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);

  char* p = addr;

  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i ++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}

// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif


// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
    assert(false, "vmgetinfo failed to retrieve page size");
    return SIZE_4K;
  }

}

// Returns the kernel thread id of the currently running thread.
pid_t os::Aix::gettid() {
  return (pid_t) thread_self();
}

void os::Aix::initialize_system_info() {

  // Get the number of online (logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case SIZE_4K : return "4K";
    case SIZE_64K: return "64K";
    case SIZE_16M: return "16M";
    case SIZE_16G: return "16G";
    case -1:       return "not set";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Probe OS for multipage support.
// Will fill the global g_multipage_support structure.
// Must be called before calling os::large_page_init().
static void query_multipage_support() {

  guarantee(g_multipage_support.pagesize == -1,
            "do not call twice");

  g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");

  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // Default data page size is defined either by linker options (-bdatapsize)
  // or by the environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // the default should be 4K.
  {
    void* p = ::malloc(SIZE_16M);
    g_multipage_support.datapsize = os::Aix::query_pagesize(p);
    ::free(p);
  }

  // Query default shm page size (LDR_CNTRL SHMPSIZE).
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // Before querying the stack page size, make sure we are not running as primordial
  // thread (because the primordial thread's stack may have a different page size than
  // pthread thread stacks). Running a VM on the primordial thread won't work for a
  // number of reasons so we may just as well guarantee it here.
  guarantee0(!os::Aix::is_primordial_thread());

  // Query pthread stack page size.
  {
    int dummy = 0;
    g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
  }

  // Query default text page size (LDR_CNTRL TEXTPSIZE).
  /* PPC port: so far unused.
  {
    address any_function =
      (address) resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
    g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
  }
  */

  // Now probe for support of 64K pages and 16M pages.

  // Before OS/400 V6R1, there is no support for pages other than 4K.
  if (os::Aix::on_pase_V5R4_or_older()) {
    Unimplemented();
    goto query_multipage_support_end;
  }

  // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      trc("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
      trc("disabling multipage support.\n");
      g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
    for (int i = 0; i < num_psizes; i ++) {
      trcVerbose(" %s ", describe_pagesize(sizes[i]));
    }

    // Can we use 64K, 16M pages?
    for (int i = 0; i < num_psizes; i ++) {
      const size_t pagesize = sizes[i];
      if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
        continue;
      }
      bool can_use = false;
      trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
      const int shmid = ::shmget(IPC_PRIVATE, pagesize,
        IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee0(shmid != -1); // Should always work.
      // Try to set pagesize.
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = pagesize;
      if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
        const int en = errno;
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        // PPC port trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
        // PPC port  MiscUtils::describe_errno(en));
      } else {
        // Attach and double check pagesize.
        void* p = ::shmat(shmid, NULL, 0);
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        guarantee0(p != (void*) -1); // Should always work.
        const size_t real_pagesize = os::Aix::query_pagesize(p);
        if (real_pagesize != pagesize) {
          trcVerbose("real page size (0x%llX) differs.", real_pagesize);
        } else {
          can_use = true;
        }
        ::shmdt(p);
      }
      trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
      if (pagesize == SIZE_64K) {
        g_multipage_support.can_use_64K_pages = can_use;
      } else if (pagesize == SIZE_16M) {
        g_multipage_support.can_use_16M_pages = can_use;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s\n",
      describe_pagesize(g_multipage_support.pagesize));
  trcVerbose("Data page size (C-Heap, bss, etc): %s\n",
      describe_pagesize(g_multipage_support.datapsize));
  trcVerbose("Text page size: %s\n",
      describe_pagesize(g_multipage_support.textpsize));
  trcVerbose("Thread stack page size (pthread): %s\n",
      describe_pagesize(g_multipage_support.pthr_stack_pagesize));
  trcVerbose("Default shared memory page size: %s\n",
      describe_pagesize(g_multipage_support.shmpsize));
  trcVerbose("Can use 64K pages dynamically with shared memory: %s\n",
      (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
  trcVerbose("Can use 16M pages dynamically with shared memory: %s\n",
      (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
  trcVerbose("Multipage error details: %d\n",
      g_multipage_support.error);

  // sanity checks
  assert0(g_multipage_support.pagesize == SIZE_4K);
  assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
  // PPC port: so far unused. assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
  assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
  assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);

} // end: query_multipage_support()

void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On AIX, get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
    return true;
  } else {
    return false;
  }
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs.)
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow the debugger in.
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Retrieve memory information.
// Returns false if something went wrong;
// the content of pmi is undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {

    Unimplemented();
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics.
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset(&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo

// Retrieve global cpu information.
// Returns false if something went wrong;
// the content of pci is undefined in this case.
bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
  assert(pci, "get_cpuinfo: invalid parameter");
  memset(pci, 0, sizeof(cpuinfo_t));

  perfstat_cpu_total_t psct;
  memset(&psct, '\0', sizeof(psct));

  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
    assert(0, "perfstat_cpu_total() failed");
    return false;
  }

  // global cpu information
  strcpy(pci->description, psct.description);
  pci->processorHZ = psct.processorHZ;
  pci->ncpus = psct.ncpus;
  os::Aix::_logical_cpus = psct.ncpus;
  for (int i = 0; i < 3; i++) {
    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
  }

  // get the processor version from _system_configuration
  switch (_system_configuration.version) {
  case PV_8:
    strcpy(pci->version, "Power PC 8");
    break;
  case PV_7:
    strcpy(pci->version, "Power PC 7");
    break;
  case PV_6_1:
    strcpy(pci->version, "Power PC 6 DD1.x");
    break;
  case PV_6:
    strcpy(pci->version, "Power PC 6");
    break;
  case PV_5:
    strcpy(pci->version, "Power PC 5");
    break;
  case PV_5_2:
    strcpy(pci->version, "Power PC 5_2");
    break;
  case PV_5_3:
    strcpy(pci->version, "Power PC 5_3");
    break;
  case PV_5_Compat:
    strcpy(pci->version, "PV_5_Compat");
    break;
  case PV_6_Compat:
    strcpy(pci->version, "PV_6_Compat");
    break;
  case PV_7_Compat:
    strcpy(pci->version, "PV_7_Compat");
    break;
  case PV_8_Compat:
    strcpy(pci->version, "PV_8_Compat");
    break;
  default:
    strcpy(pci->version, "unknown");
  }

  return true;

} // end os::Aix::get_cpuinfo

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Aix::libpthread_init() {
  return;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is the kernel thread id (similar to a Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level start routine
  thread->run();

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (stack_size == 0) {
    stack_size = os::Aix::default_stack_size(thr_type);

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
      assert(JavaThread::stack_size_at_create() > 0, "this should be set");
      stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
  pthread_attr_setstacksize(&attr, stack_size);

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  pthread_attr_destroy(&attr);

  if (ret == 0) {
    // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
  } else {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "pthread_continue_np failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// thread local storage

int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, NULL);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}

// Note: This is currently not used by VM, as we don't destroy TLS key
// on VM exit.
void os::free_thread_local_storage(int index) {
  int rslt = pthread_key_delete((pthread_key_t)index);
  assert(rslt == 0, "invalid index");
}

void os::thread_local_storage_at_put(int index, void* value) {
  int rslt = pthread_setspecific((pthread_key_t)index, value);
  assert(rslt == 0, "pthread_setspecific failed");
}

extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }

double os::elapsedVTime() {
  struct rusage usage;
  int retval = getrusage(RUSAGE_THREAD, &usage);
  if (retval == 0) {
    return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
  } else {
    // better than nothing, but not much
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  seconds = jlong(time.tv_sec);
  nanos = jlong(time.tv_usec) * 1000;
}


// We need to manually declare mread_real_time,
// because IBM didn't provide a prototype in time.h.
// (They probably only ever tested it in C, not C++.)
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {
    Unimplemented();
    return 0;
  } else {
    // On AIX, use the precision of the processor's real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We use mread_real_time here instead of read_real_time
    // to ensure that we get a monotonically increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}

char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core, void* siginfo, void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() { return (intx)pthread_self(); }

int os::current_process_id() {

  // This implementation returns a unique pid, the pid of the
  // launcher thread that starts the vm 'process'.

  // Under POSIX, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre-NPTL behaviour.

  // If you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in the osThread_aix.hpp file.

  return (int)(_initial_pid ? _initial_pid : getpid());
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
  if (lib == NULL) {
    lib = LoadedLibraries::find_for_data_address(addr);
  }
  if (lib) {
    return strcmp(lib->get_shortname(), "libjvm.so") == 0;
  }
  return false;
}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
//   it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
//   code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
//   NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
  if (lib) {
    // it's a real code pointer
    return p;
  } else {
    lib = LoadedLibraries::find_for_data_address(p);
    if (lib) {
      // pointer to data segment, potential function descriptor
      address code_entry = (address)(((FunctionDescriptor*)p)->entry());
      if (LoadedLibraries::find_for_text_address(code_entry)) {
        // it's a function descriptor
        return code_entry;
      }
    }
  }
  return NULL;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
  return Decoder::decode(addr, buf, buflen, offset, demangle);
}

static int getModuleName(codeptr_t pc,                    // [in] program counter
                         char* p_name, size_t namelen,    // [out] optional: function name
                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
                         ) {

  // initialize output parameters
  if (p_name && namelen > 0) {
    *p_name = '\0';
  }
  if (p_errmsg && errmsglen > 0) {
    *p_errmsg = '\0';
  }

  const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);
  if (lib) {
    if (p_name && namelen > 0) {
      sprintf(p_name, "%.*s", (int) namelen, lib->get_shortname());
    }
    return 0;
  }

  trcVerbose("pc outside any module");

  return -1;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  if (::getModuleName((codeptr_t) addr, buf, buflen, NULL, 0) == 0) {
    return true;
  }
  return false;
}

// Loads .dll/.so and in case of error it checks if .dll/.so was built
// for the same architecture as Hotspot is running on.
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {

  if (ebuf && ebuflen > 0) {
    ebuf[0] = '\0';
    ebuf[ebuflen - 1] = '\0';
  }

  if (!filename || strlen(filename) == 0) {
    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
    return NULL;
  }

  // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependents.
  void* result = ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Reload dll cache. Don't do this in signal handling.
    LoadedLibraries::reload();
    return result;
  } else {
    // error analysis when dlopen fails
    const char* const error_report = ::dlerror();
    if (error_report && ebuf && ebuflen > 0) {
      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
    }
  }
  return NULL;
}

void* os::dll_lookup(void* handle, const char* name) {
  void* res = dlsym(handle, name);
  return res;
}

void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}

void os::get_summary_os_info(char* buf, size_t buflen) {
  // There might be something more readable than uname results for AIX.
  struct utsname name;
  uname(&name);
  snprintf(buf, buflen, "%s %s", name.release, name.version);
}
1557 
1558 void os::print_os_info(outputStream* st) {
1559   st->print("OS:");
1560 
1561   st->print("uname:");
1562   struct utsname name;
1563   uname(&name);
  st->print("%s ", name.sysname);
  st->print("%s ", name.nodename);
  st->print("%s ", name.release);
  st->print("%s ", name.version);
  st->print("%s", name.machine);
1569   st->cr();
1570 
1571   // rlimit
1572   st->print("rlimit:");
1573   struct rlimit rlim;
1574 
1575   st->print(" STACK ");
1576   getrlimit(RLIMIT_STACK, &rlim);
1577   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1578   else st->print("%uk", rlim.rlim_cur >> 10);
1579 
1580   st->print(", CORE ");
1581   getrlimit(RLIMIT_CORE, &rlim);
1582   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1583   else st->print("%uk", rlim.rlim_cur >> 10);
1584 
1585   st->print(", NPROC ");
1586   st->print("%d", sysconf(_SC_CHILD_MAX));
1587 
1588   st->print(", NOFILE ");
1589   getrlimit(RLIMIT_NOFILE, &rlim);
1590   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1591   else st->print("%d", rlim.rlim_cur);
1592 
1593   st->print(", AS ");
1594   getrlimit(RLIMIT_AS, &rlim);
1595   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1596   else st->print("%uk", rlim.rlim_cur >> 10);
1597 
1598   // Print limits on DATA, because it limits the C-heap.
1599   st->print(", DATA ");
1600   getrlimit(RLIMIT_DATA, &rlim);
1601   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1602   else st->print("%uk", rlim.rlim_cur >> 10);
1603   st->cr();
1604 
1605   // load average
1606   st->print("load average:");
1607   double loadavg[3] = {-1.L, -1.L, -1.L};
1608   os::loadavg(loadavg, 3);
1609   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1610   st->cr();
1611 }
1612 
1613 void os::print_memory_info(outputStream* st) {
1614 
1615   st->print_cr("Memory:");
1616 
1617   st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
1618   st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
1619   st->print_cr("  Default shared memory page size:        %s",
1620     describe_pagesize(g_multipage_support.shmpsize));
1621   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1622     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1623   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1624     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1625   if (g_multipage_error != 0) {
1626     st->print_cr("  multipage error: %d", g_multipage_error);
1627   }
1628 
1629   // print out LDR_CNTRL because it affects the default page sizes
1630   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1631   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1632 
  const char* const extshm = ::getenv("EXTSHM");
  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
  if (extshm != NULL &&
      ((strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0))) {
    st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
  }
1638 
1639   // Call os::Aix::get_meminfo() to retrieve memory statistics.
1640   os::Aix::meminfo_t mi;
1641   if (os::Aix::get_meminfo(&mi)) {
1642     char buffer[256];
1643     if (os::Aix::on_aix()) {
1644       jio_snprintf(buffer, sizeof(buffer),
1645                    "  physical total : %llu\n"
1646                    "  physical free  : %llu\n"
1647                    "  swap total     : %llu\n"
1648                    "  swap free      : %llu\n",
1649                    mi.real_total,
1650                    mi.real_free,
1651                    mi.pgsp_total,
1652                    mi.pgsp_free);
1653     } else {
1654       Unimplemented();
1655     }
1656     st->print_raw(buffer);
1657   } else {
1658     st->print_cr("  (no more information available)");
1659   }
1660 }
1661 
1662 // Get a string for the cpuinfo that is a summary of the cpu type
1663 void os::get_summary_cpu_info(char* buf, size_t buflen) {
  os::Aix::cpuinfo_t ci;
  if (os::Aix::get_cpuinfo(&ci)) {
    strncpy(buf, ci.version, buflen);
  } else {
    strncpy(buf, "AIX", buflen);
  }
  // strncpy does not guarantee null termination for long sources.
  buf[buflen - 1] = '\0';
}
1672 
1673 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1674 }
1675 
1676 void os::print_siginfo(outputStream* st, void* siginfo) {
1677   // Use common posix version.
1678   os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
1679   st->cr();
1680 }
1681 
1682 static void print_signal_handler(outputStream* st, int sig,
1683                                  char* buf, size_t buflen);
1684 
1685 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1686   st->print_cr("Signal Handlers:");
1687   print_signal_handler(st, SIGSEGV, buf, buflen);
1688   print_signal_handler(st, SIGBUS , buf, buflen);
1689   print_signal_handler(st, SIGFPE , buf, buflen);
1690   print_signal_handler(st, SIGPIPE, buf, buflen);
1691   print_signal_handler(st, SIGXFSZ, buf, buflen);
1692   print_signal_handler(st, SIGILL , buf, buflen);
1693   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
1694   print_signal_handler(st, SR_signum, buf, buflen);
1695   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1696   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1697   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1698   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1699   print_signal_handler(st, SIGTRAP, buf, buflen);
1700   print_signal_handler(st, SIGDANGER, buf, buflen);
1701 }
1702 
1703 static char saved_jvm_path[MAXPATHLEN] = {0};
1704 
1705 // Find the full path to the current module, libjvm.so.
1706 void os::jvm_path(char *buf, jint buflen) {
1707   // Error checking.
1708   if (buflen < MAXPATHLEN) {
1709     assert(false, "must use a large-enough buffer");
1710     buf[0] = '\0';
1711     return;
1712   }
1713   // Lazy resolve the path to current module.
1714   if (saved_jvm_path[0] != 0) {
1715     strcpy(buf, saved_jvm_path);
1716     return;
1717   }
1718 
1719   Dl_info dlinfo;
1720   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1721   assert(ret != 0, "cannot locate libjvm");
1722   char* rp = realpath((char *)dlinfo.dli_fname, buf);
1723   assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1724 
1725   strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
1726   saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
1727 }
1728 
1729 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1730   // no prefix required, not even "_"
1731 }
1732 
1733 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1734   // no suffix required
1735 }
1736 
1737 ////////////////////////////////////////////////////////////////////////////////
1738 // sun.misc.Signal support
1739 
1740 static volatile jint sigint_count = 0;
1741 
1742 static void
1743 UserHandler(int sig, void *siginfo, void *context) {
1744   // 4511530 - sem_post is serialized and handled by the manager thread. When
1745   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1746   // don't want to flood the manager thread with sem_post requests.
1747   if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1748     return;
1749 
1750   // Ctrl-C is pressed during error reporting, likely because the error
1751   // handler fails to abort. Let VM die immediately.
1752   if (sig == SIGINT && is_error_reported()) {
1753     os::die();
1754   }
1755 
1756   os::signal_notify(sig);
1757 }
1758 
1759 void* os::user_handler() {
1760   return CAST_FROM_FN_PTR(void*, UserHandler);
1761 }
1762 
1763 extern "C" {
1764   typedef void (*sa_handler_t)(int);
1765   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1766 }
1767 
1768 void* os::signal(int signal_number, void* handler) {
1769   struct sigaction sigAct, oldSigAct;
1770 
1771   sigfillset(&(sigAct.sa_mask));
1772 
1773   // Do not block out synchronous signals in the signal handler.
1774   // Blocking synchronous signals only makes sense if you can really
1775   // be sure that those signals won't happen during signal handling,
1776   // when the blocking applies. Normal signal handlers are lean and
1777   // do not cause signals. But our signal handlers tend to be "risky"
1778   // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
1779   // On AIX, PASE there was a case where a SIGSEGV happened, followed
1780   // by a SIGILL, which was blocked due to the signal mask. The process
1781   // just hung forever. Better to crash from a secondary signal than to hang.
1782   sigdelset(&(sigAct.sa_mask), SIGSEGV);
1783   sigdelset(&(sigAct.sa_mask), SIGBUS);
1784   sigdelset(&(sigAct.sa_mask), SIGILL);
1785   sigdelset(&(sigAct.sa_mask), SIGFPE);
1786   sigdelset(&(sigAct.sa_mask), SIGTRAP);
1787 
1788   sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1789 
1790   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1791 
1792   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1793     // -1 means registration failed
1794     return (void *)-1;
1795   }
1796 
1797   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1798 }
1799 
1800 void os::signal_raise(int signal_number) {
1801   ::raise(signal_number);
1802 }
1803 
1804 //
1805 // The following code is moved from os.cpp for making this
1806 // code platform specific, which it is by its very nature.
1807 //
1808 
1809 // Will be modified when max signal is changed to be dynamic
1810 int os::sigexitnum_pd() {
1811   return NSIG;
1812 }
1813 
1814 // a counter for each possible signal value
1815 static volatile jint pending_signals[NSIG+1] = { 0 };
1816 
// POSIX hand-shaking semaphore.
1818 static sem_t sig_sem;
1819 
1820 void os::signal_init_pd() {
1821   // Initialize signal structures
1822   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1823 
1824   // Initialize signal semaphore
1825   int rc = ::sem_init(&sig_sem, 0, 0);
1826   guarantee(rc != -1, "sem_init failed");
1827 }
1828 
1829 void os::signal_notify(int sig) {
1830   Atomic::inc(&pending_signals[sig]);
1831   ::sem_post(&sig_sem);
1832 }
1833 
1834 static int check_pending_signals(bool wait) {
1835   Atomic::store(0, &sigint_count);
1836   for (;;) {
1837     for (int i = 0; i < NSIG + 1; i++) {
1838       jint n = pending_signals[i];
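      // Atomically claim one pending occurrence of signal i: only the thread
      // whose cmpxchg succeeds decrements the counter and reports this signal.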
1839       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1840         return i;
1841       }
1842     }
1843     if (!wait) {
1844       return -1;
1845     }
1846     JavaThread *thread = JavaThread::current();
1847     ThreadBlockInVM tbivm(thread);
1848 
1849     bool threadIsSuspended;
1850     do {
1851       thread->set_suspend_equivalent();
1852       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1853 
1854       ::sem_wait(&sig_sem);
1855 
1856       // were we externally suspended while we were waiting?
1857       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1858       if (threadIsSuspended) {
1859         //
1860         // The semaphore has been incremented, but while we were waiting
1861         // another thread suspended us. We don't want to continue running
1862         // while suspended because that would surprise the thread that
1863         // suspended us.
1864         //
1865         ::sem_post(&sig_sem);
1866 
1867         thread->java_suspend_self();
1868       }
1869     } while (threadIsSuspended);
1870   }
1871 }
1872 
1873 int os::signal_lookup() {
1874   return check_pending_signals(false);
1875 }
1876 
1877 int os::signal_wait() {
1878   return check_pending_signals(true);
1879 }
1880 
1881 ////////////////////////////////////////////////////////////////////////////////
1882 // Virtual Memory
1883 
1884 // We need to keep small simple bookkeeping for os::reserve_memory and friends.
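// We need it because a reserved range must later be released or uncommitted
// with the API matching its origin (munmap/msync for mmapped ranges,
// shmdt/disclaim64 for shmatted ranges), and because we want to remember the
// real page size backing each range.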
1885 
1886 #define VMEM_MAPPED  1
1887 #define VMEM_SHMATED 2
1888 
1889 struct vmembk_t {
  int type;         // VMEM_MAPPED (mmap) or VMEM_SHMATED (shmat)
1891   char* addr;
1892   size_t size;      // Real size, may be larger than usersize.
1893   size_t pagesize;  // page size of area
1894   vmembk_t* next;
1895 
1896   bool contains_addr(char* p) const {
1897     return p >= addr && p < (addr + size);
1898   }
1899 
1900   bool contains_range(char* p, size_t s) const {
1901     return contains_addr(p) && contains_addr(p + s - 1);
1902   }
1903 
1904   void print_on(outputStream* os) const {
1905     os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1906       " bytes, %d %s pages), %s",
1907       addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1908       (type == VMEM_SHMATED ? "shmat" : "mmap")
1909     );
1910   }
1911 
  // Check that range is a sub range of memory block (or equal to memory block);
  // also check that range is fully page aligned to the page size of the block.
1914   void assert_is_valid_subrange(char* p, size_t s) const {
1915     if (!contains_range(p, s)) {
1916       fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1917               "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
1918               p, p + s - 1, addr, addr + size - 1);
1919       guarantee0(false);
1920     }
1921     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1922       fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1923               " aligned to pagesize (%s)\n", p, p + s);
1924       guarantee0(false);
1925     }
1926   }
1927 };
1928 
1929 static struct {
1930   vmembk_t* first;
1931   MiscUtils::CritSect cs;
1932 } vmem;
1933 
1934 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1935   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1936   assert0(p);
1937   if (p) {
1938     MiscUtils::AutoCritSect lck(&vmem.cs);
1939     p->addr = addr; p->size = size;
1940     p->pagesize = pagesize;
1941     p->type = type;
1942     p->next = vmem.first;
1943     vmem.first = p;
1944   }
1945 }
1946 
1947 static vmembk_t* vmembk_find(char* addr) {
1948   MiscUtils::AutoCritSect lck(&vmem.cs);
1949   for (vmembk_t* p = vmem.first; p; p = p->next) {
1950     if (p->addr <= addr && (p->addr + p->size) > addr) {
1951       return p;
1952     }
1953   }
1954   return NULL;
1955 }
1956 
1957 static void vmembk_remove(vmembk_t* p0) {
1958   MiscUtils::AutoCritSect lck(&vmem.cs);
1959   assert0(p0);
1960   assert0(vmem.first); // List should not be empty.
1961   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1962     if (*pp == p0) {
1963       *pp = p0->next;
1964       ::free(p0);
1965       return;
1966     }
1967   }
1968   assert0(false); // Not found?
1969 }
1970 
1971 static void vmembk_print_on(outputStream* os) {
1972   MiscUtils::AutoCritSect lck(&vmem.cs);
1973   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1974     vmi->print_on(os);
1975     os->cr();
1976   }
1977 }
1978 
1979 // Reserve and attach a section of System V memory.
1980 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1981 // address. Failing that, it will attach the memory anywhere.
1982 // If <requested_addr> is NULL, function will attach the memory anywhere.
1983 //
// <alignment_hint> is being ignored by this function. It is very probable however that the
// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
// Should this not be enough, we can put more work into it.
1987 static char* reserve_shmated_memory (
1988   size_t bytes,
1989   char* requested_addr,
1990   size_t alignment_hint) {
1991 
1992   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1993     PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1994     bytes, requested_addr, alignment_hint);
1995 
1996   // Either give me wish address or wish alignment but not both.
1997   assert0(!(requested_addr != NULL && alignment_hint != 0));
1998 
1999   // We must prevent anyone from attaching too close to the
2000   // BRK because that may cause malloc OOM.
2001   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2002     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2003       "Will attach anywhere.", requested_addr);
2004     // Act like the OS refused to attach there.
2005     requested_addr = NULL;
2006   }
2007 
  // For old AS/400s (V5R4 and older) we should not even be here - System V shared memory is not
  // really supported (max size 4GB), so reserve_mmaped_memory should have been used instead.
2010   if (os::Aix::on_pase_V5R4_or_older()) {
2011     ShouldNotReachHere();
2012   }
2013 
2014   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
2015   const size_t size = align_size_up(bytes, SIZE_64K);
2016 
2017   // Reserve the shared segment.
2018   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
2019   if (shmid == -1) {
2020     trc("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
2021     return NULL;
2022   }
2023 
2024   // Important note:
2025   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
  // Right after attaching it, we must remove it from the system. System V shm segments are global and
2027   // survive the process.
2028   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
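  // (Deleting right after attach is safe because IPC_RMID only marks the segment
  // for destruction; it remains usable by already-attached processes and is
  // destroyed once the last one detaches.)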
2029 
2030   struct shmid_ds shmbuf;
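  // Try to switch the segment to 64K pages. SHM_PAGESIZE is the AIX-specific
  // shmctl() command to request a backing page size for a shared memory segment.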
2031   memset(&shmbuf, 0, sizeof(shmbuf));
2032   shmbuf.shm_pagesize = SIZE_64K;
2033   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
2034     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
2035                size / SIZE_64K, errno);
2036     // I want to know if this ever happens.
2037     assert(false, "failed to set page size for shmat");
2038   }
2039 
2040   // Now attach the shared segment.
2041   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
2042   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
2043   // were not a segment boundary.
2044   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
2045   const int errno_shmat = errno;
2046 
2047   // (A) Right after shmat and before handing shmat errors delete the shm segment.
2048   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2049     trc("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2050     assert(false, "failed to remove shared memory segment!");
2051   }
2052 
2053   // Handle shmat error. If we failed to attach, just return.
2054   if (addr == (char*)-1) {
2055     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
2056     return NULL;
2057   }
2058 
  // Just for info: query the real page size. In case setting the page size did not
  // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
2061   const size_t real_pagesize = os::Aix::query_pagesize(addr);
2062   if (real_pagesize != shmbuf.shm_pagesize) {
2063     trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
2064   }
2065 
2066   if (addr) {
2067     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
2068       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
2069   } else {
2070     if (requested_addr != NULL) {
2071       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
2072     } else {
2073       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
2074     }
2075   }
2076 
2077   // book-keeping
2078   vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
2079   assert0(is_aligned_to(addr, os::vm_page_size()));
2080 
2081   return addr;
2082 }
2083 
2084 static bool release_shmated_memory(char* addr, size_t size) {
2085 
2086   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2087     addr, addr + size - 1);
2088 
2089   bool rc = false;
2090 
2091   // TODO: is there a way to verify shm size without doing bookkeeping?
2092   if (::shmdt(addr) != 0) {
2093     trcVerbose("error (%d).", errno);
2094   } else {
2095     trcVerbose("ok.");
2096     rc = true;
2097   }
2098   return rc;
2099 }
2100 
2101 static bool uncommit_shmated_memory(char* addr, size_t size) {
2102   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2103     addr, addr + size - 1);
2104 
2105   const bool rc = my_disclaim64(addr, size);
2106 
2107   if (!rc) {
2108     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2109     return false;
2110   }
2111   return true;
2112 }
2113 
2114 // Reserve memory via mmap.
2115 // If <requested_addr> is given, an attempt is made to attach at the given address.
2116 // Failing that, memory is allocated at any address.
2117 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2118 // allocate at an address aligned with the given alignment. Failing that, memory
2119 // is aligned anywhere.
2120 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2121   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2122     "alignment_hint " UINTX_FORMAT "...",
2123     bytes, requested_addr, alignment_hint);
2124 
2125   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
  if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size())) {
2127     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2128     return NULL;
2129   }
2130 
2131   // We must prevent anyone from attaching too close to the
2132   // BRK because that may cause malloc OOM.
2133   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2134     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2135       "Will attach anywhere.", requested_addr);
2136     // Act like the OS refused to attach there.
2137     requested_addr = NULL;
2138   }
2139 
2140   // Specify one or the other but not both.
2141   assert0(!(requested_addr != NULL && alignment_hint > 0));
2142 
2143   // In 64K mode, we claim the global page size (os::vm_page_size())
2144   // is 64K. This is one of the few points where that illusion may
2145   // break, because mmap() will always return memory aligned to 4K. So
2146   // we must ensure we only ever return memory aligned to 64k.
2147   if (alignment_hint) {
2148     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2149   } else {
2150     alignment_hint = os::vm_page_size();
2151   }
2152 
2153   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2154   const size_t size = align_size_up(bytes, os::vm_page_size());
2155 
2156   // alignment: Allocate memory large enough to include an aligned range of the right size and
2157   // cut off the leading and trailing waste pages.
2158   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2159   const size_t extra_size = size + alignment_hint;
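  // Worked example (a sketch, assuming alignment_hint = 64K and a 4K-aligned
  // mmap result): we map size + 64K bytes, round the start up to the next 64K
  // boundary, then munmap the leading waste_pre bytes and whatever trails
  // beyond addr_aligned + size. The surviving window is exactly
  // [addr_aligned, addr_aligned + size), 64K-aligned.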
2160 
2161   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2162   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2163   int flags = MAP_ANONYMOUS | MAP_SHARED;
2164 
  // MAP_FIXED is needed to enforce requested_addr - the manpage is vague about what
  // it means if a wish address is given but MAP_FIXED is not set.
2167   //
2168   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2169   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2170   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2171   // get clobbered.
2172   if (requested_addr != NULL) {
2173     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2174       flags |= MAP_FIXED;
2175     }
2176   }
2177 
2178   char* addr = (char*)::mmap(requested_addr, extra_size,
2179       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2180 
2181   if (addr == MAP_FAILED) {
2182     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2183     return NULL;
2184   }
2185 
2186   // Handle alignment.
2187   char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2188   const size_t waste_pre = addr_aligned - addr;
2189   char* const addr_aligned_end = addr_aligned + size;
2190   const size_t waste_post = extra_size - waste_pre - size;
2191   if (waste_pre > 0) {
2192     ::munmap(addr, waste_pre);
2193   }
2194   if (waste_post > 0) {
2195     ::munmap(addr_aligned_end, waste_post);
2196   }
2197   addr = addr_aligned;
2198 
2199   if (addr) {
2200     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2201       addr, addr + bytes, bytes);
2202   } else {
2203     if (requested_addr != NULL) {
2204       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2205     } else {
2206       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2207     }
2208   }
2209 
2210   // bookkeeping
2211   vmembk_add(addr, size, SIZE_4K, VMEM_MAPPED);
2212 
2213   // Test alignment, see above.
2214   assert0(is_aligned_to(addr, os::vm_page_size()));
2215 
2216   return addr;
2217 }
2218 
2219 static bool release_mmaped_memory(char* addr, size_t size) {
2220   assert0(is_aligned_to(addr, os::vm_page_size()));
2221   assert0(is_aligned_to(size, os::vm_page_size()));
2222 
2223   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2224     addr, addr + size - 1);
2225   bool rc = false;
2226 
2227   if (::munmap(addr, size) != 0) {
2228     trcVerbose("failed (%d)\n", errno);
2229     rc = false;
2230   } else {
2231     trcVerbose("ok.");
2232     rc = true;
2233   }
2234 
2235   return rc;
2236 }
2237 
2238 static bool uncommit_mmaped_memory(char* addr, size_t size) {
2239 
2240   assert0(is_aligned_to(addr, os::vm_page_size()));
2241   assert0(is_aligned_to(size, os::vm_page_size()));
2242 
2243   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2244     addr, addr + size - 1);
2245   bool rc = false;
2246 
2247   // Uncommit mmap memory with msync MS_INVALIDATE.
2248   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2249     trcVerbose("failed (%d)\n", errno);
2250     rc = false;
2251   } else {
2252     trcVerbose("ok.");
2253     rc = true;
2254   }
2255 
2256   return rc;
2257 }
2258 
2259 // End: shared memory bookkeeping
2260 ////////////////////////////////////////////////////////////////////////////////////////////////////
2261 
2262 int os::vm_page_size() {
2263   // Seems redundant as all get out.
2264   assert(os::Aix::page_size() != -1, "must call os::init");
2265   return os::Aix::page_size();
2266 }
2267 
2268 // Aix allocates memory by pages.
2269 int os::vm_allocation_granularity() {
2270   assert(os::Aix::page_size() != -1, "must call os::init");
2271   return os::Aix::page_size();
2272 }
2273 
2274 #ifdef PRODUCT
2275 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2276                                     int err) {
2277   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2278           ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2279           strerror(err), err);
2280 }
2281 #endif
2282 
2283 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2284                                   const char* mesg) {
2285   assert(mesg != NULL, "mesg must be specified");
2286   if (!pd_commit_memory(addr, size, exec)) {
2287     // Add extra info in product mode for vm_exit_out_of_memory():
2288     PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2289     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
2290   }
2291 }
2292 
2293 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2294 
2295   assert0(is_aligned_to(addr, os::vm_page_size()));
2296   assert0(is_aligned_to(size, os::vm_page_size()));
2297 
2298   vmembk_t* const vmi = vmembk_find(addr);
2299   assert0(vmi);
2300   vmi->assert_is_valid_subrange(addr, size);
2301 
2302   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2303 
2304   return true;
2305 }
2306 
2307 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2308   return pd_commit_memory(addr, size, exec);
2309 }
2310 
2311 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2312                                   size_t alignment_hint, bool exec,
2313                                   const char* mesg) {
2314   // Alignment_hint is ignored on this OS.
2315   pd_commit_memory_or_exit(addr, size, exec, mesg);
2316 }
2317 
2318 bool os::pd_uncommit_memory(char* addr, size_t size) {
2319   assert0(is_aligned_to(addr, os::vm_page_size()));
2320   assert0(is_aligned_to(size, os::vm_page_size()));
2321 
2322   // Dynamically do different things for mmap/shmat.
2323   const vmembk_t* const vmi = vmembk_find(addr);
2324   assert0(vmi);
2325   vmi->assert_is_valid_subrange(addr, size);
2326 
2327   if (vmi->type == VMEM_SHMATED) {
2328     return uncommit_shmated_memory(addr, size);
2329   } else {
2330     return uncommit_mmaped_memory(addr, size);
2331   }
2332 }
2333 
2334 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2335   // Do not call this; no need to commit stack pages on AIX.
2336   ShouldNotReachHere();
2337   return true;
2338 }
2339 
2340 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2341   // Do not call this; no need to commit stack pages on AIX.
2342   ShouldNotReachHere();
2343   return true;
2344 }
2345 
2346 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2347 }
2348 
2349 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2350 }
2351 
2352 void os::numa_make_global(char *addr, size_t bytes) {
2353 }
2354 
2355 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2356 }
2357 
2358 bool os::numa_topology_changed() {
2359   return false;
2360 }
2361 
2362 size_t os::numa_get_groups_num() {
2363   return 1;
2364 }
2365 
2366 int os::numa_get_group_id() {
2367   return 0;
2368 }
2369 
2370 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2371   if (size > 0) {
2372     ids[0] = 0;
2373     return 1;
2374   }
2375   return 0;
2376 }
2377 
2378 bool os::get_page_info(char *start, page_info* info) {
2379   return false;
2380 }
2381 
2382 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2383   return end;
2384 }
2385 
// Reserves and attaches a shared memory segment.
// Will assert (in debug builds) if a wish address is given - see the comment below.
2388 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2389 
  // All other Unices do a mmap(MAP_FIXED) if the addr is given,
  // thereby clobbering old mappings at that place. That is probably
  // not intended, never used, and almost certainly an error were it
  // ever used this way (to attach at a specified address without
  // clobbering old mappings, an alternate API exists:
  // os::attempt_reserve_memory_at()).
  // Instead of mimicking the dangerous coding of the other platforms, here I
  // just ignore the request address (release) or assert (debug).
2398   assert0(requested_addr == NULL);
2399 
2400   // Always round to os::vm_page_size(), which may be larger than 4K.
2401   bytes = align_size_up(bytes, os::vm_page_size());
2402   const size_t alignment_hint0 =
2403     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2404 
2405   // In 4K mode always use mmap.
2406   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2407   if (os::vm_page_size() == SIZE_4K) {
2408     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2409   } else {
2410     if (bytes >= Use64KPagesThreshold) {
2411       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2412     } else {
2413       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2414     }
2415   }
2416 }
2417 
2418 bool os::pd_release_memory(char* addr, size_t size) {
2419 
2420   // Dynamically do different things for mmap/shmat.
2421   vmembk_t* const vmi = vmembk_find(addr);
2422   assert0(vmi);
2423 
2424   // Always round to os::vm_page_size(), which may be larger than 4K.
2425   size = align_size_up(size, os::vm_page_size());
2426   addr = (char *)align_ptr_up(addr, os::vm_page_size());
2427 
2428   bool rc = false;
2429   bool remove_bookkeeping = false;
2430   if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If the user wants to release the whole range, release the memory (shmdt).
    // - If the user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use the memory anymore (but we still
    //   use up page table space).
2436     vmi->assert_is_valid_subrange(addr, size);
2437     if (addr == vmi->addr && size == vmi->size) {
2438       rc = release_shmated_memory(addr, size);
2439       remove_bookkeeping = true;
2440     } else {
2441       rc = uncommit_shmated_memory(addr, size);
2442     }
2443   } else {
2444     // User may unmap partial regions but region has to be fully contained.
2445 #ifdef ASSERT
2446     vmi->assert_is_valid_subrange(addr, size);
2447 #endif
2448     rc = release_mmaped_memory(addr, size);
2449     remove_bookkeeping = true;
2450   }
2451 
2452   // update bookkeeping
2453   if (rc && remove_bookkeeping) {
2454     vmembk_remove(vmi);
2455   }
2456 
2457   return rc;
2458 }
2459 
2460 static bool checked_mprotect(char* addr, size_t size, int prot) {
2461 
2462   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2463   // not tell me if protection failed when trying to protect an un-protectable range.
2464   //
  // This means that if the memory was allocated using shmget/shmat, protection won't work
  // but mprotect will still return 0:
2467   //
2468   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2469 
  bool rc = ::mprotect(addr, size, prot) == 0;
2471 
2472   if (!rc) {
2473     const char* const s_errno = strerror(errno);
2474     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2475     return false;
2476   }
2477 
2478   // mprotect success check
2479   //
2480   // Mprotect said it changed the protection but can I believe it?
2481   //
2482   // To be sure I need to check the protection afterwards. Try to
2483   // read from protected memory and check whether that causes a segfault.
2484   //
2485   if (!os::Aix::xpg_sus_mode()) {
2486 
2487     if (CanUseSafeFetch32()) {
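      // SafeFetch32(adr, errval) returns *adr or, if the load faults, errval.
      // Probing twice with two different error values distinguishes a truly
      // unreadable page from readable memory that happens to contain one of
      // the probe values.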
2488 
      const bool read_protected =
        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
         SafeFetch32((int*)addr, 0x76543210) == 0x76543210);
2492 
2493       if (prot & PROT_READ) {
2494         rc = !read_protected;
2495       } else {
2496         rc = read_protected;
2497       }
2498     }
2499   }
2500   if (!rc) {
2501     assert(false, "mprotect failed.");
2502   }
2503   return rc;
2504 }
2505 
2506 // Set protections specified
2507 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2508   unsigned int p = 0;
2509   switch (prot) {
2510   case MEM_PROT_NONE: p = PROT_NONE; break;
2511   case MEM_PROT_READ: p = PROT_READ; break;
2512   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2513   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2514   default:
2515     ShouldNotReachHere();
2516   }
2517   // is_committed is unused.
2518   return checked_mprotect(addr, size, p);
2519 }
2520 
2521 bool os::guard_memory(char* addr, size_t size) {
2522   return checked_mprotect(addr, size, PROT_NONE);
2523 }
2524 
2525 bool os::unguard_memory(char* addr, size_t size) {
2526   return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2527 }
2528 
2529 // Large page support
2530 
2531 static size_t _large_page_size = 0;
2532 
2533 // Enable large page support if OS allows that.
2534 void os::large_page_init() {
2535   return; // Nothing to do. See query_multipage_support and friends.
2536 }
2537 
2538 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2539   // "exec" is passed in but not used. Creating the shared image for
2540   // the code cache doesn't have an SHM_X executable permission to check.
2541   Unimplemented();
2542   return 0;
2543 }
2544 
2545 bool os::release_memory_special(char* base, size_t bytes) {
2546   // Detaching the SHM segment will also delete it, see reserve_memory_special().
2547   Unimplemented();
2548   return false;
2549 }
2550 
2551 size_t os::large_page_size() {
2552   return _large_page_size;
2553 }
2554 
2555 bool os::can_commit_large_page_memory() {
2556   // Does not matter, we do not support huge pages.
2557   return false;
2558 }
2559 
2560 bool os::can_execute_large_page_memory() {
2561   // Does not matter, we do not support huge pages.
2562   return false;
2563 }
2564 
2565 // Reserve memory at an arbitrary address, only if that area is
2566 // available (and not reserved for something else).
2567 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2569 
2570   // Always round to os::vm_page_size(), which may be larger than 4K.
2571   bytes = align_size_up(bytes, os::vm_page_size());
2572 
2573   // In 4K mode always use mmap.
2574   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2575   if (os::vm_page_size() == SIZE_4K) {
2576     return reserve_mmaped_memory(bytes, requested_addr, 0);
2577   } else {
2578     if (bytes >= Use64KPagesThreshold) {
2579       return reserve_shmated_memory(bytes, requested_addr, 0);
2580     } else {
2581       return reserve_mmaped_memory(bytes, requested_addr, 0);
2582     }
2583   }
2584 
2585   return addr;
2586 }
2587 
2588 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2589   return ::read(fd, buf, nBytes);
2590 }
2591 
2592 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2593   return ::pread(fd, buf, nBytes, offset);
2594 }
2595 
2596 void os::naked_short_sleep(jlong ms) {
2597   struct timespec req;
2598 
  assert(ms < 1000, "Un-interruptible sleep, short time use only");
  req.tv_sec = 0;
  if (ms > 0) {
    req.tv_nsec = (ms % 1000) * 1000000;
  } else {
    req.tv_nsec = 1;
  }
2607 
2608   nanosleep(&req, NULL);
2609 
2610   return;
2611 }
2612 
2613 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2614 void os::infinite_sleep() {
2615   while (true) {    // sleep forever ...
2616     ::sleep(100);   // ... 100 seconds at a time
2617   }
2618 }
2619 
2620 // Used to convert frequent JVM_Yield() to nops
2621 bool os::dont_yield() {
2622   return DontYieldALot;
2623 }
2624 
2625 void os::naked_yield() {
2626   sched_yield();
2627 }
2628 
2629 ////////////////////////////////////////////////////////////////////////////////
2630 // thread priority support
2631 
2632 // From AIX manpage to pthread_setschedparam
2633 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2634 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2635 //
2636 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2637 // range from 40 to 80, where 40 is the least favored priority and 80
2638 // is the most favored."
2639 //
2640 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2641 // scheduling there; however, this still leaves iSeries.)
2642 //
2643 // We use the same values for AIX and PASE.
2644 int os::java_to_os_priority[CriticalPriority + 1] = {
2645   54,             // 0 Entry should never be used
2646 
2647   55,             // 1 MinPriority
2648   55,             // 2
2649   56,             // 3
2650 
2651   56,             // 4
2652   57,             // 5 NormPriority
2653   57,             // 6
2654 
2655   58,             // 7
2656   58,             // 8
2657   59,             // 9 NearMaxPriority
2658 
2659   60,             // 10 MaxPriority
2660 
2661   60              // 11 CriticalPriority
2662 };
2663 
2664 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2665   if (!UseThreadPriorities) return OS_OK;
2666   pthread_t thr = thread->osthread()->pthread_id();
2667   int policy = SCHED_OTHER;
2668   struct sched_param param;
2669   param.sched_priority = newpri;
2670   int ret = pthread_setschedparam(thr, policy, &param);
2671 
2672   if (ret != 0) {
2673     trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2674         (int)thr, newpri, ret, strerror(ret));
2675   }
2676   return (ret == 0) ? OS_OK : OS_ERR;
2677 }
2678 
2679 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2680   if (!UseThreadPriorities) {
2681     *priority_ptr = java_to_os_priority[NormPriority];
2682     return OS_OK;
2683   }
2684   pthread_t thr = thread->osthread()->pthread_id();
2685   int policy = SCHED_OTHER;
2686   struct sched_param param;
2687   int ret = pthread_getschedparam(thr, &policy, &param);
2688   *priority_ptr = param.sched_priority;
2689 
2690   return (ret == 0) ? OS_OK : OS_ERR;
2691 }
2692 
2693 // Hint to the underlying OS that a task switch would not be good.
2694 // Void return because it's a hint and can fail.
2695 void os::hint_no_preempt() {}
2696 
2697 ////////////////////////////////////////////////////////////////////////////////
2698 // suspend/resume support
2699 
2700 //  the low-level signal-based suspend/resume support is a remnant from the
2701 //  old VM-suspension that used to be for java-suspension, safepoints etc,
2702 //  within hotspot. Now there is a single use-case for this:
2703 //    - calling get_thread_pc() on the VMThread by the flat-profiler task
2704 //      that runs in the watcher thread.
2705 //  The remaining code is greatly simplified from the more general suspension
2706 //  code that used to be used.
2707 //
2708 //  The protocol is quite simple:
2709 //  - suspend:
2710 //      - sends a signal to the target thread
2711 //      - polls the suspend state of the osthread using a yield loop
2712 //      - target thread signal handler (SR_handler) sets suspend state
2713 //        and blocks in sigsuspend until continued
2714 //  - resume:
2715 //      - sets target osthread state to continue
2716 //      - sends signal to end the sigsuspend loop in the SR_handler
2717 //
2718 //  Note that the SR_lock plays no role in this suspend/resume protocol.
2719 //
2720 
2721 static void resume_clear_context(OSThread *osthread) {
2722   osthread->set_ucontext(NULL);
2723   osthread->set_siginfo(NULL);
2724 }
2725 
2726 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2727   osthread->set_ucontext(context);
2728   osthread->set_siginfo(siginfo);
2729 }
2730 
2731 //
2732 // Handler function invoked when a thread's execution is suspended or
2733 // resumed. We have to be careful that only async-safe functions are
2734 // called here (Note: most pthread functions are not async safe and
2735 // should be avoided.)
2736 //
// Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal handler
// from being run. libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal.
2742 //
2743 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
2744 //
2745 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2746   // Save and restore errno to avoid confusing native code with EINTR
2747   // after sigsuspend.
2748   int old_errno = errno;
2749 
2750   Thread* thread = Thread::current();
2751   OSThread* osthread = thread->osthread();
2752   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2753 
2754   os::SuspendResume::State current = osthread->sr.state();
2755   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2756     suspend_save_context(osthread, siginfo, context);
2757 
2758     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2759     os::SuspendResume::State state = osthread->sr.suspended();
2760     if (state == os::SuspendResume::SR_SUSPENDED) {
2761       sigset_t suspend_set;  // signals for sigsuspend()
2762 
2763       // get current set of blocked signals and unblock resume signal
2764       pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2765       sigdelset(&suspend_set, SR_signum);
2766 
2767       // wait here until we are resumed
2768       while (1) {
2769         sigsuspend(&suspend_set);
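
        // sigsuspend() may return for any unblocked signal, not just for
        // SR_signum; only leave the loop once the state machine has actually
        // switched to RUNNING.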
2770 
2771         os::SuspendResume::State result = osthread->sr.running();
2772         if (result == os::SuspendResume::SR_RUNNING) {
2773           break;
2774         }
2775       }
2776 
2777     } else if (state == os::SuspendResume::SR_RUNNING) {
2778       // request was cancelled, continue
2779     } else {
2780       ShouldNotReachHere();
2781     }
2782 
2783     resume_clear_context(osthread);
2784   } else if (current == os::SuspendResume::SR_RUNNING) {
2785     // request was cancelled, continue
2786   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2787     // ignore
2788   } else {
2789     ShouldNotReachHere();
2790   }
2791 
2792   errno = old_errno;
2793 }
2794 
2795 static int SR_initialize() {
2796   struct sigaction act;
2797   char *s;
2798   // Get signal number to use for suspend/resume
2799   if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
2800     int sig = ::strtol(s, 0, 10);
    if (sig > 0 && sig < NSIG) {
2802       SR_signum = sig;
2803     }
2804   }
2805 
2806   assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
2807         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
2808 
2809   sigemptyset(&SR_sigset);
2810   sigaddset(&SR_sigset, SR_signum);
2811 
2812   // Set up signal handler for suspend/resume.
2813   act.sa_flags = SA_RESTART|SA_SIGINFO;
2814   act.sa_handler = (void (*)(int)) SR_handler;
2815 
2816   // SR_signum is blocked by default.
2817   // 4528190 - We also need to block pthread restart signal (32 on all
2818   // supported Linux platforms). Note that LinuxThreads need to block
2819   // this signal for all threads to work properly. So we don't have
2820   // to use hard-coded signal number when setting up the mask.
2821   pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
2822 
2823   if (sigaction(SR_signum, &act, 0) == -1) {
2824     return -1;
2825   }
2826 
2827   // Save signal flag
2828   os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
2829   return 0;
2830 }
2831 
2832 static int SR_finalize() {
2833   return 0;
2834 }
2835 
2836 static int sr_notify(OSThread* osthread) {
2837   int status = pthread_kill(osthread->pthread_id(), SR_signum);
2838   assert_status(status == 0, status, "pthread_kill");
2839   return status;
2840 }
2841 
2842 // "Randomly" selected value for how long we want to spin
2843 // before bailing out on suspending a thread, also how often
2844 // we send a signal to a thread we want to resume
2845 static const int RANDOMLY_LARGE_INTEGER = 1000000;
2846 static const int RANDOMLY_LARGE_INTEGER2 = 100;
2847 
2848 // returns true on success and false on error - really an error is fatal
2849 // but this seems the normal response to library errors
2850 static bool do_suspend(OSThread* osthread) {
2851   assert(osthread->sr.is_running(), "thread should be running");
2852   // mark as suspended and send signal
2853 
2854   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
2855     // failed to switch, state wasn't running?
2856     ShouldNotReachHere();
2857     return false;
2858   }
2859 
2860   if (sr_notify(osthread) != 0) {
2861     // try to cancel, switch to running
2862 
2863     os::SuspendResume::State result = osthread->sr.cancel_suspend();
2864     if (result == os::SuspendResume::SR_RUNNING) {
2865       // cancelled
2866       return false;
2867     } else if (result == os::SuspendResume::SR_SUSPENDED) {
2868       // somehow managed to suspend
2869       return true;
2870     } else {
2871       ShouldNotReachHere();
2872       return false;
2873     }
2874   }
2875 
2876   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
2877 
2878   for (int n = 0; !osthread->sr.is_suspended(); n++) {
2879     for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
2880       os::naked_yield();
2881     }
2882 
2883     // timeout, try to cancel the request
2884     if (n >= RANDOMLY_LARGE_INTEGER) {
2885       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
2886       if (cancelled == os::SuspendResume::SR_RUNNING) {
2887         return false;
2888       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
2889         return true;
2890       } else {
2891         ShouldNotReachHere();
2892         return false;
2893       }
2894     }
2895   }
2896 
2897   guarantee(osthread->sr.is_suspended(), "Must be suspended");
2898   return true;
2899 }
2900 
2901 static void do_resume(OSThread* osthread) {
2902   //assert(osthread->sr.is_suspended(), "thread should be suspended");
2903 
2904   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
2905     // failed to switch to WAKEUP_REQUEST
2906     ShouldNotReachHere();
2907     return;
2908   }
2909 
2910   while (!osthread->sr.is_running()) {
2911     if (sr_notify(osthread) == 0) {
2912       for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
2913         for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
2914           os::naked_yield();
2915         }
2916       }
2917     } else {
2918       ShouldNotReachHere();
2919     }
2920   }
2921 
2922   guarantee(osthread->sr.is_running(), "Must be running!");
2923 }
2924 
2925 ///////////////////////////////////////////////////////////////////////////////////
2926 // signal handling (except suspend/resume)
2927 
2928 // This routine may be used by user applications as a "hook" to catch signals.
2929 // The user-defined signal handler must pass unrecognized signals to this
2930 // routine, and if it returns true (non-zero), then the signal handler must
2931 // return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
2934 //
2935 // If this routine returns false, it is OK to call it again. This allows
2936 // the user-defined signal handler to perform checks either before or after
2937 // the VM performs its own checks. Naturally, the user code would be making
2938 // a serious error if it tried to handle an exception (such as a null check
2939 // or breakpoint) that the VM was generating for its own correct operation.
2940 //
2941 // This routine may recognize any of the following kinds of signals:
2942 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2943 // It should be consulted by handlers for any of those signals.
2944 //
2945 // The caller of this routine must pass in the three arguments supplied
2946 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
2947 // field of the structure passed to sigaction(). This routine assumes that
2948 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2949 //
2950 // Note that the VM will print warnings if it detects conflicting signal
2951 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2952 //
2953 extern "C" JNIEXPORT int
2954 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2955 
// Set thread signal mask (for some reason on AIX sigthreadmask() seems
// to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same).
2959 bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
2960   const int rc = ::pthread_sigmask(how, set, oset);
2961   // return value semantics differ slightly for error case:
  // pthread_sigmask returns the error number, sigthreadmask returns -1 and sets global errno
  // (so pthread_sigmask is more thread-safe for error handling).
  // But success is always 0.
  return rc == 0;
2966 }
2967 
2968 // Function to unblock all signals which are, according
2969 // to POSIX, typical program error signals. If they happen while being blocked,
2970 // they typically will bring down the process immediately.
2971 bool unblock_program_error_signals() {
2972   sigset_t set;
2973   ::sigemptyset(&set);
2974   ::sigaddset(&set, SIGILL);
2975   ::sigaddset(&set, SIGBUS);
2976   ::sigaddset(&set, SIGFPE);
2977   ::sigaddset(&set, SIGSEGV);
2978   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2979 }
2980 
2981 // Renamed from 'signalHandler' to avoid collision with other shared libs.
2982 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2983   assert(info != NULL && uc != NULL, "it must be old kernel");
2984 
2985   // Never leave program error signals blocked;
2986   // on all our platforms they would bring down the process immediately when
2987   // getting raised while being blocked.
2988   unblock_program_error_signals();
2989 
2990   JVM_handle_aix_signal(sig, info, uc, true);
2991 }
2992 
2993 // This boolean allows users to forward their own non-matching signals
2994 // to JVM_handle_aix_signal, harmlessly.
2995 bool os::Aix::signal_handlers_are_installed = false;
2996 
2997 // For signal-chaining
2998 struct sigaction os::Aix::sigact[MAXSIGNUM];
2999 unsigned int os::Aix::sigs = 0;
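// Bit n of 'sigs' is set if a pre-installed handler for signal n has been
// saved in sigact[] (see save_preinstalled_handler() / get_preinstalled_handler()).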
3000 bool os::Aix::libjsig_is_loaded = false;
3001 typedef struct sigaction *(*get_signal_t)(int);
3002 get_signal_t os::Aix::get_signal_action = NULL;
3003 
3004 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
3005   struct sigaction *actp = NULL;
3006 
3007   if (libjsig_is_loaded) {
3008     // Retrieve the old signal handler from libjsig
3009     actp = (*get_signal_action)(sig);
3010   }
3011   if (actp == NULL) {
3012     // Retrieve the preinstalled signal handler from jvm
3013     actp = get_preinstalled_handler(sig);
3014   }
3015 
3016   return actp;
3017 }
3018 
3019 static bool call_chained_handler(struct sigaction *actp, int sig,
3020                                  siginfo_t *siginfo, void *context) {
3021   // Call the old signal handler
3022   if (actp->sa_handler == SIG_DFL) {
3023     // It's more reasonable to let jvm treat it as an unexpected exception
3024     // instead of taking the default action.
3025     return false;
3026   } else if (actp->sa_handler != SIG_IGN) {
3027     if ((actp->sa_flags & SA_NODEFER) == 0) {
      // Automatically block the signal.
3029       sigaddset(&(actp->sa_mask), sig);
3030     }
3031 
3032     sa_handler_t hand = NULL;
3033     sa_sigaction_t sa = NULL;
3034     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3035     // retrieve the chained handler
3036     if (siginfo_flag_set) {
3037       sa = actp->sa_sigaction;
3038     } else {
3039       hand = actp->sa_handler;
3040     }
3041 
3042     if ((actp->sa_flags & SA_RESETHAND) != 0) {
3043       actp->sa_handler = SIG_DFL;
3044     }
3045 
3046     // try to honor the signal mask
3047     sigset_t oset;
3048     pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3049 
3050     // call into the chained handler
3051     if (siginfo_flag_set) {
3052       (*sa)(sig, siginfo, context);
3053     } else {
3054       (*hand)(sig);
3055     }
3056 
3057     // restore the signal mask
3058     pthread_sigmask(SIG_SETMASK, &oset, 0);
3059   }
3060   // Tell jvm's signal handler the signal is taken care of.
3061   return true;
3062 }
3063 
3064 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3065   bool chained = false;
3066   // signal-chaining
3067   if (UseSignalChaining) {
3068     struct sigaction *actp = get_chained_signal_action(sig);
3069     if (actp != NULL) {
3070       chained = call_chained_handler(actp, sig, siginfo, context);
3071     }
3072   }
3073   return chained;
3074 }
3075 
3076 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3077   if ((((unsigned int)1 << sig) & sigs) != 0) {
3078     return &sigact[sig];
3079   }
3080   return NULL;
3081 }
3082 
3083 void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3084   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3085   sigact[sig] = oldAct;
3086   sigs |= (unsigned int)1 << sig;
3087 }
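
// For illustration: 'sigs' is a plain bitmask in which bit n marks a saved
// pre-installed handler for signal n. For example, after
// save_preinstalled_handler(SIGSEGV, act), with SIGSEGV being signal 11 on AIX,
// bit (1U << 11) is set in 'sigs', sigact[11] holds the old action, and
// get_preinstalled_handler(SIGSEGV) returns &sigact[11].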
3088 
// For diagnostics.
3090 int os::Aix::sigflags[MAXSIGNUM];
3091 
3092 int os::Aix::get_our_sigflags(int sig) {
3093   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3094   return sigflags[sig];
3095 }
3096 
3097 void os::Aix::set_our_sigflags(int sig, int flags) {
3098   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3099   sigflags[sig] = flags;
3100 }
3101 
3102 void os::Aix::set_signal_handler(int sig, bool set_installed) {
3103   // Check for overwrite.
3104   struct sigaction oldAct;
3105   sigaction(sig, (struct sigaction*)NULL, &oldAct);
3106 
3107   void* oldhand = oldAct.sa_sigaction
3108     ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3109     : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3110   // Renamed 'signalHandler' to avoid collision with other shared libs.
3111   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3112       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3113       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3114     if (AllowUserSignalHandlers || !set_installed) {
3115       // Do not overwrite; user takes responsibility to forward to us.
3116       return;
3117     } else if (UseSignalChaining) {
3118       // save the old handler in jvm
3119       save_preinstalled_handler(sig, oldAct);
3120       // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
3122     } else {
3123       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
3124                     "%#lx for signal %d.", (long)oldhand, sig));
3125     }
3126   }
3127 
3128   struct sigaction sigAct;
3129   sigfillset(&(sigAct.sa_mask));
3130   if (!set_installed) {
3131     sigAct.sa_handler = SIG_DFL;
3132     sigAct.sa_flags = SA_RESTART;
3133   } else {
3134     // Renamed 'signalHandler' to avoid collision with other shared libs.
3135     sigAct.sa_sigaction = javaSignalHandler;
3136     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3137   }
  // Save the flags we set, for later diagnostics.
3139   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3140   sigflags[sig] = sigAct.sa_flags;
3141 
3142   int ret = sigaction(sig, &sigAct, &oldAct);
3143   assert(ret == 0, "check");
3144 
3145   void* oldhand2 = oldAct.sa_sigaction
3146                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3147                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3148   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3149 }
3150 
// Install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
3153 void os::Aix::install_signal_handlers() {
3154   if (!signal_handlers_are_installed) {
3155     signal_handlers_are_installed = true;
3156 
3157     // signal-chaining
3158     typedef void (*signal_setting_t)();
3159     signal_setting_t begin_signal_setting = NULL;
3160     signal_setting_t end_signal_setting = NULL;
3161     begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3162                              dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3163     if (begin_signal_setting != NULL) {
3164       end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3165                              dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3166       get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3167                             dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3168       libjsig_is_loaded = true;
3169       assert(UseSignalChaining, "should enable signal-chaining");
3170     }
3171     if (libjsig_is_loaded) {
3172       // Tell libjsig jvm is setting signal handlers
3173       (*begin_signal_setting)();
3174     }
3175 
3176     set_signal_handler(SIGSEGV, true);
3177     set_signal_handler(SIGPIPE, true);
3178     set_signal_handler(SIGBUS, true);
3179     set_signal_handler(SIGILL, true);
3180     set_signal_handler(SIGFPE, true);
3181     set_signal_handler(SIGTRAP, true);
3182     set_signal_handler(SIGXFSZ, true);
3183     set_signal_handler(SIGDANGER, true);
3184 
3185     if (libjsig_is_loaded) {
3186       // Tell libjsig jvm finishes setting signal handlers.
3187       (*end_signal_setting)();
3188     }
3189 
    // We do not activate the signal checker if libjsig is in place; we trust
    // ourselves, and if a user signal handler is installed, all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
3193     if (CheckJNICalls) {
3194       if (libjsig_is_loaded) {
3195         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3196         check_signals = false;
3197       }
3198       if (AllowUserSignalHandlers) {
3199         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3200         check_signals = false;
3201       }
3202       // Need to initialize check_signal_done.
3203       ::sigemptyset(&check_signal_done);
3204     }
3205   }
3206 }
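
// For reference (an illustrative sketch; consult the AIX documentation):
// signal chaining via libjsig is typically activated by preloading the library
// before starting the VM. On AIX, LDR_PRELOAD64 plays the role LD_PRELOAD has
// elsewhere:
//
//   export LDR_PRELOAD64=libjsig.so    # 64-bit processes
//   java MyApp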
3207 
3208 static const char* get_signal_handler_name(address handler,
3209                                            char* buf, int buflen) {
3210   int offset;
3211   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3212   if (found) {
3213     // skip directory names
3214     const char *p1, *p2;
3215     p1 = buf;
3216     size_t len = strlen(os::file_separator());
3217     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    // The way os::dll_address_to_library_name is implemented on Aix
    // right now, it always returns -1 for the offset, which is not
    // terribly informative. Will fix that. For now, omit the offset.
3222     jio_snprintf(buf, buflen, "%s", p1);
3223   } else {
3224     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3225   }
3226   return buf;
3227 }
3228 
3229 static void print_signal_handler(outputStream* st, int sig,
3230                                  char* buf, size_t buflen) {
3231   struct sigaction sa;
3232   sigaction(sig, NULL, &sa);
3233 
3234   st->print("%s: ", os::exception_name(sig, buf, buflen));
3235 
3236   address handler = (sa.sa_flags & SA_SIGINFO)
3237     ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3238     : CAST_FROM_FN_PTR(address, sa.sa_handler);
3239 
3240   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3241     st->print("SIG_DFL");
3242   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3243     st->print("SIG_IGN");
3244   } else {
3245     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3246   }
3247 
3248   // Print readable mask.
3249   st->print(", sa_mask[0]=");
3250   os::Posix::print_signal_set_short(st, &sa.sa_mask);
3251 
  // Maybe the handler was reset by VMError?
  address rh = VMError::get_resetted_sighandler(sig);
3254   if (rh != NULL) {
3255     handler = rh;
3256     sa.sa_flags = VMError::get_resetted_sigflags(sig);
3257   }
3258 
3259   // Print textual representation of sa_flags.
3260   st->print(", sa_flags=");
3261   os::Posix::print_sa_flags(st, sa.sa_flags);
3262 
3263   // Check: is it our handler?
3264   if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3265       handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
3266     // It is our signal handler.
    // Check the flags; warn if they no longer match the ones we installed.
3268     if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags were changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
3271     }
3272   }
3273   st->cr();
3274 }
3275 
3276 #define DO_SIGNAL_CHECK(sig) \
3277   if (!sigismember(&check_signal_done, sig)) \
3278     os::Aix::check_signal_handler(sig)
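
// For example, DO_SIGNAL_CHECK(SIGSEGV); expands to
//
//   if (!sigismember(&check_signal_done, SIGSEGV))
//     os::Aix::check_signal_handler(SIGSEGV);
//
// so a signal is no longer re-checked once a mismatch has been reported for it.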
3279 
// This method is a periodic task used to check for misbehaving JNI
// applications under CheckJNI. Any additional periodic checks can be added here.
3282 
3283 void os::run_periodic_checks() {
3284 
3285   if (check_signals == false) return;
3286 
  // SEGV and BUS, if overridden, could potentially prevent the generation
  // of hs*.log files in the event of a crash. Debugging such a case can be
  // very challenging, so we absolutely check the following for good measure:
3291   DO_SIGNAL_CHECK(SIGSEGV);
3292   DO_SIGNAL_CHECK(SIGILL);
3293   DO_SIGNAL_CHECK(SIGFPE);
3294   DO_SIGNAL_CHECK(SIGBUS);
3295   DO_SIGNAL_CHECK(SIGPIPE);
3296   DO_SIGNAL_CHECK(SIGXFSZ);
3297   if (UseSIGTRAP) {
3298     DO_SIGNAL_CHECK(SIGTRAP);
3299   }
3300   DO_SIGNAL_CHECK(SIGDANGER);
3301 
  // ReduceSignalUsage allows the user to override these handlers;
  // see the comments at the very top of this file and in jvm_aix.h.
3304   if (!ReduceSignalUsage) {
3305     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3306     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3307     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3308     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3309   }
3310 
3311   DO_SIGNAL_CHECK(SR_signum);
3312   DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
3313 }
3314 
3315 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3316 
3317 static os_sigaction_t os_sigaction = NULL;
3318 
3319 void os::Aix::check_signal_handler(int sig) {
3320   char buf[O_BUFLEN];
3321   address jvmHandler = NULL;
3322 
3323   struct sigaction act;
3324   if (os_sigaction == NULL) {
3325     // only trust the default sigaction, in case it has been interposed
3326     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3327     if (os_sigaction == NULL) return;
3328   }
3329 
3330   os_sigaction(sig, (struct sigaction*)NULL, &act);
3331 
3332   address thisHandler = (act.sa_flags & SA_SIGINFO)
3333     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3334     : CAST_FROM_FN_PTR(address, act.sa_handler);
3335 
3336   switch(sig) {
3337   case SIGSEGV:
3338   case SIGBUS:
3339   case SIGFPE:
3340   case SIGPIPE:
3341   case SIGILL:
3342   case SIGXFSZ:
3343     // Renamed 'signalHandler' to avoid collision with other shared libs.
3344     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3345     break;
3346 
3347   case SHUTDOWN1_SIGNAL:
3348   case SHUTDOWN2_SIGNAL:
3349   case SHUTDOWN3_SIGNAL:
3350   case BREAK_SIGNAL:
3351     jvmHandler = (address)user_handler();
3352     break;
3353 
3354   case INTERRUPT_SIGNAL:
3355     jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
3356     break;
3357 
3358   default:
3359     if (sig == SR_signum) {
3360       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3361     } else {
3362       return;
3363     }
3364     break;
3365   }
3366 
3367   if (thisHandler != jvmHandler) {
3368     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3369     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3370     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3371     // No need to check this sig any longer
3372     sigaddset(&check_signal_done, sig);
    // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned to SIG_IGN by the shell.
3374     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3375       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3376                     exception_name(sig, buf, O_BUFLEN));
3377     }
3378   } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3379     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3380     tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
3381     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
3382     // No need to check this sig any longer
3383     sigaddset(&check_signal_done, sig);
3384   }
3385 
  // Dump all the signal handlers.
3387   if (sigismember(&check_signal_done, sig)) {
3388     print_signal_handlers(tty, buf, O_BUFLEN);
3389   }
3390 }
3391 
3392 extern bool signal_name(int signo, char* buf, size_t len);
3393 
3394 const char* os::exception_name(int exception_code, char* buf, size_t size) {
3395   if (0 < exception_code && exception_code <= SIGRTMAX) {
3396     // signal
3397     if (!signal_name(exception_code, buf, size)) {
3398       jio_snprintf(buf, size, "SIG%d", exception_code);
3399     }
3400     return buf;
3401   } else {
3402     return NULL;
3403   }
3404 }
3405 
// To install functions for the atexit() library call
3407 extern "C" {
3408   static void perfMemory_exit_helper() {
3409     perfMemory_exit();
3410   }
3411 }
3412 
// This is called _before_ most of the global arguments have been parsed.
3414 void os::init(void) {
  // This is basic; we want to know if that ever changes.
  // (The shared memory boundary is supposed to be 256M aligned.)
3417   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3418 
3419   // First off, we need to know whether we run on AIX or PASE, and
3420   // the OS level we run on.
3421   os::Aix::initialize_os_info();
3422 
3423   // Scan environment (SPEC1170 behaviour, etc).
3424   os::Aix::scan_environment();
3425 
3426   // Check which pages are supported by AIX.
3427   query_multipage_support();
3428 
3429   // Act like we only have one page size by eliminating corner cases which
3430   // we did not support very well anyway.
3431   // We have two input conditions:
3432   // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3433   //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3434   //    setting.
3435   //    Data segment page size is important for us because it defines the thread stack page
3436   //    size, which is needed for guard page handling, stack banging etc.
3437   // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3438   //    and should be allocated with 64k pages.
3439   //
3440   // So, we do the following:
3441   // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
3442   // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
3443   // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
3444   // 64k          no              --- AIX 5.2 ? ---
3445   // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)
3446 
  // We explicitly leave no option to change the page size, because only
  // upgrading would work, not downgrading (if the stack page size is 64K,
  // you cannot pretend it's 4K).
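
  // Illustrative only (nothing in the VM sets this): a launcher that was not
  // linked with datapsize=64k can still request 64K pages via the AIX loader
  // environment, e.g.
  //
  //   export LDR_CNTRL=DATAPSIZE=64K@TEXTPSIZE=64K@STACKPSIZE=64K
  //
  // which corresponds to the 64K rows of the table above.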
3449 
3450   if (g_multipage_support.datapsize == SIZE_4K) {
3451     // datapsize = 4K. Data segment, thread stacks are 4K paged.
3452     if (g_multipage_support.can_use_64K_pages) {
3453       // .. but we are able to use 64K pages dynamically.
3454       // This would be typical for java launchers which are not linked
3455       // with datapsize=64K (like, any other launcher but our own).
3456       //
3457       // In this case it would be smart to allocate the java heap with 64K
3458       // to get the performance benefit, and to fake 64k pages for the
3459       // data segment (when dealing with thread stacks).
3460       //
3461       // However, leave a possibility to downgrade to 4K, using
3462       // -XX:-Use64KPages.
3463       if (Use64KPages) {
3464         trcVerbose("64K page mode (faked for data segment)");
3465         Aix::_page_size = SIZE_64K;
3466       } else {
3467         trcVerbose("4K page mode (Use64KPages=off)");
3468         Aix::_page_size = SIZE_4K;
3469       }
3470     } else {
3471       // .. and not able to allocate 64k pages dynamically. Here, just
3472       // fall back to 4K paged mode and use mmap for everything.
3473       trcVerbose("4K page mode");
3474       Aix::_page_size = SIZE_4K;
3475       FLAG_SET_ERGO(bool, Use64KPages, false);
3476     }
3477   } else {
3478     // datapsize = 64k. Data segment, thread stacks are 64k paged.
3479     //   This normally means that we can allocate 64k pages dynamically.
3480     //   (There is one special case where this may be false: EXTSHM=on.
3481     //    but we decided to not support that mode).
3482     assert0(g_multipage_support.can_use_64K_pages);
3483     Aix::_page_size = SIZE_64K;
3484     trcVerbose("64K page mode");
3485     FLAG_SET_ERGO(bool, Use64KPages, true);
3486   }
3487 
  // Short-circuit the stack page size to the base page size; if that works, we
  // just remove the separate stack page size altogether.
3490   Aix::_stack_page_size = Aix::_page_size;
3491 
3492   // For now UseLargePages is just ignored.
3493   FLAG_SET_ERGO(bool, UseLargePages, false);
3494   _page_sizes[0] = 0;
3495 
3496   // debug trace
3497   trcVerbose("os::vm_page_size %s\n", describe_pagesize(os::vm_page_size()));
3498 
3499   // Next, we need to initialize libo4 and libperfstat libraries.
3500   if (os::Aix::on_pase()) {
3501     os::Aix::initialize_libo4();
3502   } else {
3503     os::Aix::initialize_libperfstat();
3504   }
3505 
3506   // Reset the perfstat information provided by ODM.
3507   if (os::Aix::on_aix()) {
3508     libperfstat::perfstat_reset();
3509   }
3510 
  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
3513   os::Aix::initialize_system_info();
3514 
3515   _initial_pid = getpid();
3516 
3517   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3518 
3519   init_random(1234567);
3520 
3521   ThreadCritical::initialize();
3522 
3523   // Main_thread points to the aboriginal thread.
3524   Aix::_main_thread = pthread_self();
3525 
3526   initial_time_count = os::elapsed_counter();
3527 
3528   // If the pagesize of the VM is greater than 8K determine the appropriate
3529   // number of initial guard pages. The user can change this with the
3530   // command line arguments, if needed.
3531   if (vm_page_size() > (int)Aix::vm_default_page_size()) {
3532     StackYellowPages = 1;
3533     StackRedPages = 1;
3534     StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
3535   }
3536 }
3537 
3538 // This is called _after_ the global arguments have been parsed.
3539 jint os::init_2(void) {
3540 
3541   trcVerbose("processor count: %d", os::_processor_count);
3542   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3543 
3544   // Initially build up the loaded dll map.
3545   LoadedLibraries::reload();
3546 
3547   const int page_size = Aix::page_size();
3548   const int map_size = page_size;
3549 
3550   address map_address = (address) MAP_FAILED;
3551   const int prot  = PROT_READ;
3552   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3553 
3554   // Use optimized addresses for the polling page,
3555   // e.g. map it to a special 32-bit address.
3556   if (OptimizePollingPageLocation) {
3557     // architecture-specific list of address wishes:
3558     address address_wishes[] = {
3559       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3560       // PPC64: all address wishes are non-negative 32 bit values where
3561       // the lower 16 bits are all zero. we can load these addresses
3562       // with a single ppc_lis instruction.
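      // For example (illustrative), 0x31000000 can be materialized with
      //   lis r3, 0x3100    (r3 = 0x3100 << 16 = 0x31000000)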
3563       (address) 0x30000000, (address) 0x31000000,
3564       (address) 0x32000000, (address) 0x33000000,
3565       (address) 0x40000000, (address) 0x41000000,
3566       (address) 0x42000000, (address) 0x43000000,
3567       (address) 0x50000000, (address) 0x51000000,
3568       (address) 0x52000000, (address) 0x53000000,
3569       (address) 0x60000000, (address) 0x61000000,
3570       (address) 0x62000000, (address) 0x63000000
3571     };
3572     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3573 
3574     // iterate over the list of address wishes:
3575     for (int i=0; i<address_wishes_length; i++) {
3576       // Try to map with current address wish.
      // AIX needs MAP_FIXED if we provide an address, and mmap will
      // fail if the address is already mapped.
3579       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3580                                      map_size, prot,
3581                                      flags | MAP_FIXED,
3582                                      -1, 0);
3583       if (Verbose) {
3584         fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3585                 address_wishes[i], map_address + (ssize_t)page_size);
3586       }
3587 
3588       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3589         // Map succeeded and map_address is at wished address, exit loop.
3590         break;
3591       }
3592 
3593       if (map_address != (address) MAP_FAILED) {
3594         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3595         ::munmap(map_address, map_size);
3596         map_address = (address) MAP_FAILED;
3597       }
3598       // Map failed, continue loop.
3599     }
3600   } // end OptimizePollingPageLocation
3601 
3602   if (map_address == (address) MAP_FAILED) {
3603     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3604   }
3605   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3606   os::set_polling_page(map_address);
3607 
3608   if (!UseMembar) {
3609     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    guarantee(mem_serialize_page != (address) MAP_FAILED, "mmap failed for memory serialize page");
3611     os::set_memory_serialize_page(mem_serialize_page);
3612 
3613 #ifndef PRODUCT
3614     if (Verbose && PrintMiscellaneous) {
3615       tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3616     }
3617 #endif
3618   }
3619 
3620   // initialize suspend/resume support - must do this before signal_sets_init()
3621   if (SR_initialize() != 0) {
3622     perror("SR_initialize failed");
3623     return JNI_ERR;
3624   }
3625 
3626   Aix::signal_sets_init();
3627   Aix::install_signal_handlers();
3628 
3629   // Check minimum allowable stack size for thread creation and to initialize
3630   // the java system classes, including StackOverflowError - depends on page
3631   // size. Add a page for compiler2 recursion in main thread.
3632   // Add in 2*BytesPerWord times page size to account for VM stack during
3633   // class initialization depending on 32 or 64 bit VM.
3634   os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3635             (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
3636                      (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
3637 
3638   os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
3639 
3640   size_t threadStackSizeInBytes = ThreadStackSize * K;
3641   if (threadStackSizeInBytes != 0 &&
3642       threadStackSizeInBytes < os::Aix::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small. "
                  "Specify at least %dk",
                  os::Aix::min_stack_allowed / K);
3646     return JNI_ERR;
3647   }
3648 
3649   // Make the stack size a multiple of the page size so that
3650   // the yellow/red zones can be guarded.
3651   // Note that this can be 0, if no default stacksize was set.
3652   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3653 
3654   Aix::libpthread_init();
3655 
3656   if (MaxFDLimit) {
    // Set the number of file descriptors to the maximum. Print an error
    // if getrlimit/setrlimit fails, but continue regardless.
3659     struct rlimit nbr_files;
3660     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3661     if (status != 0) {
3662       if (PrintMiscellaneous && (Verbose || WizardMode))
3663         perror("os::init_2 getrlimit failed");
3664     } else {
3665       nbr_files.rlim_cur = nbr_files.rlim_max;
3666       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3667       if (status != 0) {
3668         if (PrintMiscellaneous && (Verbose || WizardMode))
3669           perror("os::init_2 setrlimit failed");
3670       }
3671     }
3672   }
3673 
3674   if (PerfAllowAtExitRegistration) {
3675     // Only register atexit functions if PerfAllowAtExitRegistration is set.
3676     // Atexit functions can be delayed until process exit time, which
3677     // can be problematic for embedded VM situations. Embedded VMs should
3678     // call DestroyJavaVM() to assure that VM resources are released.
3679 
3680     // Note: perfMemory_exit_helper atexit function may be removed in
3681     // the future if the appropriate cleanup code can be added to the
3682     // VM_Exit VMOperation's doit method.
3683     if (atexit(perfMemory_exit_helper) != 0) {
3684       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3685     }
3686   }
3687 
3688   return JNI_OK;
3689 }
3690 
3691 // Mark the polling page as unreadable
3692 void os::make_polling_page_unreadable(void) {
3693   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3694     fatal("Could not disable polling page");
3695   }
3696 };
3697 
3698 // Mark the polling page as readable
3699 void os::make_polling_page_readable(void) {
3700   // Changed according to os_linux.cpp.
3701   if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3702     fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
3703   }
3704 };
3705 
3706 int os::active_processor_count() {
3707   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3708   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3709   return online_cpus;
3710 }
3711 
3712 void os::set_native_thread_name(const char *name) {
3713   // Not yet implemented.
3714   return;
3715 }
3716 
3717 bool os::distribute_processes(uint length, uint* distribution) {
3718   // Not yet implemented.
3719   return false;
3720 }
3721 
3722 bool os::bind_to_processor(uint processor_id) {
3723   // Not yet implemented.
3724   return false;
3725 }
3726 
3727 void os::SuspendedThreadTask::internal_do_task() {
3728   if (do_suspend(_thread->osthread())) {
3729     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3730     do_task(context);
3731     do_resume(_thread->osthread());
3732   }
3733 }
3734 
3735 class PcFetcher : public os::SuspendedThreadTask {
3736 public:
3737   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3738   ExtendedPC result();
3739 protected:
3740   void do_task(const os::SuspendedThreadTaskContext& context);
3741 private:
3742   ExtendedPC _epc;
3743 };
3744 
3745 ExtendedPC PcFetcher::result() {
3746   guarantee(is_done(), "task is not done yet.");
3747   return _epc;
3748 }
3749 
3750 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3751   Thread* thread = context.thread();
3752   OSThread* osthread = thread->osthread();
3753   if (osthread->ucontext() != NULL) {
3754     _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
3755   } else {
3756     // NULL context is unexpected, double-check this is the VMThread.
3757     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3758   }
3759 }
3760 
3761 // Suspends the target using the signal mechanism and then grabs the PC before
3762 // resuming the target. Used by the flat-profiler only
3763 ExtendedPC os::get_thread_pc(Thread* thread) {
3764   // Make sure that it is called by the watcher for the VMThread.
3765   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3766   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3767 
3768   PcFetcher fetcher(thread);
3769   fetcher.run();
3770   return fetcher.result();
3771 }
3772 
3773 ////////////////////////////////////////////////////////////////////////////////
3774 // debug support
3775 
3776 static address same_page(address x, address y) {
3777   intptr_t page_bits = -os::vm_page_size();
3778   if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
3779     return x;
3780   else if (x > y)
3781     return (address)(intptr_t(y) | ~page_bits) + 1;
3782   else
3783     return (address)(intptr_t(y) & page_bits);
3784 }
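
// Worked example with a 4K page size (page_bits == -4096, i.e. a ~0xFFF mask):
//   same_page((address)0x1800, (address)0x1F00) returns 0x1800 (same page);
//   same_page((address)0x2100, (address)0x1F00) returns 0x2000 (start of the
//   page following y); same_page((address)0x0F00, (address)0x1F00) returns
//   0x1000 (base of y's page).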
3785 
3786 bool os::find(address addr, outputStream* st) {
3787 
3788   st->print(PTR_FORMAT ": ", addr);
3789 
3790   const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
3791   if (lib) {
3792     lib->print(st);
3793     return true;
3794   } else {
3795     lib = LoadedLibraries::find_for_data_address(addr);
3796     if (lib) {
3797       lib->print(st);
3798       return true;
3799     } else {
3800       st->print_cr("(outside any module)");
3801     }
3802   }
3803 
3804   return false;
3805 }
3806 
3807 ////////////////////////////////////////////////////////////////////////////////
3808 // misc
3809 
3810 // This does not do anything on Aix. This is basically a hook for being
3811 // able to use structured exception handling (thread-local exception filters)
3812 // on, e.g., Win32.
3813 void
3814 os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
3815                          JavaCallArguments* args, Thread* thread) {
3816   f(value, method, args, thread);
3817 }
3818 
3819 void os::print_statistics() {
3820 }
3821 
3822 int os::message_box(const char* title, const char* message) {
3823   int i;
3824   fdStream err(defaultStream::error_fd());
3825   for (i = 0; i < 78; i++) err.print_raw("=");
3826   err.cr();
3827   err.print_raw_cr(title);
3828   for (i = 0; i < 78; i++) err.print_raw("-");
3829   err.cr();
3830   err.print_raw_cr(message);
3831   for (i = 0; i < 78; i++) err.print_raw("=");
3832   err.cr();
3833 
3834   char buf[16];
3835   // Prevent process from exiting upon "read error" without consuming all CPU
3836   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3837 
3838   return buf[0] == 'y' || buf[0] == 'Y';
3839 }
3840 
3841 int os::stat(const char *path, struct stat *sbuf) {
3842   char pathbuf[MAX_PATH];
3843   if (strlen(path) > MAX_PATH - 1) {
3844     errno = ENAMETOOLONG;
3845     return -1;
3846   }
3847   os::native_path(strcpy(pathbuf, path));
3848   return ::stat(pathbuf, sbuf);
3849 }
3850 
3851 bool os::check_heap(bool force) {
3852   return true;
3853 }
3854 
3855 // Is a (classpath) directory empty?
3856 bool os::dir_is_empty(const char* path) {
3857   DIR *dir = NULL;
3858   struct dirent *ptr;
3859 
3860   dir = opendir(path);
3861   if (dir == NULL) return true;
3862 
3863   /* Scan the directory */
3864   bool result = true;
3866   while (result && (ptr = ::readdir(dir)) != NULL) {
3867     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3868       result = false;
3869     }
3870   }
3871   closedir(dir);
3872   return result;
3873 }
3874 
3875 // This code originates from JDK's sysOpen and open64_w
3876 // from src/solaris/hpi/src/system_md.c
3877 
3878 int os::open(const char *path, int oflag, int mode) {
3879 
3880   if (strlen(path) > MAX_PATH - 1) {
3881     errno = ENAMETOOLONG;
3882     return -1;
3883   }
3884   int fd;
3885 
3886   fd = ::open64(path, oflag, mode);
3887   if (fd == -1) return -1;
3888 
3889   // If the open succeeded, the file might still be a directory.
3890   {
3891     struct stat64 buf64;
3892     int ret = ::fstat64(fd, &buf64);
3893     int st_mode = buf64.st_mode;
3894 
3895     if (ret != -1) {
3896       if ((st_mode & S_IFMT) == S_IFDIR) {
3897         errno = EISDIR;
3898         ::close(fd);
3899         return -1;
3900       }
3901     } else {
3902       ::close(fd);
3903       return -1;
3904     }
3905   }
3906 
3907   // All file descriptors that are opened in the JVM and not
3908   // specifically destined for a subprocess should have the
3909   // close-on-exec flag set. If we don't set it, then careless 3rd
3910   // party native code might fork and exec without closing all
3911   // appropriate file descriptors (e.g. as we do in closeDescriptors in
3912   // UNIXProcess.c), and this in turn might:
3913   //
3914   // - cause end-of-file to fail to be detected on some file
3915   //   descriptors, resulting in mysterious hangs, or
3916   //
3917   // - might cause an fopen in the subprocess to fail on a system
3918   //   suffering from bug 1085341.
3919   //
3920   // (Yes, the default setting of the close-on-exec flag is a Unix
3921   // design flaw.)
3922   //
3923   // See:
3924   // 1085341: 32-bit stdio routines should support file descriptors >255
3925   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
3926   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
3927 #ifdef FD_CLOEXEC
3928   {
3929     int flags = ::fcntl(fd, F_GETFD);
3930     if (flags != -1)
3931       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
3932   }
3933 #endif
3934 
3935   return fd;
3936 }
3937 
3938 // create binary file, rewriting existing file if required
3939 int os::create_binary_file(const char* path, bool rewrite_existing) {
3940   int oflags = O_WRONLY | O_CREAT;
3941   if (!rewrite_existing) {
3942     oflags |= O_EXCL;
3943   }
3944   return ::open64(path, oflags, S_IREAD | S_IWRITE);
3945 }
3946 
3947 // return current position of file pointer
3948 jlong os::current_file_offset(int fd) {
3949   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3950 }
3951 
3952 // move file pointer to the specified offset
3953 jlong os::seek_to_file_offset(int fd, jlong offset) {
3954   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3955 }
3956 
3957 // This code originates from JDK's sysAvailable
3958 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3959 
3960 int os::available(int fd, jlong *bytes) {
3961   jlong cur, end;
3962   int mode;
3963   struct stat64 buf64;
3964 
3965   if (::fstat64(fd, &buf64) >= 0) {
3966     mode = buf64.st_mode;
3967     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
3968       // XXX: is the following call interruptible? If so, this might
3969       // need to go through the INTERRUPT_IO() wrapper as for other
3970       // blocking, interruptible calls in this file.
3971       int n;
3972       if (::ioctl(fd, FIONREAD, &n) >= 0) {
3973         *bytes = n;
3974         return 1;
3975       }
3976     }
3977   }
3978   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
3979     return 0;
3980   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
3981     return 0;
3982   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
3983     return 0;
3984   }
3985   *bytes = end - cur;
3986   return 1;
3987 }
3988 
3989 // Map a block of memory.
3990 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3991                         char *addr, size_t bytes, bool read_only,
3992                         bool allow_exec) {
3993   int prot;
3994   int flags = MAP_PRIVATE;
3995 
3996   if (read_only) {
3997     prot = PROT_READ;
3998     flags = MAP_SHARED;
3999   } else {
4000     prot = PROT_READ | PROT_WRITE;
4001     flags = MAP_PRIVATE;
4002   }
4003 
4004   if (allow_exec) {
4005     prot |= PROT_EXEC;
4006   }
4007 
4008   if (addr != NULL) {
4009     flags |= MAP_FIXED;
4010   }
4011 
4012   // Allow anonymous mappings if 'fd' is -1.
4013   if (fd == -1) {
4014     flags |= MAP_ANONYMOUS;
4015   }
4016 
4017   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
4018                                      fd, file_offset);
4019   if (mapped_address == MAP_FAILED) {
4020     return NULL;
4021   }
4022   return mapped_address;
4023 }
4024 
4025 // Remap a block of memory.
4026 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4027                           char *addr, size_t bytes, bool read_only,
4028                           bool allow_exec) {
4029   // same as map_memory() on this OS
4030   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4031                         allow_exec);
4032 }
4033 
4034 // Unmap a block of memory.
4035 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4036   return munmap(addr, bytes) == 0;
4037 }
4038 
4039 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4040 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4041 // of a thread.
4042 //
4043 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4044 // the fast estimate available on the platform.
4045 
4046 jlong os::current_thread_cpu_time() {
4047   // return user + sys since the cost is the same
4048   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
4049   assert(n >= 0, "negative CPU time");
4050   return n;
4051 }
4052 
4053 jlong os::thread_cpu_time(Thread* thread) {
4054   // consistent with what current_thread_cpu_time() returns
4055   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4056   assert(n >= 0, "negative CPU time");
4057   return n;
4058 }
4059 
4060 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4061   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4062   assert(n >= 0, "negative CPU time");
4063   return n;
4064 }
4065 
4066 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4067   bool error = false;
4068 
4069   jlong sys_time = 0;
4070   jlong user_time = 0;
4071 
4072   // Reimplemented using getthrds64().
4073   //
4074   // Works like this:
4075   // For the thread in question, get the kernel thread id. Then get the
4076   // kernel thread statistics using that id.
4077   //
  // Of course, this only works when no pthread scheduling is used,
  // i.e. when there is a 1:1 relationship to kernel threads.
  // On AIX, see the AIXTHREAD_SCOPE variable.
4081 
4082   pthread_t pthtid = thread->osthread()->pthread_id();
4083 
4084   // retrieve kernel thread id for the pthread:
4085   tid64_t tid = 0;
4086   struct __pthrdsinfo pinfo;
  // I just love those otherworldly IBM APIs which force me to hand down
  // dummy buffers for stuff I don't care for...
4089   char dummy[1];
4090   int dummy_size = sizeof(dummy);
4091   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4092                           dummy, &dummy_size) == 0) {
4093     tid = pinfo.__pi_tid;
4094   } else {
4095     tty->print_cr("pthread_getthrds_np failed.");
4096     error = true;
4097   }
4098 
4099   // retrieve kernel timing info for that kernel thread
4100   if (!error) {
4101     struct thrdentry64 thrdentry;
4102     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4103       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4104       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4105     } else {
      tty->print_cr("getthrds64 failed.");
4107       error = true;
4108     }
4109   }
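
  // Conversion note (worked example): the ti_ru times are struct timeval
  // values (seconds plus microseconds), while we return nanoseconds, hence
  // tv_sec * 1000000000LL + tv_usec * 1000LL above. For instance, 2 s and
  // 500 us of user time yield 2 * 1000000000 + 500 * 1000 = 2000500000 ns.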
4110 
4111   if (p_sys_time) {
4112     *p_sys_time = sys_time;
4113   }
4114 
4115   if (p_user_time) {
4116     *p_user_time = user_time;
4117   }
4118 
4119   if (error) {
4120     return false;
4121   }
4122 
4123   return true;
4124 }
4125 
4126 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4127   jlong sys_time;
4128   jlong user_time;
4129 
4130   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4131     return -1;
4132   }
4133 
4134   return user_sys_cpu_time ? sys_time + user_time : user_time;
4135 }
4136 
4137 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4138   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4139   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4140   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4141   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4142 }
4143 
4144 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4145   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4146   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4147   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4148   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4149 }
4150 
4151 bool os::is_thread_cpu_time_supported() {
4152   return true;
4153 }
4154 
4155 // System loadavg support. Returns -1 if load average cannot be obtained.
4156 // For now just return the system wide load average (no processor sets).
4157 int os::loadavg(double values[], int nelem) {
4158 
4159   // Implemented using libperfstat on AIX.
4160 
4161   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4162   guarantee(values, "argument error");
4163 
4164   if (os::Aix::on_pase()) {
4165     Unimplemented();
4166     return -1;
4167   } else {
4168     // AIX: use libperfstat
4169     //
4170     // See also:
4171     // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4172     // /usr/include/libperfstat.h:
4173 
    // Use get_cpuinfo, which is already AIX-version independent.
4175     os::Aix::cpuinfo_t ci;
4176     if (os::Aix::get_cpuinfo(&ci)) {
4177       for (int i = 0; i < nelem; i++) {
4178         values[i] = ci.loadavg[i];
4179       }
4180     } else {
4181       return -1;
4182     }
4183     return nelem;
4184   }
4185 }
4186 
4187 void os::pause() {
4188   char filename[MAX_PATH];
4189   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4191   } else {
4192     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4193   }
4194 
4195   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4196   if (fd != -1) {
4197     struct stat buf;
4198     ::close(fd);
4199     while (::stat(filename, &buf) == 0) {
4200       (void)::poll(NULL, 0, 100);
4201     }
4202   } else {
4203     jio_fprintf(stderr,
4204       "Could not open pause file '%s', continuing immediately.\n", filename);
4205   }
4206 }
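
// Usage note (a sketch of the observable behavior): when the VM is started
// with the PauseAtStartup flag, it blocks in os::pause() until the pause file
// disappears, e.g.
//
//   $ java -XX:+PauseAtStartup MyApp &    # the VM creates ./vm.paused.<pid>
//   $ rm vm.paused.<pid>                  # the VM continues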
4207 
4208 bool os::Aix::is_primordial_thread() {
  return pthread_self() == (pthread_t)1;
4214 }
4215 
// OS recognition (PASE/AIX, OS level). Call this before calling any of the
// static accessors Aix::on_pase() or Aix::os_version().
4218 void os::Aix::initialize_os_info() {
4219 
4220   assert(_on_pase == -1 && _os_version == -1, "already called.");
4221 
4222   struct utsname uts;
4223   memset(&uts, 0, sizeof(uts));
4224   strcpy(uts.sysname, "?");
4225   if (::uname(&uts) == -1) {
4226     trc("uname failed (%d)", errno);
4227     guarantee(0, "Could not determine whether we run on AIX or PASE");
4228   } else {
4229     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4230                "node \"%s\" machine \"%s\"\n",
4231                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4232     const int major = atoi(uts.version);
4233     assert(major > 0, "invalid OS version");
4234     const int minor = atoi(uts.release);
4235     assert(minor > 0, "invalid OS release");
4236     _os_version = (major << 8) | minor;
4237     if (strcmp(uts.sysname, "OS400") == 0) {
4238       Unimplemented();
4239     } else if (strcmp(uts.sysname, "AIX") == 0) {
4240       // We run on AIX. We do not support versions older than AIX 5.3.
4241       _on_pase = 0;
4242       if (_os_version < 0x0503) {
4243         trc("AIX release older than AIX 5.3 not supported.");
4244         assert(false, "AIX release too old.");
4245       } else {
4246         trcVerbose("We run on AIX %d.%d\n", major, minor);
4247       }
4248     } else {
4249       assert(false, "unknown OS");
4250     }
4251   }
4252 
4253   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4254 } // end: os::Aix::initialize_os_info()
4255 
// Scan the environment for important settings which might affect the VM.
// Trace out settings. Warn about invalid settings and/or correct them.
//
// Must run after os::Aix::initialize_os_info().
4260 void os::Aix::scan_environment() {
4261 
4262   char* p;
4263   int rc;
4264 
  // Warn explicitly if EXTSHM=ON is used. That switch changes how
  // System V shared memory behaves. One effect is that the page size of
  // shared memory cannot be changed dynamically, effectively preventing
  // large pages from working.
4269   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4270   // recommendation is (in OSS notes) to switch it off.
4271   p = ::getenv("EXTSHM");
4272   if (Verbose) {
4273     fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4274   }
4275   if (p && strcasecmp(p, "ON") == 0) {
4276     fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4277     _extshm = 1;
4278   } else {
4279     _extshm = 0;
4280   }
4281 
4282   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4283   // Not tested, not supported.
4284   //
4285   // Note that it might be worth the trouble to test and to require it, if only to
4286   // get useful return codes for mprotect.
4287   //
4288   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4289   // exec() ? before loading the libjvm ? ....)
4290   p = ::getenv("XPG_SUS_ENV");
4291   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4292   if (p && strcmp(p, "ON") == 0) {
4293     _xpg_sus_mode = 1;
4294     trc("Unsupported setting: XPG_SUS_ENV=ON");
4295     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4296     // clobber address ranges. If we ever want to support that, we have to do some
4297     // testing first.
4298     guarantee(false, "XPG_SUS_ENV=ON not supported");
4299   } else {
4300     _xpg_sus_mode = 0;
4301   }
4302 
4303   // Switch off AIX internal (pthread) guard pages. This has
4304   // immediate effect for any pthread_create calls which follow.
4305   p = ::getenv("AIXTHREAD_GUARDPAGES");
4306   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4307   rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4308   guarantee(rc == 0, "");
4309 
4310 } // end: os::Aix::scan_environment()
4311 
4312 // PASE: initialize the libo4 library (AS400 PASE porting library).
4313 void os::Aix::initialize_libo4() {
4314   Unimplemented();
4315 }
4316 
// AIX: initialize the libperfstat library (we load it dynamically
// because it is only available on AIX).
4319 void os::Aix::initialize_libperfstat() {
4320 
4321   assert(os::Aix::on_aix(), "AIX only");
4322 
4323   if (!libperfstat::init()) {
4324     trc("libperfstat initialization failed.");
4325     assert(false, "libperfstat initialization failed");
4326   } else {
4327     if (Verbose) {
4328       fprintf(stderr, "libperfstat initialized.\n");
4329     }
4330   }
4331 } // end: os::Aix::initialize_libperfstat
4332 
4333 /////////////////////////////////////////////////////////////////////////////
4334 // thread stack
4335 
4336 // Function to query the current stack size using pthread_getthrds_np.
4337 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4338   // This only works when invoked on a pthread. As we agreed not to use
4339   // primordial threads anyway, I assert here.
4340   guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4341 
4342   // Information about this api can be found (a) in the pthread.h header and
4343   // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4344   //
  // The use of this API to find out the current stack is kind of undefined.
  // But after a lot of trying and asking IBM about it, I concluded that it is
  // safe enough for cases where I let the pthread library create its stacks.
  // For cases where I create my own stack and pass it to pthread_create, it
  // seems not to work (the returned stack size in that case is 0).
4350 
4351   pthread_t tid = pthread_self();
4352   struct __pthrdsinfo pinfo;
4353   char dummy[1]; // We only need this to satisfy the api and to not get E.
4354   int dummy_size = sizeof(dummy);
4355 
4356   memset(&pinfo, 0, sizeof(pinfo));
4357 
4358   const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4359                                      sizeof(pinfo), dummy, &dummy_size);
4360 
4361   if (rc != 0) {
4362     assert0(false);
4363     trcVerbose("pthread_getthrds_np failed (%d)", rc);
4364     return false;
4365   }
4366   guarantee0(pinfo.__pi_stackend);
4367 
4368   // The following can happen when invoking pthread_getthrds_np on a pthread running
4369   // on a user provided stack (when handing down a stack to pthread create, see
4370   // pthread_attr_setstackaddr).
4371   // Not sure what to do here - I feel inclined to forbid this use case completely.
4372   guarantee0(pinfo.__pi_stacksize);
4373 
4374   // Note: the pthread stack on AIX seems to look like this:
4375   //
4376   // ---------------------   real base ? at page border ?
4377   //
4378   //     pthread internal data, like ~2K, see also
4379   //     http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/thread_supp_tun_params.htm
4380   //
4381   // ---------------------   __pi_stackend - not page aligned, (xxxxF890)
4382   //
4383   //     stack
4384   //      ....
4385   //
4386   //     stack
4387   //
4388   // ---------------------   __pi_stackend  - __pi_stacksize
4389   //
4390   //     padding due to AIX guard pages (?) see AIXTHREAD_GUARDPAGES
4391   // ---------------------   __pi_stackaddr  (page aligned if AIXTHREAD_GUARDPAGES > 0)
4392   //
4393   //   AIX guard pages (?)
4394   //
4395 
4396   // So, the safe thing to do is to use the area from __pi_stackend to __pi_stackaddr;
4397   // __pi_stackend however is almost never page aligned.
4398   //
4399 
4400   if (p_stack_base) {
4401     (*p_stack_base) = (address) (pinfo.__pi_stackend);
4402   }
4403 
4404   if (p_stack_size) {
4405     (*p_stack_size) = pinfo.__pi_stackend - pinfo.__pi_stackaddr;
4406   }
4407 
4408   return true;
4409 }
4410 
4411 // Get the current stack base from the OS (actually, the pthread library).
4412 address os::current_stack_base() {
4413   address p;
4414   query_stack_dimensions(&p, 0);
4415   return p;
4416 }
4417 
4418 // Get the current stack size from the OS (actually, the pthread library).
4419 size_t os::current_stack_size() {
4420   size_t s;
4421   query_stack_dimensions(0, &s);
4422   return s;
4423 }
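
// Note the resulting convention: the usable stack ranges from
// current_stack_base() - current_stack_size() (== __pi_stackaddr) up to
// current_stack_base() (== __pi_stackend).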
4424 
4425 // Refer to the comments in os_solaris.cpp park-unpark.
4426 //
4427 // Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4428 // hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4429 // For specifics regarding the bug see GLIBC BUGID 261237 :
4430 //    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4431 // Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4432 // will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4433 // is used. (The simple C test-case provided in the GLIBC bug report manifests the
// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4435 // and monitorenter when we're using 1-0 locking. All those operations may result in
4436 // calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4437 // of libpthread avoids the problem, but isn't practical.
4438 //
4439 // Possible remedies:
4440 //
4441 // 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
4442 //      This is palliative and probabilistic, however. If the thread is preempted
4443 //      between the call to compute_abstime() and pthread_cond_timedwait(), more
4444 //      than the minimum period may have passed, and the abstime may be stale (in the
//      past), resulting in a hang. Using this technique reduces the odds of a hang
4446 //      but the JVM is still vulnerable, particularly on heavily loaded systems.
4447 //
4448 // 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4449 //      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
4450 //      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4451 //      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
4452 //      thread.
4453 //
4454 // 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
4455 //      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
4456 //      a timeout request to the chron thread and then blocking via pthread_cond_wait().
4457 //      This also works well. In fact it avoids kernel-level scalability impediments
4458 //      on certain platforms that don't handle lots of active pthread_cond_timedwait()
4459 //      timers in a graceful fashion.
4460 //
4461 // 4.   When the abstime value is in the past it appears that control returns
4462 //      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4463 //      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
4464 //      can avoid the problem by reinitializing the condvar -- by cond_destroy()
4465 //      followed by cond_init() -- after all calls to pthread_cond_timedwait().
4466 //      It may be possible to avoid reinitialization by checking the return
4467 //      value from pthread_cond_timedwait(). In addition to reinitializing the
4468 //      condvar we must establish the invariant that cond_signal() is only called
4469 //      within critical sections protected by the adjunct mutex. This prevents
4470 //      cond_signal() from "seeing" a condvar that's in the midst of being
4471 //      reinitialized or that is corrupt. Sadly, this invariant obviates the
4472 //      desirable signal-after-unlock optimization that avoids futile context switching.
4473 //
//      I'm also concerned that some versions of NPTL might allocate an auxiliary
//      structure when a condvar is used or initialized. cond_destroy() would
//      release the helper structure. Our reinitialize-after-timedwait fix would
//      put excessive stress on malloc/free and the locks protecting the C-heap.
4478 //
// We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
// It may be possible to refine (4) by checking the kernel and NPTL versions
// and only enabling the work-around for vulnerable environments.
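//
// For reference, a minimal sketch of remedy (4) as it is applied below in
// Parker::park(); the identifiers mirror that code and are illustrative only:
//
//   int status = pthread_cond_timedwait(_cond, _mutex, &absTime);
//   if (status != 0 && WorkAroundNPTLTimedWaitHang) {
//     // A timedwait with a stale abstime may leave the condvar corrupt;
//     // re-create it while still holding the adjunct mutex.
//     pthread_cond_destroy(_cond);
//     pthread_cond_init(_cond, NULL);
//   }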
4482 
4483 // utility to compute the abstime argument to timedwait:
4484 // millis is the relative timeout time
4485 // abstime will be the absolute timeout time
4486 // TODO: replace compute_abstime() with unpackTime()
4487 
4488 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4489   if (millis < 0) millis = 0;
4490   struct timeval now;
4491   int status = gettimeofday(&now, NULL);
4492   assert(status == 0, "gettimeofday");
4493   jlong seconds = millis / 1000;
4494   millis %= 1000;
4495   if (seconds > 50000000) { // see man cond_timedwait(3T)
4496     seconds = 50000000;
4497   }
4498   abstime->tv_sec = now.tv_sec  + seconds;
4499   long       usec = now.tv_usec + millis * 1000;
4500   if (usec >= 1000000) {
4501     abstime->tv_sec += 1;
4502     usec -= 1000000;
4503   }
4504   abstime->tv_nsec = usec * 1000;
4505   return abstime;
4506 }
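
// Worked example (illustrative values): with now = { tv_sec = 100,
// tv_usec = 900000 } and millis = 1500, we get seconds = 1 with a 500 ms
// remainder, so usec = 900000 + 500000 = 1400000 >= 1000000; after the
// carry, abstime = { tv_sec = 102, tv_nsec = 400000000 }.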
4507 
4508 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4509 // Conceptually TryPark() should be equivalent to park(0).
4510 
4511 int os::PlatformEvent::TryPark() {
4512   for (;;) {
4513     const int v = _Event;
4514     guarantee ((v == 0) || (v == 1), "invariant");
4515     if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4516   }
4517 }
4518 
4519 void os::PlatformEvent::park() {       // AKA "down()"
4520   // Invariant: Only the thread associated with the Event/PlatformEvent
4521   // may call park().
4522   // TODO: assert that _Assoc != NULL or _Assoc == Self
4523   int v;
4524   for (;;) {
4525     v = _Event;
4526     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4527   }
4528   guarantee (v >= 0, "invariant");
4529   if (v == 0) {
4530     // Do this the hard way by blocking ...
4531     int status = pthread_mutex_lock(_mutex);
4532     assert_status(status == 0, status, "mutex_lock");
4533     guarantee (_nParked == 0, "invariant");
4534     ++ _nParked;
4535     while (_Event < 0) {
4536       status = pthread_cond_wait(_cond, _mutex);
      // Note: pthread_cond_wait() should not return ETIMEDOUT, but we
      // tolerate it here defensively.
      assert_status(status == 0 || status == ETIMEDOUT, status, "cond_wait");
4538     }
4539     -- _nParked;
4540 
4541     // In theory we could move the ST of 0 into _Event past the unlock(),
4542     // but then we'd need a MEMBAR after the ST.
4543     _Event = 0;
4544     status = pthread_mutex_unlock(_mutex);
4545     assert_status(status == 0, status, "mutex_unlock");
4546   }
4547   guarantee (_Event >= 0, "invariant");
4548 }
4549 
4550 int os::PlatformEvent::park(jlong millis) {
4551   guarantee (_nParked == 0, "invariant");
4552 
4553   int v;
4554   for (;;) {
4555     v = _Event;
4556     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4557   }
4558   guarantee (v >= 0, "invariant");
4559   if (v != 0) return OS_OK;
4560 
4561   // We do this the hard way, by blocking the thread.
4562   // Consider enforcing a minimum timeout value.
4563   struct timespec abst;
4564   compute_abstime(&abst, millis);
4565 
4566   int ret = OS_TIMEOUT;
4567   int status = pthread_mutex_lock(_mutex);
4568   assert_status(status == 0, status, "mutex_lock");
4569   guarantee (_nParked == 0, "invariant");
4570   ++_nParked;
4571 
4572   // Object.wait(timo) will return because of
4573   // (a) notification
4574   // (b) timeout
4575   // (c) thread.interrupt
4576   //
4577   // Thread.interrupt and object.notify{All} both call Event::set.
4578   // That is, we treat thread.interrupt as a special case of notification.
4579   // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4580   // We assume all ETIME returns are valid.
4581   //
4582   // TODO: properly differentiate simultaneous notify+interrupt.
4583   // In that case, we should propagate the notify to another waiter.
4584 
4585   while (_Event < 0) {
4586     status = pthread_cond_timedwait(_cond, _mutex, &abst);
4587     assert_status(status == 0 || status == ETIMEDOUT,
4588                   status, "cond_timedwait");
4589     if (!FilterSpuriousWakeups) break;         // previous semantics
4590     if (status == ETIMEDOUT) break;
4591     // We consume and ignore EINTR and spurious wakeups.
4592   }
4593   --_nParked;
4594   if (_Event >= 0) {
4595      ret = OS_OK;
4596   }
4597   _Event = 0;
4598   status = pthread_mutex_unlock(_mutex);
4599   assert_status(status == 0, status, "mutex_unlock");
4600   assert (_nParked == 0, "invariant");
4601   return ret;
4602 }
4603 
4604 void os::PlatformEvent::unpark() {
4605   int v, AnyWaiters;
4606   for (;;) {
4607     v = _Event;
4608     if (v > 0) {
      // The LD of _Event could have been reordered or satisfied
      // by a read-aside from this processor's write buffer.
      // To avoid problems, execute a barrier and then
      // ratify the value.
4613       OrderAccess::fence();
4614       if (_Event == v) return;
4615       continue;
4616     }
4617     if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
4618   }
4619   if (v < 0) {
4620     // Wait for the thread associated with the event to vacate
4621     int status = pthread_mutex_lock(_mutex);
4622     assert_status(status == 0, status, "mutex_lock");
4623     AnyWaiters = _nParked;
4624 
    if (AnyWaiters != 0) {
      // Note: we signal *while still holding the mutex*. Remedy (4) above
      // requires that cond_signal() only be called inside the critical
      // section protected by the adjunct mutex, which forgoes the usual
      // signal-after-unlock optimization that avoids futile wakeups.
      status = pthread_cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
4632     status = pthread_mutex_unlock(_mutex);
4633     assert_status(status == 0, status, "mutex_unlock");
4634   }
4635 
  // Note that signalling _after_ dropping the lock would also be safe for
  // "immortal" Events and avoids a common class of futile wakeups. In rare
  // circumstances that can cause a thread to return prematurely from
  // cond_{timed}wait(), but the spurious wakeup is benign and the victim will
  // simply re-test the condition and re-park itself. Here we instead signal
  // while holding the mutex, as required by remedy (4) above.
4641 }
4642 
4643 
4644 // JSR166
4645 // -------------------------------------------------------
4646 
4647 //
4648 // The solaris and linux implementations of park/unpark are fairly
4649 // conservative for now, but can be improved. They currently use a
// mutex/condvar pair, plus a count.
4651 // Park decrements count if > 0, else does a condvar wait. Unpark
4652 // sets count to 1 and signals condvar. Only one thread ever waits
4653 // on the condvar. Contention seen when trying to park implies that someone
4654 // is unparking you, so don't wait. And spurious returns are fine, so there
4655 // is no need to track notifications.
4656 //
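// In pseudo-code (illustrative; the real implementations follow below):
//
//   park():   if (_counter > 0) { _counter = 0; return; }  // consume permit
//             else block on _cond until unpark() stores a permit
//   unpark(): lock(_mutex); _counter = 1; signal(_cond); unlock(_mutex);
//             (the signal/unlock order depends on WorkAroundNPTLTimedWaitHang)
//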
4657 
4658 #define MAX_SECS 100000000
4659 //
4660 // This code is common to linux and solaris and will be moved to a
4661 // common place in dolphin.
4662 //
4663 // The passed in time value is either a relative time in nanoseconds
4664 // or an absolute time in milliseconds. Either way it has to be unpacked
4665 // into suitable seconds and nanoseconds components and stored in the
4666 // given timespec structure.
// Given that time is a 64-bit value and the time_t used in the timespec is
// only a signed 32-bit value (except on 64-bit Linux), we have to watch for
// overflow if times far in the future are given. Further, on Solaris versions
// prior to 10 there is a restriction (see cond_timedwait) that the specified
// number of seconds, in abstime, is less than current_time + 100,000,000.
// As it will be 28 years before "now + 100000000" overflows, we can ignore
// that and just impose a hard limit on seconds using the value of
// "now + 100,000,000". This places a limit on the timeout of about 3.17
// years from "now".
4676 //
4677 
4678 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "unpackTime");
4680 
4681   struct timeval now;
4682   int status = gettimeofday(&now, NULL);
4683   assert(status == 0, "gettimeofday");
4684 
4685   time_t max_secs = now.tv_sec + MAX_SECS;
4686 
4687   if (isAbsolute) {
4688     jlong secs = time / 1000;
4689     if (secs > max_secs) {
4690       absTime->tv_sec = max_secs;
4691     }
4692     else {
4693       absTime->tv_sec = secs;
4694     }
4695     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4696   }
4697   else {
4698     jlong secs = time / NANOSECS_PER_SEC;
4699     if (secs >= MAX_SECS) {
4700       absTime->tv_sec = max_secs;
4701       absTime->tv_nsec = 0;
4702     }
4703     else {
4704       absTime->tv_sec = now.tv_sec + secs;
4705       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4706       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4707         absTime->tv_nsec -= NANOSECS_PER_SEC;
4708         ++absTime->tv_sec; // note: this must be <= max_secs
4709       }
4710     }
4711   }
4712   assert(absTime->tv_sec >= 0, "tv_sec < 0");
4713   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4714   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4715   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4716 }
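
// Worked example (illustrative values): for a relative timeout of 2.5
// seconds, time = 2500000000 ns, so secs = 2 and the remainder is
// 500000000 ns. With now = { tv_sec = 100, tv_usec = 600000 } this gives
// tv_nsec = 500000000 + 600000000 = 1100000000 >= NANOSECS_PER_SEC, so
// after the carry absTime = { tv_sec = 103, tv_nsec = 100000000 }.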
4717 
4718 void Parker::park(bool isAbsolute, jlong time) {
4719   // Optional fast-path check:
4720   // Return immediately if a permit is available.
4721   if (_counter > 0) {
4722     _counter = 0;
4723     OrderAccess::fence();
4724     return;
4725   }
4726 
4727   Thread* thread = Thread::current();
4728   assert(thread->is_Java_thread(), "Must be JavaThread");
4729   JavaThread *jt = (JavaThread *)thread;
4730 
4731   // Optional optimization -- avoid state transitions if there's an interrupt pending.
4732   // Check interrupt before trying to wait
4733   if (Thread::is_interrupted(thread, false)) {
4734     return;
4735   }
4736 
4737   // Next, demultiplex/decode time arguments
4738   timespec absTime;
4739   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
4740     return;
4741   }
4742   if (time > 0) {
4743     unpackTime(&absTime, isAbsolute, time);
4744   }
4745 
4746   // Enter safepoint region
4747   // Beware of deadlocks such as 6317397.
4748   // The per-thread Parker:: mutex is a classic leaf-lock.
4749   // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending, both the
  // ThreadBlockInVM() CTOR and DTOR may grab the Threads_lock.
4752   ThreadBlockInVM tbivm(jt);
4753 
  // Don't wait if we cannot get the lock, since interference arises from
  // unblocking. Also, check for a pending interrupt before trying to wait.
4756   if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4757     return;
4758   }
4759 
4760   int status;
4761   if (_counter > 0) { // no wait needed
4762     _counter = 0;
4763     status = pthread_mutex_unlock(_mutex);
4764     assert (status == 0, "invariant");
4765     OrderAccess::fence();
4766     return;
4767   }
4768 
4769 #ifdef ASSERT
4770   // Don't catch signals while blocked; let the running threads have the signals.
4771   // (This allows a debugger to break into the running thread.)
4772   sigset_t oldsigs;
4773   sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
4774   pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4775 #endif
4776 
4777   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4778   jt->set_suspend_equivalent();
4779   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4780 
4781   if (time == 0) {
4782     status = pthread_cond_wait (_cond, _mutex);
4783   } else {
4784     status = pthread_cond_timedwait (_cond, _mutex, &absTime);
4785     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4786       pthread_cond_destroy (_cond);
4787       pthread_cond_init    (_cond, NULL);
4788     }
4789   }
4790   assert_status(status == 0 || status == EINTR ||
4791                 status == ETIME || status == ETIMEDOUT,
4792                 status, "cond_timedwait");
4793 
4794 #ifdef ASSERT
4795   pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4796 #endif
4797 
4798   _counter = 0;
4799   status = pthread_mutex_unlock(_mutex);
4800   assert_status(status == 0, status, "invariant");
4801   // If externally suspended while waiting, re-suspend
4802   if (jt->handle_special_suspend_equivalent_condition()) {
4803     jt->java_suspend_self();
4804   }
4805 
4806   OrderAccess::fence();
4807 }
4808 
4809 void Parker::unpark() {
4810   int s, status;
4811   status = pthread_mutex_lock(_mutex);
4812   assert (status == 0, "invariant");
4813   s = _counter;
4814   _counter = 1;
4815   if (s < 1) {
4816     if (WorkAroundNPTLTimedWaitHang) {
4817       status = pthread_cond_signal (_cond);
4818       assert (status == 0, "invariant");
4819       status = pthread_mutex_unlock(_mutex);
4820       assert (status == 0, "invariant");
4821     } else {
4822       status = pthread_mutex_unlock(_mutex);
4823       assert (status == 0, "invariant");
4824       status = pthread_cond_signal (_cond);
4825       assert (status == 0, "invariant");
4826     }
4827   } else {
    status = pthread_mutex_unlock(_mutex);
4829     assert (status == 0, "invariant");
4830   }
4831 }
4832 
4833 extern char** environ;
4834 
4835 // Run the specified command in a separate process. Return its exit value,
4836 // or -1 on failure (e.g. can't fork a new process).
4837 // Unlike system(), this function can be called from signal handler. It
4838 // doesn't block SIGINT et al.
4839 int os::fork_and_exec(char* cmd) {
  char* argv[4] = { (char*) "sh", (char*) "-c", cmd, NULL };
4841 
4842   pid_t pid = fork();
4843 
4844   if (pid < 0) {
4845     // fork failed
4846     return -1;
4847 
4848   } else if (pid == 0) {
4849     // child process
4850 
4851     // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4852     execve("/usr/bin/sh", argv, environ);
4853 
4854     // execve failed
4855     _exit(-1);
4856 
4857   } else {
4858     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4859     // care about the actual exit code, for now.
4860 
4861     int status;
4862 
4863     // Wait for the child process to exit. This returns immediately if
    // the child has already exited.
4865     while (waitpid(pid, &status, 0) < 0) {
4866       switch (errno) {
4867         case ECHILD: return 0;
4868         case EINTR: break;
4869         default: return -1;
4870       }
4871     }
4872 
4873     if (WIFEXITED(status)) {
4874       // The child exited normally; get its exit code.
4875       return WEXITSTATUS(status);
4876     } else if (WIFSIGNALED(status)) {
4877       // The child exited because of a signal.
4878       // The best value to return is 0x80 + signal number,
4879       // because that is what all Unix shells do, and because
4880       // it allows callers to distinguish between process exit and
4881       // process death by signal.
4882       return 0x80 + WTERMSIG(status);
4883     } else {
4884       // Unknown exit code; pass it through.
4885       return status;
4886     }
4887   }
4888   return -1;
4889 }
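
// Usage sketch (illustrative): error reporting code might run
//   int rc = os::fork_and_exec((char*) "/bin/true");
// rc is the command's exit status (0 here), 0x80 + N if the command died
// on signal N, or -1 if fork() failed or waitpid() reported an unexpected
// error; note that an execve() failure surfaces as the child's exit
// status 255 (from _exit(-1)).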
4890 
4891 // is_headless_jre()
4892 //
4893 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
4894 // in order to report if we are running in a headless jre.
4895 //
4896 // Since JDK8 xawt/libmawt.so is moved into the same directory
4897 // as libawt.so, and renamed libawt_xawt.so
4898 bool os::is_headless_jre() {
4899   struct stat statbuf;
4900   char buf[MAXPATHLEN];
4901   char libmawtpath[MAXPATHLEN];
4902   const char *xawtstr = "/xawt/libmawt.so";
4903   const char *new_xawtstr = "/libawt_xawt.so";
4904 
4905   char *p;
4906 
4907   // Get path to libjvm.so
4908   os::jvm_path(buf, sizeof(buf));
4909 
4910   // Get rid of libjvm.so
4911   p = strrchr(buf, '/');
4912   if (p == NULL) return false;
4913   else *p = '\0';
4914 
4915   // Get rid of client or server
4916   p = strrchr(buf, '/');
4917   if (p == NULL) return false;
4918   else *p = '\0';
4919 
4920   // check xawt/libmawt.so
4921   strcpy(libmawtpath, buf);
4922   strcat(libmawtpath, xawtstr);
4923   if (::stat(libmawtpath, &statbuf) == 0) return false;
4924 
4925   // check libawt_xawt.so
4926   strcpy(libmawtpath, buf);
4927   strcat(libmawtpath, new_xawtstr);
4928   if (::stat(libmawtpath, &statbuf) == 0) return false;
4929 
4930   return true;
4931 }
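
// Example (illustrative paths): if jvm_path() yields
//   /opt/jdk/jre/lib/ppc64/server/libjvm.so
// the two strrchr() cuts leave /opt/jdk/jre/lib/ppc64, and we then probe
//   /opt/jdk/jre/lib/ppc64/xawt/libmawt.so
//   /opt/jdk/jre/lib/ppc64/libawt_xawt.so
// returning true (headless) only if neither library exists.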
4932 
4933 // Get the default path to the core file
4934 // Returns the length of the string
4935 int os::get_core_path(char* buffer, size_t bufferSize) {
4936   const char* p = get_current_directory(buffer, bufferSize);
4937 
4938   if (p == NULL) {
4939     assert(p != NULL, "failed to get current directory");
4940     return 0;
4941   }
4942 
  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
               p, current_process_id());
4945 
4946   return strlen(buffer);
4947 }
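
// Example (illustrative): with current directory /home/user and pid 12345,
// the buffer ends up as "/home/user/core or core.12345".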
4948 
4949 #ifndef PRODUCT
4950 void TestReserveMemorySpecial_test() {
4951   // No tests available for this platform
4952 }
4953 #endif