rev 9211 : 8140645: Recent Developments for AIX
Summary: Port recent developments from SAP for AIX to the OpenJDK

   1 /*
   2  * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright 2012, 2015 SAP AG. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 // According to the AIX OS doc, #pragma alloca must be used
  27 // with the C++ compiler before referencing the function alloca().
  28 #pragma alloca
  29 
  30 // no precompiled headers
  31 #include "classfile/classLoader.hpp"
  32 #include "classfile/systemDictionary.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "jvm_aix.h"
  39 #include "libperfstat_aix.hpp"
  40 #include "loadlib_aix.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "memory/filemap.hpp"
  43 #include "misc_aix.hpp"
  44 #include "mutex_aix.inline.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "os_aix.inline.hpp"
  47 #include "os_share_aix.hpp"
  48 #include "porting_aix.hpp"
  49 #include "prims/jniFastGetField.hpp"
  50 #include "prims/jvm.h"
  51 #include "prims/jvm_misc.hpp"
  52 #include "runtime/arguments.hpp"
  53 #include "runtime/atomic.inline.hpp"
  54 #include "runtime/extendedPC.hpp"
  55 #include "runtime/globals.hpp"
  56 #include "runtime/interfaceSupport.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/javaCalls.hpp"
  59 #include "runtime/mutexLocker.hpp"
  60 #include "runtime/objectMonitor.hpp"
  61 #include "runtime/orderAccess.inline.hpp"
  62 #include "runtime/os.hpp"
  63 #include "runtime/osThread.hpp"
  64 #include "runtime/perfMemory.hpp"
  65 #include "runtime/sharedRuntime.hpp"
  66 #include "runtime/statSampler.hpp"
  67 #include "runtime/stubRoutines.hpp"
  68 #include "runtime/thread.inline.hpp"
  69 #include "runtime/threadCritical.hpp"
  70 #include "runtime/timer.hpp"
  71 #include "runtime/vm_version.hpp"
  72 #include "services/attachListener.hpp"
  73 #include "services/runtimeService.hpp"
  74 #include "utilities/decoder.hpp"
  75 #include "utilities/defaultStream.hpp"
  76 #include "utilities/events.hpp"
  77 #include "utilities/growableArray.hpp"
  78 #include "utilities/vmError.hpp"
  79 
  80 // put OS-includes here (sorted alphabetically)
  81 #include <errno.h>
  82 #include <fcntl.h>
  83 #include <inttypes.h>
  84 #include <poll.h>
  85 #include <procinfo.h>
  86 #include <pthread.h>
  87 #include <pwd.h>
  88 #include <semaphore.h>
  89 #include <signal.h>
  90 #include <stdint.h>
  91 #include <stdio.h>
  92 #include <string.h>
  93 #include <unistd.h>
  94 #include <sys/ioctl.h>
  95 #include <sys/ipc.h>
  96 #include <sys/mman.h>
  97 #include <sys/resource.h>
  98 #include <sys/select.h>
  99 #include <sys/shm.h>
 100 #include <sys/socket.h>
 101 #include <sys/stat.h>
 102 #include <sys/sysinfo.h>
 103 #include <sys/systemcfg.h>
 104 #include <sys/time.h>
 105 #include <sys/times.h>
 106 #include <sys/types.h>
 107 #include <sys/utsname.h>
 108 #include <sys/vminfo.h>
 109 #include <sys/wait.h>
 110 
 111 // If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
 112 // getrusage() is prepared to handle the associated failure.
 113 #ifndef RUSAGE_THREAD
 114 #define RUSAGE_THREAD   (1)               /* only the calling thread */
 115 #endif
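
// A minimal sketch of the guarded call pattern referred to above (the real
// use is in os::elapsedVTime() below): if the kernel lacks thread-level
// accounting, getrusage(RUSAGE_THREAD, ...) simply fails and the caller
// falls back to a coarser clock.
/*
  struct rusage usage;
  if (::getrusage(RUSAGE_THREAD, &usage) != 0) {
    // RUSAGE_THREAD not supported here - fall back to process-wide times.
  }
*/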
 116 
 117 // PPC port
 118 static const uintx Use64KPagesThreshold       = 1*M;
 119 static const uintx MaxExpectedDataSegmentSize = SIZE_4G*2;
 120 
 121 // Add missing declarations (should be in procinfo.h but aren't until AIX 6.1).
 122 #if !defined(_AIXVERSION_610)
 123 extern "C" {
 124   int getthrds64(pid_t ProcessIdentifier,
 125                  struct thrdentry64* ThreadBuffer,
 126                  int ThreadSize,
 127                  tid64_t* IndexPointer,
 128                  int Count);
 129 }
 130 #endif
 131 
 132 #define MAX_PATH (2 * K)
 133 
 134 // for timer info max values which include all bits
 135 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 136 // for multipage initialization error analysis (in 'g_multipage_error')
 137 #define ERROR_MP_OS_TOO_OLD                          100
 138 #define ERROR_MP_EXTSHM_ACTIVE                       101
 139 #define ERROR_MP_VMGETINFO_FAILED                    102
 140 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 141 
 142 // The semantics in this file are that codeptr_t is a *real code ptr*.
 143 // This means that any function taking codeptr_t as an argument will assume
 144 // a real codeptr and won't handle function descriptors (e.g. getFuncName),
 145 // whereas functions taking address as an argument will deal with function
 146 // descriptors (e.g. os::dll_address_to_library_name).
 147 typedef unsigned int* codeptr_t;
 148 
 149 // Typedefs for stackslots, stack pointers, pointers to op codes.
 150 typedef unsigned long stackslot_t;
 151 typedef stackslot_t* stackptr_t;
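
// On 64-bit AIX a C function pointer does not point at code but at a
// function descriptor residing in the data segment. A minimal sketch of
// that layout (the real type used below, FunctionDescriptor, comes from
// porting_aix.hpp):
/*
  struct FunctionDescriptorSketch {
    void* entry; // real code address - this is what codeptr_t refers to
    void* toc;   // TOC anchor of the target module
    void* env;   // environment pointer (unused for C/C++)
  };
*/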
 152 
 153 // Excerpts from systemcfg.h definitions newer than AIX 5.3.
 154 #ifndef PV_7
 155 #define PV_7 0x200000          /* Power PC 7 */
 156 #define PV_7_Compat 0x208000   /* Power PC 7 */
 157 #endif
 158 #ifndef PV_8
 159 #define PV_8 0x300000          /* Power PC 8 */
 160 #define PV_8_Compat 0x308000   /* Power PC 8 */
 161 #endif
 162 
 163 #define trcVerbose(fmt, ...) { /* PPC port */  \
 164   if (Verbose) { \
 165     fprintf(stderr, fmt, ##__VA_ARGS__); \
 166     fputc('\n', stderr); fflush(stderr); \
 167   } \
 168 }
 169 #define trc(fmt, ...)        /* PPC port */
 170 
 171 #define ERRBYE(s) { \
 172     trcVerbose(s); \
 173     return -1; \
 174 }
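
// A minimal usage sketch for the two helpers above, assuming a vm_page_info
// 'pi' as in os::Aix::query_pagesize() below (ERRBYE fits only functions
// returning int):
/*
  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) != 0) {
    ERRBYE("vmgetinfo failed");  // traces the message (if Verbose), returns -1
  }
  trcVerbose("page size: 0x%llX", (unsigned long long) pi.pagesize);
*/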
 175 
 176 // Query dimensions of the stack of the calling thread.
 177 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
 178 
 179 // Function to check a given stack pointer against given stack limits
 180 inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
 181   if (((uintptr_t)sp) & 0x7) {
 182     return false;
 183   }
 184   if (sp > stack_base) {
 185     return false;
 186   }
 187   if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
 188     return false;
 189   }
 190   return true;
 191 }
 192 
 193 // Returns true if p is a valid code pointer.
 194 inline bool is_valid_codepointer(codeptr_t p) {
 195   if (!p) {
 196     return false;
 197   }
 198   if (((uintptr_t)p) & 0x3) {
 199     return false;
 200   }
 201   if (!LoadedLibraries::find_for_text_address(p, NULL)) {
 202     return false;
 203   }
 204   return true;
 205 }
 206 
 207 // Macro to check a given stack pointer against given stack limits and to die if test fails.
 208 #define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
 209     guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
 210 }
 211 
 212 // Macro to check the current stack pointer against given stack limits.
 213 #define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
 214   address sp; \
 215   sp = os::current_stack_pointer(); \
 216   CHECK_STACK_PTR(sp, stack_base, stack_size); \
 217 }
 218 
 219 ////////////////////////////////////////////////////////////////////////////////
 220 // global variables (for a description see os_aix.hpp)
 221 
 222 julong    os::Aix::_physical_memory = 0;
 223 pthread_t os::Aix::_main_thread = ((pthread_t)0);
 224 int       os::Aix::_page_size = -1;
 225 int       os::Aix::_on_pase = -1;
 226 int       os::Aix::_os_version = -1;
 227 int       os::Aix::_stack_page_size = -1;
 228 int       os::Aix::_xpg_sus_mode = -1;
 229 int       os::Aix::_extshm = -1;
 230 int       os::Aix::_logical_cpus = -1;
 231 
 232 ////////////////////////////////////////////////////////////////////////////////
 233 // local variables
 234 
 235 static int      g_multipage_error  = -1;   // error analysis for multipage initialization
 236 static jlong    initial_time_count = 0;
 237 static int      clock_tics_per_sec = 100;
 238 static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
 239 static bool     check_signals      = true;
 240 static pid_t    _initial_pid       = 0;
 241 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 242 static sigset_t SR_sigset;
 243 
 244 // This describes the state of multipage support of the underlying
 245 // OS. Note that this is of no interest to the outside world and
 246 // therefore should not be defined in the AIX class.
 247 //
 248 // AIX supports four different page sizes - 4K, 64K, 16M, 16G. The
 249 // latter two (16M "large" and 16G "huge" pages) require special
 250 // setup and are normally not available.
 251 //
 252 // AIX supports multiple page sizes per process, for:
 253 //  - Stack (of the primordial thread, so not relevant for us)
 254 //  - Data - data, bss, heap, for us also pthread stacks
 255 //  - Text - program text
 256 //  - Shared memory
 257 //
 258 // Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
 259 // and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
 260 //
 261 // For shared memory, page size can be set dynamically via
 262 // shmctl(). Different shared memory regions can have different page
 263 // sizes.
 264 //
 265 // More information can be found at the IBM info center:
 266 //   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
 267 //
 268 static struct {
 269   size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
 270   size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
 271   size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
 272   size_t pthr_stack_pagesize; // stack page size of pthread threads
 273   size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
 274   bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
 275   bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
 276   int error;                  // Error describing if something went wrong at multipage init.
 277 } g_multipage_support = {
 278   (size_t) -1,
 279   (size_t) -1,
 280   (size_t) -1,
 281   (size_t) -1,
 282   (size_t) -1,
 283   false, false,
 284   0
 285 };
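
// A minimal sketch of the shmctl() page size mechanism described above;
// query_multipage_support() below uses exactly this sequence to probe 64K
// and 16M page support ('size' is a hypothetical request size):
/*
  const int shmid = ::shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
  struct shmid_ds shm_buf = { 0 };
  shm_buf.shm_pagesize = SIZE_64K;          // request 64K backing pages ...
  ::shmctl(shmid, SHM_PAGESIZE, &shm_buf);  // ... before the first attach
  void* p = ::shmat(shmid, NULL, 0);        // region now backed by 64K pages
*/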
 286 
 287 // We must not accidentally allocate memory close to the BRK - even if
 288 // that would work - because then we prevent the BRK segment from
 289 // growing, which may result in a malloc OOM even though there is
 290 // enough memory. The problem only arises if we shmat() or mmap() at
 291 // a specific wish address, e.g. to place the heap in a
 292 // compressed-oops-friendly way.
 293 static bool is_close_to_brk(address a) {
 294   address a1 = (address) sbrk(0);
 295   if (a >= a1 && a < (a1 + MaxExpectedDataSegmentSize)) {
 296     return true;
 297   }
 298   return false;
 299 }
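
// A minimal sketch of how a caller would use this guard (hypothetical
// 'wish_addr'): refuse a wish address that could collide with the growing
// data segment rather than attach there.
/*
  if (wish_addr != NULL && is_close_to_brk((address) wish_addr)) {
    trcVerbose("refusing wish address %p - too close to brk", wish_addr);
    wish_addr = NULL;  // let the OS choose an address instead
  }
*/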
 300 
 301 julong os::available_memory() {
 302   return Aix::available_memory();
 303 }
 304 
 305 julong os::Aix::available_memory() {
 306   os::Aix::meminfo_t mi;
 307   if (os::Aix::get_meminfo(&mi)) {
 308     return mi.real_free;
 309   } else {
 310     return 0xFFFFFFFFFFFFFFFFLL;
 311   }
 312 }
 313 
 314 julong os::physical_memory() {
 315   return Aix::physical_memory();
 316 }
 317 
 318 // Return true if the process runs with elevated (setuid/setgid) privileges.
 319 
 320 bool os::have_special_privileges() {
 321   static bool init = false;
 322   static bool privileges = false;
 323   if (!init) {
 324     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 325     init = true;
 326   }
 327   return privileges;
 328 }
 329 
 330 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 331 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 332 static bool my_disclaim64(char* addr, size_t size) {
 333 
 334   if (size == 0) {
 335     return true;
 336   }
 337 
 338   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 339   const unsigned int maxDisclaimSize = 0x40000000;
 340 
 341   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 342   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 343 
 344   char* p = addr;
 345 
 346   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
 347     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 348       trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 349       return false;
 350     }
 351     p += maxDisclaimSize;
 352   }
 353 
 354   if (lastDisclaimSize > 0) {
 355     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 356       trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 357       return false;
 358     }
 359   }
 360 
 361   return true;
 362 }
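
// A worked example for the chunking above: disclaiming 2.5 GB issues two
// full 1 GB (0x40000000) disclaims plus a final 0.5 GB disclaim; exactly
// 2 GB issues two full disclaims and skips the remainder entirely.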
 363 
 364 // Cpu architecture string
 365 #if defined(PPC32)
 366 static char cpu_arch[] = "ppc";
 367 #elif defined(PPC64)
 368 static char cpu_arch[] = "ppc64";
 369 #else
 370 #error Add appropriate cpu_arch setting
 371 #endif
 372 
 373 
 374 // Given an address, returns the size of the page backing that address.
 375 size_t os::Aix::query_pagesize(void* addr) {
 376 
 377   vm_page_info pi;
 378   pi.addr = (uint64_t)addr;
 379   if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
 380     return pi.pagesize;
 381   } else {
 382     fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
 383     assert(false, "vmgetinfo failed to retrieve page size");
 384     return SIZE_4K;
 385   }
 386 
 387 }
 388 
 389 // Returns the kernel thread id of the currently running thread.
 390 pid_t os::Aix::gettid() {
 391   return (pid_t) thread_self();
 392 }
 393 
 394 void os::Aix::initialize_system_info() {
 395 
 396   // Get the number of online (logical) cpus instead of configured ones.
 397   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
 398   assert(_processor_count > 0, "_processor_count must be > 0");
 399 
 400   // Retrieve total physical storage.
 401   os::Aix::meminfo_t mi;
 402   if (!os::Aix::get_meminfo(&mi)) {
 403     fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
 404     assert(false, "os::Aix::get_meminfo failed.");
 405   }
 406   _physical_memory = (julong) mi.real_total;
 407 }
 408 
 409 // Helper function for tracing page sizes.
 410 static const char* describe_pagesize(size_t pagesize) {
 411   switch (pagesize) {
 412     case SIZE_4K : return "4K";
 413     case SIZE_64K: return "64K";
 414     case SIZE_16M: return "16M";
 415     case SIZE_16G: return "16G";
 416     case -1:       return "not set";
 417     default:
 418       assert(false, "surprise");
 419       return "??";
 420   }
 421 }
 422 
 423 // Probe OS for multipage support.
 424 // Will fill the global g_multipage_support structure.
 425 // Must be called before calling os::large_page_init().
 426 static void query_multipage_support() {
 427 
 428   guarantee(g_multipage_support.pagesize == -1,
 429             "do not call twice");
 430 
 431   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
 432 
 433   // This really would surprise me.
 434   assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");
 435 
 436   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 437   // Default data page size is defined either by linker options (-bdatapsize)
 438   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 439   // default should be 4K.
 440   {
 441     void* p = ::malloc(SIZE_16M);
 442     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
 443     ::free(p);
 444   }
 445 
 446   // Query default shm page size (LDR_CNTRL SHMPSIZE).
 447   {
 448     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 449     guarantee(shmid != -1, "shmget failed");
 450     void* p = ::shmat(shmid, NULL, 0);
 451     ::shmctl(shmid, IPC_RMID, NULL);
 452     guarantee(p != (void*) -1, "shmat failed");
 453     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
 454     ::shmdt(p);
 455   }
 456 
 457   // Before querying the stack page size, make sure we are not running as the primordial
 458   // thread (because the primordial thread's stack may have a different page size than
 459   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 460   // number of reasons so we may just as well guarantee it here.
 461   guarantee0(!os::Aix::is_primordial_thread());
 462 
 463   // Query pthread stack page size.
 464   {
 465     int dummy = 0;
 466     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
 467   }
 468 
 469   // Query default text page size (LDR_CNTRL TEXTPSIZE).
 470   /* PPC port: so far unused.
 471   {
 472     address any_function =
 473       (address) resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
 474     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
 475   }
 476   */
 477 
 478   // Now probe for support of 64K pages and 16M pages.
 479 
 480   // Before OS/400 V6R1, there is no support for pages other than 4K.
 481   if (os::Aix::on_pase_V5R4_or_older()) {
 482     Unimplemented();
 483     goto query_multipage_support_end;
 484   }
 485 
 486   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
 487   {
 488     const int MAX_PAGE_SIZES = 4;
 489     psize_t sizes[MAX_PAGE_SIZES];
 490     const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 491     if (num_psizes == -1) {
 492       trc("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
 493       trc("disabling multipage support.\n");
 494       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
 495       goto query_multipage_support_end;
 496     }
 497     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 498     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 499     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
 500     for (int i = 0; i < num_psizes; i ++) {
 501       trcVerbose(" %s ", describe_pagesize(sizes[i]));
 502     }
 503 
 504     // Can we use 64K, 16M pages?
 505     for (int i = 0; i < num_psizes; i ++) {
 506       const size_t pagesize = sizes[i];
 507       if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
 508         continue;
 509       }
 510       bool can_use = false;
 511       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
 512       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
 513         IPC_CREAT | S_IRUSR | S_IWUSR);
 514       guarantee0(shmid != -1); // Should always work.
 515       // Try to set pagesize.
 516       struct shmid_ds shm_buf = { 0 };
 517       shm_buf.shm_pagesize = pagesize;
 518       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
 519         const int en = errno;
 520         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 521         // PPC port trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
 522         // PPC port  MiscUtils::describe_errno(en));
 523       } else {
 524         // Attach and double-check pagesize.
 525         void* p = ::shmat(shmid, NULL, 0);
 526         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 527         guarantee0(p != (void*) -1); // Should always work.
 528         const size_t real_pagesize = os::Aix::query_pagesize(p);
 529         if (real_pagesize != pagesize) {
 530           trcVerbose("real page size (0x%llX) differs.", real_pagesize);
 531         } else {
 532           can_use = true;
 533         }
 534         ::shmdt(p);
 535       }
 536       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
 537       if (pagesize == SIZE_64K) {
 538         g_multipage_support.can_use_64K_pages = can_use;
 539       } else if (pagesize == SIZE_16M) {
 540         g_multipage_support.can_use_16M_pages = can_use;
 541       }
 542     }
 543 
 544   } // end: check which pages can be used for shared memory
 545 
 546 query_multipage_support_end:
 547 
 548   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s\n",
 549       describe_pagesize(g_multipage_support.pagesize));
 550   trcVerbose("Data page size (C-Heap, bss, etc): %s\n",
 551       describe_pagesize(g_multipage_support.datapsize));
 552   trcVerbose("Text page size: %s\n",
 553       describe_pagesize(g_multipage_support.textpsize));
 554   trcVerbose("Thread stack page size (pthread): %s\n",
 555       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
 556   trcVerbose("Default shared memory page size: %s\n",
 557       describe_pagesize(g_multipage_support.shmpsize));
 558   trcVerbose("Can use 64K pages dynamically with shared memory: %s\n",
 559       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
 560   trcVerbose("Can use 16M pages dynamically with shared memory: %s\n",
 561       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
 562   trcVerbose("Multipage error details: %d\n",
 563       g_multipage_support.error);
 564 
 565   // sanity checks
 566   assert0(g_multipage_support.pagesize == SIZE_4K);
 567   assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
 568   // PPC port: so far unused. assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
 569   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
 570   assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);
 571 
 572 } // end query_multipage_support()
 573 
 574 void os::init_system_properties_values() {
 575 
 576 #define DEFAULT_LIBPATH "/usr/lib:/lib"
 577 #define EXTENSIONS_DIR  "/lib/ext"
 578 
 579   // Buffer that fits several sprintfs.
 580   // Note that the space for the trailing null is provided
 581   // by the nulls included by the sizeof operator.
 582   const size_t bufsize =
 583     MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
 584          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
 585   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 586 
 587   // sysclasspath, java_home, dll_dir
 588   {
 589     char *pslash;
 590     os::jvm_path(buf, bufsize);
 591 
 592     // Found the full path to libjvm.so.
 593     // Now cut the path to <java_home>/jre if we can.
 594     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 595     pslash = strrchr(buf, '/');
 596     if (pslash != NULL) {
 597       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 598     }
 599     Arguments::set_dll_dir(buf);
 600 
 601     if (pslash != NULL) {
 602       pslash = strrchr(buf, '/');
 603       if (pslash != NULL) {
 604         *pslash = '\0';          // Get rid of /<arch>.
 605         pslash = strrchr(buf, '/');
 606         if (pslash != NULL) {
 607           *pslash = '\0';        // Get rid of /lib.
 608         }
 609       }
 610     }
 611     Arguments::set_java_home(buf);
 612     set_boot_path('/', ':');
 613   }
 614 
 615   // Where to look for native libraries.
 616 
 617   // On AIX we get the user setting of LIBPATH.
 618   // Eventually, all the library path setting will be done here.
 619   // Get the user setting of LIBPATH.
 620   const char *v = ::getenv("LIBPATH");
 621   const char *v_colon = ":";
 622   if (v == NULL) { v = ""; v_colon = ""; }
 623 
 624   // Concatenate user and invariant part of ld_library_path.
 625   // That's +1 for the colon and +1 for the trailing '\0'.
 626   char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
 627   sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
 628   Arguments::set_library_path(ld_library_path);
 629   FREE_C_HEAP_ARRAY(char, ld_library_path);
 630 
 631   // Extensions directories.
 632   sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
 633   Arguments::set_ext_dirs(buf);
 634 
 635   FREE_C_HEAP_ARRAY(char, buf);
 636 
 637 #undef DEFAULT_LIBPATH
 638 #undef EXTENSIONS_DIR
 639 }
 640 
 641 ////////////////////////////////////////////////////////////////////////////////
 642 // breakpoint support
 643 
 644 void os::breakpoint() {
 645   BREAKPOINT;
 646 }
 647 
 648 extern "C" void breakpoint() {
 649   // use debugger to set breakpoint here
 650 }
 651 
 652 ////////////////////////////////////////////////////////////////////////////////
 653 // signal support
 654 
 655 debug_only(static bool signal_sets_initialized = false);
 656 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
 657 
 658 bool os::Aix::is_sig_ignored(int sig) {
 659   struct sigaction oact;
 660   sigaction(sig, (struct sigaction*)NULL, &oact);
 661   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
 662     : CAST_FROM_FN_PTR(void*, oact.sa_handler);
 663   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
 664     return true;
 665   } else {
 666     return false;
 667   }
 668 }
 669 
 670 void os::Aix::signal_sets_init() {
 671   // Should also have an assertion stating we are still single-threaded.
 672   assert(!signal_sets_initialized, "Already initialized");
 673   // Fill in signals that are necessarily unblocked for all threads in
 674   // the VM. Currently, we unblock the following signals:
 675   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
 676   //                         by -Xrs (=ReduceSignalUsage));
 677   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
 678   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
 679   // the dispositions or masks wrt these signals.
 680   // Programs embedding the VM that want to use the above signals for their
 681   // own purposes must, at this time, use the "-Xrs" option to prevent
 682   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
 683   // (See bug 4345157, and other related bugs).
 684   // In reality, though, unblocking these signals is really a nop, since
 685   // these signals are not blocked by default.
 686   sigemptyset(&unblocked_sigs);
 687   sigemptyset(&allowdebug_blocked_sigs);
 688   sigaddset(&unblocked_sigs, SIGILL);
 689   sigaddset(&unblocked_sigs, SIGSEGV);
 690   sigaddset(&unblocked_sigs, SIGBUS);
 691   sigaddset(&unblocked_sigs, SIGFPE);
 692   sigaddset(&unblocked_sigs, SIGTRAP);
 693   sigaddset(&unblocked_sigs, SIGDANGER);
 694   sigaddset(&unblocked_sigs, SR_signum);
 695 
 696   if (!ReduceSignalUsage) {
 697    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
 698      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
 699      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
 700    }
 701    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
 702      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
 703      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
 704    }
 705    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
 706      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
 707      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
 708    }
 709   }
 710   // Fill in signals that are blocked by all but the VM thread.
 711   sigemptyset(&vm_sigs);
 712   if (!ReduceSignalUsage)
 713     sigaddset(&vm_sigs, BREAK_SIGNAL);
 714   debug_only(signal_sets_initialized = true);
 715 }
 716 
 717 // These are signals that are unblocked while a thread is running Java.
 718 // (For some reason, they get blocked by default.)
 719 sigset_t* os::Aix::unblocked_signals() {
 720   assert(signal_sets_initialized, "Not initialized");
 721   return &unblocked_sigs;
 722 }
 723 
 724 // These are the signals that are blocked while a (non-VM) thread is
 725 // running Java. Only the VM thread handles these signals.
 726 sigset_t* os::Aix::vm_signals() {
 727   assert(signal_sets_initialized, "Not initialized");
 728   return &vm_sigs;
 729 }
 730 
 731 // These are signals that are blocked during cond_wait to allow debugger in
 732 sigset_t* os::Aix::allowdebug_blocked_signals() {
 733   assert(signal_sets_initialized, "Not initialized");
 734   return &allowdebug_blocked_sigs;
 735 }
 736 
 737 void os::Aix::hotspot_sigmask(Thread* thread) {
 738 
 739   // Save caller's signal mask before setting VM signal mask.
 740   sigset_t caller_sigmask;
 741   pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);
 742 
 743   OSThread* osthread = thread->osthread();
 744   osthread->set_caller_sigmask(caller_sigmask);
 745 
 746   pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);
 747 
 748   if (!ReduceSignalUsage) {
 749     if (thread->is_VM_thread()) {
 750       // Only the VM thread handles BREAK_SIGNAL ...
 751       pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
 752     } else {
 753       // ... all other threads block BREAK_SIGNAL
 754       pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
 755     }
 756   }
 757 }
 758 
 759 // Retrieve memory information.
 760 // Returns false if something went wrong;
 761 // the content of pmi is undefined in this case.
 762 bool os::Aix::get_meminfo(meminfo_t* pmi) {
 763 
 764   assert(pmi, "get_meminfo: invalid parameter");
 765 
 766   memset(pmi, 0, sizeof(meminfo_t));
 767 
 768   if (os::Aix::on_pase()) {
 769 
 770     Unimplemented();
 771     return false;
 772 
 773   } else {
 774 
 775     // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
 776     // See:
 777     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 778     //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
 779     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 780     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 781 
 782     perfstat_memory_total_t psmt;
 783     memset (&psmt, '\0', sizeof(psmt));
 784     const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
 785     if (rc == -1) {
 786       fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
 787       assert(0, "perfstat_memory_total() failed");
 788       return false;
 789     }
 790 
 791     assert(rc == 1, "perfstat_memory_total() - weird return code");
 792 
 793     // excerpt from
 794     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 795     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 796     // The fields of perfstat_memory_total_t:
 797     // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
 798     // u_longlong_t real_total         Total real memory (in 4 KB pages).
 799     // u_longlong_t real_free          Free real memory (in 4 KB pages).
 800     // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
 801     // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
 802 
 803     pmi->virt_total = psmt.virt_total * 4096;
 804     pmi->real_total = psmt.real_total * 4096;
 805     pmi->real_free = psmt.real_free * 4096;
 806     pmi->pgsp_total = psmt.pgsp_total * 4096;
 807     pmi->pgsp_free = psmt.pgsp_free * 4096;
 808 
 809     return true;
 810 
 811   }
 812 } // end os::Aix::get_meminfo
 813 
 814 // Retrieve global cpu information.
 815 // Returns false if something went wrong;
 816 // the content of pci is undefined in this case.
 817 bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
 818   assert(pci, "get_cpuinfo: invalid parameter");
 819   memset(pci, 0, sizeof(cpuinfo_t));
 820 
 821   perfstat_cpu_total_t psct;
 822   memset (&psct, '\0', sizeof(psct));
 823 
 824   if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
 825     fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
 826     assert(0, "perfstat_cpu_total() failed");
 827     return false;
 828   }
 829 
 830   // global cpu information
 831   strcpy (pci->description, psct.description);
 832   pci->processorHZ = psct.processorHZ;
 833   pci->ncpus = psct.ncpus;
 834   os::Aix::_logical_cpus = psct.ncpus;
 835   for (int i = 0; i < 3; i++) {
 836     pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
 837   }
 838 
 839   // get the processor version from _system_configuration
 840   switch (_system_configuration.version) {
 841   case PV_8:
 842     strcpy(pci->version, "Power PC 8");
 843     break;
 844   case PV_7:
 845     strcpy(pci->version, "Power PC 7");
 846     break;
 847   case PV_6_1:
 848     strcpy(pci->version, "Power PC 6 DD1.x");
 849     break;
 850   case PV_6:
 851     strcpy(pci->version, "Power PC 6");
 852     break;
 853   case PV_5:
 854     strcpy(pci->version, "Power PC 5");
 855     break;
 856   case PV_5_2:
 857     strcpy(pci->version, "Power PC 5_2");
 858     break;
 859   case PV_5_3:
 860     strcpy(pci->version, "Power PC 5_3");
 861     break;
 862   case PV_5_Compat:
 863     strcpy(pci->version, "PV_5_Compat");
 864     break;
 865   case PV_6_Compat:
 866     strcpy(pci->version, "PV_6_Compat");
 867     break;
 868   case PV_7_Compat:
 869     strcpy(pci->version, "PV_7_Compat");
 870     break;
 871   case PV_8_Compat:
 872     strcpy(pci->version, "PV_8_Compat");
 873     break;
 874   default:
 875     strcpy(pci->version, "unknown");
 876   }
 877 
 878   return true;
 879 
 880 } //end os::Aix::get_cpuinfo
 881 
 882 //////////////////////////////////////////////////////////////////////////////
 883 // detecting pthread library
 884 
 885 void os::Aix::libpthread_init() {
 886   return;
 887 }
 888 
 889 //////////////////////////////////////////////////////////////////////////////
 890 // create new thread
 891 
 892 // Thread start routine for all newly created threads
 893 static void *java_start(Thread *thread) {
 894 
 895   // find out my own stack dimensions
 896   {
 897     // actually, this should do exactly the same as thread->record_stack_base_and_size...
 898     address base = 0;
 899     size_t size = 0;
 900     query_stack_dimensions(&base, &size);
 901     thread->set_stack_base(base);
 902     thread->set_stack_size(size);
 903   }
 904 
 905   // Do some sanity checks.
 906   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
 907 
 908   // Try to randomize the cache line index of hot stack frames.
 909   // This helps when threads with the same stack traces evict each other's
 910   // cache lines. The threads can be either from the same JVM instance, or
 911   // from different JVM instances. The benefit is especially true for
 912   // processors with hyperthreading technology.
 913 
 914   static int counter = 0;
 915   int pid = os::current_process_id();
 916   alloca(((pid ^ counter++) & 7) * 128);
 917 
 918   ThreadLocalStorage::set_thread(thread);
 919 
 920   OSThread* osthread = thread->osthread();
 921 
 922   // thread_id is kernel thread id (similar to Solaris LWP id)
 923   osthread->set_thread_id(os::Aix::gettid());
 924 
 925   // initialize signal mask for this thread
 926   os::Aix::hotspot_sigmask(thread);
 927 
 928   // initialize floating point control register
 929   os::Aix::init_thread_fpu_state();
 930 
 931   assert(osthread->get_state() == RUNNABLE, "invalid os thread state");
 932 
 933   // call one more level start routine
 934   thread->run();
 935 
 936   return 0;
 937 }
 938 
 939 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 940 
 941   // We want the whole function to be synchronized.
 942   ThreadCritical cs;
 943 
 944   assert(thread->osthread() == NULL, "caller responsible");
 945 
 946   // Allocate the OSThread object
 947   OSThread* osthread = new OSThread(NULL, NULL);
 948   if (osthread == NULL) {
 949     return false;
 950   }
 951 
 952   // set the correct thread state
 953   osthread->set_thread_type(thr_type);
 954 
 955   // Initial state is ALLOCATED but not INITIALIZED
 956   osthread->set_state(ALLOCATED);
 957 
 958   thread->set_osthread(osthread);
 959 
 960   // init thread attributes
 961   pthread_attr_t attr;
 962   pthread_attr_init(&attr);
 963   guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");
 964 
 965   // Make sure we run in 1:1 kernel-user-thread mode.
 966   if (os::Aix::on_aix()) {
 967     guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
 968     guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
 969   } // end: aix
 970 
 971   // Start in suspended state, and in os::thread_start, wake the thread up.
 972   guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");
 973 
 974   // calculate stack size if it's not specified by caller
 975   if (stack_size == 0) {
 976     stack_size = os::Aix::default_stack_size(thr_type);
 977 
 978     switch (thr_type) {
 979     case os::java_thread:
 980       // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
 981       assert(JavaThread::stack_size_at_create() > 0, "this should be set");
 982       stack_size = JavaThread::stack_size_at_create();
 983       break;
 984     case os::compiler_thread:
 985       if (CompilerThreadStackSize > 0) {
 986         stack_size = (size_t)(CompilerThreadStackSize * K);
 987         break;
 988       } // else fall through:
 989         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 990     case os::vm_thread:
 991     case os::pgc_thread:
 992     case os::cgc_thread:
 993     case os::watcher_thread:
 994       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 995       break;
 996     }
 997   }
 998 
 999   stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
1000   pthread_attr_setstacksize(&attr, stack_size);
1001 
1002   pthread_t tid;
1003   int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
1004 
1005   pthread_attr_destroy(&attr);
1006 
1007   if (ret == 0) {
1008     // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
1009   } else {
1010     if (PrintMiscellaneous && (Verbose || WizardMode)) {
1011       perror("pthread_create()");
1012     }
1013     // Need to clean up stuff we've allocated so far
1014     thread->set_osthread(NULL);
1015     delete osthread;
1016     return false;
1017   }
1018 
1019   // Store pthread info into the OSThread
1020   osthread->set_pthread_id(tid);
1021 
1022   return true;
1023 }
1024 
1025 /////////////////////////////////////////////////////////////////////////////
1026 // attach existing thread
1027 
1028 // bootstrap the main thread
1029 bool os::create_main_thread(JavaThread* thread) {
1030   assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
1031   return create_attached_thread(thread);
1032 }
1033 
1034 bool os::create_attached_thread(JavaThread* thread) {
1035 #ifdef ASSERT
1036     thread->verify_not_published();
1037 #endif
1038 
1039   // Allocate the OSThread object
1040   OSThread* osthread = new OSThread(NULL, NULL);
1041 
1042   if (osthread == NULL) {
1043     return false;
1044   }
1045 
1046   // Store pthread info into the OSThread
1047   osthread->set_thread_id(os::Aix::gettid());
1048   osthread->set_pthread_id(::pthread_self());
1049 
1050   // initialize floating point control register
1051   os::Aix::init_thread_fpu_state();
1052 
1053   // some sanity checks
1054   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
1055 
1056   // Initial thread state is RUNNABLE
1057   osthread->set_state(RUNNABLE);
1058 
1059   thread->set_osthread(osthread);
1060 
1061   if (UseNUMA) {
1062     int lgrp_id = os::numa_get_group_id();
1063     if (lgrp_id != -1) {
1064       thread->set_lgrp_id(lgrp_id);
1065     }
1066   }
1067 
1068   // initialize signal mask for this thread
1069   // and save the caller's signal mask
1070   os::Aix::hotspot_sigmask(thread);
1071 
1072   return true;
1073 }
1074 
1075 void os::pd_start_thread(Thread* thread) {
1076   int status = pthread_continue_np(thread->osthread()->pthread_id());
1077   assert(status == 0, "pthread_continue_np failed");
1078 }
1079 
1080 // Free OS resources related to the OSThread
1081 void os::free_thread(OSThread* osthread) {
1082   assert(osthread != NULL, "osthread not set");
1083 
1084   if (Thread::current()->osthread() == osthread) {
1085     // Restore caller's signal mask
1086     sigset_t sigmask = osthread->caller_sigmask();
1087     pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
1088    }
1089 
1090   delete osthread;
1091 }
1092 
1093 //////////////////////////////////////////////////////////////////////////////
1094 // thread local storage
1095 
1096 int os::allocate_thread_local_storage() {
1097   pthread_key_t key;
1098   int rslt = pthread_key_create(&key, NULL);
1099   assert(rslt == 0, "cannot allocate thread local storage");
1100   return (int)key;
1101 }
1102 
1103 // Note: This is currently not used by VM, as we don't destroy TLS key
1104 // on VM exit.
1105 void os::free_thread_local_storage(int index) {
1106   int rslt = pthread_key_delete((pthread_key_t)index);
1107   assert(rslt == 0, "invalid index");
1108 }
1109 
1110 void os::thread_local_storage_at_put(int index, void* value) {
1111   int rslt = pthread_setspecific((pthread_key_t)index, value);
1112   assert(rslt == 0, "pthread_setspecific failed");
1113 }
1114 
1115 extern "C" Thread* get_thread() {
1116   return ThreadLocalStorage::thread();
1117 }
1118 
1119 ////////////////////////////////////////////////////////////////////////////////
1120 // time support
1121 
1122 // Time since start-up in seconds to a fine granularity.
1123 // Used by VMSelfDestructTimer and the MemProfiler.
1124 double os::elapsedTime() {
1125   return (double)(os::elapsed_counter()) * 0.000001;
1126 }
1127 
1128 jlong os::elapsed_counter() {
1129   timeval time;
1130   int status = gettimeofday(&time, NULL);
1131   return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
1132 }
1133 
1134 jlong os::elapsed_frequency() {
1135   return (1000 * 1000);
1136 }
1137 
1138 bool os::supports_vtime() { return true; }
1139 bool os::enable_vtime()   { return false; }
1140 bool os::vtime_enabled()  { return false; }
1141 
1142 double os::elapsedVTime() {
1143   struct rusage usage;
1144   int retval = getrusage(RUSAGE_THREAD, &usage);
1145   if (retval == 0) {
1146     return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
1147   } else {
1148     // better than nothing, but not much
1149     return elapsedTime();
1150   }
1151 }
1152 
1153 jlong os::javaTimeMillis() {
1154   timeval time;
1155   int status = gettimeofday(&time, NULL);
1156   assert(status != -1, "aix error at gettimeofday()");
1157   return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1158 }
1159 
1160 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1161   timeval time;
1162   int status = gettimeofday(&time, NULL);
1163   assert(status != -1, "aix error at gettimeofday()");
1164   seconds = jlong(time.tv_sec);
1165   nanos = jlong(time.tv_usec) * 1000;
1166 }
1167 
1168 
1169 // We need to manually declare mread_real_time,
1170 // because IBM didn't provide a prototype in time.h.
1171 // (they probably only ever tested in C, not C++)
1172 extern "C"
1173 int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
1174 
1175 jlong os::javaTimeNanos() {
1176   if (os::Aix::on_pase()) {
1177     Unimplemented();
1178     return 0;
1179   } else {
1180     // On AIX use the precision of processors real time clock
1181     // or time base registers.
1182     timebasestruct_t time;
1183     int rc;
1184 
1185     // If the CPU has a time register, it will be used and
1186     // we have to convert to real time first. After conversion we have the following data:
1187     // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
1188     // time.tb_low  [nanoseconds after the last full second above]
1189     // We use mread_real_time here instead of read_real_time
1190     // to ensure that we get a monotonically increasing time.
1191     if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
1192       rc = time_base_to_time(&time, TIMEBASE_SZ);
1193       assert(rc != -1, "aix error at time_base_to_time()");
1194     }
1195     return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
1196   }
1197 }
1198 
1199 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1200   info_ptr->max_value = ALL_64_BITS;
1201   // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
1202   info_ptr->may_skip_backward = false;
1203   info_ptr->may_skip_forward = false;
1204   info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
1205 }
1206 
1207 // Return the real, user, and system times in seconds from an
1208 // arbitrary fixed point in the past.
1209 bool os::getTimesSecs(double* process_real_time,
1210                       double* process_user_time,
1211                       double* process_system_time) {
1212   struct tms ticks;
1213   clock_t real_ticks = times(&ticks);
1214 
1215   if (real_ticks == (clock_t) (-1)) {
1216     return false;
1217   } else {
1218     double ticks_per_second = (double) clock_tics_per_sec;
1219     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1220     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1221     *process_real_time = ((double) real_ticks) / ticks_per_second;
1222 
1223     return true;
1224   }
1225 }
1226 
1227 char * os::local_time_string(char *buf, size_t buflen) {
1228   struct tm t;
1229   time_t long_time;
1230   time(&long_time);
1231   localtime_r(&long_time, &t);
1232   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1233                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1234                t.tm_hour, t.tm_min, t.tm_sec);
1235   return buf;
1236 }
1237 
1238 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
1239   return localtime_r(clock, res);
1240 }
1241 
1242 ////////////////////////////////////////////////////////////////////////////////
1243 // runtime exit support
1244 
1245 // Note: os::shutdown() might be called very early during initialization, or
1246 // called from signal handler. Before adding something to os::shutdown(), make
1247 // sure it is async-safe and can handle partially initialized VM.
1248 void os::shutdown() {
1249 
1250   // allow PerfMemory to attempt cleanup of any persistent resources
1251   perfMemory_exit();
1252 
1253   // needs to remove object in file system
1254   AttachListener::abort();
1255 
1256   // flush buffered output, finish log files
1257   ostream_abort();
1258 
1259   // Check for abort hook
1260   abort_hook_t abort_hook = Arguments::abort_hook();
1261   if (abort_hook != NULL) {
1262     abort_hook();
1263   }
1264 }
1265 
1266 // Note: os::abort() might be called very early during initialization, or
1267 // called from signal handler. Before adding something to os::abort(), make
1268 // sure it is async-safe and can handle partially initialized VM.
1269 void os::abort(bool dump_core, void* siginfo, void* context) {
1270   os::shutdown();
1271   if (dump_core) {
1272 #ifndef PRODUCT
1273     fdStream out(defaultStream::output_fd());
1274     out.print_raw("Current thread is ");
1275     char buf[16];
1276     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1277     out.print_raw_cr(buf);
1278     out.print_raw_cr("Dumping core ...");
1279 #endif
1280     ::abort(); // dump core
1281   }
1282 
1283   ::exit(1);
1284 }
1285 
1286 // Die immediately, no exit hook, no abort hook, no cleanup.
1287 void os::die() {
1288   ::abort();
1289 }
1290 
1291 // This method is a copy of JDK's sysGetLastErrorString
1292 // from src/solaris/hpi/src/system_md.c
1293 
1294 size_t os::lasterror(char *buf, size_t len) {
1295   if (errno == 0) return 0;
1296 
1297   const char *s = ::strerror(errno);
1298   size_t n = ::strlen(s);
1299   if (n >= len) {
1300     n = len - 1;
1301   }
1302   ::strncpy(buf, s, n);
1303   buf[n] = '\0';
1304   return n;
1305 }
1306 
1307 intx os::current_thread_id() { return (intx)pthread_self(); }
1308 
1309 int os::current_process_id() {
1310 
1311   // This implementation returns a unique pid, the pid of the
1312   // launcher thread that starts the vm 'process'.
1313 
1314   // Under POSIX, getpid() returns the same pid as the
1315   // launcher thread rather than a unique pid per thread.
1316   // Use gettid() if you want the old pre NPTL behaviour.
1317 
1318   // if you are looking for the result of a call to getpid() that
1319   // returns a unique pid for the calling thread, then look at the
1320   // OSThread::thread_id() method in osThread_linux.hpp file
1321 
1322   return (int)(_initial_pid ? _initial_pid : getpid());
1323 }
1324 
1325 // DLL functions
1326 
1327 const char* os::dll_file_extension() { return ".so"; }
1328 
1329 // This must be hard coded because it's the system's temporary
1330 // directory, not the java application's temp directory, a la java.io.tmpdir.
1331 const char* os::get_temp_directory() { return "/tmp"; }
1332 
1333 static bool file_exists(const char* filename) {
1334   struct stat statbuf;
1335   if (filename == NULL || strlen(filename) == 0) {
1336     return false;
1337   }
1338   return os::stat(filename, &statbuf) == 0;
1339 }
1340 
1341 bool os::dll_build_name(char* buffer, size_t buflen,
1342                         const char* pname, const char* fname) {
1343   bool retval = false;
1344   // Copied from libhpi
1345   const size_t pnamelen = pname ? strlen(pname) : 0;
1346 
1347   // Return error on buffer overflow.
1348   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1349     *buffer = '\0';
1350     return retval;
1351   }
1352 
1353   if (pnamelen == 0) {
1354     snprintf(buffer, buflen, "lib%s.so", fname);
1355     retval = true;
1356   } else if (strchr(pname, *os::path_separator()) != NULL) {
1357     int n;
1358     char** pelements = split_path(pname, &n);
1359     for (int i = 0; i < n; i++) {
1360       // Really shouldn't be NULL, but check can't hurt
1361       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1362         continue; // skip the empty path values
1363       }
1364       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1365       if (file_exists(buffer)) {
1366         retval = true;
1367         break;
1368       }
1369     }
1370     // release the storage
1371     for (int i = 0; i < n; i++) {
1372       if (pelements[i] != NULL) {
1373         FREE_C_HEAP_ARRAY(char, pelements[i]);
1374       }
1375     }
1376     if (pelements != NULL) {
1377       FREE_C_HEAP_ARRAY(char*, pelements);
1378     }
1379   } else {
1380     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1381     retval = true;
1382   }
1383   return retval;
1384 }
1385 
1386 // Check if addr is inside libjvm.so.
1387 bool os::address_is_in_vm(address addr) {
1388 
1389   // Input could be a real pc or a function pointer literal. The latter
1390   // would be a function descriptor residing in the data segment of a module.
1391   loaded_module_t lm;
1392   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
1393     return lm.is_in_vm;
1394   } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
1395     return lm.is_in_vm;
1396   } else {
1397     return false;
1398   }
1399 
1400 }
1401 
1402 // Resolve an AIX function descriptor literal to a code pointer.
1403 // If the input is a valid code pointer to a text segment of a loaded module,
1404 //   it is returned unchanged.
1405 // If the input is a valid AIX function descriptor, it is resolved to the
1406 //   code entry point.
1407 // If the input is neither a valid function descriptor nor a valid code pointer,
1408 //   NULL is returned.
1409 static address resolve_function_descriptor_to_code_pointer(address p) {
1410 
1411   if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
1412     // It's a real code pointer.
1413     return p;
1414   } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
1415     // Pointer to data segment, potentially a function descriptor.
1416     address code_entry = (address)(((FunctionDescriptor*)p)->entry());
1417     if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
1418       // It's a function descriptor.
1419       return code_entry;
1420     }
1421   }
1422 
1423   return NULL;
1424 }
1425 
1426 bool os::dll_address_to_function_name(address addr, char *buf,
1427                                       int buflen, int *offset,
1428                                       bool demangle) {
1429   if (offset) {
1430     *offset = -1;
1431   }
1432   // Buf is not optional, but offset is optional.
1433   assert(buf != NULL, "sanity check");
1434   buf[0] = '\0';
1435 
1436   // Resolve function ptr literals first.
1437   addr = resolve_function_descriptor_to_code_pointer(addr);
1438   if (!addr) {
1439     return false;
1440   }
1441 
1442   // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
1443   return Decoder::decode(addr, buf, buflen, offset, demangle);
1444 }
1445 
1446 static int getModuleName(codeptr_t pc,                    // [in] program counter
1447                          char* p_name, size_t namelen,    // [out] optional: buffer for the module name
1448                          char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
1449                          ) {
1450 
1451   if (p_name && namelen > 0) {
1452     *p_name = '\0';
1453   }
1454   if (p_errmsg && errmsglen > 0) {
1455     *p_errmsg = '\0';
1456   }
1457 
1458   if (p_name && namelen > 0) {
1459     loaded_module_t lm;
1460     if (LoadedLibraries::find_for_text_address(pc, &lm) != NULL) {
1461       strncpy(p_name, lm.shortname, namelen);
1462       p_name[namelen - 1] = '\0';
1463     }
1464     return 0;
1465   }
1466 
1467   return -1;
1468 }
1469 
1470 bool os::dll_address_to_library_name(address addr, char* buf,
1471                                      int buflen, int* offset) {
1472   if (offset) {
1473     *offset = -1;
1474   }
1475   // Buf is not optional, but offset is optional.
1476   assert(buf != NULL, "sanity check");
1477   buf[0] = '\0';
1478 
1479   // Resolve function ptr literals first.
1480   addr = resolve_function_descriptor_to_code_pointer(addr);
1481   if (!addr) {
1482     return false;
1483   }
1484 
1485   if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
1486     return true;
1487   }
1488   return false;
1489 }
1490 
1491 // Loads .dll/.so and in case of error it checks if .dll/.so was built
1492 // for the same architecture as Hotspot is running on.
1493 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1494 
1495   if (ebuf && ebuflen > 0) {
1496     ebuf[0] = '\0';
1497     ebuf[ebuflen - 1] = '\0';
1498   }
1499 
1500   if (!filename || strlen(filename) == 0) {
1501     ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1502     return NULL;
1503   }
1504 
1505   // RTLD_LAZY is currently not implemented. The library is loaded immediately, with all its dependents.
1506   void* result = ::dlopen(filename, RTLD_LAZY);
1507   if (result != NULL) {
1508     // Reload dll cache. Don't do this in signal handling.
1509     LoadedLibraries::reload();
1510     return result;
1511   } else {
1512     // error analysis when dlopen fails
1513     const char* const error_report = ::dlerror();
1514     if (error_report && ebuf && ebuflen > 0) {
1515       snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1516                filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1517     }
1518   }
1519   return NULL;
1520 }
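
// Typical call pattern (illustrative sketch only; the library name is
// hypothetical):
//
//   char ebuf[1024];
//   void* handle = os::dll_load("libfoo.so", ebuf, sizeof(ebuf));
//   if (handle == NULL) {
//     // ebuf now holds the filename, LIBPATH, LD_LIBRARY_PATH and the
//     // dlerror() text assembled above.
//     warning("dll_load failed: %s", ebuf);
//   }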
1521 
1522 void* os::dll_lookup(void* handle, const char* name) {
1523   void* res = dlsym(handle, name);
1524   return res;
1525 }
1526 
1527 void* os::get_default_process_handle() {
1528   return (void*)::dlopen(NULL, RTLD_LAZY);
1529 }
1530 
1531 void os::print_dll_info(outputStream *st) {
1532   st->print_cr("Dynamic libraries:");
1533   LoadedLibraries::print(st);
1534 }
1535 
1536 void os::get_summary_os_info(char* buf, size_t buflen) {
1537   // There might be something more readable than uname results for AIX.
1538   struct utsname name;
1539   uname(&name);
1540   snprintf(buf, buflen, "%s %s", name.release, name.version);
1541 }
1542 
1543 void os::print_os_info(outputStream* st) {
1544   st->print("OS:");
1545 
1546   st->print("uname:");
1547   struct utsname name;
1548   uname(&name);
1549   st->print("%s ", name.sysname);
1550   st->print("%s ", name.nodename);
1551   st->print("%s ", name.release);
1552   st->print("%s ", name.version);
1553   st->print("%s", name.machine);
1554   st->cr();
1555 
1556   // rlimit
1557   st->print("rlimit:");
1558   struct rlimit rlim;
1559 
1560   st->print(" STACK ");
1561   getrlimit(RLIMIT_STACK, &rlim);
1562   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1563   else st->print("%uk", (unsigned) (rlim.rlim_cur >> 10));
1564 
1565   st->print(", CORE ");
1566   getrlimit(RLIMIT_CORE, &rlim);
1567   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1568   else st->print("%uk", (unsigned) (rlim.rlim_cur >> 10));
1569 
1570   st->print(", NPROC ");
1571   st->print("%ld", sysconf(_SC_CHILD_MAX));
1572 
1573   st->print(", NOFILE ");
1574   getrlimit(RLIMIT_NOFILE, &rlim);
1575   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1576   else st->print("%u", (unsigned) rlim.rlim_cur);
1577 
1578   st->print(", AS ");
1579   getrlimit(RLIMIT_AS, &rlim);
1580   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1581   else st->print("%uk", (unsigned) (rlim.rlim_cur >> 10));
1582 
1583   // Print limits on DATA, because it limits the C-heap.
1584   st->print(", DATA ");
1585   getrlimit(RLIMIT_DATA, &rlim);
1586   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1587   else st->print("%uk", (unsigned) (rlim.rlim_cur >> 10));
1588   st->cr();
1589 
1590   // load average
1591   st->print("load average:");
1592   double loadavg[3] = {-1.L, -1.L, -1.L};
1593   os::loadavg(loadavg, 3);
1594   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1595   st->cr();
1596 }
1597 
1598 void os::print_memory_info(outputStream* st) {
1599 
1600   st->print_cr("Memory:");
1601 
1602   st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
1603   st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
1604   st->print_cr("  Default shared memory page size:        %s",
1605     describe_pagesize(g_multipage_support.shmpsize));
1606   st->print_cr("  Can use 64K pages dynamically with shared memory: %s",
1607     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1608   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1609     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1610   if (g_multipage_error != 0) {
1611     st->print_cr("  multipage error: %d", g_multipage_error);
1612   }
1613 
1614   // print out LDR_CNTRL because it affects the default page sizes
1615   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1616   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1617 
1618   const char* const extshm = ::getenv("EXTSHM");
1619   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1620   if (extshm != NULL && ((strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0))) {
1621     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1622   }
1623 
1624   // Call os::Aix::get_meminfo() to retrieve memory statistics.
1625   os::Aix::meminfo_t mi;
1626   if (os::Aix::get_meminfo(&mi)) {
1627     char buffer[256];
1628     if (os::Aix::on_aix()) {
1629       jio_snprintf(buffer, sizeof(buffer),
1630                    "  physical total : %llu\n"
1631                    "  physical free  : %llu\n"
1632                    "  swap total     : %llu\n"
1633                    "  swap free      : %llu\n",
1634                    mi.real_total,
1635                    mi.real_free,
1636                    mi.pgsp_total,
1637                    mi.pgsp_free);
1638     } else {
1639       Unimplemented();
1640     }
1641     st->print_raw(buffer);
1642   } else {
1643     st->print_cr("  (no more information available)");
1644   }
1645 }
1646 
1647 // Get a string for the cpuinfo that is a summary of the cpu type
1648 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1649   // Use the processor version string if available, else fall back to "AIX".
1650   os::Aix::cpuinfo_t ci;
1651   if (os::Aix::get_cpuinfo(&ci)) {
1652     strncpy(buf, ci.version, buflen);
         if (buflen > 0) buf[buflen - 1] = '\0'; // strncpy() does not terminate on truncation
1653   } else {
1654     strncpy(buf, "AIX", buflen);
1655   }
1656 }
1657 
1658 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1659 }
1660 
1661 void os::print_siginfo(outputStream* st, void* siginfo) {
1662   // Use common posix version.
1663   os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
1664   st->cr();
1665 }
1666 
1667 static void print_signal_handler(outputStream* st, int sig,
1668                                  char* buf, size_t buflen);
1669 
1670 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1671   st->print_cr("Signal Handlers:");
1672   print_signal_handler(st, SIGSEGV, buf, buflen);
1673   print_signal_handler(st, SIGBUS , buf, buflen);
1674   print_signal_handler(st, SIGFPE , buf, buflen);
1675   print_signal_handler(st, SIGPIPE, buf, buflen);
1676   print_signal_handler(st, SIGXFSZ, buf, buflen);
1677   print_signal_handler(st, SIGILL , buf, buflen);
1678   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
1679   print_signal_handler(st, SR_signum, buf, buflen);
1680   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1681   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1682   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1683   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1684   print_signal_handler(st, SIGTRAP, buf, buflen);
1685   print_signal_handler(st, SIGDANGER, buf, buflen);
1686 }
1687 
1688 static char saved_jvm_path[MAXPATHLEN] = {0};
1689 
1690 // Find the full path to the current module, libjvm.so.
1691 void os::jvm_path(char *buf, jint buflen) {
1692   // Error checking.
1693   if (buflen < MAXPATHLEN) {
1694     assert(false, "must use a large-enough buffer");
1695     buf[0] = '\0';
1696     return;
1697   }
1698   // Lazy resolve the path to current module.
1699   if (saved_jvm_path[0] != 0) {
1700     strcpy(buf, saved_jvm_path);
1701     return;
1702   }
1703 
1704   Dl_info dlinfo;
1705   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1706   assert(ret != 0, "cannot locate libjvm");
1707   char* rp = realpath((char *)dlinfo.dli_fname, buf);
1708   assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1709 
1710   strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
1711   saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
1712 }
1713 
1714 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1715   // no prefix required, not even "_"
1716 }
1717 
1718 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1719   // no suffix required
1720 }
1721 
1722 ////////////////////////////////////////////////////////////////////////////////
1723 // sun.misc.Signal support
1724 
1725 static volatile jint sigint_count = 0;
1726 
1727 static void
1728 UserHandler(int sig, void *siginfo, void *context) {
1729   // 4511530 - sem_post is serialized and handled by the manager thread. When
1730   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1731   // don't want to flood the manager thread with sem_post requests.
1732   if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1733     return;
1734 
1735   // Ctrl-C is pressed during error reporting, likely because the error
1736   // handler fails to abort. Let VM die immediately.
1737   if (sig == SIGINT && is_error_reported()) {
1738     os::die();
1739   }
1740 
1741   os::signal_notify(sig);
1742 }
1743 
1744 void* os::user_handler() {
1745   return CAST_FROM_FN_PTR(void*, UserHandler);
1746 }
1747 
1748 extern "C" {
1749   typedef void (*sa_handler_t)(int);
1750   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1751 }
1752 
1753 void* os::signal(int signal_number, void* handler) {
1754   struct sigaction sigAct, oldSigAct;
1755 
1756   sigfillset(&(sigAct.sa_mask));
1757 
1758   // Do not block out synchronous signals in the signal handler.
1759   // Blocking synchronous signals only makes sense if you can really
1760   // be sure that those signals won't happen during signal handling,
1761   // when the blocking applies. Normal signal handlers are lean and
1762   // do not cause signals. But our signal handlers tend to be "risky"
1763   // - secondary SIGSEGV, SIGILL, SIGBUS may and do happen.
1764   // On AIX/PASE there was a case where a SIGSEGV happened, followed
1765   // by a SIGILL, which was blocked due to the signal mask. The process
1766   // just hung forever. Better to crash from a secondary signal than to hang.
1767   sigdelset(&(sigAct.sa_mask), SIGSEGV);
1768   sigdelset(&(sigAct.sa_mask), SIGBUS);
1769   sigdelset(&(sigAct.sa_mask), SIGILL);
1770   sigdelset(&(sigAct.sa_mask), SIGFPE);
1771   sigdelset(&(sigAct.sa_mask), SIGTRAP);
1772 
1773   sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1774 
1775   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1776 
1777   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1778     // -1 means registration failed
1779     return (void *)-1;
1780   }
1781 
1782   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1783 }
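
// Typical use of os::signal (illustrative sketch only; the handler name is
// hypothetical):
//
//   static void my_handler(int sig) { /* react to sig */ }
//   void* old = os::signal(SIGUSR2, CAST_FROM_FN_PTR(void*, my_handler));
//   if (old == (void*)-1) { /* registration failed */ }
//   ...
//   os::signal(SIGUSR2, old); // restore the previous handler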
1784 
1785 void os::signal_raise(int signal_number) {
1786   ::raise(signal_number);
1787 }
1788 
1789 //
1790 // The following code was moved from os.cpp because it is
1791 // platform specific by its very nature.
1792 //
1793 
1794 // Will be modified when max signal is changed to be dynamic
1795 int os::sigexitnum_pd() {
1796   return NSIG;
1797 }
1798 
1799 // a counter for each possible signal value
1800 static volatile jint pending_signals[NSIG+1] = { 0 };
1801 
1802 // POSIX-specific handshaking semaphore.
1803 static sem_t sig_sem;
1804 
1805 void os::signal_init_pd() {
1806   // Initialize signal structures
1807   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1808 
1809   // Initialize signal semaphore
1810   int rc = ::sem_init(&sig_sem, 0, 0);
1811   guarantee(rc != -1, "sem_init failed");
1812 }
1813 
1814 void os::signal_notify(int sig) {
1815   Atomic::inc(&pending_signals[sig]);
1816   ::sem_post(&sig_sem);
1817 }
1818 
1819 static int check_pending_signals(bool wait) {
1820   Atomic::store(0, &sigint_count);
1821   for (;;) {
1822     for (int i = 0; i < NSIG + 1; i++) {
1823       jint n = pending_signals[i];
1824       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1825         return i;
1826       }
1827     }
1828     if (!wait) {
1829       return -1;
1830     }
1831     JavaThread *thread = JavaThread::current();
1832     ThreadBlockInVM tbivm(thread);
1833 
1834     bool threadIsSuspended;
1835     do {
1836       thread->set_suspend_equivalent();
1837       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1838 
1839       ::sem_wait(&sig_sem);
1840 
1841       // were we externally suspended while we were waiting?
1842       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1843       if (threadIsSuspended) {
1844         //
1845         // The semaphore has been incremented, but while we were waiting
1846         // another thread suspended us. We don't want to continue running
1847         // while suspended because that would surprise the thread that
1848         // suspended us.
1849         //
1850         ::sem_post(&sig_sem);
1851 
1852         thread->java_suspend_self();
1853       }
1854     } while (threadIsSuspended);
1855   }
1856 }
1857 
1858 int os::signal_lookup() {
1859   return check_pending_signals(false);
1860 }
1861 
1862 int os::signal_wait() {
1863   return check_pending_signals(true);
1864 }
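
// The two entry points above form the consumer side of a simple
// producer/consumer scheme: os::signal_notify() (the producer) increments
// pending_signals[sig] and posts sig_sem; check_pending_signals() (the
// consumer) scans the counters and, in the signal_wait() case, blocks on
// the semaphore until something becomes pending. Illustrative flow:
//
//   os::signal_notify(SIGHUP);    // e.g. called from UserHandler
//   int sig = os::signal_wait();  // dispatcher thread wakes up
//   // sig == SIGHUP here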
1865 
1866 ////////////////////////////////////////////////////////////////////////////////
1867 // Virtual Memory
1868 
1869 // We need to keep small simple bookkeeping for os::reserve_memory and friends.
1870 
1871 #define VMEM_MAPPED  1
1872 #define VMEM_SHMATED 2
1873 
1874 struct vmembk_t {
1875   int type;         // 1 - mmap, 2 - shmat
1876   char* addr;
1877   size_t size;      // Real size, may be larger than usersize.
1878   size_t pagesize;  // page size of area
1879   vmembk_t* next;
1880 
1881   bool contains_addr(char* p) const {
1882     return p >= addr && p < (addr + size);
1883   }
1884 
1885   bool contains_range(char* p, size_t s) const {
1886     return contains_addr(p) && contains_addr(p + s - 1);
1887   }
1888 
1889   void print_on(outputStream* os) const {
1890     os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1891       " bytes, %d %s pages), %s",
1892       addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1893       (type == VMEM_SHMATED ? "shmat" : "mmap")
1894     );
1895   }
1896 
1897   // Check that range is a sub range of memory block (or equal to memory block);
1898   // also check that range is fully page aligned to the page size of the block.
1899   void assert_is_valid_subrange(char* p, size_t s) const {
1900     if (!contains_range(p, s)) {
1901       fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1902               "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
1903               p, p + s - 1, addr, addr + size - 1);
1904       guarantee0(false);
1905     }
1906     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1907       fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1908               " aligned to pagesize (%s)\n", p, p + s, describe_pagesize(pagesize));
1909       guarantee0(false);
1910     }
1911   }
1912 };
1913 
1914 static struct {
1915   vmembk_t* first;
1916   MiscUtils::CritSect cs;
1917 } vmem;
1918 
1919 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1920   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1921   assert0(p);
1922   if (p) {
1923     MiscUtils::AutoCritSect lck(&vmem.cs);
1924     p->addr = addr; p->size = size;
1925     p->pagesize = pagesize;
1926     p->type = type;
1927     p->next = vmem.first;
1928     vmem.first = p;
1929   }
1930 }
1931 
1932 static vmembk_t* vmembk_find(char* addr) {
1933   MiscUtils::AutoCritSect lck(&vmem.cs);
1934   for (vmembk_t* p = vmem.first; p; p = p->next) {
1935     if (p->addr <= addr && (p->addr + p->size) > addr) {
1936       return p;
1937     }
1938   }
1939   return NULL;
1940 }
1941 
1942 static void vmembk_remove(vmembk_t* p0) {
1943   MiscUtils::AutoCritSect lck(&vmem.cs);
1944   assert0(p0);
1945   assert0(vmem.first); // List should not be empty.
1946   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1947     if (*pp == p0) {
1948       *pp = p0->next;
1949       ::free(p0);
1950       return;
1951     }
1952   }
1953   assert0(false); // Not found?
1954 }
1955 
1956 static void vmembk_print_on(outputStream* os) {
1957   MiscUtils::AutoCritSect lck(&vmem.cs);
1958   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1959     vmi->print_on(os);
1960     os->cr();
1961   }
1962 }
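
// Usage pattern for the bookkeeping above (illustrative sketch):
//
//   char* p = ...;  // fresh result of shmat() or mmap()
//   vmembk_add(p, size, pagesize, VMEM_SHMATED);  // remember the block
//   ...
//   vmembk_t* vmi = vmembk_find(p);  // look up by any address in the block
//   if (vmi != NULL) {
//     vmembk_remove(vmi);            // forget it again when releasing
//   }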
1963 
1964 // Reserve and attach a section of System V memory.
1965 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1966 // address. Failing that, it will attach the memory anywhere.
1967 // If <requested_addr> is NULL, function will attach the memory anywhere.
1968 //
1969 // <alignment_hint> is being ignored by this function. It is very probable however that the
1970 // alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
1971 // Should this not be enough, we can put more work into it.
1972 static char* reserve_shmated_memory (
1973   size_t bytes,
1974   char* requested_addr,
1975   size_t alignment_hint) {
1976 
1977   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1978     PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1979     bytes, requested_addr, alignment_hint);
1980 
1981   // Either give me wish address or wish alignment but not both.
1982   assert0(!(requested_addr != NULL && alignment_hint != 0));
1983 
1984   // We must prevent anyone from attaching too close to the
1985   // BRK because that may cause malloc OOM.
1986   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1987     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1988       "Will attach anywhere.", requested_addr);
1989     // Act like the OS refused to attach there.
1990     requested_addr = NULL;
1991   }
1992 
1993   // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1994 // really supported (max size 4GB), so reserve_mmaped_memory should have been used instead.
1995   if (os::Aix::on_pase_V5R4_or_older()) {
1996     ShouldNotReachHere();
1997   }
1998 
1999   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
2000   const size_t size = align_size_up(bytes, SIZE_64K);
2001 
2002   // Reserve the shared segment.
2003   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
2004   if (shmid == -1) {
2005     trc("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
2006     return NULL;
2007   }
2008 
2009   // Important note:
2010   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
2011   // We must right after attaching it remove it from the system. System V shm segments are global and
2012   // survive the process.
2013   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
2014 
2015   struct shmid_ds shmbuf;
2016   memset(&shmbuf, 0, sizeof(shmbuf));
2017   shmbuf.shm_pagesize = SIZE_64K;
2018   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
2019     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
2020                size / SIZE_64K, errno);
2021     // I want to know if this ever happens.
2022     assert(false, "failed to set page size for shmat");
2023   }
2024 
2025   // Now attach the shared segment.
2026   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
2027   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
2028   // were not a segment boundary.
2029   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
2030   const int errno_shmat = errno;
2031 
2032   // (A) Right after shmat and before handing shmat errors delete the shm segment.
2033   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2034     trc("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2035     assert(false, "failed to remove shared memory segment!");
2036   }
2037 
2038   // Handle shmat error. If we failed to attach, just return.
2039   if (addr == (char*)-1) {
2040     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
2041     return NULL;
2042   }
2043 
2044   // Just for info: query the real page size. In case setting the page size did not
2045   // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
2046   const size_t real_pagesize = os::Aix::query_pagesize(addr);
2047   if (real_pagesize != shmbuf.shm_pagesize) {
2048     trcVerbose("pagesize is, surprisingly, " UINTX_FORMAT ".", real_pagesize);
2049   }
2050 
2051   if (addr) {
2052     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
2053       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
2054   } else {
2055     if (requested_addr != NULL) {
2056       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", size, requested_addr);
2057     } else {
2058       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
2059     }
2060   }
2061 
2062   // book-keeping
2063   vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
2064   assert0(is_aligned_to(addr, os::vm_page_size()));
2065 
2066   return addr;
2067 }
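
// The System V shared memory lifecycle used above, condensed to its core
// calls (illustrative sketch only):
//
//   int id = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
//   struct shmid_ds ds;
//   memset(&ds, 0, sizeof(ds));
//   ds.shm_pagesize = SIZE_64K;
//   shmctl(id, SHM_PAGESIZE, &ds);      // AIX extension: request 64K pages
//   char* p = (char*) shmat(id, wish_addr, SHM_RND);
//   shmctl(id, IPC_RMID, NULL);         // (A) always - else the segment outlives us
//
// The attached memory stays usable after IPC_RMID until the final shmdt().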
2068 
2069 static bool release_shmated_memory(char* addr, size_t size) {
2070 
2071   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2072     addr, addr + size - 1);
2073 
2074   bool rc = false;
2075 
2076   // TODO: is there a way to verify shm size without doing bookkeeping?
2077   if (::shmdt(addr) != 0) {
2078     trcVerbose("error (%d).", errno);
2079   } else {
2080     trcVerbose("ok.");
2081     rc = true;
2082   }
2083   return rc;
2084 }
2085 
2086 static bool uncommit_shmated_memory(char* addr, size_t size) {
2087   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2088     addr, addr + size - 1);
2089 
2090   const bool rc = my_disclaim64(addr, size);
2091 
2092   if (!rc) {
2093     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2094     return false;
2095   }
2096   return true;
2097 }
2098 
2099 // Reserve memory via mmap.
2100 // If <requested_addr> is given, an attempt is made to attach at the given address.
2101 // Failing that, memory is allocated at any address.
2102 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2103 // allocate at an address aligned with the given alignment. Failing that, memory
2104 // is allocated at any address.
2105 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2106   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2107     "alignment_hint " UINTX_FORMAT "...",
2108     bytes, requested_addr, alignment_hint);
2109 
2110   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2111   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size())) {
2112     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2113     return NULL;
2114   }
2115 
2116   // We must prevent anyone from attaching too close to the
2117   // BRK because that may cause malloc OOM.
2118   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2119     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2120       "Will attach anywhere.", requested_addr);
2121     // Act like the OS refused to attach there.
2122     requested_addr = NULL;
2123   }
2124 
2125   // Specify one or the other but not both.
2126   assert0(!(requested_addr != NULL && alignment_hint > 0));
2127 
2128   // In 64K mode, we claim the global page size (os::vm_page_size())
2129   // is 64K. This is one of the few points where that illusion may
2130   // break, because mmap() will always return memory aligned to 4K. So
2131   // we must ensure we only ever return memory aligned to 64k.
2132   if (alignment_hint) {
2133     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2134   } else {
2135     alignment_hint = os::vm_page_size();
2136   }
2137 
2138   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2139   const size_t size = align_size_up(bytes, os::vm_page_size());
2140 
2141   // alignment: Allocate memory large enough to include an aligned range of the right size and
2142   // cut off the leading and trailing waste pages.
2143   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2144   const size_t extra_size = size + alignment_hint;
2145 
2146   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2147   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2148   int flags = MAP_ANONYMOUS | MAP_SHARED;
2149 
2150   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2151   // it means if wishaddress is given but MAP_FIXED is not set.
2152   //
2153   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2154   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2155   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2156   // get clobbered.
2157   if (requested_addr != NULL) {
2158     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2159       flags |= MAP_FIXED;
2160     }
2161   }
2162 
2163   char* addr = (char*)::mmap(requested_addr, extra_size,
2164       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2165 
2166   if (addr == MAP_FAILED) {
2167     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2168     return NULL;
2169   }
2170 
2171   // Handle alignment.
2172   char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2173   const size_t waste_pre = addr_aligned - addr;
2174   char* const addr_aligned_end = addr_aligned + size;
2175   const size_t waste_post = extra_size - waste_pre - size;
2176   if (waste_pre > 0) {
2177     ::munmap(addr, waste_pre);
2178   }
2179   if (waste_post > 0) {
2180     ::munmap(addr_aligned_end, waste_post);
2181   }
2182   addr = addr_aligned;
2183 
2184   if (addr) {
2185     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2186       addr, addr + bytes, bytes);
2187   } else {
2188     if (requested_addr != NULL) {
2189       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2190     } else {
2191       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2192     }
2193   }
2194 
2195   // bookkeeping
2196   vmembk_add(addr, size, SIZE_4K, VMEM_MAPPED);
2197 
2198   // Test alignment, see above.
2199   assert0(is_aligned_to(addr, os::vm_page_size()));
2200 
2201   return addr;
2202 }
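
// Worked example for the alignment trimming above (illustrative): with
// size = 64K and alignment_hint = 256K we mmap extra_size = 320K. If mmap
// returns addr = 0x30010000, then addr_aligned = 0x30040000, so
// waste_pre = 192K and waste_post = 320K - 192K - 64K = 64K. Both waste
// ranges are munmap'ed and only the aligned 64K range survives.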
2203 
2204 static bool release_mmaped_memory(char* addr, size_t size) {
2205   assert0(is_aligned_to(addr, os::vm_page_size()));
2206   assert0(is_aligned_to(size, os::vm_page_size()));
2207 
2208   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2209     addr, addr + size - 1);
2210   bool rc = false;
2211 
2212   if (::munmap(addr, size) != 0) {
2213     trcVerbose("failed (%d)\n", errno);
2214     rc = false;
2215   } else {
2216     trcVerbose("ok.");
2217     rc = true;
2218   }
2219 
2220   return rc;
2221 }
2222 
2223 static bool uncommit_mmaped_memory(char* addr, size_t size) {
2224 
2225   assert0(is_aligned_to(addr, os::vm_page_size()));
2226   assert0(is_aligned_to(size, os::vm_page_size()));
2227 
2228   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2229     addr, addr + size - 1);
2230   bool rc = false;
2231 
2232   // Uncommit mmap memory with msync MS_INVALIDATE.
2233   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2234     trcVerbose("failed (%d)\n", errno);
2235     rc = false;
2236   } else {
2237     trcVerbose("ok.");
2238     rc = true;
2239   }
2240 
2241   return rc;
2242 }
2243 
2244 // End: shared memory bookkeeping
2245 ////////////////////////////////////////////////////////////////////////////////////////////////////
2246 
2247 int os::vm_page_size() {
2248   // Seems redundant as all get out.
2249   assert(os::Aix::page_size() != -1, "must call os::init");
2250   return os::Aix::page_size();
2251 }
2252 
2253 // Aix allocates memory by pages.
2254 int os::vm_allocation_granularity() {
2255   assert(os::Aix::page_size() != -1, "must call os::init");
2256   return os::Aix::page_size();
2257 }
2258 
2259 #ifdef PRODUCT
2260 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2261                                     int err) {
2262   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2263           ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2264           strerror(err), err);
2265 }
2266 #endif
2267 
2268 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2269                                   const char* mesg) {
2270   assert(mesg != NULL, "mesg must be specified");
2271   if (!pd_commit_memory(addr, size, exec)) {
2272     // Add extra info in product mode for vm_exit_out_of_memory():
2273     PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2274     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2275   }
2276 }
2277 
2278 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2279 
2280   assert0(is_aligned_to(addr, os::vm_page_size()));
2281   assert0(is_aligned_to(size, os::vm_page_size()));
2282 
2283   vmembk_t* const vmi = vmembk_find(addr);
2284   assert0(vmi);
2285   vmi->assert_is_valid_subrange(addr, size);
2286 
2287   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2288 
2289   return true;
2290 }
2291 
2292 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2293   return pd_commit_memory(addr, size, exec);
2294 }
2295 
2296 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2297                                   size_t alignment_hint, bool exec,
2298                                   const char* mesg) {
2299   // Alignment_hint is ignored on this OS.
2300   pd_commit_memory_or_exit(addr, size, exec, mesg);
2301 }
2302 
2303 bool os::pd_uncommit_memory(char* addr, size_t size) {
2304   assert0(is_aligned_to(addr, os::vm_page_size()));
2305   assert0(is_aligned_to(size, os::vm_page_size()));
2306 
2307   // Dynamically do different things for mmap/shmat.
2308   const vmembk_t* const vmi = vmembk_find(addr);
2309   assert0(vmi);
2310   vmi->assert_is_valid_subrange(addr, size);
2311 
2312   if (vmi->type == VMEM_SHMATED) {
2313     return uncommit_shmated_memory(addr, size);
2314   } else {
2315     return uncommit_mmaped_memory(addr, size);
2316   }
2317 }
2318 
2319 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2320   // Do not call this; no need to commit stack pages on AIX.
2321   ShouldNotReachHere();
2322   return true;
2323 }
2324 
2325 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2326   // Do not call this; no need to commit stack pages on AIX.
2327   ShouldNotReachHere();
2328   return true;
2329 }
2330 
2331 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2332 }
2333 
2334 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2335 }
2336 
2337 void os::numa_make_global(char *addr, size_t bytes) {
2338 }
2339 
2340 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2341 }
2342 
2343 bool os::numa_topology_changed() {
2344   return false;
2345 }
2346 
2347 size_t os::numa_get_groups_num() {
2348   return 1;
2349 }
2350 
2351 int os::numa_get_group_id() {
2352   return 0;
2353 }
2354 
2355 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2356   if (size > 0) {
2357     ids[0] = 0;
2358     return 1;
2359   }
2360   return 0;
2361 }
2362 
2363 bool os::get_page_info(char *start, page_info* info) {
2364   return false;
2365 }
2366 
2367 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2368   return end;
2369 }
2370 
2371 // Reserves and attaches a shared memory segment.
2372 // Will assert if a wish address is given and could not be obtained.
2373 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2374 
2375   // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2376   // thereby clobbering old mappings at that place. That is probably
2377   // not intended, never used and almost certainly an error were it
2378   // ever to be used this way (to try attaching at a specified address
2379   // without clobbering old mappings an alternate API exists,
2380   // os::attempt_reserve_memory_at()).
2381   // Instead of mimicking the dangerous coding of the other platforms, here I
2382   // just ignore the request address (release) or assert(debug).
2383   assert0(requested_addr == NULL);
2384 
2385   // Always round to os::vm_page_size(), which may be larger than 4K.
2386   bytes = align_size_up(bytes, os::vm_page_size());
2387   const size_t alignment_hint0 =
2388     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2389 
2390   // In 4K mode always use mmap.
2391   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2392   if (os::vm_page_size() == SIZE_4K) {
2393     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2394   } else {
2395     if (bytes >= Use64KPagesThreshold) {
2396       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2397     } else {
2398       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2399     }
2400   }
2401 }
2402 
2403 bool os::pd_release_memory(char* addr, size_t size) {
2404 
2405   // Dynamically do different things for mmap/shmat.
2406   vmembk_t* const vmi = vmembk_find(addr);
2407   assert0(vmi);
2408 
2409   // Always round to os::vm_page_size(), which may be larger than 4K.
2410   size = align_size_up(size, os::vm_page_size());
2411   addr = (char *)align_ptr_up(addr, os::vm_page_size());
2412 
2413   bool rc = false;
2414   bool remove_bookkeeping = false;
2415   if (vmi->type == VMEM_SHMATED) {
2416     // For shmatted memory, we do:
2417     // - If user wants to release the whole range, release the memory (shmdt).
2418     // - If user only wants to release a partial range, uncommit (disclaim) that
2419     //   range. That way, at least, we do not use memory anymore (but still page
2420     //   table space).
2421     vmi->assert_is_valid_subrange(addr, size);
2422     if (addr == vmi->addr && size == vmi->size) {
2423       rc = release_shmated_memory(addr, size);
2424       remove_bookkeeping = true;
2425     } else {
2426       rc = uncommit_shmated_memory(addr, size);
2427     }
2428   } else {
2429     // User may unmap partial regions but region has to be fully contained.
2430 #ifdef ASSERT
2431     vmi->assert_is_valid_subrange(addr, size);
2432 #endif
2433     rc = release_mmaped_memory(addr, size);
2434     remove_bookkeeping = true;
2435   }
2436 
2437   // update bookkeeping
2438   if (rc && remove_bookkeeping) {
2439     vmembk_remove(vmi);
2440   }
2441 
2442   return rc;
2443 }
2444 
2445 static bool checked_mprotect(char* addr, size_t size, int prot) {
2446 
2447   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2448   // not tell me if protection failed when trying to protect an un-protectable range.
2449   //
2450   // This means if the memory was allocated using shmget/shmat, protection won't work
2451   // but mprotect will still return 0:
2452   //
2453   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2454 
2455   bool rc = ::mprotect(addr, size, prot) == 0;
2456 
2457   if (!rc) {
2458     const char* const s_errno = strerror(errno);
2459     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2460     return false;
2461   }
2462 
2463   // mprotect success check
2464   //
2465   // Mprotect said it changed the protection but can I believe it?
2466   //
2467   // To be sure I need to check the protection afterwards. Try to
2468   // read from protected memory and check whether that causes a segfault.
2469   //
2470   if (!os::Aix::xpg_sus_mode()) {
2471 
2472     if (CanUseSafeFetch32()) {
2473 
2474       const bool read_protected =
2475         (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2476          SafeFetch32((int*)addr, 0x76543210) == 0x76543210);
2477 
2478       if (prot & PROT_READ) {
2479         rc = !read_protected;
2480       } else {
2481         rc = read_protected;
2482       }
2483     }
2484   }
2485   if (!rc) {
2486     assert(false, "mprotect failed.");
2487   }
2488   return rc;
2489 }
2490 
2491 // Set protections specified
2492 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2493   unsigned int p = 0;
2494   switch (prot) {
2495   case MEM_PROT_NONE: p = PROT_NONE; break;
2496   case MEM_PROT_READ: p = PROT_READ; break;
2497   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2498   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2499   default:
2500     ShouldNotReachHere();
2501   }
2502   // is_committed is unused.
2503   return checked_mprotect(addr, size, p);
2504 }
2505 
2506 bool os::guard_memory(char* addr, size_t size) {
2507   return checked_mprotect(addr, size, PROT_NONE);
2508 }
2509 
2510 bool os::unguard_memory(char* addr, size_t size) {
2511   return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2512 }
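
// Typical use of the guard calls above (illustrative sketch): protect a page
// to catch stray accesses, later make it accessible again.
//
//   char* p = ...;  // page-aligned, committed memory
//   os::guard_memory(p, os::vm_page_size());    // now PROT_NONE
//   ...
//   os::unguard_memory(p, os::vm_page_size());  // back to read/write/exec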
2513 
2514 // Large page support
2515 
2516 static size_t _large_page_size = 0;
2517 
2518 // Enable large page support if OS allows that.
2519 void os::large_page_init() {
2520   return; // Nothing to do. See query_multipage_support and friends.
2521 }
2522 
2523 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2524   // "exec" is passed in but not used. Creating the shared image for
2525   // the code cache doesn't have an SHM_X executable permission to check.
2526   Unimplemented();
2527   return 0;
2528 }
2529 
2530 bool os::release_memory_special(char* base, size_t bytes) {
2531   // Detaching the SHM segment will also delete it, see reserve_memory_special().
2532   Unimplemented();
2533   return false;
2534 }
2535 
2536 size_t os::large_page_size() {
2537   return _large_page_size;
2538 }
2539 
2540 bool os::can_commit_large_page_memory() {
2541   // Does not matter, we do not support huge pages.
2542   return false;
2543 }
2544 
2545 bool os::can_execute_large_page_memory() {
2546   // Does not matter, we do not support huge pages.
2547   return false;
2548 }
2549 
2550 // Reserve memory at an arbitrary address, only if that area is
2551 // available (and not reserved for something else).
2552 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2553   char* addr = NULL;
2554 
2555   // Always round to os::vm_page_size(), which may be larger than 4K.
2556   bytes = align_size_up(bytes, os::vm_page_size());
2557 
2558   // In 4K mode always use mmap.
2559   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2560   if (os::vm_page_size() == SIZE_4K) {
2561     return reserve_mmaped_memory(bytes, requested_addr, 0);
2562   } else {
2563     if (bytes >= Use64KPagesThreshold) {
2564       return reserve_shmated_memory(bytes, requested_addr, 0);
2565     } else {
2566       return reserve_mmaped_memory(bytes, requested_addr, 0);
2567     }
2568   }
2569 
2570   return addr;
2571 }
2572 
2573 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2574   return ::read(fd, buf, nBytes);
2575 }
2576 
2577 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2578   return ::pread(fd, buf, nBytes, offset);
2579 }
2580 
2581 void os::naked_short_sleep(jlong ms) {
2582   struct timespec req;
2583 
2584   assert(ms < 1000, "Un-interruptible sleep, short time use only");
2585   req.tv_sec = 0;
2586   if (ms > 0) {
2587     req.tv_nsec = (ms % 1000) * 1000000;
2588   } else {
2590     req.tv_nsec = 1;
2591   }
2592 
2593   nanosleep(&req, NULL);
2594 
2595   return;
2596 }
2597 
2598 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2599 void os::infinite_sleep() {
2600   while (true) {    // sleep forever ...
2601     ::sleep(100);   // ... 100 seconds at a time
2602   }
2603 }
2604 
2605 // Used to convert frequent JVM_Yield() to nops
2606 bool os::dont_yield() {
2607   return DontYieldALot;
2608 }
2609 
2610 void os::naked_yield() {
2611   sched_yield();
2612 }
2613 
2614 ////////////////////////////////////////////////////////////////////////////////
2615 // thread priority support
2616 
2617 // From AIX manpage to pthread_setschedparam
2618 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2619 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2620 //
2621 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2622 // range from 40 to 80, where 40 is the least favored priority and 80
2623 // is the most favored."
2624 //
2625 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2626 // scheduling there; however, this still leaves iSeries.)
2627 //
2628 // We use the same values for AIX and PASE.
2629 int os::java_to_os_priority[CriticalPriority + 1] = {
2630   54,             // 0 Entry should never be used
2631 
2632   55,             // 1 MinPriority
2633   55,             // 2
2634   56,             // 3
2635 
2636   56,             // 4
2637   57,             // 5 NormPriority
2638   57,             // 6
2639 
2640   58,             // 7
2641   58,             // 8
2642   59,             // 9 NearMaxPriority
2643 
2644   60,             // 10 MaxPriority
2645 
2646   60              // 11 CriticalPriority
2647 };
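
// Worked example (reading the table above): a Java thread at NormPriority (5)
// runs with sched_priority 57, MinPriority (1) with 55 and MaxPriority (10)
// with 60 - all well within the 40..80 window required for SCHED_OTHER.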
2648 
2649 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2650   if (!UseThreadPriorities) return OS_OK;
2651   pthread_t thr = thread->osthread()->pthread_id();
2652   int policy = SCHED_OTHER;
2653   struct sched_param param;
2654   param.sched_priority = newpri;
2655   int ret = pthread_setschedparam(thr, policy, &param);
2656 
2657   if (ret != 0) {
2658     trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2659         (int)thr, newpri, ret, strerror(ret));
2660   }
2661   return (ret == 0) ? OS_OK : OS_ERR;
2662 }
2663 
2664 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2665   if (!UseThreadPriorities) {
2666     *priority_ptr = java_to_os_priority[NormPriority];
2667     return OS_OK;
2668   }
2669   pthread_t thr = thread->osthread()->pthread_id();
2670   int policy = SCHED_OTHER;
2671   struct sched_param param;
2672   int ret = pthread_getschedparam(thr, &policy, &param);
2673   *priority_ptr = param.sched_priority;
2674 
2675   return (ret == 0) ? OS_OK : OS_ERR;
2676 }
2677 
2678 // Hint to the underlying OS that a task switch would not be good.
2679 // Void return because it's a hint and can fail.
2680 void os::hint_no_preempt() {}
2681 
2682 ////////////////////////////////////////////////////////////////////////////////
2683 // suspend/resume support
2684 
2685 //  the low-level signal-based suspend/resume support is a remnant from the
2686 //  old VM-suspension that used to be for java-suspension, safepoints etc,
2687 //  within hotspot. Now there is a single use-case for this:
2688 //    - calling get_thread_pc() on the VMThread by the flat-profiler task
2689 //      that runs in the watcher thread.
2690 //  The remaining code is greatly simplified from the more general suspension
2691 //  code that used to be used.
2692 //
2693 //  The protocol is quite simple:
2694 //  - suspend:
2695 //      - sends a signal to the target thread
2696 //      - polls the suspend state of the osthread using a yield loop
2697 //      - target thread signal handler (SR_handler) sets suspend state
2698 //        and blocks in sigsuspend until continued
2699 //  - resume:
2700 //      - sets target osthread state to continue
2701 //      - sends signal to end the sigsuspend loop in the SR_handler
2702 //
2703 //  Note that the SR_lock plays no role in this suspend/resume protocol.
2704 //
2705 
2706 static void resume_clear_context(OSThread *osthread) {
2707   osthread->set_ucontext(NULL);
2708   osthread->set_siginfo(NULL);
2709 }
2710 
2711 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2712   osthread->set_ucontext(context);
2713   osthread->set_siginfo(siginfo);
2714 }
2715 
2716 //
2717 // Handler function invoked when a thread's execution is suspended or
2718 // resumed. We have to be careful that only async-safe functions are
2719 // called here (Note: most pthread functions are not async safe and
2720 // should be avoided.)
2721 //
2722 // Note: sigwait() is a more natural fit than sigsuspend() from an
2723 // interface point of view, but sigwait() prevents the signal handler
2724 // from being run. libpthread would get very confused by not having
2725 // its signal handlers run and prevents sigwait()'s use with the
2726 // mutex granting signal.
2727 //
2728 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
2729 //
2730 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2731   // Save and restore errno to avoid confusing native code with EINTR
2732   // after sigsuspend.
2733   int old_errno = errno;
2734 
2735   Thread* thread = Thread::current();
2736   OSThread* osthread = thread->osthread();
2737   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2738 
2739   os::SuspendResume::State current = osthread->sr.state();
2740   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2741     suspend_save_context(osthread, siginfo, context);
2742 
2743     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2744     os::SuspendResume::State state = osthread->sr.suspended();
2745     if (state == os::SuspendResume::SR_SUSPENDED) {
2746       sigset_t suspend_set;  // signals for sigsuspend()
2747 
2748       // get current set of blocked signals and unblock resume signal
2749       pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2750       sigdelset(&suspend_set, SR_signum);
2751 
2752       // wait here until we are resumed
2753       while (1) {
2754         sigsuspend(&suspend_set);
2755 
2756         os::SuspendResume::State result = osthread->sr.running();
2757         if (result == os::SuspendResume::SR_RUNNING) {
2758           break;
2759         }
2760       }
2761 
2762     } else if (state == os::SuspendResume::SR_RUNNING) {
2763       // request was cancelled, continue
2764     } else {
2765       ShouldNotReachHere();
2766     }
2767 
2768     resume_clear_context(osthread);
2769   } else if (current == os::SuspendResume::SR_RUNNING) {
2770     // request was cancelled, continue
2771   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2772     // ignore
2773   } else {
2774     ShouldNotReachHere();
2775   }
2776 
2777   errno = old_errno;
2778 }
2779 
2780 static int SR_initialize() {
2781   struct sigaction act;
2782   char *s;
2783   // Get signal number to use for suspend/resume
2784   if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
2785     int sig = ::strtol(s, 0, 10);
2786     if (sig > 0 && sig < NSIG) {
2787       SR_signum = sig;
2788     }
2789   }
2790 
2791   assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
2792         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
2793 
2794   sigemptyset(&SR_sigset);
2795   sigaddset(&SR_sigset, SR_signum);
2796 
2797   // Set up signal handler for suspend/resume.
2798   act.sa_flags = SA_RESTART|SA_SIGINFO;
2799   act.sa_handler = (void (*)(int)) SR_handler;
2800 
2801   // SR_signum is blocked by default.
2802   // 4528190 - We also need to block pthread restart signal (32 on all
2803   // supported Linux platforms). Note that LinuxThreads need to block
2804   // this signal for all threads to work properly. So we don't have
2805   // to use hard-coded signal number when setting up the mask.
2806   pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
2807 
2808   if (sigaction(SR_signum, &act, 0) == -1) {
2809     return -1;
2810   }
2811 
2812   // Save signal flag
2813   os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
2814   return 0;
2815 }
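
// Example (illustrative): to move the suspend/resume signal away from one
// that conflicts with other libraries, set the environment variable before
// starting the VM, e.g.
//
//   _JAVA_SR_SIGNUM=39 java ...
//
// SR_initialize() above will then use signal 39 instead of the default.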
2816 
2817 static int SR_finalize() {
2818   return 0;
2819 }
2820 
2821 static int sr_notify(OSThread* osthread) {
2822   int status = pthread_kill(osthread->pthread_id(), SR_signum);
2823   assert_status(status == 0, status, "pthread_kill");
2824   return status;
2825 }
2826 
2827 // "Randomly" selected value for how long we want to spin
2828 // before bailing out on suspending a thread, also how often
2829 // we send a signal to a thread we want to resume
2830 static const int RANDOMLY_LARGE_INTEGER = 1000000;
2831 static const int RANDOMLY_LARGE_INTEGER2 = 100;
2832 
2833 // returns true on success and false on error - really an error is fatal
2834 // but this seems the normal response to library errors
2835 static bool do_suspend(OSThread* osthread) {
2836   assert(osthread->sr.is_running(), "thread should be running");
2837   // mark as suspended and send signal
2838 
2839   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
2840     // failed to switch, state wasn't running?
2841     ShouldNotReachHere();
2842     return false;
2843   }
2844 
2845   if (sr_notify(osthread) != 0) {
2846     // try to cancel, switch to running
2847 
2848     os::SuspendResume::State result = osthread->sr.cancel_suspend();
2849     if (result == os::SuspendResume::SR_RUNNING) {
2850       // cancelled
2851       return false;
2852     } else if (result == os::SuspendResume::SR_SUSPENDED) {
2853       // somehow managed to suspend
2854       return true;
2855     } else {
2856       ShouldNotReachHere();
2857       return false;
2858     }
2859   }
2860 
2861   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
2862 
2863   for (int n = 0; !osthread->sr.is_suspended(); n++) {
2864     for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
2865       os::naked_yield();
2866     }
2867 
2868     // timeout, try to cancel the request
2869     if (n >= RANDOMLY_LARGE_INTEGER) {
2870       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
2871       if (cancelled == os::SuspendResume::SR_RUNNING) {
2872         return false;
2873       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
2874         return true;
2875       } else {
2876         ShouldNotReachHere();
2877         return false;
2878       }
2879     }
2880   }
2881 
2882   guarantee(osthread->sr.is_suspended(), "Must be suspended");
2883   return true;
2884 }
2885 
2886 static void do_resume(OSThread* osthread) {
2887   //assert(osthread->sr.is_suspended(), "thread should be suspended");
2888 
2889   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
2890     // failed to switch to WAKEUP_REQUEST
2891     ShouldNotReachHere();
2892     return;
2893   }
2894 
2895   while (!osthread->sr.is_running()) {
2896     if (sr_notify(osthread) == 0) {
2897       for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
2898         for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
2899           os::naked_yield();
2900         }
2901       }
2902     } else {
2903       ShouldNotReachHere();
2904     }
2905   }
2906 
2907   guarantee(osthread->sr.is_running(), "Must be running!");
2908 }
2909 
2910 ///////////////////////////////////////////////////////////////////////////////////
2911 // signal handling (except suspend/resume)
2912 
2913 // This routine may be used by user applications as a "hook" to catch signals.
2914 // The user-defined signal handler must pass unrecognized signals to this
2915 // routine, and if it returns true (non-zero), then the signal handler must
2916 // return immediately. If the flag "abort_if_unrecognized" is true, then this
2917 // routine will never return false (zero), but instead will execute a VM panic
2918 // routine to kill the process.
2919 //
2920 // If this routine returns false, it is OK to call it again. This allows
2921 // the user-defined signal handler to perform checks either before or after
2922 // the VM performs its own checks. Naturally, the user code would be making
2923 // a serious error if it tried to handle an exception (such as a null check
2924 // or breakpoint) that the VM was generating for its own correct operation.
2925 //
2926 // This routine may recognize any of the following kinds of signals:
2927 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2928 // It should be consulted by handlers for any of those signals.
2929 //
2930 // The caller of this routine must pass in the three arguments supplied
2931 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
2932 // field of the structure passed to sigaction(). This routine assumes that
2933 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2934 //
2935 // Note that the VM will print warnings if it detects conflicting signal
2936 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2937 //
2938 extern "C" JNIEXPORT int
2939 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2940 
2941 // Set thread signal mask (for some reason on AIX sigthreadmask() seems
2942 // to be the thing to call; documentation is not terribly clear about whether
2943 // pthread_sigmask also works, and if it does, whether it does the same.)
2944 bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
2945   const int rc = ::pthread_sigmask(how, set, oset);
2946   // return value semantics differ slightly for error case:
2947   // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno
2948   // (so, pthread_sigmask is more threadsafe for error handling).
2949   // But success is always 0.
2950   return rc == 0;
2951 }
2952 
2953 // Function to unblock all signals which are, according
2954 // to POSIX, typical program error signals. If they happen while being blocked,
2955 // they typically will bring down the process immediately.
2956 bool unblock_program_error_signals() {
2957   sigset_t set;
2958   ::sigemptyset(&set);
2959   ::sigaddset(&set, SIGILL);
2960   ::sigaddset(&set, SIGBUS);
2961   ::sigaddset(&set, SIGFPE);
2962   ::sigaddset(&set, SIGSEGV);
2963   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2964 }
2965 
2966 // Renamed from 'signalHandler' to avoid collision with other shared libs.
2967 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2968   assert(info != NULL && uc != NULL, "it must be old kernel");
2969 
2970   // Never leave program error signals blocked;
2971   // on all our platforms they would bring down the process immediately when
2972   // getting raised while being blocked.
2973   unblock_program_error_signals();
2974 
2975   JVM_handle_aix_signal(sig, info, uc, true);
2976 }
2977 
2978 // This boolean allows users to forward their own non-matching signals
2979 // to JVM_handle_aix_signal, harmlessly.
2980 bool os::Aix::signal_handlers_are_installed = false;
2981 
2982 // For signal-chaining
2983 struct sigaction os::Aix::sigact[MAXSIGNUM];
2984 unsigned int os::Aix::sigs = 0;
2985 bool os::Aix::libjsig_is_loaded = false;
2986 typedef struct sigaction *(*get_signal_t)(int);
2987 get_signal_t os::Aix::get_signal_action = NULL;
2988 
2989 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2990   struct sigaction *actp = NULL;
2991 
2992   if (libjsig_is_loaded) {
2993     // Retrieve the old signal handler from libjsig
2994     actp = (*get_signal_action)(sig);
2995   }
2996   if (actp == NULL) {
2997     // Retrieve the preinstalled signal handler from jvm
2998     actp = get_preinstalled_handler(sig);
2999   }
3000 
3001   return actp;
3002 }
3003 
3004 static bool call_chained_handler(struct sigaction *actp, int sig,
3005                                  siginfo_t *siginfo, void *context) {
3006   // Call the old signal handler
3007   if (actp->sa_handler == SIG_DFL) {
3008     // It's more reasonable to let jvm treat it as an unexpected exception
3009     // instead of taking the default action.
3010     return false;
3011   } else if (actp->sa_handler != SIG_IGN) {
3012     if ((actp->sa_flags & SA_NODEFER) == 0) {
3013       // automatically block the signal
3014       sigaddset(&(actp->sa_mask), sig);
3015     }
3016 
3017     sa_handler_t hand = NULL;
3018     sa_sigaction_t sa = NULL;
3019     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3020     // retrieve the chained handler
3021     if (siginfo_flag_set) {
3022       sa = actp->sa_sigaction;
3023     } else {
3024       hand = actp->sa_handler;
3025     }
3026 
3027     if ((actp->sa_flags & SA_RESETHAND) != 0) {
3028       actp->sa_handler = SIG_DFL;
3029     }
3030 
3031     // try to honor the signal mask
3032     sigset_t oset;
3033     pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3034 
3035     // call into the chained handler
3036     if (siginfo_flag_set) {
3037       (*sa)(sig, siginfo, context);
3038     } else {
3039       (*hand)(sig);
3040     }
3041 
3042     // restore the signal mask
3043     pthread_sigmask(SIG_SETMASK, &oset, 0);
3044   }
3045   // Tell jvm's signal handler the signal is taken care of.
3046   return true;
3047 }
3048 
3049 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3050   bool chained = false;
3051   // signal-chaining
3052   if (UseSignalChaining) {
3053     struct sigaction *actp = get_chained_signal_action(sig);
3054     if (actp != NULL) {
3055       chained = call_chained_handler(actp, sig, siginfo, context);
3056     }
3057   }
3058   return chained;
3059 }
3060 
3061 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3062   if ((((unsigned int)1 << sig) & sigs) != 0) {
3063     return &sigact[sig];
3064   }
3065   return NULL;
3066 }
3067 
3068 void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3069   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3070   sigact[sig] = oldAct;
3071   sigs |= (unsigned int)1 << sig;
3072 }
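
// Example: SIGSEGV is 11 on AIX, so save_preinstalled_handler(SIGSEGV, act)
// sets bit 11 of 'sigs' and copies 'act' to sigact[11]; a later call to
// get_preinstalled_handler(SIGSEGV) then returns &sigact[11].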
3073 
3074 // for diagnostic
3075 int os::Aix::sigflags[MAXSIGNUM];
3076 
3077 int os::Aix::get_our_sigflags(int sig) {
3078   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3079   return sigflags[sig];
3080 }
3081 
3082 void os::Aix::set_our_sigflags(int sig, int flags) {
3083   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3084   sigflags[sig] = flags;
3085 }
3086 
3087 void os::Aix::set_signal_handler(int sig, bool set_installed) {
3088   // Check for overwrite.
3089   struct sigaction oldAct;
3090   sigaction(sig, (struct sigaction*)NULL, &oldAct);
3091 
3092   void* oldhand = oldAct.sa_sigaction
3093     ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3094     : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3095   // Renamed 'signalHandler' to avoid collision with other shared libs.
3096   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3097       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3098       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3099     if (AllowUserSignalHandlers || !set_installed) {
3100       // Do not overwrite; user takes responsibility to forward to us.
3101       return;
3102     } else if (UseSignalChaining) {
3103       // save the old handler in jvm
3104       save_preinstalled_handler(sig, oldAct);
3105       // libjsig also interposes the sigaction() call below and saves the
3106       // old sigaction on its own.
3107     } else {
3108       fatal("Encountered unexpected pre-existing sigaction handler "
3109             "%#lx for signal %d.", (long)oldhand, sig);
3110     }
3111   }
3112 
3113   struct sigaction sigAct;
3114   sigfillset(&(sigAct.sa_mask));
3115   if (!set_installed) {
3116     sigAct.sa_handler = SIG_DFL;
3117     sigAct.sa_flags = SA_RESTART;
3118   } else {
3119     // Renamed 'signalHandler' to avoid collision with other shared libs.
3120     sigAct.sa_sigaction = javaSignalHandler;
3121     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3122   }
3123   // Save the flags we set; used later for diagnostics.
3124   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3125   sigflags[sig] = sigAct.sa_flags;
3126 
3127   int ret = sigaction(sig, &sigAct, &oldAct);
3128   assert(ret == 0, "check");
3129 
3130   void* oldhand2 = oldAct.sa_sigaction
3131                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3132                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3133   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3134 }
3135 
3136 // install signal handlers for signals that HotSpot needs to
3137 // handle in order to support Java-level exception handling.
3138 void os::Aix::install_signal_handlers() {
3139   if (!signal_handlers_are_installed) {
3140     signal_handlers_are_installed = true;
3141 
3142     // signal-chaining
3143     typedef void (*signal_setting_t)();
3144     signal_setting_t begin_signal_setting = NULL;
3145     signal_setting_t end_signal_setting = NULL;
3146     begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3147                              dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3148     if (begin_signal_setting != NULL) {
3149       end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3150                              dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3151       get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3152                             dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3153       libjsig_is_loaded = true;
3154       assert(UseSignalChaining, "should enable signal-chaining");
3155     }
3156     if (libjsig_is_loaded) {
3157       // Tell libjsig jvm is setting signal handlers
3158       (*begin_signal_setting)();
3159     }
3160 
3161     set_signal_handler(SIGSEGV, true);
3162     set_signal_handler(SIGPIPE, true);
3163     set_signal_handler(SIGBUS, true);
3164     set_signal_handler(SIGILL, true);
3165     set_signal_handler(SIGFPE, true);
3166     set_signal_handler(SIGTRAP, true);
3167     set_signal_handler(SIGXFSZ, true);
3168     set_signal_handler(SIGDANGER, true);
3169 
3170     if (libjsig_is_loaded) {
3171       // Tell libjsig jvm finishes setting signal handlers.
3172       (*end_signal_setting)();
3173     }
3174 
3175     // We don't activate the signal checker if libjsig is in place, we trust ourselves;
3176     // and if a UserSignalHandler is installed, all bets are off.
3177     // Log that signal checking is off only if -verbose:jni is specified.
3178     if (CheckJNICalls) {
3179       if (libjsig_is_loaded) {
3180         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3181         check_signals = false;
3182       }
3183       if (AllowUserSignalHandlers) {
3184         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3185         check_signals = false;
3186       }
3187       // Need to initialize check_signal_done.
3188       ::sigemptyset(&check_signal_done);
3189     }
3190   }
3191 }
3192 
3193 static const char* get_signal_handler_name(address handler,
3194                                            char* buf, int buflen) {
3195   int offset;
3196   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3197   if (found) {
3198     // skip directory names
3199     const char *p1, *p2;
3200     p1 = buf;
3201     size_t len = strlen(os::file_separator());
3202     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3203     // The way os::dll_address_to_library_name is implemented on Aix
3204     // right now, it always returns -1 for the offset, which is not
3205     // terribly informative.
3206     // Will fix that. For now, omit the offset.
3207     jio_snprintf(buf, buflen, "%s", p1);
3208   } else {
3209     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3210   }
3211   return buf;
3212 }
3213 
3214 static void print_signal_handler(outputStream* st, int sig,
3215                                  char* buf, size_t buflen) {
3216   struct sigaction sa;
3217   sigaction(sig, NULL, &sa);
3218 
3219   st->print("%s: ", os::exception_name(sig, buf, buflen));
3220 
3221   address handler = (sa.sa_flags & SA_SIGINFO)
3222     ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3223     : CAST_FROM_FN_PTR(address, sa.sa_handler);
3224 
3225   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3226     st->print("SIG_DFL");
3227   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3228     st->print("SIG_IGN");
3229   } else {
3230     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3231   }
3232 
3233   // Print readable mask.
3234   st->print(", sa_mask[0]=");
3235   os::Posix::print_signal_set_short(st, &sa.sa_mask);
3236 
3237   address rh = VMError::get_resetted_sighandler(sig);
3238   // The handler may have been reset by VMError.
3239   if (rh != NULL) {
3240     handler = rh;
3241     sa.sa_flags = VMError::get_resetted_sigflags(sig);
3242   }
3243 
3244   // Print textual representation of sa_flags.
3245   st->print(", sa_flags=");
3246   os::Posix::print_sa_flags(st, sa.sa_flags);
3247 
3248   // Check: is it our handler?
3249   if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3250       handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
3251     // It is our signal handler.
3252     // Check the flags; warn if the system changed the ones we set.
3253     if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
3254       st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
3255                 os::Aix::get_our_sigflags(sig));
3256     }
3257   }
3258   st->cr();
3259 }
3260 
3261 #define DO_SIGNAL_CHECK(sig) \
3262   if (!sigismember(&check_signal_done, sig)) \
3263     os::Aix::check_signal_handler(sig)
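
// For instance, DO_SIGNAL_CHECK(SIGSEGV) expands to:
//
//   if (!sigismember(&check_signal_done, SIGSEGV))
//     os::Aix::check_signal_handler(SIGSEGV)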
3264 
3265 // This method is a periodic task to check for misbehaving JNI applications
3266 // under CheckJNI; we can add any other periodic checks here.
3267 
3268 void os::run_periodic_checks() {
3269 
3270   if (check_signals == false) return;
3271 
3272   // SEGV and BUS, if overridden, could potentially prevent the
3273   // generation of hs*.log in the event of a crash; debugging
3274   // such a case can be very challenging, so we absolutely
3275   // check the following for good measure:
3276   DO_SIGNAL_CHECK(SIGSEGV);
3277   DO_SIGNAL_CHECK(SIGILL);
3278   DO_SIGNAL_CHECK(SIGFPE);
3279   DO_SIGNAL_CHECK(SIGBUS);
3280   DO_SIGNAL_CHECK(SIGPIPE);
3281   DO_SIGNAL_CHECK(SIGXFSZ);
3282   if (UseSIGTRAP) {
3283     DO_SIGNAL_CHECK(SIGTRAP);
3284   }
3285   DO_SIGNAL_CHECK(SIGDANGER);
3286 
3287   // ReduceSignalUsage allows the user to override these handlers
3288   // see the comments at the very top of this file and in jvm_aix.h
3289   if (!ReduceSignalUsage) {
3290     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3291     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3292     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3293     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3294   }
3295 
3296   DO_SIGNAL_CHECK(SR_signum);
3297   DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
3298 }
3299 
3300 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3301 
3302 static os_sigaction_t os_sigaction = NULL;
3303 
3304 void os::Aix::check_signal_handler(int sig) {
3305   char buf[O_BUFLEN];
3306   address jvmHandler = NULL;
3307 
3308   struct sigaction act;
3309   if (os_sigaction == NULL) {
3310     // only trust the default sigaction, in case it has been interposed
3311     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3312     if (os_sigaction == NULL) return;
3313   }
3314 
3315   os_sigaction(sig, (struct sigaction*)NULL, &act);
3316 
3317   address thisHandler = (act.sa_flags & SA_SIGINFO)
3318     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3319     : CAST_FROM_FN_PTR(address, act.sa_handler);
3320 
3321   switch(sig) {
3322   case SIGSEGV:
3323   case SIGBUS:
3324   case SIGFPE:
3325   case SIGPIPE:
3326   case SIGILL:
3327   case SIGXFSZ:
3328     // Renamed 'signalHandler' to avoid collision with other shared libs.
3329     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3330     break;
3331 
3332   case SHUTDOWN1_SIGNAL:
3333   case SHUTDOWN2_SIGNAL:
3334   case SHUTDOWN3_SIGNAL:
3335   case BREAK_SIGNAL:
3336     jvmHandler = (address)user_handler();
3337     break;
3338 
3339   case INTERRUPT_SIGNAL:
3340     jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
3341     break;
3342 
3343   default:
3344     if (sig == SR_signum) {
3345       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3346     } else {
3347       return;
3348     }
3349     break;
3350   }
3351 
3352   if (thisHandler != jvmHandler) {
3353     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3354     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3355     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3356     // No need to check this sig any longer
3357     sigaddset(&check_signal_done, sig);
3358     // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN.
3359     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3360       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3361                     exception_name(sig, buf, O_BUFLEN));
3362     }
3363   } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3364     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3365     tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
3366     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
3367     // No need to check this sig any longer
3368     sigaddset(&check_signal_done, sig);
3369   }
3370 
3371   // Dump all the signal handlers.
3372   if (sigismember(&check_signal_done, sig)) {
3373     print_signal_handlers(tty, buf, O_BUFLEN);
3374   }
3375 }
3376 
3377 extern bool signal_name(int signo, char* buf, size_t len);
3378 
3379 const char* os::exception_name(int exception_code, char* buf, size_t size) {
3380   if (0 < exception_code && exception_code <= SIGRTMAX) {
3381     // signal
3382     if (!signal_name(exception_code, buf, size)) {
3383       jio_snprintf(buf, size, "SIG%d", exception_code);
3384     }
3385     return buf;
3386   } else {
3387     return NULL;
3388   }
3389 }
3390 
3391 // To install functions for atexit processing.
3392 extern "C" {
3393   static void perfMemory_exit_helper() {
3394     perfMemory_exit();
3395   }
3396 }
3397 
3398 // This is called _before_ most of the global arguments have been parsed.
3399 void os::init(void) {
3400   // This is basic; we want to know if that ever changes.
3401   // (The shared memory boundary is supposed to be 256M aligned.)
3402   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3403 
3404   // First off, we need to know whether we run on AIX or PASE, and
3405   // the OS level we run on.
3406   os::Aix::initialize_os_info();
3407 
3408   // Scan environment (SPEC1170 behaviour, etc).
3409   os::Aix::scan_environment();
3410 
3411   // Check which pages are supported by AIX.
3412   query_multipage_support();
3413 
3414   // Act like we only have one page size by eliminating corner cases which
3415   // we did not support very well anyway.
3416   // We have two input conditions:
3417   // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3418   //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3419   //    setting.
3420   //    Data segment page size is important for us because it defines the thread stack page
3421   //    size, which is needed for guard page handling, stack banging etc.
3422   // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3423   //    and should be allocated with 64k pages.
3424   //
3425   // So, we do the following:
3426   // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
3427   // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
3428   // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
3429   // 64k          no              --- AIX 5.2 ? ---
3430   // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)
3431 
3432   // We explicitly leave no option to change page size, because only upgrading would work,
3433 // not downgrading (if stack page size is 64k you cannot pretend it's 4k).
3434 
3435   if (g_multipage_support.datapsize == SIZE_4K) {
3436     // datapsize = 4K. Data segment, thread stacks are 4K paged.
3437     if (g_multipage_support.can_use_64K_pages) {
3438       // .. but we are able to use 64K pages dynamically.
3439       // This would be typical for java launchers which are not linked
3440       // with datapsize=64K (like, any other launcher but our own).
3441       //
3442       // In this case it would be smart to allocate the java heap with 64K
3443       // to get the performance benefit, and to fake 64k pages for the
3444       // data segment (when dealing with thread stacks).
3445       //
3446       // However, leave a possibility to downgrade to 4K, using
3447       // -XX:-Use64KPages.
3448       if (Use64KPages) {
3449         trcVerbose("64K page mode (faked for data segment)");
3450         Aix::_page_size = SIZE_64K;
3451       } else {
3452         trcVerbose("4K page mode (Use64KPages=off)");
3453         Aix::_page_size = SIZE_4K;
3454       }
3455     } else {
3456       // .. and not able to allocate 64k pages dynamically. Here, just
3457       // fall back to 4K paged mode and use mmap for everything.
3458       trcVerbose("4K page mode");
3459       Aix::_page_size = SIZE_4K;
3460       FLAG_SET_ERGO(bool, Use64KPages, false);
3461     }
3462   } else {
3463     // datapsize = 64k. Data segment, thread stacks are 64k paged.
3464     //   This normally means that we can allocate 64k pages dynamically.
3465     //   (There is one special case where this may be false: EXTSHM=on,
3466     //    but we decided not to support that mode.)
3467     assert0(g_multipage_support.can_use_64K_pages);
3468     Aix::_page_size = SIZE_64K;
3469     trcVerbose("64K page mode");
3470     FLAG_SET_ERGO(bool, Use64KPages, true);
3471   }
3472 
3473   // Short-wire stack page size to base page size; if that works, we just remove
3474   // that stack page size altogether.
3475   Aix::_stack_page_size = Aix::_page_size;
3476 
3477   // For now UseLargePages is just ignored.
3478   FLAG_SET_ERGO(bool, UseLargePages, false);
3479   _page_sizes[0] = 0;
3480 
3481   // debug trace
3482   trcVerbose("os::vm_page_size %s\n", describe_pagesize(os::vm_page_size()));
3483 
3484   // Next, we need to initialize libo4 and libperfstat libraries.
3485   if (os::Aix::on_pase()) {
3486     os::Aix::initialize_libo4();
3487   } else {
3488     os::Aix::initialize_libperfstat();
3489   }
3490 
3491   // Reset the perfstat information provided by ODM.
3492   if (os::Aix::on_aix()) {
3493     libperfstat::perfstat_reset();
3494   }
3495 
3496   // Now initialize basic system properties. Note that for some of the values we
3497   // need libperfstat etc.
3498   os::Aix::initialize_system_info();
3499 
3500   _initial_pid = getpid();
3501 
3502   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3503 
3504   init_random(1234567);
3505 
3506   ThreadCritical::initialize();
3507 
3508   // Main_thread points to the aboriginal thread.
3509   Aix::_main_thread = pthread_self();
3510 
3511   initial_time_count = os::elapsed_counter();
3512 
3513   // If the pagesize of the VM is greater than 8K, determine the appropriate
3514   // number of initial guard pages. The user can change this with the
3515   // command line arguments, if needed.
3516   if (vm_page_size() > (int)Aix::vm_default_page_size()) {
3517     StackYellowPages = 1;
3518     StackRedPages = 1;
3519     StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
3520   }
3521 }
3522 
3523 // This is called _after_ the global arguments have been parsed.
3524 jint os::init_2(void) {
3525 
3526   trcVerbose("processor count: %d", os::_processor_count);
3527   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3528 
3529   // Initially build up the loaded dll map.
3530   LoadedLibraries::reload();
3531 
3532   const int page_size = Aix::page_size();
3533   const int map_size = page_size;
3534 
3535   address map_address = (address) MAP_FAILED;
3536   const int prot  = PROT_READ;
3537   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3538 
3539   // Use optimized addresses for the polling page,
3540   // e.g. map it to a special 32-bit address.
3541   if (OptimizePollingPageLocation) {
3542     // architecture-specific list of address wishes:
3543     address address_wishes[] = {
3544       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3545       // PPC64: all address wishes are non-negative 32 bit values where
3546       // the lower 16 bits are all zero. We can load these addresses
3547       // with a single ppc_lis instruction.
3548       (address) 0x30000000, (address) 0x31000000,
3549       (address) 0x32000000, (address) 0x33000000,
3550       (address) 0x40000000, (address) 0x41000000,
3551       (address) 0x42000000, (address) 0x43000000,
3552       (address) 0x50000000, (address) 0x51000000,
3553       (address) 0x52000000, (address) 0x53000000,
3554       (address) 0x60000000, (address) 0x61000000,
3555       (address) 0x62000000, (address) 0x63000000
3556     };
3557     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3558 
3559     // iterate over the list of address wishes:
3560     for (int i=0; i<address_wishes_length; i++) {
3561       // Try to map with current address wish.
3562       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3563       // fail if the address is already mapped.
3564       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3565                                      map_size, prot,
3566                                      flags | MAP_FIXED,
3567                                      -1, 0);
3568       if (Verbose) {
3569         fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3570                 address_wishes[i], map_address + (ssize_t)page_size);
3571       }
3572 
3573       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3574         // Map succeeded and map_address is at wished address, exit loop.
3575         break;
3576       }
3577 
3578       if (map_address != (address) MAP_FAILED) {
3579         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3580         ::munmap(map_address, map_size);
3581         map_address = (address) MAP_FAILED;
3582       }
3583       // Map failed, continue loop.
3584     }
3585   } // end OptimizePollingPageLocation
3586 
3587   if (map_address == (address) MAP_FAILED) {
3588     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3589   }
3590   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3591   os::set_polling_page(map_address);
3592 
3593   if (!UseMembar) {
3594     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3595     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3596     os::set_memory_serialize_page(mem_serialize_page);
3597 
3598 #ifndef PRODUCT
3599     if (Verbose && PrintMiscellaneous) {
3600       tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3601     }
3602 #endif
3603   }
3604 
3605   // initialize suspend/resume support - must do this before signal_sets_init()
3606   if (SR_initialize() != 0) {
3607     perror("SR_initialize failed");
3608     return JNI_ERR;
3609   }
3610 
3611   Aix::signal_sets_init();
3612   Aix::install_signal_handlers();
3613 
3614   // Check minimum allowable stack size for thread creation and to initialize
3615   // the java system classes, including StackOverflowError - depends on page
3616   // size. Add a page for compiler2 recursion in main thread.
3617   // Add in 2*BytesPerWord times page size to account for VM stack during
3618   // class initialization depending on 32 or 64 bit VM.
3619   os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3620             (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
3621                      (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
3622 
3623   os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
3624 
3625   size_t threadStackSizeInBytes = ThreadStackSize * K;
3626   if (threadStackSizeInBytes != 0 &&
3627       threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3628     tty->print_cr("\nThe stack size specified is too small, "
3629                   "Specify at least %dk",
3630                   os::Aix::min_stack_allowed / K);
3631     return JNI_ERR;
3632   }
3633 
3634   // Make the stack size a multiple of the page size so that
3635   // the yellow/red zones can be guarded.
3636   // Note that this can be 0, if no default stacksize was set.
3637   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3638 
3639   Aix::libpthread_init();
3640 
3641   if (MaxFDLimit) {
3642     // Set the number of file descriptors to max. Print out an error
3643     // if getrlimit/setrlimit fails but continue regardless.
3644     struct rlimit nbr_files;
3645     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3646     if (status != 0) {
3647       if (PrintMiscellaneous && (Verbose || WizardMode))
3648         perror("os::init_2 getrlimit failed");
3649     } else {
3650       nbr_files.rlim_cur = nbr_files.rlim_max;
3651       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3652       if (status != 0) {
3653         if (PrintMiscellaneous && (Verbose || WizardMode))
3654           perror("os::init_2 setrlimit failed");
3655       }
3656     }
3657   }
3658 
3659   if (PerfAllowAtExitRegistration) {
3660     // Only register atexit functions if PerfAllowAtExitRegistration is set.
3661     // Atexit functions can be delayed until process exit time, which
3662     // can be problematic for embedded VM situations. Embedded VMs should
3663     // call DestroyJavaVM() to assure that VM resources are released.
3664 
3665     // Note: perfMemory_exit_helper atexit function may be removed in
3666     // the future if the appropriate cleanup code can be added to the
3667     // VM_Exit VMOperation's doit method.
3668     if (atexit(perfMemory_exit_helper) != 0) {
3669       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3670     }
3671   }
3672 
3673   return JNI_OK;
3674 }
3675 
3676 // Mark the polling page as unreadable
3677 void os::make_polling_page_unreadable(void) {
3678   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3679     fatal("Could not disable polling page");
3680   }
3681 };
3682 
3683 // Mark the polling page as readable
3684 void os::make_polling_page_readable(void) {
3685   // Changed according to os_linux.cpp.
3686   if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3687     fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
3688   }
3689 };
3690 
3691 int os::active_processor_count() {
3692   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3693   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3694   return online_cpus;
3695 }
3696 
3697 void os::set_native_thread_name(const char *name) {
3698   // Not yet implemented.
3699   return;
3700 }
3701 
3702 bool os::distribute_processes(uint length, uint* distribution) {
3703   // Not yet implemented.
3704   return false;
3705 }
3706 
3707 bool os::bind_to_processor(uint processor_id) {
3708   // Not yet implemented.
3709   return false;
3710 }
3711 
3712 void os::SuspendedThreadTask::internal_do_task() {
3713   if (do_suspend(_thread->osthread())) {
3714     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3715     do_task(context);
3716     do_resume(_thread->osthread());
3717   }
3718 }
3719 
3720 class PcFetcher : public os::SuspendedThreadTask {
3721 public:
3722   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3723   ExtendedPC result();
3724 protected:
3725   void do_task(const os::SuspendedThreadTaskContext& context);
3726 private:
3727   ExtendedPC _epc;
3728 };
3729 
3730 ExtendedPC PcFetcher::result() {
3731   guarantee(is_done(), "task is not done yet.");
3732   return _epc;
3733 }
3734 
3735 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3736   Thread* thread = context.thread();
3737   OSThread* osthread = thread->osthread();
3738   if (osthread->ucontext() != NULL) {
3739     _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
3740   } else {
3741     // NULL context is unexpected, double-check this is the VMThread.
3742     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3743   }
3744 }
3745 
3746 // Suspends the target using the signal mechanism and then grabs the PC before
3747 // resuming the target. Used by the flat-profiler only
3748 ExtendedPC os::get_thread_pc(Thread* thread) {
3749   // Make sure that it is called by the watcher for the VMThread.
3750   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3751   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3752 
3753   PcFetcher fetcher(thread);
3754   fetcher.run();
3755   return fetcher.result();
3756 }
3757 
3758 ////////////////////////////////////////////////////////////////////////////////
3759 // debug support
3760 
3761 static address same_page(address x, address y) {
3762   intptr_t page_bits = -os::vm_page_size();
3763   if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
3764     return x;
3765   else if (x > y)
3766     return (address)(intptr_t(y) | ~page_bits) + 1;
3767   else
3768     return (address)(intptr_t(y) & page_bits);
3769 }
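
// Worked example, assuming a 4K page size (~page_bits == 0xFFF): for
// y == 0x20001234 the page holding y is [0x20001000, 0x20002000). An x on
// that page is returned unchanged; an x above it is clamped to 0x20002000
// (one past the page), an x below it to 0x20001000 (the page start).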
3770 
3771 bool os::find(address addr, outputStream* st) {
3772 
3773   st->print(PTR_FORMAT ": ", addr);
3774 
3775   loaded_module_t lm;
3776   if ( LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3777        LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3778     st->print("%s", lm.path);
3779     return true;
3780   }
3781 
3782   return false;
3783 }
3784 
3785 ////////////////////////////////////////////////////////////////////////////////
3786 // misc
3787 
3788 // This does not do anything on Aix. This is basically a hook for being
3789 // able to use structured exception handling (thread-local exception filters)
3790 // on, e.g., Win32.
3791 void
3792 os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
3793                          JavaCallArguments* args, Thread* thread) {
3794   f(value, method, args, thread);
3795 }
3796 
3797 void os::print_statistics() {
3798 }
3799 
3800 int os::message_box(const char* title, const char* message) {
3801   int i;
3802   fdStream err(defaultStream::error_fd());
3803   for (i = 0; i < 78; i++) err.print_raw("=");
3804   err.cr();
3805   err.print_raw_cr(title);
3806   for (i = 0; i < 78; i++) err.print_raw("-");
3807   err.cr();
3808   err.print_raw_cr(message);
3809   for (i = 0; i < 78; i++) err.print_raw("=");
3810   err.cr();
3811 
3812   char buf[16];
3813   // Prevent process from exiting upon "read error" without consuming all CPU
3814   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3815 
3816   return buf[0] == 'y' || buf[0] == 'Y';
3817 }
3818 
3819 int os::stat(const char *path, struct stat *sbuf) {
3820   char pathbuf[MAX_PATH];
3821   if (strlen(path) > MAX_PATH - 1) {
3822     errno = ENAMETOOLONG;
3823     return -1;
3824   }
3825   os::native_path(strcpy(pathbuf, path));
3826   return ::stat(pathbuf, sbuf);
3827 }
3828 
3829 bool os::check_heap(bool force) {
3830   return true;
3831 }
3832 
3833 // Is a (classpath) directory empty?
3834 bool os::dir_is_empty(const char* path) {
3835   DIR *dir = NULL;
3836   struct dirent *ptr;
3837 
3838   dir = opendir(path);
3839   if (dir == NULL) return true;
3840 
3841   /* Scan the directory */
3842   bool result = true;
3843   char buf[sizeof(struct dirent) + MAX_PATH];
3844   while (result && (ptr = ::readdir(dir)) != NULL) {
3845     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3846       result = false;
3847     }
3848   }
3849   closedir(dir);
3850   return result;
3851 }
3852 
3853 // This code originates from JDK's sysOpen and open64_w
3854 // from src/solaris/hpi/src/system_md.c
3855 
3856 int os::open(const char *path, int oflag, int mode) {
3857 
3858   if (strlen(path) > MAX_PATH - 1) {
3859     errno = ENAMETOOLONG;
3860     return -1;
3861   }
3862   int fd;
3863 
3864   fd = ::open64(path, oflag, mode);
3865   if (fd == -1) return -1;
3866 
3867   // If the open succeeded, the file might still be a directory.
3868   {
3869     struct stat64 buf64;
3870     int ret = ::fstat64(fd, &buf64);
3871     int st_mode = buf64.st_mode;
3872 
3873     if (ret != -1) {
3874       if ((st_mode & S_IFMT) == S_IFDIR) {
3875         errno = EISDIR;
3876         ::close(fd);
3877         return -1;
3878       }
3879     } else {
3880       ::close(fd);
3881       return -1;
3882     }
3883   }
3884 
3885   // All file descriptors that are opened in the JVM and not
3886   // specifically destined for a subprocess should have the
3887   // close-on-exec flag set. If we don't set it, then careless 3rd
3888   // party native code might fork and exec without closing all
3889   // appropriate file descriptors (e.g. as we do in closeDescriptors in
3890   // UNIXProcess.c), and this in turn might:
3891   //
3892   // - cause end-of-file to fail to be detected on some file
3893   //   descriptors, resulting in mysterious hangs, or
3894   //
3895   // - might cause an fopen in the subprocess to fail on a system
3896   //   suffering from bug 1085341.
3897   //
3898   // (Yes, the default setting of the close-on-exec flag is a Unix
3899   // design flaw.)
3900   //
3901   // See:
3902   // 1085341: 32-bit stdio routines should support file descriptors >255
3903   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
3904   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
3905 #ifdef FD_CLOEXEC
3906   {
3907     int flags = ::fcntl(fd, F_GETFD);
3908     if (flags != -1)
3909       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
3910   }
3911 #endif
3912 
3913   return fd;
3914 }
3915 
3916 // create binary file, rewriting existing file if required
3917 int os::create_binary_file(const char* path, bool rewrite_existing) {
3918   int oflags = O_WRONLY | O_CREAT;
3919   if (!rewrite_existing) {
3920     oflags |= O_EXCL;
3921   }
3922   return ::open64(path, oflags, S_IREAD | S_IWRITE);
3923 }
3924 
3925 // return current position of file pointer
3926 jlong os::current_file_offset(int fd) {
3927   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3928 }
3929 
3930 // move file pointer to the specified offset
3931 jlong os::seek_to_file_offset(int fd, jlong offset) {
3932   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3933 }
3934 
3935 // This code originates from JDK's sysAvailable
3936 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3937 
3938 int os::available(int fd, jlong *bytes) {
3939   jlong cur, end;
3940   int mode;
3941   struct stat64 buf64;
3942 
3943   if (::fstat64(fd, &buf64) >= 0) {
3944     mode = buf64.st_mode;
3945     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
3946       // XXX: is the following call interruptible? If so, this might
3947       // need to go through the INTERRUPT_IO() wrapper as for other
3948       // blocking, interruptible calls in this file.
3949       int n;
3950       if (::ioctl(fd, FIONREAD, &n) >= 0) {
3951         *bytes = n;
3952         return 1;
3953       }
3954     }
3955   }
3956   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
3957     return 0;
3958   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
3959     return 0;
3960   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
3961     return 0;
3962   }
3963   *bytes = end - cur;
3964   return 1;
3965 }
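
// Example: for a regular (seekable) file of length 1000 bytes with the file
// pointer at offset 200, the lseek64 sequence above restores the offset to
// 200 and returns *bytes == 800.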
3966 
3967 // Map a block of memory.
3968 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3969                         char *addr, size_t bytes, bool read_only,
3970                         bool allow_exec) {
3971   int prot;
3972   int flags = MAP_PRIVATE;
3973 
3974   if (read_only) {
3975     prot = PROT_READ;
3976     flags = MAP_SHARED;
3977   } else {
3978     prot = PROT_READ | PROT_WRITE;
3979     flags = MAP_PRIVATE;
3980   }
3981 
3982   if (allow_exec) {
3983     prot |= PROT_EXEC;
3984   }
3985 
3986   if (addr != NULL) {
3987     flags |= MAP_FIXED;
3988   }
3989 
3990   // Allow anonymous mappings if 'fd' is -1.
3991   if (fd == -1) {
3992     flags |= MAP_ANONYMOUS;
3993   }
3994 
3995   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
3996                                      fd, file_offset);
3997   if (mapped_address == MAP_FAILED) {
3998     return NULL;
3999   }
4000   return mapped_address;
4001 }
4002 
4003 // Remap a block of memory.
4004 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4005                           char *addr, size_t bytes, bool read_only,
4006                           bool allow_exec) {
4007   // same as map_memory() on this OS
4008   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4009                         allow_exec);
4010 }
4011 
4012 // Unmap a block of memory.
4013 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4014   return munmap(addr, bytes) == 0;
4015 }
4016 
4017 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4018 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4019 // of a thread.
4020 //
4021 // current_thread_cpu_time() and thread_cpu_time(Thread*) return
4022 // the fast estimate available on the platform.
4023 
4024 jlong os::current_thread_cpu_time() {
4025   // return user + sys since the cost is the same
4026   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
4027   assert(n >= 0, "negative CPU time");
4028   return n;
4029 }
4030 
4031 jlong os::thread_cpu_time(Thread* thread) {
4032   // consistent with what current_thread_cpu_time() returns
4033   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4034   assert(n >= 0, "negative CPU time");
4035   return n;
4036 }
4037 
4038 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4039   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4040   assert(n >= 0, "negative CPU time");
4041   return n;
4042 }
4043 
4044 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4045   bool error = false;
4046 
4047   jlong sys_time = 0;
4048   jlong user_time = 0;
4049 
4050   // Reimplemented using getthrds64().
4051   //
4052   // Works like this:
4053   // For the thread in question, get the kernel thread id. Then get the
4054   // kernel thread statistics using that id.
4055   //
4056   // Of course, this only works when no pthread scheduling is used,
4057   // i.e. there is a 1:1 relationship to kernel threads.
4058   // On AIX, see AIXTHREAD_SCOPE variable.
4059 
4060   pthread_t pthtid = thread->osthread()->pthread_id();
4061 
4062   // retrieve kernel thread id for the pthread:
4063   tid64_t tid = 0;
4064   struct __pthrdsinfo pinfo;
4065   // I just love those otherworldly IBM APIs which force me to hand down
4066   // dummy buffers for stuff I don't care about...
4067   char dummy[1];
4068   int dummy_size = sizeof(dummy);
4069   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4070                           dummy, &dummy_size) == 0) {
4071     tid = pinfo.__pi_tid;
4072   } else {
4073     tty->print_cr("pthread_getthrds_np failed.");
4074     error = true;
4075   }
4076 
4077   // retrieve kernel timing info for that kernel thread
4078   if (!error) {
4079     struct thrdentry64 thrdentry;
4080     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4081       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4082       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4083     } else {
4084       tty->print_cr("pthread_getthrds_np failed.");
4085       error = true;
4086     }
4087   }
4088 
4089   if (p_sys_time) {
4090     *p_sys_time = sys_time;
4091   }
4092 
4093   if (p_user_time) {
4094     *p_user_time = user_time;
4095   }
4096 
4097   if (error) {
4098     return false;
4099   }
4100 
4101   return true;
4102 }
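
// Conversion sanity check: a kernel thread that consumed 1 second and
// 250000 microseconds of system time yields
// sys_time = 1 * 1000000000 + 250000 * 1000 = 1250000000 ns (1.25 s).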
4103 
4104 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4105   jlong sys_time;
4106   jlong user_time;
4107 
4108   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4109     return -1;
4110   }
4111 
4112   return user_sys_cpu_time ? sys_time + user_time : user_time;
4113 }
4114 
4115 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4116   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4117   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4118   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4119   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4120 }
4121 
4122 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4123   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4124   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4125   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4126   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4127 }
4128 
4129 bool os::is_thread_cpu_time_supported() {
4130   return true;
4131 }
4132 
4133 // System loadavg support. Returns -1 if load average cannot be obtained.
4134 // For now just return the system wide load average (no processor sets).
4135 int os::loadavg(double values[], int nelem) {
4136 
4137   // Implemented using libperfstat on AIX.
4138 
4139   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4140   guarantee(values, "argument error");
4141 
4142   if (os::Aix::on_pase()) {
4143     Unimplemented();
4144     return -1;
4145   } else {
4146     // AIX: use libperfstat
4147     //
4148     // See also:
4149     // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4150     // /usr/include/libperfstat.h:
4151 
4152     // Use get_cpuinfo, which is already AIX-version independent.
4153     os::Aix::cpuinfo_t ci;
4154     if (os::Aix::get_cpuinfo(&ci)) {
4155       for (int i = 0; i < nelem; i++) {
4156         values[i] = ci.loadavg[i];
4157       }
4158     } else {
4159       return -1;
4160     }
4161     return nelem;
4162   }
4163 }
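
// An illustrative usage sketch (hypothetical caller):
//
//   double avg[3];
//   if (os::loadavg(avg, 3) == 3) {
//     // avg[0..2] now hold the system load averages.
//   }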
4164 
4165 void os::pause() {
4166   char filename[MAX_PATH];
4167   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4168     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4169   } else {
4170     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4171   }
4172 
4173   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4174   if (fd != -1) {
4175     struct stat buf;
4176     ::close(fd);
4177     while (::stat(filename, &buf) == 0) {
4178       (void)::poll(NULL, 0, 100);
4179     }
4180   } else {
4181     jio_fprintf(stderr,
4182       "Could not open pause file '%s', continuing immediately.\n", filename);
4183   }
4184 }
4185 
4186 bool os::Aix::is_primordial_thread() {
4187   if (pthread_self() == (pthread_t)1) {
4188     return true;
4189   } else {
4190     return false;
4191   }
4192 }
4193 
4194 // OS recognition (PASE/AIX, OS level). Call this before calling any of
4195 // the static functions Aix::on_pase() or Aix::os_version().
4196 void os::Aix::initialize_os_info() {
4197 
4198   assert(_on_pase == -1 && _os_version == -1, "already called.");
4199 
4200   struct utsname uts;
4201   memset(&uts, 0, sizeof(uts));
4202   strcpy(uts.sysname, "?");
4203   if (::uname(&uts) == -1) {
4204     trc("uname failed (%d)", errno);
4205     guarantee(0, "Could not determine whether we run on AIX or PASE");
4206   } else {
4207     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4208                "node \"%s\" machine \"%s\"\n",
4209                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4210     const int major = atoi(uts.version);
4211     assert(major > 0, "invalid OS version");
4212     const int minor = atoi(uts.release);
4213     assert(minor > 0, "invalid OS release");
4214     _os_version = (major << 8) | minor;
4215     if (strcmp(uts.sysname, "OS400") == 0) {
4216       Unimplemented();
4217     } else if (strcmp(uts.sysname, "AIX") == 0) {
4218       // We run on AIX. We do not support versions older than AIX 5.3.
4219       _on_pase = 0;
4220       if (_os_version < 0x0503) {
4221         trc("AIX release older than AIX 5.3 not supported.");
4222         assert(false, "AIX release too old.");
4223       } else {
4224         trcVerbose("We run on AIX %d.%d\n", major, minor);
4225       }
4226     } else {
4227       assert(false, "unknown OS");
4228     }
4229   }
4230 
4231   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4232 } // end: os::Aix::initialize_os_info()
4233 
4234 // Scan environment for important settings which might affect the VM.
4235 // Trace out settings. Warn about invalid settings and/or correct them.
4236 //
4237 // Must run after os::Aix::initialize_os_info().
4238 void os::Aix::scan_environment() {
4239 
4240   char* p;
4241   int rc;
4242 
4243   // Warn explicitly if EXTSHM=ON is used. That switch changes how
4244   // System V shared memory behaves. One effect is that the page size of
4245   // shared memory cannot be changed dynamically, effectively preventing
4246   // large pages from working.
4247   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4248   // recommendation is (in OSS notes) to switch it off.
4249   p = ::getenv("EXTSHM");
4250   if (Verbose) {
4251     fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4252   }
4253   if (p && strcasecmp(p, "ON") == 0) {
4254     fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4255     _extshm = 1;
4256   } else {
4257     _extshm = 0;
4258   }
4259 
4260   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4261   // Not tested, not supported.
4262   //
4263   // Note that it might be worth the trouble to test and to require it, if only to
4264   // get useful return codes for mprotect.
4265   //
4266   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4267   // exec() ? before loading the libjvm ? ....)
4268   p = ::getenv("XPG_SUS_ENV");
4269   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4270   if (p && strcmp(p, "ON") == 0) {
4271     _xpg_sus_mode = 1;
4272     trc("Unsupported setting: XPG_SUS_ENV=ON");
4273     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4274     // clobber address ranges. If we ever want to support that, we have to do some
4275     // testing first.
4276     guarantee(false, "XPG_SUS_ENV=ON not supported");
4277   } else {
4278     _xpg_sus_mode = 0;
4279   }
4280 
4281   // Switch off AIX internal (pthread) guard pages. This has
4282   // immediate effect for any pthread_create calls which follow.
4283   p = ::getenv("AIXTHREAD_GUARDPAGES");
4284   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4285   rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4286   guarantee(rc == 0, "");
4287 
4288 } // end: os::Aix::scan_environment()
4289 
4290 // PASE: initialize the libo4 library (AS400 PASE porting library).
4291 void os::Aix::initialize_libo4() {
4292   Unimplemented();
4293 }
4294 
4295 // AIX: initialize the libperfstat library (we load this dynamically
4296 // because it is only available on AIX).
4297 void os::Aix::initialize_libperfstat() {
4298 
4299   assert(os::Aix::on_aix(), "AIX only");
4300 
4301   if (!libperfstat::init()) {
4302     trc("libperfstat initialization failed.");
4303     assert(false, "libperfstat initialization failed");
4304   } else {
4305     if (Verbose) {
4306       fprintf(stderr, "libperfstat initialized.\n");
4307     }
4308   }
4309 } // end: os::Aix::initialize_libperfstat
4310 
4311 /////////////////////////////////////////////////////////////////////////////
4312 // thread stack
4313 
4314 // Function to query the current stack base and size using pthread_getthrds_np.
4315 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4316   // This only works when invoked on a pthread. As we agreed not to use
4317   // primordial threads anyway, I assert here.
4318   guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4319 
4320   // Information about this api can be found (a) in the pthread.h header and
4321   // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4322   //
4323   // The use of this API to find out the current stack is kind of undefined.
4324   // But after a lot of tries and asking IBM about it, I concluded that it is safe
4325   // enough for cases where I let the pthread library create its stacks. For cases
4326   // where I create my own stack and pass it to pthread_create, it seems not to
4327   // work (the returned stack size in that case is 0).
4328 
4329   pthread_t tid = pthread_self();
4330   struct __pthrdsinfo pinfo;
4331   char dummy[1]; // We only need this to satisfy the api and to not get E.
4332   int dummy_size = sizeof(dummy);
4333 
4334   memset(&pinfo, 0, sizeof(pinfo));
4335 
4336   const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4337                                      sizeof(pinfo), dummy, &dummy_size);
4338 
4339   if (rc != 0) {
4340     assert0(false);
4341     trcVerbose("pthread_getthrds_np failed (%d)", rc);
4342     return false;
4343   }
4344   guarantee0(pinfo.__pi_stackend);
4345 
4346   // The following can happen when invoking pthread_getthrds_np on a pthread running
4347   // on a user provided stack (when handing down a stack to pthread create, see
4348   // pthread_attr_setstackaddr).
4349   // Not sure what to do here - I feel inclined to forbid this use case completely.
4350   guarantee0(pinfo.__pi_stacksize);
4351 
4352   // Note: the pthread stack on AIX seems to look like this:
4353   //
4354   // ---------------------   real base ? at page border ?
4355   //
4356   //     pthread internal data, like ~2K, see also
4357   //     http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/thread_supp_tun_params.htm
4358   //
4359   // ---------------------   __pi_stackend - not page aligned, (xxxxF890)
4360   //
4361   //     stack
4362   //      ....
4363   //
4364   //     stack
4365   //
4366   // ---------------------   __pi_stackend  - __pi_stacksize
4367   //
4368   //     padding due to AIX guard pages (?) see AIXTHREAD_GUARDPAGES
4369   // ---------------------   __pi_stackaddr  (page aligned if AIXTHREAD_GUARDPAGES > 0)
4370   //
4371   //   AIX guard pages (?)
4372   //
4373 
4374   // So, the safe thing to do is to use the area from __pi_stackend to __pi_stackaddr;
4375   // __pi_stackend however is almost never page aligned.
4376   //
4377 
4378   if (p_stack_base) {
4379     (*p_stack_base) = (address) (pinfo.__pi_stackend);
4380   }
4381 
4382   if (p_stack_size) {
4383     (*p_stack_size) = pinfo.__pi_stackend - pinfo.__pi_stackaddr;
4384   }
4385 
4386   return true;
4387 }
4388 
4389 // Get the current stack base from the OS (actually, the pthread library).
4390 address os::current_stack_base() {
4391   address p;
4392   query_stack_dimensions(&p, 0);
4393   return p;
4394 }
4395 
4396 // Get the current stack size from the OS (actually, the pthread library).
4397 size_t os::current_stack_size() {
4398   size_t s;
4399   query_stack_dimensions(0, &s);
4400   return s;
4401 }
4402 
4403 // Refer to the comments in os_solaris.cpp park-unpark.
4404 //
4405 // Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4406 // hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4407 // For specifics regarding the bug see GLIBC BUGID 261237 :
4408 //    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4409 // Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4410 // will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4411 // is used. (The simple C test-case provided in the GLIBC bug report manifests the
4412 // hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4413 // and monitorenter when we're using 1-0 locking. All those operations may result in
4414 // calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4415 // of libpthread avoids the problem, but isn't practical.
4416 //
4417 // Possible remedies:
4418 //
4419 // 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
4420 //      This is palliative and probabilistic, however. If the thread is preempted
4421 //      between the call to compute_abstime() and pthread_cond_timedwait(), more
4422 //      than the minimum period may have passed, and the abstime may be stale (in the
4423 //      past) resulting in a hang. Using this technique reduces the odds of a hang
4424 //      but the JVM is still vulnerable, particularly on heavily loaded systems.
4425 //
4426 // 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4427 //      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
4428 //      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4429 //      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
4430 //      thread.
4431 //
4432 // 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
4433 //      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
4434 //      a timeout request to the chron thread and then blocking via pthread_cond_wait().
4435 //      This also works well. In fact it avoids kernel-level scalability impediments
4436 //      on certain platforms that don't handle lots of active pthread_cond_timedwait()
4437 //      timers in a graceful fashion.
4438 //
4439 // 4.   When the abstime value is in the past it appears that control returns
4440 //      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4441 //      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
4442 //      can avoid the problem by reinitializing the condvar -- by cond_destroy()
4443 //      followed by cond_init() -- after all calls to pthread_cond_timedwait().
4444 //      It may be possible to avoid reinitialization by checking the return
4445 //      value from pthread_cond_timedwait(). In addition to reinitializing the
4446 //      condvar we must establish the invariant that cond_signal() is only called
4447 //      within critical sections protected by the adjunct mutex. This prevents
4448 //      cond_signal() from "seeing" a condvar that's in the midst of being
4449 //      reinitialized or that is corrupt. Sadly, this invariant obviates the
4450 //      desirable signal-after-unlock optimization that avoids futile context switching.
4451 //
4452 //      I'm also concerned that some versions of NPTL might allocate an auxiliary
4453 //      structure when a condvar is used or initialized. cond_destroy() would
4454 //      release the helper structure. Our reinitialize-after-timedwait fix
4455 //      put excessive stress on malloc/free and locks protecting the c-heap.
4456 //
4457 // We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
4458 // It may be possible to refine (4) by checking the kernel and NPTL versions
4459 // and only enabling the work-around for vulnerable environments.
4460 
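// As an illustration of (4), a minimal sketch of the reinitialize-after-
// timedwait idiom, assuming a hypothetical do_timedwait() helper (names are
// illustrative only; the real workaround lives in Parker::park() below):
//
//   // Caller must hold *mutex.
//   static int do_timedwait(pthread_cond_t* cond, pthread_mutex_t* mutex,
//                           const struct timespec* abst) {
//     int status = pthread_cond_timedwait(cond, mutex, abst);
//     if (status != 0) {
//       // The condvar may be corrupt if abst was already in the past;
//       // re-create it while still holding the mutex.
//       pthread_cond_destroy(cond);
//       pthread_cond_init(cond, NULL);
//     }
//     return status;
//   }
//
// Note that every pthread_cond_signal() on such a condvar must itself run
// while holding the adjunct mutex, so it can never observe a condvar that is
// in the midst of being reinitialized (the invariant discussed above).
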
4461 // utility to compute the abstime argument to timedwait:
4462 // millis is the relative timeout time
4463 // abstime will be the absolute timeout time
4464 // TODO: replace compute_abstime() with unpackTime()
4465 
4466 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4467   if (millis < 0) millis = 0;
4468   struct timeval now;
4469   int status = gettimeofday(&now, NULL);
4470   assert(status == 0, "gettimeofday");
4471   jlong seconds = millis / 1000;
4472   millis %= 1000;
4473   if (seconds > 50000000) { // see man cond_timedwait(3T)
4474     seconds = 50000000;
4475   }
4476   abstime->tv_sec = now.tv_sec  + seconds;
4477   long       usec = now.tv_usec + millis * 1000;
4478   if (usec >= 1000000) {
4479     abstime->tv_sec += 1;
4480     usec -= 1000000;
4481   }
4482   abstime->tv_nsec = usec * 1000;
4483   return abstime;
4484 }
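
// For example (illustrative values): compute_abstime(&abst, 2500) returns an
// abstime of "gettimeofday() now" plus 2 seconds and 500 ms, i.e. tv_sec is
// now.tv_sec + 2 (plus a possible carry) and tv_nsec encodes the remaining
// 500 ms together with now.tv_usec, normalized to < 1,000,000,000.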
4485 
4486 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4487 // Conceptually TryPark() should be equivalent to park(0).
4488 
4489 int os::PlatformEvent::TryPark() {
4490   for (;;) {
4491     const int v = _Event;
4492     guarantee ((v == 0) || (v == 1), "invariant");
4493     if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4494   }
4495 }
4496 
4497 void os::PlatformEvent::park() {       // AKA "down()"
4498   // Invariant: Only the thread associated with the Event/PlatformEvent
4499   // may call park().
4500   // TODO: assert that _Assoc != NULL or _Assoc == Self
4501   int v;
4502   for (;;) {
4503     v = _Event;
4504     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4505   }
4506   guarantee (v >= 0, "invariant");
4507   if (v == 0) {
4508     // Do this the hard way by blocking ...
4509     int status = pthread_mutex_lock(_mutex);
4510     assert_status(status == 0, status, "mutex_lock");
4511     guarantee (_nParked == 0, "invariant");
4512     ++ _nParked;
4513     while (_Event < 0) {
4514       status = pthread_cond_wait(_cond, _mutex);
4515       assert_status(status == 0, status, "cond_wait");
4516     }
4517     -- _nParked;
4518 
4519     // In theory we could move the ST of 0 into _Event past the unlock(),
4520     // but then we'd need a MEMBAR after the ST.
4521     _Event = 0;
4522     status = pthread_mutex_unlock(_mutex);
4523     assert_status(status == 0, status, "mutex_unlock");
4524   }
4525   guarantee (_Event >= 0, "invariant");
4526 }
4527 
4528 int os::PlatformEvent::park(jlong millis) {
4529   guarantee (_nParked == 0, "invariant");
4530 
4531   int v;
4532   for (;;) {
4533     v = _Event;
4534     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4535   }
4536   guarantee (v >= 0, "invariant");
4537   if (v != 0) return OS_OK;
4538 
4539   // We do this the hard way, by blocking the thread.
4540   // Consider enforcing a minimum timeout value.
4541   struct timespec abst;
4542   compute_abstime(&abst, millis);
4543 
4544   int ret = OS_TIMEOUT;
4545   int status = pthread_mutex_lock(_mutex);
4546   assert_status(status == 0, status, "mutex_lock");
4547   guarantee (_nParked == 0, "invariant");
4548   ++_nParked;
4549 
4550   // Object.wait(timo) will return because of
4551   // (a) notification
4552   // (b) timeout
4553   // (c) thread.interrupt
4554   //
4555   // Thread.interrupt and object.notify{All} both call Event::set.
4556   // That is, we treat thread.interrupt as a special case of notification.
4557   // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4558   // We assume all ETIME returns are valid.
4559   //
4560   // TODO: properly differentiate simultaneous notify+interrupt.
4561   // In that case, we should propagate the notify to another waiter.
4562 
4563   while (_Event < 0) {
4564     status = pthread_cond_timedwait(_cond, _mutex, &abst);
4565     assert_status(status == 0 || status == ETIMEDOUT,
4566                   status, "cond_timedwait");
4567     if (!FilterSpuriousWakeups) break;         // previous semantics
4568     if (status == ETIMEDOUT) break;
4569     // We consume and ignore EINTR and spurious wakeups.
4570   }
4571   --_nParked;
4572   if (_Event >= 0) {
4573      ret = OS_OK;
4574   }
4575   _Event = 0;
4576   status = pthread_mutex_unlock(_mutex);
4577   assert_status(status == 0, status, "mutex_unlock");
4578   assert (_nParked == 0, "invariant");
4579   return ret;
4580 }
4581 
4582 void os::PlatformEvent::unpark() {
4583   int v, AnyWaiters;
4584   for (;;) {
4585     v = _Event;
4586     if (v > 0) {
4587       // The LD of _Event could have been reordered or be satisfied
4588       // by a read-aside from this processor's write buffer.
4589       // To avoid problems execute a barrier and then
4590       // ratify the value.
4591       OrderAccess::fence();
4592       if (_Event == v) return;
4593       continue;
4594     }
4595     if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
4596   }
4597   if (v < 0) {
4598     // Wait for the thread associated with the event to vacate
4599     int status = pthread_mutex_lock(_mutex);
4600     assert_status(status == 0, status, "mutex_lock");
4601     AnyWaiters = _nParked;
4602 
4603     if (AnyWaiters != 0) {
4604       // We intentionally signal while still holding the mutex; the NPTL
4605       // commentary above requires cond_signal() to run under the adjunct mutex.
4606       status = pthread_cond_signal(_cond);
4607       assert_status(status == 0, status, "cond_signal");
4608     }
4609     // Mutex should be locked for pthread_cond_signal(_cond).
4610     status = pthread_mutex_unlock(_mutex);
4611     assert_status(status == 0, status, "mutex_unlock");
4612   }
4613 
4614   // Note that signalling _after_ dropping the lock would also be safe for
4615   // these "immortal" Events and would avoid a common class of futile wakeups.
4616   // In rare circumstances that can cause a thread to return prematurely from
4617   // cond_{timed}wait(), but the spurious wakeup is benign and the victim will
4618   // simply re-test the condition and re-park itself. Here we signal under the mutex (see above).
4619 }
4620 
4621 
4622 // JSR166
4623 // -------------------------------------------------------
4624 
4625 //
4626 // The Solaris and Linux implementations of park/unpark are fairly
4627 // conservative for now, but can be improved. They currently use a
4628 // mutex/condvar pair, plus a count.
4629 // Park decrements count if > 0, else does a condvar wait. Unpark
4630 // sets count to 1 and signals condvar. Only one thread ever waits
4631 // on the condvar. Contention seen when trying to park implies that someone
4632 // is unparking you, so don't wait. And spurious returns are fine, so there
4633 // is no need to track notifications.
4634 //
4635 
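// To make the counter protocol concrete, a minimal sketch (assuming a plain
// pthread mutex/condvar guarding an int _counter; names and structure are
// illustrative only; the real Parker::park()/unpark() below add timeouts,
// interrupt checks and safepoint transitions):
//
//   void park_sketch() {
//     pthread_mutex_lock(&_m);
//     while (_counter == 0) {
//       pthread_cond_wait(&_c, &_m);  // spurious returns simply re-test
//     }
//     _counter = 0;                   // consume the permit
//     pthread_mutex_unlock(&_m);
//   }
//
//   void unpark_sketch() {
//     pthread_mutex_lock(&_m);
//     _counter = 1;                   // permits never accumulate beyond one
//     pthread_cond_signal(&_c);
//     pthread_mutex_unlock(&_m);
//   }
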
4636 #define MAX_SECS 100000000
4637 //
4638 // This code is common to Linux and Solaris and will be moved to a
4639 // common place in dolphin.
4640 //
4641 // The passed in time value is either a relative time in nanoseconds
4642 // or an absolute time in milliseconds. Either way it has to be unpacked
4643 // into suitable seconds and nanoseconds components and stored in the
4644 // given timespec structure.
4645 // Given that the time is a 64-bit value and the time_t used in the timespec
4646 // is only a signed 32-bit value (except on 64-bit Linux), we have to watch for
4647 // overflow if times far in the future are given. Further, on Solaris versions
4648 // prior to 10 there is a restriction (see cond_timedwait) that the specified
4649 // number of seconds, in abstime, be less than current_time + 100,000,000.
4650 // As it will be 28 years before "now + 100000000" overflows, we can ignore
4651 // overflow and just impose a hard limit on seconds using the value of
4652 // "now + 100,000,000". This places a limit on the timeout of about 3.17
4653 // years from "now" (100,000,000 s is roughly 3.17 * 31,557,600 s/year).
4654 //
4655 
4656 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
4657   assert (time > 0, "convertTime");
4658 
4659   struct timeval now;
4660   int status = gettimeofday(&now, NULL);
4661   assert(status == 0, "gettimeofday");
4662 
4663   time_t max_secs = now.tv_sec + MAX_SECS;
4664 
4665   if (isAbsolute) {
4666     jlong secs = time / 1000;
4667     if (secs > max_secs) {
4668       absTime->tv_sec = max_secs;
4669     }
4670     else {
4671       absTime->tv_sec = secs;
4672     }
4673     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4674   }
4675   else {
4676     jlong secs = time / NANOSECS_PER_SEC;
4677     if (secs >= MAX_SECS) {
4678       absTime->tv_sec = max_secs;
4679       absTime->tv_nsec = 0;
4680     }
4681     else {
4682       absTime->tv_sec = now.tv_sec + secs;
4683       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4684       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4685         absTime->tv_nsec -= NANOSECS_PER_SEC;
4686         ++absTime->tv_sec; // note: this must be <= max_secs
4687       }
4688     }
4689   }
4690   assert(absTime->tv_sec >= 0, "tv_sec < 0");
4691   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4692   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4693   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4694 }
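
// A worked example of the two conventions (illustrative values only):
//
//   struct timespec ts;
//   unpackTime(&ts, false, 10 * NANOSECS_PER_MILLISEC);  // relative: now + 10 ms
//   unpackTime(&ts, true,  os::javaTimeMillis() + 10);   // absolute: same deadline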
4695 
4696 void Parker::park(bool isAbsolute, jlong time) {
4697   // Optional fast-path check:
4698   // Return immediately if a permit is available.
4699   if (_counter > 0) {
4700     _counter = 0;
4701     OrderAccess::fence();
4702     return;
4703   }
4704 
4705   Thread* thread = Thread::current();
4706   assert(thread->is_Java_thread(), "Must be JavaThread");
4707   JavaThread *jt = (JavaThread *)thread;
4708 
4709   // Optional optimization -- avoid state transitions if there's an interrupt pending.
4710   // Check interrupt before trying to wait
4711   if (Thread::is_interrupted(thread, false)) {
4712     return;
4713   }
4714 
4715   // Next, demultiplex/decode time arguments
4716   timespec absTime;
4717   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
4718     return;
4719   }
4720   if (time > 0) {
4721     unpackTime(&absTime, isAbsolute, time);
4722   }
4723 
4724   // Enter safepoint region
4725   // Beware of deadlocks such as 6317397.
4726   // The per-thread Parker:: mutex is a classic leaf-lock.
4727   // In particular a thread must never block on the Threads_lock while
4728 // holding the Parker:: mutex. If safepoints are pending, both the
4729 // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
4730   ThreadBlockInVM tbivm(jt);
4731 
4732 // Don't wait if we cannot get the lock, since interference arises from
4733 // unblocking. Also, check for a pending interrupt before trying to wait.
4734   if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4735     return;
4736   }
4737 
4738   int status;
4739   if (_counter > 0) { // no wait needed
4740     _counter = 0;
4741     status = pthread_mutex_unlock(_mutex);
4742     assert (status == 0, "invariant");
4743     OrderAccess::fence();
4744     return;
4745   }
4746 
4747 #ifdef ASSERT
4748   // Don't catch signals while blocked; let the running threads have the signals.
4749   // (This allows a debugger to break into the running thread.)
4750   sigset_t oldsigs;
4751   sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
4752   pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4753 #endif
4754 
4755   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4756   jt->set_suspend_equivalent();
4757   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4758 
4759   if (time == 0) {
4760     status = pthread_cond_wait (_cond, _mutex);
4761   } else {
4762     status = pthread_cond_timedwait (_cond, _mutex, &absTime);
4763     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4764       pthread_cond_destroy (_cond);
4765       pthread_cond_init    (_cond, NULL);
4766     }
4767   }
4768   assert_status(status == 0 || status == EINTR ||
4769                 status == ETIME || status == ETIMEDOUT,
4770                 status, "cond_timedwait");
4771 
4772 #ifdef ASSERT
4773   pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4774 #endif
4775 
4776   _counter = 0;
4777   status = pthread_mutex_unlock(_mutex);
4778   assert_status(status == 0, status, "invariant");
4779   // If externally suspended while waiting, re-suspend
4780   if (jt->handle_special_suspend_equivalent_condition()) {
4781     jt->java_suspend_self();
4782   }
4783 
4784   OrderAccess::fence();
4785 }
4786 
4787 void Parker::unpark() {
4788   int s, status;
4789   status = pthread_mutex_lock(_mutex);
4790   assert (status == 0, "invariant");
4791   s = _counter;
4792   _counter = 1;
4793   if (s < 1) {
4794     if (WorkAroundNPTLTimedWaitHang) {
4795       status = pthread_cond_signal (_cond);
4796       assert (status == 0, "invariant");
4797       status = pthread_mutex_unlock(_mutex);
4798       assert (status == 0, "invariant");
4799     } else {
4800       status = pthread_mutex_unlock(_mutex);
4801       assert (status == 0, "invariant");
4802       status = pthread_cond_signal (_cond);
4803       assert (status == 0, "invariant");
4804     }
4805   } else {
4806     status = pthread_mutex_unlock(_mutex);
4807     assert (status == 0, "invariant");
4808   }
4809 }
4810 
4811 extern char** environ;
4812 
4813 // Run the specified command in a separate process. Return its exit value,
4814 // or -1 on failure (e.g. can't fork a new process).
4815 // Unlike system(), this function can be called from a signal handler. It
4816 // doesn't block SIGINT et al.
4817 int os::fork_and_exec(char* cmd) {
4818   char * argv[4] = {"sh", "-c", cmd, NULL};
4819 
4820   pid_t pid = fork();
4821 
4822   if (pid < 0) {
4823     // fork failed
4824     return -1;
4825 
4826   } else if (pid == 0) {
4827     // child process
4828 
4829     // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4830     execve("/usr/bin/sh", argv, environ);
4831 
4832     // execve failed
4833     _exit(-1);
4834 
4835   } else {
4836     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4837     // care about the actual exit code, for now.
4838 
4839     int status;
4840 
4841     // Wait for the child process to exit. This returns immediately if
4842     // the child has already exited.
4843     while (waitpid(pid, &status, 0) < 0) {
4844       switch (errno) {
4845         case ECHILD: return 0;
4846         case EINTR: break;
4847         default: return -1;
4848       }
4849     }
4850 
4851     if (WIFEXITED(status)) {
4852       // The child exited normally; get its exit code.
4853       return WEXITSTATUS(status);
4854     } else if (WIFSIGNALED(status)) {
4855       // The child exited because of a signal.
4856       // The best value to return is 0x80 + signal number,
4857       // because that is what all Unix shells do, and because
4858       // it allows callers to distinguish between process exit and
4859       // process death by signal.
4860       return 0x80 + WTERMSIG(status);
4861     } else {
4862       // Unknown exit code; pass it through.
4863       return status;
4864     }
4865   }
4866   return -1;
4867 }
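
// Usage sketch (hypothetical command; the VM uses fork_and_exec for e.g.
// -XX:OnError handlers):
//
//   int exit_value = os::fork_and_exec((char*) "ls /tmp");
//   // exit_value is the command's exit code, 0x80 + signal number if the
//   // child died from a signal, or -1 if the fork itself failed.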
4868 
4869 // is_headless_jre()
4870 //
4871 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
4872 // in order to report whether we are running in a headless JRE.
4873 //
4874 // Since JDK8, xawt/libmawt.so has moved into the same directory
4875 // as libawt.so and been renamed libawt_xawt.so.
4876 bool os::is_headless_jre() {
4877   struct stat statbuf;
4878   char buf[MAXPATHLEN];
4879   char libmawtpath[MAXPATHLEN];
4880   const char *xawtstr = "/xawt/libmawt.so";
4881   const char *new_xawtstr = "/libawt_xawt.so";
4882 
4883   char *p;
4884 
4885   // Get path to libjvm.so
4886   os::jvm_path(buf, sizeof(buf));
4887 
4888   // Get rid of libjvm.so
4889   p = strrchr(buf, '/');
4890   if (p == NULL) return false;
4891   else *p = '\0';
4892 
4893   // Get rid of client or server
4894   p = strrchr(buf, '/');
4895   if (p == NULL) return false;
4896   else *p = '\0';
4897 
4898   // check xawt/libmawt.so
4899   strcpy(libmawtpath, buf);
4900   strcat(libmawtpath, xawtstr);
4901   if (::stat(libmawtpath, &statbuf) == 0) return false;
4902 
4903   // check libawt_xawt.so
4904   strcpy(libmawtpath, buf);
4905   strcat(libmawtpath, new_xawtstr);
4906   if (::stat(libmawtpath, &statbuf) == 0) return false;
4907 
4908   return true;
4909 }
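
// Worked example (hypothetical install path): if libjvm.so lives at
//   /opt/jdk/jre/lib/ppc64/server/libjvm.so
// the two strrchr() passes reduce buf to /opt/jdk/jre/lib/ppc64, and the
// function then stats /opt/jdk/jre/lib/ppc64/xawt/libmawt.so and
// /opt/jdk/jre/lib/ppc64/libawt_xawt.so, reporting headless only if
// neither library exists.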
4910 
4911 // Get the default path to the core file
4912 // Returns the length of the string
4913 int os::get_core_path(char* buffer, size_t bufferSize) {
4914   const char* p = get_current_directory(buffer, bufferSize);
4915 
4916   if (p == NULL) {
4917     assert(p != NULL, "failed to get current directory");
4918     return 0;
4919   }
4920 
4921   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4922                p, current_process_id());
4923 
4924   return strlen(buffer);
4925 }
4926 
4927 #ifndef PRODUCT
4928 void TestReserveMemorySpecial_test() {
4929   // No tests available for this platform
4930 }
4931 #endif
--- EOF ---