/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2015 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// According to the AIX OS doc, #pragma alloca must be used
// with the C++ compiler before referencing the function alloca().
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "libperfstat_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
// getrusage() is prepared to handle the associated failure.
#ifndef RUSAGE_THREAD
#define RUSAGE_THREAD   (1)               /* only the calling thread */
#endif
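
// Illustrative fallback pattern (a sketch mirroring os::elapsedVTime() below):
// on AIX levels where the kernel rejects RUSAGE_THREAD, getrusage() returns
// a nonzero status and callers fall back to process-wide times:
//   struct rusage usage;
//   if (getrusage(RUSAGE_THREAD, &usage) != 0) { /* use os::elapsedTime() instead */ }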

// PPC port
static const uintx Use64KPagesThreshold       = 1*M;
static const uintx MaxExpectedDataSegmentSize = SIZE_4G*2;

// Add missing declarations (should be in procinfo.h but isn't until AIX 6.1).
#if !defined(_AIXVERSION_610)
extern "C" {
  int getthrds64(pid_t ProcessIdentifier,
                 struct thrdentry64* ThreadBuffer,
                 int ThreadSize,
                 tid64_t* IndexPointer,
                 int Count);
}
#endif
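
// Example call (an illustrative sketch only; per the AIX docs, the index
// argument is an in/out cursor that the OS advances for the next call):
//   tid64_t cursor = 0;
//   struct thrdentry64 entry;
//   getthrds64(getpid(), &entry, sizeof(entry), &cursor, 1);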

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// The semantics in this file are such that codeptr_t is a *real code ptr*.
// This means that any function taking codeptr_t as arguments will assume
// a real codeptr and won't handle function descriptors (e.g. getFuncName),
// whereas functions taking address as args will deal with function
// descriptors (e.g. os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;

// Typedefs for stackslots, stack pointers, pointers to op codes.
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;

// Excerpts from systemcfg.h definitions newer than AIX 5.3.
#ifndef PV_7
#define PV_7 0x200000          /* Power PC 7 */
#define PV_7_Compat 0x208000   /* Power PC 7 */
#endif
#ifndef PV_8
#define PV_8 0x300000          /* Power PC 8 */
#define PV_8_Compat 0x308000   /* Power PC 8 */
#endif

#define trcVerbose(fmt, ...) { /* PPC port */  \
  if (Verbose) { \
    fprintf(stderr, fmt, ##__VA_ARGS__); \
    fputc('\n', stderr); fflush(stderr); \
  } \
}
#define trc(fmt, ...)        /* PPC port */

#define ERRBYE(s) { \
    trcVerbose(s); \
    return -1; \
}
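
// Usage sketch for the macros above: trcVerbose prints only when the Verbose
// flag is set, e.g.
//   trcVerbose("data page size: %s", describe_pagesize(g_multipage_support.datapsize));
// while ERRBYE logs its message and returns -1 from an int-returning function:
//   if (shmid == -1) ERRBYE("shmget failed");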

// Query dimensions of the stack of the calling thread.
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

// Function to check a given stack pointer against given stack limits.
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
  if (((uintptr_t)sp) & 0x7) {
    return false;
  }
  if (sp > stack_base) {
    return false;
  }
  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
    return false;
  }
  return true;
}

// Returns true if p is a valid code pointer.
inline bool is_valid_codepointer(codeptr_t p) {
  if (!p) {
    return false;
  }
  if (((uintptr_t)p) & 0x3) {
    return false;
  }
  if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
    return false;
  }
  return true;
}
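
// Note on the two checks above: a stack pointer must be 8-byte aligned and lie
// within [stack_base - stack_size, stack_base]; a code pointer must be 4-byte
// aligned (PPC instructions are 4 bytes wide) and point into the text segment
// of a loaded module. A function descriptor, which lives in a module's data
// segment, therefore fails is_valid_codepointer() even though it is a valid address.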

// Macro to check a given stack pointer against given stack limits and to die if test fails.
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// Macro to check the current stack pointer against given stack limits.
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
  address sp; \
  sp = os::current_stack_pointer(); \
  CHECK_STACK_PTR(sp, stack_base, stack_size); \
}

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;
pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;
int       os::Aix::_on_pase = -1;
int       os::Aix::_os_version = -1;
int       os::Aix::_stack_page_size = -1;
int       os::Aix::_xpg_sus_mode = -1;
int       os::Aix::_extshm = -1;
int       os::Aix::_logical_cpus = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static int      g_multipage_error  = -1;   // error analysis for multipage initialization
static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static pid_t    _initial_pid       = 0;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;

// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
// therefore should not be defined in the AIX class.
//
// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
// latter two (16M "large" and 16G "huge" pages) require special
// setup and are normally not available.
//
// AIX supports multiple page sizes per process, for:
//  - Stack (of the primordial thread, so not relevant for us)
//  - Data - data, bss, heap, for us also pthread stacks
//  - Text - text code
//  - shared memory
//
// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
// and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
//
// For shared memory, page size can be set dynamically via
// shmctl(). Different shared memory regions can have different page
// sizes.
//
// More information can be found at the IBM info center:
//   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
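//
// As an illustration (not verified against every AIX level), LDR_CNTRL
// suboptions are joined with '@'; e.g. starting a process with 64K data and
// shared memory pages could look like:
//   LDR_CNTRL=DATAPSIZE=64K@SHMPSIZE=64K <command>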
//
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};

// We must not accidentally allocate memory close to the BRK - even if
// that would work - because then we prevent the BRK segment from
// growing which may result in a malloc OOM even though there is
// enough memory. The problem only arises if we shmat() or mmap() at
// a specific wish address, e.g. to place the heap in a
// compressed-oops-friendly way.
static bool is_close_to_brk(address a) {
  address a1 = (address) sbrk(0);
  if (a >= a1 && a < (a1 + MaxExpectedDataSegmentSize)) {
    return true;
  }
  return false;
}
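
// Illustrative use (with a hypothetical wish address): before attaching memory
// at a caller-chosen address, reject candidates in [sbrk(0), sbrk(0) + 8G):
//   if (is_close_to_brk(requested_addr)) { /* choose a different wish address */ }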

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return 0xFFFFFFFFFFFFFFFFLL;
  }
}

julong os::physical_memory() {
  return Aix::physical_memory();
}

// Return true if user is running as root.

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function, emulates disclaim64 using multiple 32bit disclaims
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
  const unsigned int maxDisclaimSize = 0x40000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);

  char* p = addr;

  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i ++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}
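
// Worked example for the chunking above: with maxDisclaimSize = 0x40000000 (1 GB),
// disclaiming size = 0xA0000000 (2.5 GB) issues two full 1 GB disclaims followed
// by one 0x20000000 (0.5 GB) disclaim (numFullDisclaimsNeeded = 2).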

// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif


// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
    assert(false, "vmgetinfo failed to retrieve page size");
    return SIZE_4K;
  }

}

// Returns the kernel thread id of the currently running thread.
pid_t os::Aix::gettid() {
  return (pid_t) thread_self();
}

void os::Aix::initialize_system_info() {

  // Get the number of online (logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case SIZE_4K : return "4K";
    case SIZE_64K: return "64K";
    case SIZE_16M: return "16M";
    case SIZE_16G: return "16G";
    case -1:       return "not set";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Probe OS for multipage support.
// Will fill the global g_multipage_support structure.
// Must be called before calling os::large_page_init().
static void query_multipage_support() {

  guarantee(g_multipage_support.pagesize == -1,
            "do not call twice");

  g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");

  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // Default data page size is defined either by linker options (-bdatapsize)
  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // the default should be 4K.
  {
    void* p = ::malloc(SIZE_16M);
    g_multipage_support.datapsize = os::Aix::query_pagesize(p);
    ::free(p);
  }

  // Query default shm page size (LDR_CNTRL SHMPSIZE).
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // Before querying the stack page size, make sure we are not running as primordial
  // thread (because the primordial thread's stack may have a different page size than
  // pthread thread stacks). Running a VM on the primordial thread won't work for a
  // number of reasons, so we may just as well guarantee it here.
  guarantee0(!os::Aix::is_primordial_thread());

  // Query pthread stack page size.
  {
    int dummy = 0;
    g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
  }

  // Query default text page size (LDR_CNTRL TEXTPSIZE).
  /* PPC port: so far unused.
  {
    address any_function =
      (address) resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
    g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
  }
  */

  // Now probe for support of 64K pages and 16M pages.

  // Before OS/400 V6R1, there is no support for pages other than 4K.
  if (os::Aix::on_pase_V5R4_or_older()) {
    Unimplemented();
    goto query_multipage_support_end;
  }

  // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      trc("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
      trc("disabling multipage support.\n");
      g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
    for (int i = 0; i < num_psizes; i ++) {
      trcVerbose(" %s ", describe_pagesize(sizes[i]));
    }

    // Can we use 64K, 16M pages?
    for (int i = 0; i < num_psizes; i ++) {
      const size_t pagesize = sizes[i];
      if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
        continue;
      }
      bool can_use = false;
      trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
      const int shmid = ::shmget(IPC_PRIVATE, pagesize,
        IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee0(shmid != -1); // Should always work.
      // Try to set pagesize.
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = pagesize;
      if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
        const int en = errno;
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        // PPC port trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
        // PPC port  MiscUtils::describe_errno(en));
      } else {
        // Attach and double check pagesize.
        void* p = ::shmat(shmid, NULL, 0);
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        guarantee0(p != (void*) -1); // Should always work.
        const size_t real_pagesize = os::Aix::query_pagesize(p);
        if (real_pagesize != pagesize) {
          trcVerbose("real page size (0x%llX) differs.", real_pagesize);
        } else {
          can_use = true;
        }
        ::shmdt(p);
      }
      trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
      if (pagesize == SIZE_64K) {
        g_multipage_support.can_use_64K_pages = can_use;
      } else if (pagesize == SIZE_16M) {
        g_multipage_support.can_use_16M_pages = can_use;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  trcVerbose("Base page size (sysconf _SC_PAGESIZE): %s\n",
      describe_pagesize(g_multipage_support.pagesize));
  trcVerbose("Data page size (C-Heap, bss, etc): %s\n",
      describe_pagesize(g_multipage_support.datapsize));
  trcVerbose("Text page size: %s\n",
      describe_pagesize(g_multipage_support.textpsize));
  trcVerbose("Thread stack page size (pthread): %s\n",
      describe_pagesize(g_multipage_support.pthr_stack_pagesize));
  trcVerbose("Default shared memory page size: %s\n",
      describe_pagesize(g_multipage_support.shmpsize));
  trcVerbose("Can use 64K pages dynamically with shared memory: %s\n",
      (g_multipage_support.can_use_64K_pages ? "yes" : "no"));
  trcVerbose("Can use 16M pages dynamically with shared memory: %s\n",
      (g_multipage_support.can_use_16M_pages ? "yes" : "no"));
  trcVerbose("Multipage error details: %d\n",
      g_multipage_support.error);

  // sanity checks
  assert0(g_multipage_support.pagesize == SIZE_4K);
  assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
  // PPC port: so far unused. assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
  assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
  assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);

} // end query_multipage_support()

void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On AIX we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
    return true;
  } else {
    return false;
  }
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow a debugger in.
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Retrieve memory information.
// Returns false if something went wrong;
// the content of pmi is undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {

    Unimplemented();
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics.
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset(&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

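    // Convert the perfstat counters from 4 KB pages to bytes. Worked example:
    // real_total = 262144 pages * 4096 bytes/page = 1 GB of real memory.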
    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo

// Retrieve global cpu information.
// Returns false if something went wrong;
// the content of pci is undefined in this case.
bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
  assert(pci, "get_cpuinfo: invalid parameter");
  memset(pci, 0, sizeof(cpuinfo_t));

  perfstat_cpu_total_t psct;
  memset(&psct, '\0', sizeof(psct));

  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
    assert(0, "perfstat_cpu_total() failed");
    return false;
  }

  // global cpu information
  strcpy(pci->description, psct.description);
  pci->processorHZ = psct.processorHZ;
  pci->ncpus = psct.ncpus;
  os::Aix::_logical_cpus = psct.ncpus;
  for (int i = 0; i < 3; i++) {
    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
  }

  // get the processor version from _system_configuration
  switch (_system_configuration.version) {
  case PV_8:
    strcpy(pci->version, "Power PC 8");
    break;
  case PV_7:
    strcpy(pci->version, "Power PC 7");
    break;
  case PV_6_1:
    strcpy(pci->version, "Power PC 6 DD1.x");
    break;
  case PV_6:
    strcpy(pci->version, "Power PC 6");
    break;
  case PV_5:
    strcpy(pci->version, "Power PC 5");
    break;
  case PV_5_2:
    strcpy(pci->version, "Power PC 5_2");
    break;
  case PV_5_3:
    strcpy(pci->version, "Power PC 5_3");
    break;
  case PV_5_Compat:
    strcpy(pci->version, "PV_5_Compat");
    break;
  case PV_6_Compat:
    strcpy(pci->version, "PV_6_Compat");
    break;
  case PV_7_Compat:
    strcpy(pci->version, "PV_7_Compat");
    break;
  case PV_8_Compat:
    strcpy(pci->version, "PV_8_Compat");
    break;
  default:
    strcpy(pci->version, "unknown");
  }

  return true;

} // end os::Aix::get_cpuinfo

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Aix::libpthread_init() {
  return;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is kernel thread id (similar to Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level start routine
  thread->run();

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (os::Aix::supports_variable_stack_size()) {
    if (stack_size == 0) {
      stack_size = os::Aix::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } // else let pthread_create() pick the default value (96 K on AIX)

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  pthread_attr_destroy(&attr);

  if (ret == 0) {
    // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
  } else {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "thr_continue failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// thread local storage

int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, NULL);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}

// Note: This is currently not used by VM, as we don't destroy TLS key
// on VM exit.
void os::free_thread_local_storage(int index) {
  int rslt = pthread_key_delete((pthread_key_t)index);
  assert(rslt == 0, "invalid index");
}

void os::thread_local_storage_at_put(int index, void* value) {
  int rslt = pthread_setspecific((pthread_key_t)index, value);
  assert(rslt == 0, "pthread_setspecific failed");
}

extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }

double os::elapsedVTime() {
  struct rusage usage;
  int retval = getrusage(RUSAGE_THREAD, &usage);
  if (retval == 0) {
    return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
  } else {
    // better than nothing, but not much
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  seconds = jlong(time.tv_sec);
  nanos = jlong(time.tv_usec) * 1000;
}


// We need to manually declare mread_real_time,
// because IBM didn't provide a prototype in time.h.
// (They probably only ever tested in C, not C++.)
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {
    Unimplemented();
    return 0;
  } else {
    // On AIX use the precision of the processor's real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We'd better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonic increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}

char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core, void* siginfo, void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() { return (intx)pthread_self(); }

int os::current_process_id() {

  // This implementation returns a unique pid, the pid of the
  // launcher thread that starts the vm 'process'.

  // Under POSIX, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre-NPTL behaviour.

  // if you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in osThread_linux.hpp file

  return (int)(_initial_pid ? _initial_pid : getpid());
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt.
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
  if (lib) {
    if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
      return true;
    } else {
      return false;
    }
  } else {
    lib = LoadedLibraries::find_for_data_address(addr);
    if (lib) {
      if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
        return true;
      } else {
        return false;
      }
    } else {
      return false;
    }
  }
}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
//   it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
//   code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
//   NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
  if (lib) {
    // It's a real code pointer.
    return p;
  } else {
    lib = LoadedLibraries::find_for_data_address(p);
    if (lib) {
      // Pointer to data segment, potentially a function descriptor.
      address code_entry = (address)(((FunctionDescriptor*)p)->entry());
      if (LoadedLibraries::find_for_text_address(code_entry)) {
        // It's a function descriptor.
        return code_entry;
      }
    }
  }
  return NULL;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
  return Decoder::decode(addr, buf, buflen, offset, demangle);
}

static int getModuleName(codeptr_t pc,                    // [in] program counter
                         char* p_name, size_t namelen,    // [out] optional: module name
                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
                         ) {

  // initialize output parameters
  if (p_name && namelen > 0) {
    *p_name = '\0';
  }
  if (p_errmsg && errmsglen > 0) {
    *p_errmsg = '\0';
  }

  const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);
  if (lib) {
    if (p_name && namelen > 0) {
      sprintf(p_name, "%.*s", (int) namelen, lib->get_shortname());
    }
    return 0;
  }

  trcVerbose("pc outside any module");

  return -1;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
    return true;
  }
  return false;
}
1507 
1508 // Loads .dll/.so and in case of error it checks if .dll/.so was built
1509 // for the same architecture as Hotspot is running on.
1510 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1511 
1512   if (ebuf && ebuflen > 0) {
1513     ebuf[0] = '\0';
1514     ebuf[ebuflen - 1] = '\0';
1515   }
1516 
  if (!filename || strlen(filename) == 0) {
    if (ebuf && ebuflen > 0) {
      ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
    }
    return NULL;
  }
1521 
  // RTLD_LAZY is currently not implemented. The library is loaded immediately,
  // together with all its dependencies.
  void* result = ::dlopen(filename, RTLD_LAZY);
1524   if (result != NULL) {
1525     // Reload dll cache. Don't do this in signal handling.
1526     LoadedLibraries::reload();
1527     return result;
1528   } else {
1529     // error analysis when dlopen fails
1530     const char* const error_report = ::dlerror();
1531     if (error_report && ebuf && ebuflen > 0) {
1532       snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1533                filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1534     }
1535   }
1536   return NULL;
1537 }
1538 
1539 void* os::dll_lookup(void* handle, const char* name) {
1540   void* res = dlsym(handle, name);
1541   return res;
1542 }
1543 
1544 void* os::get_default_process_handle() {
1545   return (void*)::dlopen(NULL, RTLD_LAZY);
1546 }
1547 
1548 void os::print_dll_info(outputStream *st) {
1549   st->print_cr("Dynamic libraries:");
1550   LoadedLibraries::print(st);
1551 }
1552 
1553 void os::get_summary_os_info(char* buf, size_t buflen) {
1554   // There might be something more readable than uname results for AIX.
1555   struct utsname name;
1556   uname(&name);
1557   snprintf(buf, buflen, "%s %s", name.release, name.version);
1558 }
1559 
1560 void os::print_os_info(outputStream* st) {
1561   st->print("OS:");
1562 
1563   st->print("uname:");
1564   struct utsname name;
1565   uname(&name);
  // Pass the uname fields as arguments, never as the format string itself.
  st->print("%s ", name.sysname);
  st->print("%s ", name.nodename);
  st->print("%s ", name.release);
  st->print("%s ", name.version);
  st->print("%s", name.machine);
1571   st->cr();
1572 
1573   // rlimit
1574   st->print("rlimit:");
1575   struct rlimit rlim;
1576 
1577   st->print(" STACK ");
1578   getrlimit(RLIMIT_STACK, &rlim);
1579   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1580   else st->print("%uk", rlim.rlim_cur >> 10);
1581 
1582   st->print(", CORE ");
1583   getrlimit(RLIMIT_CORE, &rlim);
1584   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1585   else st->print("%uk", rlim.rlim_cur >> 10);
1586 
1587   st->print(", NPROC ");
1588   st->print("%d", sysconf(_SC_CHILD_MAX));
1589 
1590   st->print(", NOFILE ");
1591   getrlimit(RLIMIT_NOFILE, &rlim);
1592   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1593   else st->print("%d", rlim.rlim_cur);
1594 
1595   st->print(", AS ");
1596   getrlimit(RLIMIT_AS, &rlim);
1597   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1598   else st->print("%uk", rlim.rlim_cur >> 10);
1599 
1600   // Print limits on DATA, because it limits the C-heap.
1601   st->print(", DATA ");
1602   getrlimit(RLIMIT_DATA, &rlim);
1603   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1604   else st->print("%uk", rlim.rlim_cur >> 10);
1605   st->cr();
1606 
1607   // load average
1608   st->print("load average:");
1609   double loadavg[3] = {-1.L, -1.L, -1.L};
1610   os::loadavg(loadavg, 3);
1611   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1612   st->cr();
1613 }
1614 
1615 void os::print_memory_info(outputStream* st) {
1616 
1617   st->print_cr("Memory:");
1618 
1619   st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
1620   st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
1621   st->print_cr("  Default shared memory page size:        %s",
1622     describe_pagesize(g_multipage_support.shmpsize));
1623   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1624     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1625   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1626     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1627   if (g_multipage_error != 0) {
1628     st->print_cr("  multipage error: %d", g_multipage_error);
1629   }
1630 
1631   // print out LDR_CNTRL because it affects the default page sizes
1632   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1633   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1634 
1635   const char* const extshm = ::getenv("EXTSHM");
1636   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
  if (extshm != NULL && ((strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0))) {
1638     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1639   }
1640 
1641   // Call os::Aix::get_meminfo() to retrieve memory statistics.
1642   os::Aix::meminfo_t mi;
1643   if (os::Aix::get_meminfo(&mi)) {
1644     char buffer[256];
1645     if (os::Aix::on_aix()) {
1646       jio_snprintf(buffer, sizeof(buffer),
1647                    "  physical total : %llu\n"
1648                    "  physical free  : %llu\n"
1649                    "  swap total     : %llu\n"
1650                    "  swap free      : %llu\n",
1651                    mi.real_total,
1652                    mi.real_free,
1653                    mi.pgsp_total,
1654                    mi.pgsp_free);
1655     } else {
1656       Unimplemented();
1657     }
1658     st->print_raw(buffer);
1659   } else {
1660     st->print_cr("  (no more information available)");
1661   }
1662 }
1663 
// Get a string for the cpuinfo that is a summary of the cpu type.
void os::get_summary_cpu_info(char* buf, size_t buflen) {
1668   os::Aix::cpuinfo_t ci;
1669   if (os::Aix::get_cpuinfo(&ci)) {
    strncpy(buf, ci.version, buflen);
  } else {
    strncpy(buf, "AIX", buflen);
  }
  // strncpy does not terminate the buffer if the source string is too long.
  if (buflen > 0) {
    buf[buflen - 1] = '\0';
  }
1674 }
1675 
1676 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1677   // cpu
1678   st->print("CPU:");
1679   st->print("total %d", os::processor_count());
1680   // It's not safe to query number of active processors after crash
1681   // st->print("(active %d)", os::active_processor_count());
1682   st->print(" %s", VM_Version::cpu_features());
1683   st->cr();
1684 }
1685 
1686 void os::print_siginfo(outputStream* st, void* siginfo) {
1687   // Use common posix version.
1688   os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
1689   st->cr();
1690 }
1691 
1692 static void print_signal_handler(outputStream* st, int sig,
1693                                  char* buf, size_t buflen);
1694 
1695 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1696   st->print_cr("Signal Handlers:");
1697   print_signal_handler(st, SIGSEGV, buf, buflen);
1698   print_signal_handler(st, SIGBUS , buf, buflen);
1699   print_signal_handler(st, SIGFPE , buf, buflen);
1700   print_signal_handler(st, SIGPIPE, buf, buflen);
1701   print_signal_handler(st, SIGXFSZ, buf, buflen);
1702   print_signal_handler(st, SIGILL , buf, buflen);
1703   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
1704   print_signal_handler(st, SR_signum, buf, buflen);
1705   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1706   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1707   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1708   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1709   print_signal_handler(st, SIGTRAP, buf, buflen);
1710   print_signal_handler(st, SIGDANGER, buf, buflen);
1711 }
1712 
1713 static char saved_jvm_path[MAXPATHLEN] = {0};
1714 
1715 // Find the full path to the current module, libjvm.so.
1716 void os::jvm_path(char *buf, jint buflen) {
1717   // Error checking.
1718   if (buflen < MAXPATHLEN) {
1719     assert(false, "must use a large-enough buffer");
1720     buf[0] = '\0';
1721     return;
1722   }
1723   // Lazy resolve the path to current module.
1724   if (saved_jvm_path[0] != 0) {
1725     strcpy(buf, saved_jvm_path);
1726     return;
1727   }
1728 
1729   Dl_info dlinfo;
1730   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1731   assert(ret != 0, "cannot locate libjvm");
1732   char* rp = realpath((char *)dlinfo.dli_fname, buf);
1733   assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1734 
1735   strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
1736   saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
1737 }
1738 
1739 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1740   // no prefix required, not even "_"
1741 }
1742 
1743 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1744   // no suffix required
1745 }
1746 
1747 ////////////////////////////////////////////////////////////////////////////////
1748 // sun.misc.Signal support
1749 
1750 static volatile jint sigint_count = 0;
1751 
1752 static void
1753 UserHandler(int sig, void *siginfo, void *context) {
1754   // 4511530 - sem_post is serialized and handled by the manager thread. When
1755   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1756   // don't want to flood the manager thread with sem_post requests.
1757   if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1758     return;
1759 
1760   // Ctrl-C is pressed during error reporting, likely because the error
1761   // handler fails to abort. Let VM die immediately.
1762   if (sig == SIGINT && is_error_reported()) {
1763     os::die();
1764   }
1765 
1766   os::signal_notify(sig);
1767 }
1768 
1769 void* os::user_handler() {
1770   return CAST_FROM_FN_PTR(void*, UserHandler);
1771 }
1772 
1773 extern "C" {
1774   typedef void (*sa_handler_t)(int);
1775   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1776 }
1777 
1778 void* os::signal(int signal_number, void* handler) {
1779   struct sigaction sigAct, oldSigAct;
1780 
1781   sigfillset(&(sigAct.sa_mask));
1782 
1783   // Do not block out synchronous signals in the signal handler.
1784   // Blocking synchronous signals only makes sense if you can really
1785   // be sure that those signals won't happen during signal handling,
1786   // when the blocking applies. Normal signal handlers are lean and
1787   // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGVs, SIGILLs and SIGBUSes may and do happen.
  // On AIX and PASE there was a case where a SIGSEGV happened, followed
1790   // by a SIGILL, which was blocked due to the signal mask. The process
1791   // just hung forever. Better to crash from a secondary signal than to hang.
1792   sigdelset(&(sigAct.sa_mask), SIGSEGV);
1793   sigdelset(&(sigAct.sa_mask), SIGBUS);
1794   sigdelset(&(sigAct.sa_mask), SIGILL);
1795   sigdelset(&(sigAct.sa_mask), SIGFPE);
1796   sigdelset(&(sigAct.sa_mask), SIGTRAP);
1797 
1798   sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1799 
1800   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1801 
1802   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1803     // -1 means registration failed
1804     return (void *)-1;
1805   }
1806 
1807   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1808 }
1809 
1810 void os::signal_raise(int signal_number) {
1811   ::raise(signal_number);
1812 }
1813 
1814 //
1815 // The following code is moved from os.cpp for making this
1816 // code platform specific, which it is by its very nature.
1817 //
1818 
1819 // Will be modified when max signal is changed to be dynamic
1820 int os::sigexitnum_pd() {
1821   return NSIG;
1822 }
1823 
1824 // a counter for each possible signal value
1825 static volatile jint pending_signals[NSIG+1] = { 0 };
1826 
// POSIX handshaking semaphore.
1828 static sem_t sig_sem;
1829 
1830 void os::signal_init_pd() {
1831   // Initialize signal structures
1832   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1833 
1834   // Initialize signal semaphore
1835   int rc = ::sem_init(&sig_sem, 0, 0);
1836   guarantee(rc != -1, "sem_init failed");
1837 }
1838 
1839 void os::signal_notify(int sig) {
1840   Atomic::inc(&pending_signals[sig]);
1841   ::sem_post(&sig_sem);
1842 }
1843 
1844 static int check_pending_signals(bool wait) {
1845   Atomic::store(0, &sigint_count);
1846   for (;;) {
1847     for (int i = 0; i < NSIG + 1; i++) {
1848       jint n = pending_signals[i];
1849       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1850         return i;
1851       }
1852     }
1853     if (!wait) {
1854       return -1;
1855     }
1856     JavaThread *thread = JavaThread::current();
1857     ThreadBlockInVM tbivm(thread);
1858 
1859     bool threadIsSuspended;
1860     do {
1861       thread->set_suspend_equivalent();
1862       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1863 
1864       ::sem_wait(&sig_sem);
1865 
1866       // were we externally suspended while we were waiting?
1867       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1868       if (threadIsSuspended) {
1869         //
1870         // The semaphore has been incremented, but while we were waiting
1871         // another thread suspended us. We don't want to continue running
1872         // while suspended because that would surprise the thread that
1873         // suspended us.
1874         //
1875         ::sem_post(&sig_sem);
1876 
1877         thread->java_suspend_self();
1878       }
1879     } while (threadIsSuspended);
1880   }
1881 }
1882 
1883 int os::signal_lookup() {
1884   return check_pending_signals(false);
1885 }
1886 
1887 int os::signal_wait() {
1888   return check_pending_signals(true);
1889 }
1890 
1891 ////////////////////////////////////////////////////////////////////////////////
1892 // Virtual Memory
1893 
1894 // We need to keep small simple bookkeeping for os::reserve_memory and friends.
1895 
1896 #define VMEM_MAPPED  1
1897 #define VMEM_SHMATED 2
1898 
1899 struct vmembk_t {
1900   int type;         // 1 - mmap, 2 - shmat
1901   char* addr;
1902   size_t size;      // Real size, may be larger than usersize.
1903   size_t pagesize;  // page size of area
1904   vmembk_t* next;
1905 
1906   bool contains_addr(char* p) const {
1907     return p >= addr && p < (addr + size);
1908   }
1909 
1910   bool contains_range(char* p, size_t s) const {
1911     return contains_addr(p) && contains_addr(p + s - 1);
1912   }
1913 
1914   void print_on(outputStream* os) const {
1915     os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1916       " bytes, %d %s pages), %s",
1917       addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1918       (type == VMEM_SHMATED ? "shmat" : "mmap")
1919     );
1920   }
1921 
  // Check that range is a sub range of the memory block (or equal to the memory block);
  // also check that range is fully page aligned to the page size of the block.
1924   void assert_is_valid_subrange(char* p, size_t s) const {
1925     if (!contains_range(p, s)) {
1926       fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1927               "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
1928               p, p + s - 1, addr, addr + size - 1);
1929       guarantee0(false);
1930     }
1931     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1932       fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1933               " aligned to pagesize (%s)\n", p, p + s);
1934       guarantee0(false);
1935     }
1936   }
1937 };
1938 
1939 static struct {
1940   vmembk_t* first;
1941   MiscUtils::CritSect cs;
1942 } vmem;
1943 
1944 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1945   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1946   assert0(p);
1947   if (p) {
1948     MiscUtils::AutoCritSect lck(&vmem.cs);
1949     p->addr = addr; p->size = size;
1950     p->pagesize = pagesize;
1951     p->type = type;
1952     p->next = vmem.first;
1953     vmem.first = p;
1954   }
1955 }
1956 
1957 static vmembk_t* vmembk_find(char* addr) {
1958   MiscUtils::AutoCritSect lck(&vmem.cs);
1959   for (vmembk_t* p = vmem.first; p; p = p->next) {
1960     if (p->addr <= addr && (p->addr + p->size) > addr) {
1961       return p;
1962     }
1963   }
1964   return NULL;
1965 }
1966 
1967 static void vmembk_remove(vmembk_t* p0) {
1968   MiscUtils::AutoCritSect lck(&vmem.cs);
1969   assert0(p0);
1970   assert0(vmem.first); // List should not be empty.
1971   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1972     if (*pp == p0) {
1973       *pp = p0->next;
1974       ::free(p0);
1975       return;
1976     }
1977   }
1978   assert0(false); // Not found?
1979 }
1980 
1981 static void vmembk_print_on(outputStream* os) {
1982   MiscUtils::AutoCritSect lck(&vmem.cs);
1983   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1984     vmi->print_on(os);
1985     os->cr();
1986   }
1987 }
1988 
1989 // Reserve and attach a section of System V memory.
1990 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1991 // address. Failing that, it will attach the memory anywhere.
1992 // If <requested_addr> is NULL, function will attach the memory anywhere.
1993 //
// <alignment_hint> is ignored by this function. It is very probable, however, that the
// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
// Should this not be enough, we can put more work into it.
1997 static char* reserve_shmated_memory (
1998   size_t bytes,
1999   char* requested_addr,
2000   size_t alignment_hint) {
2001 
2002   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
2003     PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
2004     bytes, requested_addr, alignment_hint);
2005 
2006   // Either give me wish address or wish alignment but not both.
2007   assert0(!(requested_addr != NULL && alignment_hint != 0));
2008 
2009   // We must prevent anyone from attaching too close to the
2010   // BRK because that may cause malloc OOM.
2011   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2012     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2013       "Will attach anywhere.", requested_addr);
2014     // Act like the OS refused to attach there.
2015     requested_addr = NULL;
2016   }
2017 
2018   // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
2019   // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
2020   if (os::Aix::on_pase_V5R4_or_older()) {
2021     ShouldNotReachHere();
2022   }
2023 
2024   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
2025   const size_t size = align_size_up(bytes, SIZE_64K);
2026 
2027   // Reserve the shared segment.
2028   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
2029   if (shmid == -1) {
2030     trc("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
2031     return NULL;
2032   }
2033 
2034   // Important note:
2035   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
2036   // We must right after attaching it remove it from the system. System V shm segments are global and
2037   // survive the process.
2038   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
2039 
2040   struct shmid_ds shmbuf;
2041   memset(&shmbuf, 0, sizeof(shmbuf));
2042   shmbuf.shm_pagesize = SIZE_64K;
2043   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
2044     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
2045                size / SIZE_64K, errno);
2046     // I want to know if this ever happens.
2047     assert(false, "failed to set page size for shmat");
2048   }
2049 
2050   // Now attach the shared segment.
2051   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
2052   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
2053   // were not a segment boundary.
2054   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
2055   const int errno_shmat = errno;
2056 
2057   // (A) Right after shmat and before handing shmat errors delete the shm segment.
2058   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2059     trc("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2060     assert(false, "failed to remove shared memory segment!");
2061   }
2062 
2063   // Handle shmat error. If we failed to attach, just return.
2064   if (addr == (char*)-1) {
2065     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
2066     return NULL;
2067   }
2068 
  // Just for info: query the real page size. In case setting the page size did not
  // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
2071   const size_t real_pagesize = os::Aix::query_pagesize(addr);
2072   if (real_pagesize != shmbuf.shm_pagesize) {
2073     trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
2074   }
2075 
2076   if (addr) {
2077     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
2078       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
2079   } else {
2080     if (requested_addr != NULL) {
2081       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
2082     } else {
2083       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
2084     }
2085   }
2086 
2087   // book-keeping
2088   vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
2089   assert0(is_aligned_to(addr, os::vm_page_size()));
2090 
2091   return addr;
2092 }
2093 
2094 static bool release_shmated_memory(char* addr, size_t size) {
2095 
2096   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2097     addr, addr + size - 1);
2098 
2099   bool rc = false;
2100 
2101   // TODO: is there a way to verify shm size without doing bookkeeping?
2102   if (::shmdt(addr) != 0) {
2103     trcVerbose("error (%d).", errno);
2104   } else {
2105     trcVerbose("ok.");
2106     rc = true;
2107   }
2108   return rc;
2109 }
2110 
2111 static bool uncommit_shmated_memory(char* addr, size_t size) {
2112   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2113     addr, addr + size - 1);
2114 
2115   const bool rc = my_disclaim64(addr, size);
2116 
2117   if (!rc) {
2118     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2119     return false;
2120   }
2121   return true;
2122 }
2123 
2124 // Reserve memory via mmap.
2125 // If <requested_addr> is given, an attempt is made to attach at the given address.
2126 // Failing that, memory is allocated at any address.
2127 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2128 // allocate at an address aligned with the given alignment. Failing that, memory
2129 // is aligned anywhere.
2130 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2131   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2132     "alignment_hint " UINTX_FORMAT "...",
2133     bytes, requested_addr, alignment_hint);
2134 
2135   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
  if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size())) {
2137     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2138     return NULL;
2139   }
2140 
2141   // We must prevent anyone from attaching too close to the
2142   // BRK because that may cause malloc OOM.
2143   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2144     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2145       "Will attach anywhere.", requested_addr);
2146     // Act like the OS refused to attach there.
2147     requested_addr = NULL;
2148   }
2149 
2150   // Specify one or the other but not both.
2151   assert0(!(requested_addr != NULL && alignment_hint > 0));
2152 
2153   // In 64K mode, we claim the global page size (os::vm_page_size())
2154   // is 64K. This is one of the few points where that illusion may
2155   // break, because mmap() will always return memory aligned to 4K. So
2156   // we must ensure we only ever return memory aligned to 64k.
2157   if (alignment_hint) {
2158     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2159   } else {
2160     alignment_hint = os::vm_page_size();
2161   }
2162 
2163   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2164   const size_t size = align_size_up(bytes, os::vm_page_size());
2165 
2166   // alignment: Allocate memory large enough to include an aligned range of the right size and
2167   // cut off the leading and trailing waste pages.
2168   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2169   const size_t extra_size = size + alignment_hint;
2170 
2171   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2172   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2173   int flags = MAP_ANONYMOUS | MAP_SHARED;
2174 
2175   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2176   // it means if wishaddress is given but MAP_FIXED is not set.
2177   //
2178   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2179   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2180   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2181   // get clobbered.
2182   if (requested_addr != NULL) {
2183     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2184       flags |= MAP_FIXED;
2185     }
2186   }
2187 
2188   char* addr = (char*)::mmap(requested_addr, extra_size,
2189       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2190 
2191   if (addr == MAP_FAILED) {
2192     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2193     return NULL;
2194   }
2195 
2196   // Handle alignment.
2197   char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2198   const size_t waste_pre = addr_aligned - addr;
2199   char* const addr_aligned_end = addr_aligned + size;
2200   const size_t waste_post = extra_size - waste_pre - size;
2201   if (waste_pre > 0) {
2202     ::munmap(addr, waste_pre);
2203   }
2204   if (waste_post > 0) {
2205     ::munmap(addr_aligned_end, waste_post);
2206   }
2207   addr = addr_aligned;
2208 
2209   if (addr) {
2210     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2211       addr, addr + bytes, bytes);
2212   } else {
2213     if (requested_addr != NULL) {
2214       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2215     } else {
2216       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2217     }
2218   }
2219 
2220   // bookkeeping
2221   vmembk_add(addr, size, SIZE_4K, VMEM_MAPPED);
2222 
2223   // Test alignment, see above.
2224   assert0(is_aligned_to(addr, os::vm_page_size()));
2225 
2226   return addr;
2227 }
2228 
2229 static bool release_mmaped_memory(char* addr, size_t size) {
2230   assert0(is_aligned_to(addr, os::vm_page_size()));
2231   assert0(is_aligned_to(size, os::vm_page_size()));
2232 
2233   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2234     addr, addr + size - 1);
2235   bool rc = false;
2236 
2237   if (::munmap(addr, size) != 0) {
2238     trcVerbose("failed (%d)\n", errno);
2239     rc = false;
2240   } else {
2241     trcVerbose("ok.");
2242     rc = true;
2243   }
2244 
2245   return rc;
2246 }
2247 
2248 static bool uncommit_mmaped_memory(char* addr, size_t size) {
2249 
2250   assert0(is_aligned_to(addr, os::vm_page_size()));
2251   assert0(is_aligned_to(size, os::vm_page_size()));
2252 
2253   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2254     addr, addr + size - 1);
2255   bool rc = false;
2256 
2257   // Uncommit mmap memory with msync MS_INVALIDATE.
2258   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2259     trcVerbose("failed (%d)\n", errno);
2260     rc = false;
2261   } else {
2262     trcVerbose("ok.");
2263     rc = true;
2264   }
2265 
2266   return rc;
2267 }
2268 
2269 // End: shared memory bookkeeping
2270 ////////////////////////////////////////////////////////////////////////////////////////////////////
2271 
2272 int os::vm_page_size() {
2273   // Seems redundant as all get out.
2274   assert(os::Aix::page_size() != -1, "must call os::init");
2275   return os::Aix::page_size();
2276 }
2277 
2278 // Aix allocates memory by pages.
2279 int os::vm_allocation_granularity() {
2280   assert(os::Aix::page_size() != -1, "must call os::init");
2281   return os::Aix::page_size();
2282 }
2283 
2284 #ifdef PRODUCT
2285 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2286                                     int err) {
2287   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2288           ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2289           strerror(err), err);
2290 }
2291 #endif
2292 
2293 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2294                                   const char* mesg) {
2295   assert(mesg != NULL, "mesg must be specified");
2296   if (!pd_commit_memory(addr, size, exec)) {
2297     // Add extra info in product mode for vm_exit_out_of_memory():
2298     PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2299     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
2300   }
2301 }
2302 
2303 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2304 
2305   assert0(is_aligned_to(addr, os::vm_page_size()));
2306   assert0(is_aligned_to(size, os::vm_page_size()));
2307 
2308   vmembk_t* const vmi = vmembk_find(addr);
2309   assert0(vmi);
2310   vmi->assert_is_valid_subrange(addr, size);
2311 
2312   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2313 
2314   return true;
2315 }
2316 
2317 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2318   return pd_commit_memory(addr, size, exec);
2319 }
2320 
2321 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2322                                   size_t alignment_hint, bool exec,
2323                                   const char* mesg) {
2324   // Alignment_hint is ignored on this OS.
2325   pd_commit_memory_or_exit(addr, size, exec, mesg);
2326 }
2327 
2328 bool os::pd_uncommit_memory(char* addr, size_t size) {
2329   assert0(is_aligned_to(addr, os::vm_page_size()));
2330   assert0(is_aligned_to(size, os::vm_page_size()));
2331 
2332   // Dynamically do different things for mmap/shmat.
2333   const vmembk_t* const vmi = vmembk_find(addr);
2334   assert0(vmi);
2335   vmi->assert_is_valid_subrange(addr, size);
2336 
2337   if (vmi->type == VMEM_SHMATED) {
2338     return uncommit_shmated_memory(addr, size);
2339   } else {
2340     return uncommit_mmaped_memory(addr, size);
2341   }
2342 }
2343 
2344 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2345   // Do not call this; no need to commit stack pages on AIX.
2346   ShouldNotReachHere();
2347   return true;
2348 }
2349 
2350 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2351   // Do not call this; no need to commit stack pages on AIX.
2352   ShouldNotReachHere();
2353   return true;
2354 }
2355 
2356 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2357 }
2358 
2359 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2360 }
2361 
2362 void os::numa_make_global(char *addr, size_t bytes) {
2363 }
2364 
2365 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2366 }
2367 
2368 bool os::numa_topology_changed() {
2369   return false;
2370 }
2371 
2372 size_t os::numa_get_groups_num() {
2373   return 1;
2374 }
2375 
2376 int os::numa_get_group_id() {
2377   return 0;
2378 }
2379 
2380 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2381   if (size > 0) {
2382     ids[0] = 0;
2383     return 1;
2384   }
2385   return 0;
2386 }
2387 
2388 bool os::get_page_info(char *start, page_info* info) {
2389   return false;
2390 }
2391 
2392 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2393   return end;
2394 }
2395 
2396 // Reserves and attaches a shared memory segment.
2397 // Will assert if a wish address is given and could not be obtained.
2398 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2399 
  // All other Unices do a mmap(MAP_FIXED) if the addr is given,
  // thereby clobbering old mappings at that place. That is probably
  // not intended, never used, and almost certainly an error were it
  // ever used this way (to attach at a specified address without
  // clobbering old mappings, an alternate API exists,
  // os::attempt_reserve_memory_at()).
  // Instead of mimicking the dangerous coding of the other platforms, here I
  // just ignore the requested address (release) or assert (debug).
2408   assert0(requested_addr == NULL);
2409 
2410   // Always round to os::vm_page_size(), which may be larger than 4K.
2411   bytes = align_size_up(bytes, os::vm_page_size());
2412   const size_t alignment_hint0 =
2413     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2414 
2415   // In 4K mode always use mmap.
2416   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2417   if (os::vm_page_size() == SIZE_4K) {
2418     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2419   } else {
2420     if (bytes >= Use64KPagesThreshold) {
2421       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2422     } else {
2423       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2424     }
2425   }
2426 }
2427 
2428 bool os::pd_release_memory(char* addr, size_t size) {
2429 
2430   // Dynamically do different things for mmap/shmat.
2431   vmembk_t* const vmi = vmembk_find(addr);
2432   assert0(vmi);
2433 
2434   // Always round to os::vm_page_size(), which may be larger than 4K.
2435   size = align_size_up(size, os::vm_page_size());
2436   addr = (char *)align_ptr_up(addr, os::vm_page_size());
2437 
2438   bool rc = false;
2439   bool remove_bookkeeping = false;
2440   if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use the memory anymore (but it still
    //   occupies page table space).
2446     vmi->assert_is_valid_subrange(addr, size);
2447     if (addr == vmi->addr && size == vmi->size) {
2448       rc = release_shmated_memory(addr, size);
2449       remove_bookkeeping = true;
2450     } else {
2451       rc = uncommit_shmated_memory(addr, size);
2452     }
2453   } else {
2454     // User may unmap partial regions but region has to be fully contained.
2455 #ifdef ASSERT
2456     vmi->assert_is_valid_subrange(addr, size);
2457 #endif
2458     rc = release_mmaped_memory(addr, size);
2459     remove_bookkeeping = true;
2460   }
2461 
2462   // update bookkeeping
2463   if (rc && remove_bookkeeping) {
2464     vmembk_remove(vmi);
2465   }
2466 
2467   return rc;
2468 }
2469 
2470 static bool checked_mprotect(char* addr, size_t size, int prot) {
2471 
  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
  // not tell me if protection failed when trying to protect an un-protectable range.
  //
  // This means if the memory was allocated using shmget/shmat, protection won't work
  // but mprotect will still return 0:
2477   //
2478   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2479 
  bool rc = (::mprotect(addr, size, prot) == 0);
2481 
2482   if (!rc) {
2483     const char* const s_errno = strerror(errno);
2484     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2485     return false;
2486   }
2487 
2488   // mprotect success check
2489   //
2490   // Mprotect said it changed the protection but can I believe it?
2491   //
2492   // To be sure I need to check the protection afterwards. Try to
2493   // read from protected memory and check whether that causes a segfault.
2494   //
2495   if (!os::Aix::xpg_sus_mode()) {
2496 
2497     if (CanUseSafeFetch32()) {
2498 
      const bool read_protected =
        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
         SafeFetch32((int*)addr, 0x76543210) == 0x76543210);
2502 
2503       if (prot & PROT_READ) {
2504         rc = !read_protected;
2505       } else {
2506         rc = read_protected;
2507       }
2508     }
2509   }
2510   if (!rc) {
2511     assert(false, "mprotect failed.");
2512   }
2513   return rc;
2514 }
2515 
2516 // Set protections specified
2517 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2518   unsigned int p = 0;
2519   switch (prot) {
2520   case MEM_PROT_NONE: p = PROT_NONE; break;
2521   case MEM_PROT_READ: p = PROT_READ; break;
2522   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2523   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2524   default:
2525     ShouldNotReachHere();
2526   }
2527   // is_committed is unused.
2528   return checked_mprotect(addr, size, p);
2529 }
2530 
2531 bool os::guard_memory(char* addr, size_t size) {
2532   return checked_mprotect(addr, size, PROT_NONE);
2533 }
2534 
2535 bool os::unguard_memory(char* addr, size_t size) {
2536   return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2537 }
2538 
2539 // Large page support
2540 
2541 static size_t _large_page_size = 0;
2542 
2543 // Enable large page support if OS allows that.
2544 void os::large_page_init() {
2545   return; // Nothing to do. See query_multipage_support and friends.
2546 }
2547 
2548 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2549   // "exec" is passed in but not used. Creating the shared image for
2550   // the code cache doesn't have an SHM_X executable permission to check.
2551   Unimplemented();
2552   return 0;
2553 }
2554 
2555 bool os::release_memory_special(char* base, size_t bytes) {
2556   // Detaching the SHM segment will also delete it, see reserve_memory_special().
2557   Unimplemented();
2558   return false;
2559 }
2560 
2561 size_t os::large_page_size() {
2562   return _large_page_size;
2563 }
2564 
2565 bool os::can_commit_large_page_memory() {
2566   // Does not matter, we do not support huge pages.
2567   return false;
2568 }
2569 
2570 bool os::can_execute_large_page_memory() {
2571   // Does not matter, we do not support huge pages.
2572   return false;
2573 }
2574 
2575 // Reserve memory at an arbitrary address, only if that area is
2576 // available (and not reserved for something else).
2577 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2579 
2580   // Always round to os::vm_page_size(), which may be larger than 4K.
2581   bytes = align_size_up(bytes, os::vm_page_size());
2582 
2583   // In 4K mode always use mmap.
2584   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2585   if (os::vm_page_size() == SIZE_4K) {
2586     return reserve_mmaped_memory(bytes, requested_addr, 0);
2587   } else {
2588     if (bytes >= Use64KPagesThreshold) {
2589       return reserve_shmated_memory(bytes, requested_addr, 0);
2590     } else {
2591       return reserve_mmaped_memory(bytes, requested_addr, 0);
2592     }
2593   }
2594 
2595   return addr;
2596 }
2597 
2598 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2599   return ::read(fd, buf, nBytes);
2600 }
2601 
2602 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2603   return ::pread(fd, buf, nBytes, offset);
2604 }
2605 
2606 void os::naked_short_sleep(jlong ms) {
2607   struct timespec req;
2608 
  assert(ms < 1000, "Un-interruptible sleep, short time use only");
2610   req.tv_sec = 0;
2611   if (ms > 0) {
2612     req.tv_nsec = (ms % 1000) * 1000000;
2613   }
2614   else {
2615     req.tv_nsec = 1;
2616   }
2617 
2618   nanosleep(&req, NULL);
2619 
2620   return;
2621 }
2622 
2623 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2624 void os::infinite_sleep() {
2625   while (true) {    // sleep forever ...
2626     ::sleep(100);   // ... 100 seconds at a time
2627   }
2628 }
2629 
2630 // Used to convert frequent JVM_Yield() to nops
2631 bool os::dont_yield() {
2632   return DontYieldALot;
2633 }
2634 
2635 void os::naked_yield() {
2636   sched_yield();
2637 }
2638 
2639 ////////////////////////////////////////////////////////////////////////////////
2640 // thread priority support
2641 
2642 // From AIX manpage to pthread_setschedparam
2643 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2644 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2645 //
2646 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2647 // range from 40 to 80, where 40 is the least favored priority and 80
2648 // is the most favored."
2649 //
2650 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2651 // scheduling there; however, this still leaves iSeries.)
2652 //
2653 // We use the same values for AIX and PASE.
2654 int os::java_to_os_priority[CriticalPriority + 1] = {
2655   54,             // 0 Entry should never be used
2656 
2657   55,             // 1 MinPriority
2658   55,             // 2
2659   56,             // 3
2660 
2661   56,             // 4
2662   57,             // 5 NormPriority
2663   57,             // 6
2664 
2665   58,             // 7
2666   58,             // 8
2667   59,             // 9 NearMaxPriority
2668 
2669   60,             // 10 MaxPriority
2670 
2671   60              // 11 CriticalPriority
2672 };
2673 
2674 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2675   if (!UseThreadPriorities) return OS_OK;
2676   pthread_t thr = thread->osthread()->pthread_id();
2677   int policy = SCHED_OTHER;
2678   struct sched_param param;
2679   param.sched_priority = newpri;
2680   int ret = pthread_setschedparam(thr, policy, &param);
2681 
2682   if (ret != 0) {
2683     trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2684         (int)thr, newpri, ret, strerror(ret));
2685   }
2686   return (ret == 0) ? OS_OK : OS_ERR;
2687 }
2688 
2689 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2690   if (!UseThreadPriorities) {
2691     *priority_ptr = java_to_os_priority[NormPriority];
2692     return OS_OK;
2693   }
2694   pthread_t thr = thread->osthread()->pthread_id();
2695   int policy = SCHED_OTHER;
2696   struct sched_param param;
2697   int ret = pthread_getschedparam(thr, &policy, &param);
2698   *priority_ptr = param.sched_priority;
2699 
2700   return (ret == 0) ? OS_OK : OS_ERR;
2701 }
2702 
2703 // Hint to the underlying OS that a task switch would not be good.
2704 // Void return because it's a hint and can fail.
2705 void os::hint_no_preempt() {}
2706 
2707 ////////////////////////////////////////////////////////////////////////////////
2708 // suspend/resume support
2709 
2710 //  the low-level signal-based suspend/resume support is a remnant from the
2711 //  old VM-suspension that used to be for java-suspension, safepoints etc,
2712 //  within hotspot. Now there is a single use-case for this:
2713 //    - calling get_thread_pc() on the VMThread by the flat-profiler task
2714 //      that runs in the watcher thread.
2715 //  The remaining code is greatly simplified from the more general suspension
2716 //  code that used to be used.
2717 //
2718 //  The protocol is quite simple:
2719 //  - suspend:
2720 //      - sends a signal to the target thread
2721 //      - polls the suspend state of the osthread using a yield loop
2722 //      - target thread signal handler (SR_handler) sets suspend state
2723 //        and blocks in sigsuspend until continued
2724 //  - resume:
2725 //      - sets target osthread state to continue
2726 //      - sends signal to end the sigsuspend loop in the SR_handler
2727 //
2728 //  Note that the SR_lock plays no role in this suspend/resume protocol.
2729 //
2730 
2731 static void resume_clear_context(OSThread *osthread) {
2732   osthread->set_ucontext(NULL);
2733   osthread->set_siginfo(NULL);
2734 }
2735 
2736 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2737   osthread->set_ucontext(context);
2738   osthread->set_siginfo(siginfo);
2739 }
2740 
2741 //
2742 // Handler function invoked when a thread's execution is suspended or
2743 // resumed. We have to be careful that only async-safe functions are
2744 // called here (Note: most pthread functions are not async safe and
2745 // should be avoided.)
2746 //
2747 // Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal handler
// from being run. libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal.
2752 //
2753 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
2754 //
2755 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2756   // Save and restore errno to avoid confusing native code with EINTR
2757   // after sigsuspend.
2758   int old_errno = errno;
2759 
2760   Thread* thread = Thread::current();
2761   OSThread* osthread = thread->osthread();
2762   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2763 
2764   os::SuspendResume::State current = osthread->sr.state();
2765   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2766     suspend_save_context(osthread, siginfo, context);
2767 
2768     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2769     os::SuspendResume::State state = osthread->sr.suspended();
2770     if (state == os::SuspendResume::SR_SUSPENDED) {
2771       sigset_t suspend_set;  // signals for sigsuspend()
2772 
2773       // get current set of blocked signals and unblock resume signal
2774       pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2775       sigdelset(&suspend_set, SR_signum);
2776 
2777       // wait here until we are resumed
2778       while (1) {
2779         sigsuspend(&suspend_set);
2780 
2781         os::SuspendResume::State result = osthread->sr.running();
2782         if (result == os::SuspendResume::SR_RUNNING) {
2783           break;
2784         }
2785       }
2786 
2787     } else if (state == os::SuspendResume::SR_RUNNING) {
2788       // request was cancelled, continue
2789     } else {
2790       ShouldNotReachHere();
2791     }
2792 
2793     resume_clear_context(osthread);
2794   } else if (current == os::SuspendResume::SR_RUNNING) {
2795     // request was cancelled, continue
2796   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2797     // ignore
2798   } else {
2799     ShouldNotReachHere();
2800   }
2801 
2802   errno = old_errno;
2803 }
2804 
2805 static int SR_initialize() {
2806   struct sigaction act;
2807   char *s;
2808   // Get signal number to use for suspend/resume
2809   if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
2810     int sig = ::strtol(s, 0, 10);
    if (sig > 0 && sig < NSIG) {
2812       SR_signum = sig;
2813     }
2814   }
2815 
2816   assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
2817         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
2818 
2819   sigemptyset(&SR_sigset);
2820   sigaddset(&SR_sigset, SR_signum);
2821 
2822   // Set up signal handler for suspend/resume.
2823   act.sa_flags = SA_RESTART|SA_SIGINFO;
2824   act.sa_handler = (void (*)(int)) SR_handler;
2825 
2826   // SR_signum is blocked by default.
2827   // 4528190 - We also need to block pthread restart signal (32 on all
2828   // supported Linux platforms). Note that LinuxThreads need to block
2829   // this signal for all threads to work properly. So we don't have
2830   // to use hard-coded signal number when setting up the mask.
2831   pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
2832 
2833   if (sigaction(SR_signum, &act, 0) == -1) {
2834     return -1;
2835   }
2836 
2837   // Save signal flag
2838   os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
2839   return 0;
2840 }
2841 
2842 static int SR_finalize() {
2843   return 0;
2844 }
2845 
2846 static int sr_notify(OSThread* osthread) {
2847   int status = pthread_kill(osthread->pthread_id(), SR_signum);
2848   assert_status(status == 0, status, "pthread_kill");
2849   return status;
2850 }
2851 
2852 // "Randomly" selected value for how long we want to spin
2853 // before bailing out on suspending a thread, also how often
2854 // we send a signal to a thread we want to resume
2855 static const int RANDOMLY_LARGE_INTEGER = 1000000;
2856 static const int RANDOMLY_LARGE_INTEGER2 = 100;
2857 
2858 // returns true on success and false on error - really an error is fatal
2859 // but this seems the normal response to library errors
2860 static bool do_suspend(OSThread* osthread) {
2861   assert(osthread->sr.is_running(), "thread should be running");
2862   // mark as suspended and send signal
2863 
2864   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
2865     // failed to switch, state wasn't running?
2866     ShouldNotReachHere();
2867     return false;
2868   }
2869 
2870   if (sr_notify(osthread) != 0) {
2871     // try to cancel, switch to running
2872 
2873     os::SuspendResume::State result = osthread->sr.cancel_suspend();
2874     if (result == os::SuspendResume::SR_RUNNING) {
2875       // cancelled
2876       return false;
2877     } else if (result == os::SuspendResume::SR_SUSPENDED) {
2878       // somehow managed to suspend
2879       return true;
2880     } else {
2881       ShouldNotReachHere();
2882       return false;
2883     }
2884   }
2885 
2886   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
2887 
2888   for (int n = 0; !osthread->sr.is_suspended(); n++) {
2889     for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
2890       os::naked_yield();
2891     }
2892 
2893     // timeout, try to cancel the request
2894     if (n >= RANDOMLY_LARGE_INTEGER) {
2895       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
2896       if (cancelled == os::SuspendResume::SR_RUNNING) {
2897         return false;
2898       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
2899         return true;
2900       } else {
2901         ShouldNotReachHere();
2902         return false;
2903       }
2904     }
2905   }
2906 
2907   guarantee(osthread->sr.is_suspended(), "Must be suspended");
2908   return true;
2909 }
2910 
2911 static void do_resume(OSThread* osthread) {
2912   //assert(osthread->sr.is_suspended(), "thread should be suspended");
2913 
2914   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
2915     // failed to switch to WAKEUP_REQUEST
2916     ShouldNotReachHere();
2917     return;
2918   }
2919 
2920   while (!osthread->sr.is_running()) {
2921     if (sr_notify(osthread) == 0) {
2922       for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
2923         for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
2924           os::naked_yield();
2925         }
2926       }
2927     } else {
2928       ShouldNotReachHere();
2929     }
2930   }
2931 
2932   guarantee(osthread->sr.is_running(), "Must be running!");
2933 }
2934 
2935 ///////////////////////////////////////////////////////////////////////////////////
2936 // signal handling (except suspend/resume)
2937 
2938 // This routine may be used by user applications as a "hook" to catch signals.
2939 // The user-defined signal handler must pass unrecognized signals to this
2940 // routine, and if it returns true (non-zero), then the signal handler must
// return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine that kills the process.
2944 //
2945 // If this routine returns false, it is OK to call it again. This allows
2946 // the user-defined signal handler to perform checks either before or after
2947 // the VM performs its own checks. Naturally, the user code would be making
2948 // a serious error if it tried to handle an exception (such as a null check
2949 // or breakpoint) that the VM was generating for its own correct operation.
2950 //
2951 // This routine may recognize any of the following kinds of signals:
2952 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2953 // It should be consulted by handlers for any of those signals.
2954 //
2955 // The caller of this routine must pass in the three arguments supplied
2956 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
2957 // field of the structure passed to sigaction(). This routine assumes that
2958 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2959 //
2960 // Note that the VM will print warnings if it detects conflicting signal
2961 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2962 //
2963 extern "C" JNIEXPORT int
2964 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2965 
// Set thread signal mask (for some reason on AIX sigthreadmask() seems
// to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same).
2969 bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
2970   const int rc = ::pthread_sigmask(how, set, oset);
  // Return value semantics differ slightly in the error case:
  // pthread_sigmask returns the error number, sigthreadmask returns -1 and
  // sets the global errno (so pthread_sigmask is more threadsafe for error
  // handling). But success is always 0.
  return rc == 0;
2976 }
2977 
2978 // Function to unblock all signals which are, according
2979 // to POSIX, typical program error signals. If they happen while being blocked,
2980 // they typically will bring down the process immediately.
2981 bool unblock_program_error_signals() {
2982   sigset_t set;
2983   ::sigemptyset(&set);
2984   ::sigaddset(&set, SIGILL);
2985   ::sigaddset(&set, SIGBUS);
2986   ::sigaddset(&set, SIGFPE);
2987   ::sigaddset(&set, SIGSEGV);
2988   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2989 }
2990 
2991 // Renamed from 'signalHandler' to avoid collision with other shared libs.
2992 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2993   assert(info != NULL && uc != NULL, "it must be old kernel");
2994 
2995   // Never leave program error signals blocked;
2996   // on all our platforms they would bring down the process immediately when
2997   // getting raised while being blocked.
2998   unblock_program_error_signals();
2999 
3000   JVM_handle_aix_signal(sig, info, uc, true);
3001 }
3002 
3003 // This boolean allows users to forward their own non-matching signals
3004 // to JVM_handle_aix_signal, harmlessly.
3005 bool os::Aix::signal_handlers_are_installed = false;
3006 
3007 // For signal-chaining
3008 struct sigaction os::Aix::sigact[MAXSIGNUM];
3009 unsigned int os::Aix::sigs = 0;
3010 bool os::Aix::libjsig_is_loaded = false;
3011 typedef struct sigaction *(*get_signal_t)(int);
3012 get_signal_t os::Aix::get_signal_action = NULL;
3013 
3014 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
3015   struct sigaction *actp = NULL;
3016 
3017   if (libjsig_is_loaded) {
3018     // Retrieve the old signal handler from libjsig
3019     actp = (*get_signal_action)(sig);
3020   }
3021   if (actp == NULL) {
3022     // Retrieve the preinstalled signal handler from jvm
3023     actp = get_preinstalled_handler(sig);
3024   }
3025 
3026   return actp;
3027 }
3028 
3029 static bool call_chained_handler(struct sigaction *actp, int sig,
3030                                  siginfo_t *siginfo, void *context) {
3031   // Call the old signal handler
3032   if (actp->sa_handler == SIG_DFL) {
3033     // It's more reasonable to let jvm treat it as an unexpected exception
3034     // instead of taking the default action.
3035     return false;
3036   } else if (actp->sa_handler != SIG_IGN) {
3037     if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
3039       sigaddset(&(actp->sa_mask), sig);
3040     }
3041 
3042     sa_handler_t hand = NULL;
3043     sa_sigaction_t sa = NULL;
3044     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3045     // retrieve the chained handler
3046     if (siginfo_flag_set) {
3047       sa = actp->sa_sigaction;
3048     } else {
3049       hand = actp->sa_handler;
3050     }
3051 
3052     if ((actp->sa_flags & SA_RESETHAND) != 0) {
3053       actp->sa_handler = SIG_DFL;
3054     }
3055 
3056     // try to honor the signal mask
3057     sigset_t oset;
3058     pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3059 
3060     // call into the chained handler
3061     if (siginfo_flag_set) {
3062       (*sa)(sig, siginfo, context);
3063     } else {
3064       (*hand)(sig);
3065     }
3066 
3067     // restore the signal mask
3068     pthread_sigmask(SIG_SETMASK, &oset, 0);
3069   }
3070   // Tell jvm's signal handler the signal is taken care of.
3071   return true;
3072 }
3073 
3074 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3075   bool chained = false;
3076   // signal-chaining
3077   if (UseSignalChaining) {
3078     struct sigaction *actp = get_chained_signal_action(sig);
3079     if (actp != NULL) {
3080       chained = call_chained_handler(actp, sig, siginfo, context);
3081     }
3082   }
3083   return chained;
3084 }
3085 
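// Note: 'sigs' is used as a bitmask: bit n is set iff a pre-existing handler
// for signal n has been saved in sigact[]. Being an unsigned int, it can
// track at most as many signals as that type has bits.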
3086 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3087   if ((((unsigned int)1 << sig) & sigs) != 0) {
3088     return &sigact[sig];
3089   }
3090   return NULL;
3091 }
3092 
3093 void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3094   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3095   sigact[sig] = oldAct;
3096   sigs |= (unsigned int)1 << sig;
3097 }
3098 
// For diagnostics.
3100 int os::Aix::sigflags[MAXSIGNUM];
3101 
3102 int os::Aix::get_our_sigflags(int sig) {
3103   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3104   return sigflags[sig];
3105 }
3106 
3107 void os::Aix::set_our_sigflags(int sig, int flags) {
3108   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3109   sigflags[sig] = flags;
3110 }
3111 
3112 void os::Aix::set_signal_handler(int sig, bool set_installed) {
3113   // Check for overwrite.
3114   struct sigaction oldAct;
3115   sigaction(sig, (struct sigaction*)NULL, &oldAct);
3116 
3117   void* oldhand = oldAct.sa_sigaction
3118     ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3119     : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3120   // Renamed 'signalHandler' to avoid collision with other shared libs.
3121   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3122       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3123       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3124     if (AllowUserSignalHandlers || !set_installed) {
3125       // Do not overwrite; user takes responsibility to forward to us.
3126       return;
3127     } else if (UseSignalChaining) {
3128       // save the old handler in jvm
3129       save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
3132     } else {
3133       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
3134                     "%#lx for signal %d.", (long)oldhand, sig));
3135     }
3136   }
3137 
3138   struct sigaction sigAct;
3139   sigfillset(&(sigAct.sa_mask));
3140   if (!set_installed) {
3141     sigAct.sa_handler = SIG_DFL;
3142     sigAct.sa_flags = SA_RESTART;
3143   } else {
3144     // Renamed 'signalHandler' to avoid collision with other shared libs.
3145     sigAct.sa_sigaction = javaSignalHandler;
3146     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3147   }
  // Save the flags we are about to set, for later diagnostic checks.
3149   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3150   sigflags[sig] = sigAct.sa_flags;
3151 
3152   int ret = sigaction(sig, &sigAct, &oldAct);
3153   assert(ret == 0, "check");
3154 
3155   void* oldhand2 = oldAct.sa_sigaction
3156                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3157                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3158   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3159 }
3160 
3161 // install signal handlers for signals that HotSpot needs to
3162 // handle in order to support Java-level exception handling.
3163 void os::Aix::install_signal_handlers() {
3164   if (!signal_handlers_are_installed) {
3165     signal_handlers_are_installed = true;
3166 
3167     // signal-chaining
3168     typedef void (*signal_setting_t)();
3169     signal_setting_t begin_signal_setting = NULL;
3170     signal_setting_t end_signal_setting = NULL;
3171     begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3172                              dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3173     if (begin_signal_setting != NULL) {
3174       end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3175                              dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3176       get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3177                             dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3178       libjsig_is_loaded = true;
3179       assert(UseSignalChaining, "should enable signal-chaining");
3180     }
3181     if (libjsig_is_loaded) {
3182       // Tell libjsig jvm is setting signal handlers
3183       (*begin_signal_setting)();
3184     }
3185 
3186     set_signal_handler(SIGSEGV, true);
3187     set_signal_handler(SIGPIPE, true);
3188     set_signal_handler(SIGBUS, true);
3189     set_signal_handler(SIGILL, true);
3190     set_signal_handler(SIGFPE, true);
3191     set_signal_handler(SIGTRAP, true);
3192     set_signal_handler(SIGXFSZ, true);
3193     set_signal_handler(SIGDANGER, true);
3194 
3195     if (libjsig_is_loaded) {
3196       // Tell libjsig jvm finishes setting signal handlers.
3197       (*end_signal_setting)();
3198     }
3199 
    // We don't activate the signal checker if libjsig is in place, we trust
    // ourselves; and if AllowUserSignalHandlers is set, all bets are off anyway.
    // Log that signal checking is off only if -verbose:jni is specified.
3203     if (CheckJNICalls) {
3204       if (libjsig_is_loaded) {
3205         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3206         check_signals = false;
3207       }
3208       if (AllowUserSignalHandlers) {
3209         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3210         check_signals = false;
3211       }
3212       // Need to initialize check_signal_done.
3213       ::sigemptyset(&check_signal_done);
3214     }
3215   }
3216 }
3217 
3218 static const char* get_signal_handler_name(address handler,
3219                                            char* buf, int buflen) {
3220   int offset;
3221   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3222   if (found) {
3223     // skip directory names
3224     const char *p1, *p2;
3225     p1 = buf;
3226     size_t len = strlen(os::file_separator());
3227     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    // The way os::dll_address_to_library_name is implemented on Aix
    // right now, it always returns -1 for the offset, which is not
    // terribly informative.
    // Will fix that. For now, omit the offset.
3232     jio_snprintf(buf, buflen, "%s", p1);
3233   } else {
3234     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3235   }
3236   return buf;
3237 }
3238 
3239 static void print_signal_handler(outputStream* st, int sig,
3240                                  char* buf, size_t buflen) {
3241   struct sigaction sa;
3242   sigaction(sig, NULL, &sa);
3243 
3244   st->print("%s: ", os::exception_name(sig, buf, buflen));
3245 
3246   address handler = (sa.sa_flags & SA_SIGINFO)
3247     ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3248     : CAST_FROM_FN_PTR(address, sa.sa_handler);
3249 
3250   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3251     st->print("SIG_DFL");
3252   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3253     st->print("SIG_IGN");
3254   } else {
3255     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3256   }
3257 
3258   // Print readable mask.
3259   st->print(", sa_mask[0]=");
3260   os::Posix::print_signal_set_short(st, &sa.sa_mask);
3261 
3262   address rh = VMError::get_resetted_sighandler(sig);
  // The handler may have been reset by VMError.
3264   if (rh != NULL) {
3265     handler = rh;
3266     sa.sa_flags = VMError::get_resetted_sigflags(sig);
3267   }
3268 
3269   // Print textual representation of sa_flags.
3270   st->print(", sa_flags=");
3271   os::Posix::print_sa_flags(st, sa.sa_flags);
3272 
3273   // Check: is it our handler?
3274   if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3275       handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
3276     // It is our signal handler.
3277     // Check for flags, reset system-used one!
3278     if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
3279       st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
3280                 os::Aix::get_our_sigflags(sig));
3281     }
3282   }
3283   st->cr();
3284 }
3285 
3286 #define DO_SIGNAL_CHECK(sig) \
3287   if (!sigismember(&check_signal_done, sig)) \
3288     os::Aix::check_signal_handler(sig)
3289 
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI; we can add any periodic checks here.
3292 
3293 void os::run_periodic_checks() {
3294 
3295   if (check_signals == false) return;
3296 
  // SEGV and BUS, if overridden, could potentially prevent the
  // generation of hs*.log in the event of a crash; debugging
  // such a case can be very challenging, so we check the
  // following for good measure:
3301   DO_SIGNAL_CHECK(SIGSEGV);
3302   DO_SIGNAL_CHECK(SIGILL);
3303   DO_SIGNAL_CHECK(SIGFPE);
3304   DO_SIGNAL_CHECK(SIGBUS);
3305   DO_SIGNAL_CHECK(SIGPIPE);
3306   DO_SIGNAL_CHECK(SIGXFSZ);
3307   if (UseSIGTRAP) {
3308     DO_SIGNAL_CHECK(SIGTRAP);
3309   }
3310   DO_SIGNAL_CHECK(SIGDANGER);
3311 
3312   // ReduceSignalUsage allows the user to override these handlers
3313   // see comments at the very top and jvm_solaris.h
3314   if (!ReduceSignalUsage) {
3315     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3316     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3317     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3318     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3319   }
3320 
3321   DO_SIGNAL_CHECK(SR_signum);
3322   DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
3323 }
3324 
3325 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3326 
3327 static os_sigaction_t os_sigaction = NULL;
3328 
3329 void os::Aix::check_signal_handler(int sig) {
3330   char buf[O_BUFLEN];
3331   address jvmHandler = NULL;
3332 
3333   struct sigaction act;
3334   if (os_sigaction == NULL) {
3335     // only trust the default sigaction, in case it has been interposed
3336     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3337     if (os_sigaction == NULL) return;
3338   }
3339 
3340   os_sigaction(sig, (struct sigaction*)NULL, &act);
3341 
3342   address thisHandler = (act.sa_flags & SA_SIGINFO)
3343     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3344     : CAST_FROM_FN_PTR(address, act.sa_handler);
3345 
3346   switch(sig) {
3347   case SIGSEGV:
3348   case SIGBUS:
3349   case SIGFPE:
3350   case SIGPIPE:
3351   case SIGILL:
3352   case SIGXFSZ:
3353     // Renamed 'signalHandler' to avoid collision with other shared libs.
3354     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3355     break;
3356 
3357   case SHUTDOWN1_SIGNAL:
3358   case SHUTDOWN2_SIGNAL:
3359   case SHUTDOWN3_SIGNAL:
3360   case BREAK_SIGNAL:
3361     jvmHandler = (address)user_handler();
3362     break;
3363 
3364   case INTERRUPT_SIGNAL:
3365     jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
3366     break;
3367 
3368   default:
3369     if (sig == SR_signum) {
3370       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3371     } else {
3372       return;
3373     }
3374     break;
3375   }
3376 
3377   if (thisHandler != jvmHandler) {
3378     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3379     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3380     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3381     // No need to check this sig any longer
3382     sigaddset(&check_signal_done, sig);
    // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned to SIG_IGN.
3384     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3385       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3386                     exception_name(sig, buf, O_BUFLEN));
3387     }
3388   } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3389     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3390     tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
3391     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
3392     // No need to check this sig any longer
3393     sigaddset(&check_signal_done, sig);
3394   }
3395 
  // Dump all the signal handlers.
3397   if (sigismember(&check_signal_done, sig)) {
3398     print_signal_handlers(tty, buf, O_BUFLEN);
3399   }
3400 }
3401 
3402 extern bool signal_name(int signo, char* buf, size_t len);
3403 
3404 const char* os::exception_name(int exception_code, char* buf, size_t size) {
3405   if (0 < exception_code && exception_code <= SIGRTMAX) {
3406     // signal
3407     if (!signal_name(exception_code, buf, size)) {
3408       jio_snprintf(buf, size, "SIG%d", exception_code);
3409     }
3410     return buf;
3411   } else {
3412     return NULL;
3413   }
3414 }
3415 
3416 // To install functions for atexit system call
3417 extern "C" {
3418   static void perfMemory_exit_helper() {
3419     perfMemory_exit();
3420   }
3421 }
3422 
// This is called _before_ most of the global arguments have been parsed.
3424 void os::init(void) {
  // This is basic; we want to know if that ever changes.
  // (The shared memory boundary is supposed to be 256M aligned.)
3427   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3428 
3429   // First off, we need to know whether we run on AIX or PASE, and
3430   // the OS level we run on.
3431   os::Aix::initialize_os_info();
3432 
3433   // Scan environment (SPEC1170 behaviour, etc).
3434   os::Aix::scan_environment();
3435 
3436   // Check which pages are supported by AIX.
3437   query_multipage_support();
3438 
3439   // Act like we only have one page size by eliminating corner cases which
3440   // we did not support very well anyway.
3441   // We have two input conditions:
3442   // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3443   //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3444   //    setting.
3445   //    Data segment page size is important for us because it defines the thread stack page
3446   //    size, which is needed for guard page handling, stack banging etc.
3447   // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3448   //    and should be allocated with 64k pages.
3449   //
3450   // So, we do the following:
  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
  // 4K           no                                  4K                              old systems (AIX 5.2, AS/400 V5R4) or new systems with AME activated
  // 4K           yes                                 64K (treat 4K stacks as 64K)    different loader than java and standard settings
  // 64K          no                                  --- AIX 5.2 ? ---
  // 64K          yes                                 64K                             new systems and standard java loader (we set datapsize=64K when linking)
3456 
  // We explicitly leave no option to change the page size, because only upgrading
  // would work, not downgrading (if the stack page size is 64K you cannot pretend it's 4K).
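
  // For illustration: the data segment page size of a launcher that was not
  // linked with datapsize=64K could be forced from the environment before
  // launch, e.g. (hypothetical launcher name):
  //
  //   LDR_CNTRL=DATAPSIZE=64K ./my_launcher ...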
3459 
3460   if (g_multipage_support.datapsize == SIZE_4K) {
3461     // datapsize = 4K. Data segment, thread stacks are 4K paged.
3462     if (g_multipage_support.can_use_64K_pages) {
3463       // .. but we are able to use 64K pages dynamically.
3464       // This would be typical for java launchers which are not linked
      // with datapsize=64K (i.e. any launcher but our own).
3466       //
3467       // In this case it would be smart to allocate the java heap with 64K
3468       // to get the performance benefit, and to fake 64k pages for the
3469       // data segment (when dealing with thread stacks).
3470       //
3471       // However, leave a possibility to downgrade to 4K, using
3472       // -XX:-Use64KPages.
3473       if (Use64KPages) {
3474         trcVerbose("64K page mode (faked for data segment)");
3475         Aix::_page_size = SIZE_64K;
3476       } else {
3477         trcVerbose("4K page mode (Use64KPages=off)");
3478         Aix::_page_size = SIZE_4K;
3479       }
3480     } else {
3481       // .. and not able to allocate 64k pages dynamically. Here, just
3482       // fall back to 4K paged mode and use mmap for everything.
3483       trcVerbose("4K page mode");
3484       Aix::_page_size = SIZE_4K;
3485       FLAG_SET_ERGO(bool, Use64KPages, false);
3486     }
3487   } else {
3488     // datapsize = 64k. Data segment, thread stacks are 64k paged.
3489     //   This normally means that we can allocate 64k pages dynamically.
3490     //   (There is one special case where this may be false: EXTSHM=on.
3491     //    but we decided to not support that mode).
3492     assert0(g_multipage_support.can_use_64K_pages);
3493     Aix::_page_size = SIZE_64K;
3494     trcVerbose("64K page mode");
3495     FLAG_SET_ERGO(bool, Use64KPages, true);
3496   }
3497 
  // Hard-wire the stack page size to the base page size; if that works, we will
  // remove the separate stack page size altogether.
3500   Aix::_stack_page_size = Aix::_page_size;
3501 
3502   // For now UseLargePages is just ignored.
3503   FLAG_SET_ERGO(bool, UseLargePages, false);
3504   _page_sizes[0] = 0;
3505   _large_page_size = -1;
3506 
3507   // debug trace
3508   trcVerbose("os::vm_page_size %s\n", describe_pagesize(os::vm_page_size()));
3509 
3510   // Next, we need to initialize libo4 and libperfstat libraries.
3511   if (os::Aix::on_pase()) {
3512     os::Aix::initialize_libo4();
3513   } else {
3514     os::Aix::initialize_libperfstat();
3515   }
3516 
3517   // Reset the perfstat information provided by ODM.
3518   if (os::Aix::on_aix()) {
3519     libperfstat::perfstat_reset();
3520   }
3521 
  // Now initialize basic system properties. Note that for some of the values we
3523   // need libperfstat etc.
3524   os::Aix::initialize_system_info();
3525 
3526   _initial_pid = getpid();
3527 
3528   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3529 
3530   init_random(1234567);
3531 
3532   ThreadCritical::initialize();
3533 
3534   // Main_thread points to the aboriginal thread.
3535   Aix::_main_thread = pthread_self();
3536 
3537   initial_time_count = os::elapsed_counter();
3538 
  // If the page size of the VM is greater than the default page size, determine
  // the appropriate number of initial guard pages. The user can change this with
  // the command line arguments, if needed.
3542   if (vm_page_size() > (int)Aix::vm_default_page_size()) {
3543     StackYellowPages = 1;
3544     StackRedPages = 1;
3545     StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
3546   }
3547 }
3548 
3549 // This is called _after_ the global arguments have been parsed.
3550 jint os::init_2(void) {
3551 
3552   trcVerbose("processor count: %d", os::_processor_count);
3553   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3554 
3555   // Initially build up the loaded dll map.
3556   LoadedLibraries::reload();
3557 
3558   const int page_size = Aix::page_size();
3559   const int map_size = page_size;
3560 
3561   address map_address = (address) MAP_FAILED;
3562   const int prot  = PROT_READ;
3563   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3564 
3565   // Use optimized addresses for the polling page,
3566   // e.g. map it to a special 32-bit address.
3567   if (OptimizePollingPageLocation) {
3568     // architecture-specific list of address wishes:
3569     address address_wishes[] = {
3570       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3571       // PPC64: all address wishes are non-negative 32 bit values where
3572       // the lower 16 bits are all zero. we can load these addresses
3573       // with a single ppc_lis instruction.
3574       (address) 0x30000000, (address) 0x31000000,
3575       (address) 0x32000000, (address) 0x33000000,
3576       (address) 0x40000000, (address) 0x41000000,
3577       (address) 0x42000000, (address) 0x43000000,
3578       (address) 0x50000000, (address) 0x51000000,
3579       (address) 0x52000000, (address) 0x53000000,
3580       (address) 0x60000000, (address) 0x61000000,
3581       (address) 0x62000000, (address) 0x63000000
3582     };
3583     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3584 
3585     // iterate over the list of address wishes:
3586     for (int i=0; i<address_wishes_length; i++) {
3587       // Try to map with current address wish.
3588       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3589       // fail if the address is already mapped.
3590       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3591                                      map_size, prot,
3592                                      flags | MAP_FIXED,
3593                                      -1, 0);
3594       if (Verbose) {
3595         fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3596                 address_wishes[i], map_address + (ssize_t)page_size);
3597       }
3598 
3599       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3600         // Map succeeded and map_address is at wished address, exit loop.
3601         break;
3602       }
3603 
3604       if (map_address != (address) MAP_FAILED) {
3605         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3606         ::munmap(map_address, map_size);
3607         map_address = (address) MAP_FAILED;
3608       }
3609       // Map failed, continue loop.
3610     }
3611   } // end OptimizePollingPageLocation
3612 
3613   if (map_address == (address) MAP_FAILED) {
3614     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3615   }
3616   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3617   os::set_polling_page(map_address);
3618 
3619   if (!UseMembar) {
3620     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3621     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3622     os::set_memory_serialize_page(mem_serialize_page);
3623 
3624 #ifndef PRODUCT
3625     if (Verbose && PrintMiscellaneous) {
3626       tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3627     }
3628 #endif
3629   }
3630 
3631   // initialize suspend/resume support - must do this before signal_sets_init()
3632   if (SR_initialize() != 0) {
3633     perror("SR_initialize failed");
3634     return JNI_ERR;
3635   }
3636 
3637   Aix::signal_sets_init();
3638   Aix::install_signal_handlers();
3639 
3640   // Check minimum allowable stack size for thread creation and to initialize
3641   // the java system classes, including StackOverflowError - depends on page
3642   // size. Add a page for compiler2 recursion in main thread.
3643   // Add in 2*BytesPerWord times page size to account for VM stack during
3644   // class initialization depending on 32 or 64 bit VM.
3645   os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3646             (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
3647                      (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
3648 
3649   os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
3650 
3651   size_t threadStackSizeInBytes = ThreadStackSize * K;
3652   if (threadStackSizeInBytes != 0 &&
3653       threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3654     tty->print_cr("\nThe stack size specified is too small, "
3655                   "Specify at least %dk",
3656                   os::Aix::min_stack_allowed / K);
3657     return JNI_ERR;
3658   }
3659 
3660   // Make the stack size a multiple of the page size so that
3661   // the yellow/red zones can be guarded.
3662   // Note that this can be 0, if no default stacksize was set.
3663   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3664 
3665   Aix::libpthread_init();
3666 
3667   if (MaxFDLimit) {
3668     // Set the number of file descriptors to max. print out error
3669     // if getrlimit/setrlimit fails but continue regardless.
3670     struct rlimit nbr_files;
3671     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3672     if (status != 0) {
3673       if (PrintMiscellaneous && (Verbose || WizardMode))
3674         perror("os::init_2 getrlimit failed");
3675     } else {
3676       nbr_files.rlim_cur = nbr_files.rlim_max;
3677       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3678       if (status != 0) {
3679         if (PrintMiscellaneous && (Verbose || WizardMode))
3680           perror("os::init_2 setrlimit failed");
3681       }
3682     }
3683   }
3684 
3685   if (PerfAllowAtExitRegistration) {
3686     // Only register atexit functions if PerfAllowAtExitRegistration is set.
3687     // Atexit functions can be delayed until process exit time, which
3688     // can be problematic for embedded VM situations. Embedded VMs should
3689     // call DestroyJavaVM() to assure that VM resources are released.
3690 
3691     // Note: perfMemory_exit_helper atexit function may be removed in
3692     // the future if the appropriate cleanup code can be added to the
3693     // VM_Exit VMOperation's doit method.
3694     if (atexit(perfMemory_exit_helper) != 0) {
3695       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3696     }
3697   }
3698 
3699   return JNI_OK;
3700 }
3701 
3702 // Mark the polling page as unreadable
3703 void os::make_polling_page_unreadable(void) {
3704   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3705     fatal("Could not disable polling page");
3706   }
3707 };
3708 
3709 // Mark the polling page as readable
3710 void os::make_polling_page_readable(void) {
3711   // Changed according to os_linux.cpp.
3712   if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3713     fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
3714   }
3715 };
3716 
3717 int os::active_processor_count() {
3718   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3719   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3720   return online_cpus;
3721 }
3722 
3723 void os::set_native_thread_name(const char *name) {
3724   // Not yet implemented.
3725   return;
3726 }
3727 
3728 bool os::distribute_processes(uint length, uint* distribution) {
3729   // Not yet implemented.
3730   return false;
3731 }
3732 
3733 bool os::bind_to_processor(uint processor_id) {
3734   // Not yet implemented.
3735   return false;
3736 }
3737 
3738 void os::SuspendedThreadTask::internal_do_task() {
3739   if (do_suspend(_thread->osthread())) {
3740     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3741     do_task(context);
3742     do_resume(_thread->osthread());
3743   }
3744 }
3745 
3746 class PcFetcher : public os::SuspendedThreadTask {
3747 public:
3748   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3749   ExtendedPC result();
3750 protected:
3751   void do_task(const os::SuspendedThreadTaskContext& context);
3752 private:
3753   ExtendedPC _epc;
3754 };
3755 
3756 ExtendedPC PcFetcher::result() {
3757   guarantee(is_done(), "task is not done yet.");
3758   return _epc;
3759 }
3760 
3761 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3762   Thread* thread = context.thread();
3763   OSThread* osthread = thread->osthread();
3764   if (osthread->ucontext() != NULL) {
3765     _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
3766   } else {
3767     // NULL context is unexpected, double-check this is the VMThread.
3768     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3769   }
3770 }
3771 
3772 // Suspends the target using the signal mechanism and then grabs the PC before
3773 // resuming the target. Used by the flat-profiler only
3774 ExtendedPC os::get_thread_pc(Thread* thread) {
3775   // Make sure that it is called by the watcher for the VMThread.
3776   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3777   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3778 
3779   PcFetcher fetcher(thread);
3780   fetcher.run();
3781   return fetcher.result();
3782 }
3783 
3784 ////////////////////////////////////////////////////////////////////////////////
3785 // debug support
3786 
3787 static address same_page(address x, address y) {
3788   intptr_t page_bits = -os::vm_page_size();
3789   if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
3790     return x;
3791   else if (x > y)
3792     return (address)(intptr_t(y) | ~page_bits) + 1;
3793   else
3794     return (address)(intptr_t(y) & page_bits);
3795 }
3796 
3797 bool os::find(address addr, outputStream* st) {
3798 
3799   st->print(PTR_FORMAT ": ", addr);
3800 
3801   const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
3802   if (lib) {
3803     lib->print(st);
3804     return true;
3805   } else {
3806     lib = LoadedLibraries::find_for_data_address(addr);
3807     if (lib) {
3808       lib->print(st);
3809       return true;
3810     } else {
3811       st->print_cr("(outside any module)");
3812     }
3813   }
3814 
3815   return false;
3816 }
3817 
3818 ////////////////////////////////////////////////////////////////////////////////
3819 // misc
3820 
3821 // This does not do anything on Aix. This is basically a hook for being
3822 // able to use structured exception handling (thread-local exception filters)
3823 // on, e.g., Win32.
3824 void
3825 os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
3826                          JavaCallArguments* args, Thread* thread) {
3827   f(value, method, args, thread);
3828 }
3829 
3830 void os::print_statistics() {
3831 }
3832 
3833 int os::message_box(const char* title, const char* message) {
3834   int i;
3835   fdStream err(defaultStream::error_fd());
3836   for (i = 0; i < 78; i++) err.print_raw("=");
3837   err.cr();
3838   err.print_raw_cr(title);
3839   for (i = 0; i < 78; i++) err.print_raw("-");
3840   err.cr();
3841   err.print_raw_cr(message);
3842   for (i = 0; i < 78; i++) err.print_raw("=");
3843   err.cr();
3844 
3845   char buf[16];
3846   // Prevent process from exiting upon "read error" without consuming all CPU
3847   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3848 
3849   return buf[0] == 'y' || buf[0] == 'Y';
3850 }
3851 
3852 int os::stat(const char *path, struct stat *sbuf) {
3853   char pathbuf[MAX_PATH];
3854   if (strlen(path) > MAX_PATH - 1) {
3855     errno = ENAMETOOLONG;
3856     return -1;
3857   }
3858   os::native_path(strcpy(pathbuf, path));
3859   return ::stat(pathbuf, sbuf);
3860 }
3861 
3862 bool os::check_heap(bool force) {
3863   return true;
3864 }
3865 
3866 // Is a (classpath) directory empty?
3867 bool os::dir_is_empty(const char* path) {
3868   DIR *dir = NULL;
3869   struct dirent *ptr;
3870 
3871   dir = opendir(path);
3872   if (dir == NULL) return true;
3873 
3874   /* Scan the directory */
3875   bool result = true;
3876   char buf[sizeof(struct dirent) + MAX_PATH];
3877   while (result && (ptr = ::readdir(dir)) != NULL) {
3878     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3879       result = false;
3880     }
3881   }
3882   closedir(dir);
3883   return result;
3884 }
3885 
3886 // This code originates from JDK's sysOpen and open64_w
3887 // from src/solaris/hpi/src/system_md.c
3888 
3889 int os::open(const char *path, int oflag, int mode) {
3890 
3891   if (strlen(path) > MAX_PATH - 1) {
3892     errno = ENAMETOOLONG;
3893     return -1;
3894   }
3895   int fd;
3896 
3897   fd = ::open64(path, oflag, mode);
3898   if (fd == -1) return -1;
3899 
3900   // If the open succeeded, the file might still be a directory.
3901   {
3902     struct stat64 buf64;
3903     int ret = ::fstat64(fd, &buf64);
3904     int st_mode = buf64.st_mode;
3905 
3906     if (ret != -1) {
3907       if ((st_mode & S_IFMT) == S_IFDIR) {
3908         errno = EISDIR;
3909         ::close(fd);
3910         return -1;
3911       }
3912     } else {
3913       ::close(fd);
3914       return -1;
3915     }
3916   }
3917 
3918   // All file descriptors that are opened in the JVM and not
3919   // specifically destined for a subprocess should have the
3920   // close-on-exec flag set. If we don't set it, then careless 3rd
3921   // party native code might fork and exec without closing all
3922   // appropriate file descriptors (e.g. as we do in closeDescriptors in
3923   // UNIXProcess.c), and this in turn might:
3924   //
3925   // - cause end-of-file to fail to be detected on some file
3926   //   descriptors, resulting in mysterious hangs, or
3927   //
3928   // - might cause an fopen in the subprocess to fail on a system
3929   //   suffering from bug 1085341.
3930   //
3931   // (Yes, the default setting of the close-on-exec flag is a Unix
3932   // design flaw.)
3933   //
3934   // See:
3935   // 1085341: 32-bit stdio routines should support file descriptors >255
3936   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
3937   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
3938 #ifdef FD_CLOEXEC
3939   {
3940     int flags = ::fcntl(fd, F_GETFD);
3941     if (flags != -1)
3942       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
3943   }
3944 #endif
3945 
3946   return fd;
3947 }
3948 
3949 // create binary file, rewriting existing file if required
3950 int os::create_binary_file(const char* path, bool rewrite_existing) {
3951   int oflags = O_WRONLY | O_CREAT;
3952   if (!rewrite_existing) {
3953     oflags |= O_EXCL;
3954   }
3955   return ::open64(path, oflags, S_IREAD | S_IWRITE);
3956 }
3957 
3958 // return current position of file pointer
3959 jlong os::current_file_offset(int fd) {
3960   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3961 }
3962 
3963 // move file pointer to the specified offset
3964 jlong os::seek_to_file_offset(int fd, jlong offset) {
3965   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3966 }
3967 
3968 // This code originates from JDK's sysAvailable
3969 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3970 
3971 int os::available(int fd, jlong *bytes) {
3972   jlong cur, end;
3973   int mode;
3974   struct stat64 buf64;
3975 
3976   if (::fstat64(fd, &buf64) >= 0) {
3977     mode = buf64.st_mode;
3978     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
3979       // XXX: is the following call interruptible? If so, this might
3980       // need to go through the INTERRUPT_IO() wrapper as for other
3981       // blocking, interruptible calls in this file.
3982       int n;
3983       if (::ioctl(fd, FIONREAD, &n) >= 0) {
3984         *bytes = n;
3985         return 1;
3986       }
3987     }
3988   }
3989   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
3990     return 0;
3991   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
3992     return 0;
3993   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
3994     return 0;
3995   }
3996   *bytes = end - cur;
3997   return 1;
3998 }
3999 
4000 // Map a block of memory.
4001 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4002                         char *addr, size_t bytes, bool read_only,
4003                         bool allow_exec) {
4004   int prot;
4005   int flags = MAP_PRIVATE;
4006 
4007   if (read_only) {
4008     prot = PROT_READ;
4009     flags = MAP_SHARED;
4010   } else {
4011     prot = PROT_READ | PROT_WRITE;
4012     flags = MAP_PRIVATE;
4013   }
4014 
4015   if (allow_exec) {
4016     prot |= PROT_EXEC;
4017   }
4018 
4019   if (addr != NULL) {
4020     flags |= MAP_FIXED;
4021   }
4022 
4023   // Allow anonymous mappings if 'fd' is -1.
4024   if (fd == -1) {
4025     flags |= MAP_ANONYMOUS;
4026   }
4027 
4028   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
4029                                      fd, file_offset);
4030   if (mapped_address == MAP_FAILED) {
4031     return NULL;
4032   }
4033   return mapped_address;
4034 }
4035 
4036 // Remap a block of memory.
4037 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4038                           char *addr, size_t bytes, bool read_only,
4039                           bool allow_exec) {
4040   // same as map_memory() on this OS
4041   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4042                         allow_exec);
4043 }
4044 
4045 // Unmap a block of memory.
4046 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4047   return munmap(addr, bytes) == 0;
4048 }
4049 
4050 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4051 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4052 // of a thread.
4053 //
// current_thread_cpu_time() and thread_cpu_time(Thread*) return
// the fast estimate available on the platform.
4056 
4057 jlong os::current_thread_cpu_time() {
4058   // return user + sys since the cost is the same
4059   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
4060   assert(n >= 0, "negative CPU time");
4061   return n;
4062 }
4063 
4064 jlong os::thread_cpu_time(Thread* thread) {
4065   // consistent with what current_thread_cpu_time() returns
4066   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4067   assert(n >= 0, "negative CPU time");
4068   return n;
4069 }
4070 
4071 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4072   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4073   assert(n >= 0, "negative CPU time");
4074   return n;
4075 }
4076 
4077 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4078   bool error = false;
4079 
4080   jlong sys_time = 0;
4081   jlong user_time = 0;
4082 
4083   // Reimplemented using getthrds64().
4084   //
4085   // Works like this:
4086   // For the thread in question, get the kernel thread id. Then get the
4087   // kernel thread statistics using that id.
4088   //
  // This of course only works when no pthread scheduling is used,
  // i.e. there is a 1:1 relationship to kernel threads.
  // On AIX, see the AIXTHREAD_SCOPE variable.
4092 
4093   pthread_t pthtid = thread->osthread()->pthread_id();
4094 
4095   // retrieve kernel thread id for the pthread:
4096   tid64_t tid = 0;
4097   struct __pthrdsinfo pinfo;
4098   // I just love those otherworldly IBM APIs which force me to hand down
  // dummy buffers for stuff I don't care for...
4100   char dummy[1];
4101   int dummy_size = sizeof(dummy);
4102   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4103                           dummy, &dummy_size) == 0) {
4104     tid = pinfo.__pi_tid;
4105   } else {
4106     tty->print_cr("pthread_getthrds_np failed.");
4107     error = true;
4108   }
4109 
4110   // retrieve kernel timing info for that kernel thread
4111   if (!error) {
4112     struct thrdentry64 thrdentry;
4113     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
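      // getthrds64 reports rusage times as seconds plus microseconds;
      // convert both components to nanoseconds.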
4114       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4115       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4116     } else {
4117       tty->print_cr("pthread_getthrds_np failed.");
4118       error = true;
4119     }
4120   }
4121 
4122   if (p_sys_time) {
4123     *p_sys_time = sys_time;
4124   }
4125 
4126   if (p_user_time) {
4127     *p_user_time = user_time;
4128   }
4129 
4130   if (error) {
4131     return false;
4132   }
4133 
4134   return true;
4135 }
4136 
4137 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4138   jlong sys_time;
4139   jlong user_time;
4140 
4141   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4142     return -1;
4143   }
4144 
4145   return user_sys_cpu_time ? sys_time + user_time : user_time;
4146 }
4147 
4148 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4149   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4150   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4151   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4152   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4153 }
4154 
4155 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4156   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4157   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4158   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4159   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4160 }
4161 
4162 bool os::is_thread_cpu_time_supported() {
4163   return true;
4164 }
4165 
4166 // System loadavg support. Returns -1 if load average cannot be obtained.
4167 // For now just return the system wide load average (no processor sets).
4168 int os::loadavg(double values[], int nelem) {
4169 
4170   // Implemented using libperfstat on AIX.
4171 
4172   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4173   guarantee(values, "argument error");
4174 
4175   if (os::Aix::on_pase()) {
4176     Unimplemented();
4177     return -1;
4178   } else {
4179     // AIX: use libperfstat
4180     //
4181     // See also:
4182     // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4183     // /usr/include/libperfstat.h:
4184 
    // Use the AIX-version-independent get_cpuinfo.
4186     os::Aix::cpuinfo_t ci;
4187     if (os::Aix::get_cpuinfo(&ci)) {
4188       for (int i = 0; i < nelem; i++) {
4189         values[i] = ci.loadavg[i];
4190       }
4191     } else {
4192       return -1;
4193     }
4194     return nelem;
4195   }
4196 }
4197 
4198 void os::pause() {
4199   char filename[MAX_PATH];
4200   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4201     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4202   } else {
4203     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4204   }
4205 
4206   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4207   if (fd != -1) {
4208     struct stat buf;
4209     ::close(fd);
4210     while (::stat(filename, &buf) == 0) {
4211       (void)::poll(NULL, 0, 100);
4212     }
4213   } else {
4214     jio_fprintf(stderr,
4215       "Could not open pause file '%s', continuing immediately.\n", filename);
4216   }
4217 }
4218 
4219 bool os::Aix::is_primordial_thread() {
4220   if (pthread_self() == (pthread_t)1) {
4221     return true;
4222   } else {
4223     return false;
4224   }
4225 }
4226 
// OS recognition (PASE/AIX, OS level): call this before calling any of
// Aix::on_pase(), Aix::os_version().
4229 void os::Aix::initialize_os_info() {
4230 
4231   assert(_on_pase == -1 && _os_version == -1, "already called.");
4232 
4233   struct utsname uts;
4234   memset(&uts, 0, sizeof(uts));
4235   strcpy(uts.sysname, "?");
4236   if (::uname(&uts) == -1) {
4237     trc("uname failed (%d)", errno);
4238     guarantee(0, "Could not determine whether we run on AIX or PASE");
4239   } else {
4240     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4241                "node \"%s\" machine \"%s\"\n",
4242                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4243     const int major = atoi(uts.version);
4244     assert(major > 0, "invalid OS version");
4245     const int minor = atoi(uts.release);
4246     assert(minor > 0, "invalid OS release");
4247     _os_version = (major << 8) | minor;
4248     if (strcmp(uts.sysname, "OS400") == 0) {
4249       Unimplemented();
4250     } else if (strcmp(uts.sysname, "AIX") == 0) {
4251       // We run on AIX. We do not support versions older than AIX 5.3.
4252       _on_pase = 0;
4253       if (_os_version < 0x0503) {
4254         trc("AIX release older than AIX 5.3 not supported.");
4255         assert(false, "AIX release too old.");
4256       } else {
4257         trcVerbose("We run on AIX %d.%d\n", major, minor);
4258       }
4259     } else {
4260       assert(false, "unknown OS");
4261     }
4262   }
4263 
4264   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4265 } // end: os::Aix::initialize_os_info()
4266 
// Scan environment for important settings which might affect the VM.
// Trace out settings. Warn about invalid settings and/or correct them.
//
// Must run after os::Aix::initialize_os_info().
4271 void os::Aix::scan_environment() {
4272 
4273   char* p;
4274   int rc;
4275 
  // Warn explicitly if EXTSHM=ON is used. That switch changes how
  // System V shared memory behaves. One effect is that the page size of
  // shared memory cannot be changed dynamically, effectively preventing
  // large pages from working.
4280   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4281   // recommendation is (in OSS notes) to switch it off.
4282   p = ::getenv("EXTSHM");
4283   if (Verbose) {
4284     fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4285   }
4286   if (p && strcasecmp(p, "ON") == 0) {
4287     fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4288     _extshm = 1;
4289   } else {
4290     _extshm = 0;
4291   }
4292 
4293   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4294   // Not tested, not supported.
4295   //
4296   // Note that it might be worth the trouble to test and to require it, if only to
4297   // get useful return codes for mprotect.
4298   //
4299   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4300   // exec() ? before loading the libjvm ? ....)
4301   p = ::getenv("XPG_SUS_ENV");
4302   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4303   if (p && strcmp(p, "ON") == 0) {
4304     _xpg_sus_mode = 1;
4305     trc("Unsupported setting: XPG_SUS_ENV=ON");
4306     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4307     // clobber address ranges. If we ever want to support that, we have to do some
4308     // testing first.
4309     guarantee(false, "XPG_SUS_ENV=ON not supported");
4310   } else {
4311     _xpg_sus_mode = 0;
4312   }
4313 
4314   // Switch off AIX internal (pthread) guard pages. This has
4315   // immediate effect for any pthread_create calls which follow.
4316   p = ::getenv("AIXTHREAD_GUARDPAGES");
4317   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4318   rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4319   guarantee(rc == 0, "");
4320 
4321 } // end: os::Aix::scan_environment()
4322 
4323 // PASE: initialize the libo4 library (AS400 PASE porting library).
4324 void os::Aix::initialize_libo4() {
4325   Unimplemented();
4326 }
4327 
// AIX: initialize the libperfstat library (we load this dynamically
// because it is only available on AIX).
4330 void os::Aix::initialize_libperfstat() {
4331 
4332   assert(os::Aix::on_aix(), "AIX only");
4333 
4334   if (!libperfstat::init()) {
4335     trc("libperfstat initialization failed.");
4336     assert(false, "libperfstat initialization failed");
4337   } else {
4338     if (Verbose) {
4339       fprintf(stderr, "libperfstat initialized.\n");
4340     }
4341   }
4342 } // end: os::Aix::initialize_libperfstat
4343 
4344 /////////////////////////////////////////////////////////////////////////////
4345 // thread stack
4346 
// Function to query the current stack dimensions (base and size) using pthread_getthrds_np.
4348 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4349   // This only works when invoked on a pthread. As we agreed not to use
4350   // primordial threads anyway, I assert here.
4351   guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4352 
4353   // Information about this api can be found (a) in the pthread.h header and
4354   // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4355   //
4356   // The use of this API to find out the current stack is kind of undefined.
4357   // But after a lot of tries and asking IBM about it, I concluded that it is safe
4358   // enough for cases where I let the pthread library create its stacks. For cases
  // where I create my own stack and pass it to pthread_create, it seems not to
4360   // work (the returned stack size in that case is 0).
4361 
4362   pthread_t tid = pthread_self();
4363   struct __pthrdsinfo pinfo;
4364   char dummy[1]; // We only need this to satisfy the api and to not get E.
4365   int dummy_size = sizeof(dummy);
4366 
4367   memset(&pinfo, 0, sizeof(pinfo));
4368 
4369   const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4370                                      sizeof(pinfo), dummy, &dummy_size);
4371 
4372   if (rc != 0) {
4373     assert0(false);
4374     trcVerbose("pthread_getthrds_np failed (%d)", rc);
4375     return false;
4376   }
4377   guarantee0(pinfo.__pi_stackend);
4378 
4379   // The following can happen when invoking pthread_getthrds_np on a pthread running
4380   // on a user provided stack (when handing down a stack to pthread create, see
4381   // pthread_attr_setstackaddr).
4382   // Not sure what to do here - I feel inclined to forbid this use case completely.
4383   guarantee0(pinfo.__pi_stacksize);
4384 
4385   // Note: the pthread stack on AIX seems to look like this:
4386   //
4387   // ---------------------   real base ? at page border ?
4388   //
4389   //     pthread internal data, like ~2K, see also
4390   //     http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/thread_supp_tun_params.htm
4391   //
4392   // ---------------------   __pi_stackend - not page aligned, (xxxxF890)
4393   //
4394   //     stack
4395   //      ....
4396   //
4397   //     stack
4398   //
4399   // ---------------------   __pi_stackend  - __pi_stacksize
4400   //
4401   //     padding due to AIX guard pages (?) see AIXTHREAD_GUARDPAGES
4402   // ---------------------   __pi_stackaddr  (page aligned if AIXTHREAD_GUARDPAGES > 0)
4403   //
4404   //   AIX guard pages (?)
4405   //
4406 
4407   // So, the safe thing to do is to use the area from __pi_stackend to __pi_stackaddr;
4408   // __pi_stackend however is almost never page aligned.
4409   //
4410 
4411   if (p_stack_base) {
4412     (*p_stack_base) = (address) (pinfo.__pi_stackend);
4413   }
4414 
4415   if (p_stack_size) {
4416     (*p_stack_size) = pinfo.__pi_stackend - pinfo.__pi_stackaddr;
4417   }
4418 
4419   return true;
4420 }
4421 
4422 // Get the current stack base from the OS (actually, the pthread library).
4423 address os::current_stack_base() {
4424   address p;
4425   query_stack_dimensions(&p, 0);
4426   return p;
4427 }
4428 
4429 // Get the current stack size from the OS (actually, the pthread library).
4430 size_t os::current_stack_size() {
4431   size_t s;
4432   query_stack_dimensions(0, &s);
4433   return s;
4434 }
4435 
4436 // Refer to the comments in os_solaris.cpp park-unpark.
4437 //
4438 // Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4439 // hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4440 // For specifics regarding the bug see GLIBC BUGID 261237 :
4441 //    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4442 // Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4443 // will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4444 // is used. (The simple C test-case provided in the GLIBC bug report manifests the
// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4446 // and monitorenter when we're using 1-0 locking. All those operations may result in
4447 // calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4448 // of libpthread avoids the problem, but isn't practical.
4449 //
4450 // Possible remedies:
4451 //
4452 // 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
4453 //      This is palliative and probabilistic, however. If the thread is preempted
4454 //      between the call to compute_abstime() and pthread_cond_timedwait(), more
4455 //      than the minimum period may have passed, and the abstime may be stale (in the
//      past) resulting in a hang. Using this technique reduces the odds of a hang
4457 //      but the JVM is still vulnerable, particularly on heavily loaded systems.
4458 //
4459 // 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4460 //      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
4461 //      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4462 //      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
4463 //      thread.
4464 //
4465 // 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
4466 //      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
4467 //      a timeout request to the chron thread and then blocking via pthread_cond_wait().
4468 //      This also works well. In fact it avoids kernel-level scalability impediments
4469 //      on certain platforms that don't handle lots of active pthread_cond_timedwait()
4470 //      timers in a graceful fashion.
4471 //
4472 // 4.   When the abstime value is in the past it appears that control returns
4473 //      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4474 //      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
4475 //      can avoid the problem by reinitializing the condvar -- by cond_destroy()
4476 //      followed by cond_init() -- after all calls to pthread_cond_timedwait().
4477 //      It may be possible to avoid reinitialization by checking the return
4478 //      value from pthread_cond_timedwait(). In addition to reinitializing the
4479 //      condvar we must establish the invariant that cond_signal() is only called
4480 //      within critical sections protected by the adjunct mutex. This prevents
4481 //      cond_signal() from "seeing" a condvar that's in the midst of being
4482 //      reinitialized or that is corrupt. Sadly, this invariant obviates the
4483 //      desirable signal-after-unlock optimization that avoids futile context switching.
4484 //
//      I'm also concerned that some versions of NPTL might allocate an auxiliary
//      structure when a condvar is used or initialized. cond_destroy() would
//      release that helper structure. Our reinitialize-after-timedwait fix
//      would then put excessive stress on malloc/free and on the locks
//      protecting the C-heap.
4489 //
// We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
// It may be possible to refine (4) by checking the kernel and NPTL versions
// and only enabling the work-around for vulnerable environments.
// (A sketch of the reinitialization idiom follows below.)
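// For illustration only -- a minimal sketch of remedy (4), assuming the
// caller already holds the adjunct mutex. The function name is
// hypothetical and not part of this port; the real work-around lives in
// Parker::park() below, guarded by the WorkAroundNPTLTimedWaitHang flag.
#if 0
static int timedwait_with_reinit_sketch(pthread_cond_t* cv,
                                        pthread_mutex_t* mtx,
                                        const struct timespec* abst) {
  int status = pthread_cond_timedwait(cv, mtx, abst);
  if (status != 0) {
    // The condvar may be corrupt if abst was already in the past.
    // Reinitialize it while the mutex is still held, so that no
    // concurrent cond_signal() can observe a half-reinitialized condvar
    // (this is the invariant described in (4) above).
    pthread_cond_destroy(cv);
    pthread_cond_init(cv, NULL);
  }
  return status;
}
#endif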
4493 
4494 // utility to compute the abstime argument to timedwait:
4495 // millis is the relative timeout time
4496 // abstime will be the absolute timeout time
4497 // TODO: replace compute_abstime() with unpackTime()
4498 
4499 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4500   if (millis < 0) millis = 0;
4501   struct timeval now;
4502   int status = gettimeofday(&now, NULL);
4503   assert(status == 0, "gettimeofday");
4504   jlong seconds = millis / 1000;
4505   millis %= 1000;
4506   if (seconds > 50000000) { // see man cond_timedwait(3T)
4507     seconds = 50000000;
4508   }
4509   abstime->tv_sec = now.tv_sec  + seconds;
4510   long       usec = now.tv_usec + millis * 1000;
4511   if (usec >= 1000000) {
4512     abstime->tv_sec += 1;
4513     usec -= 1000000;
4514   }
4515   abstime->tv_nsec = usec * 1000;
4516   return abstime;
4517 }
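// For illustration only -- how compute_abstime() is typically used. The
// function name below is hypothetical; os::PlatformEvent::park(jlong)
// is the real call site.
#if 0
static void compute_abstime_usage_sketch(pthread_cond_t* cv,
                                         pthread_mutex_t* mtx) {
  struct timespec abst;
  compute_abstime(&abst, 500);   // absolute deadline = now + 500 millis
  // Caller must hold *mtx. A zero status means the condvar was signaled;
  // ETIMEDOUT means the 500 millis elapsed first.
  int status = pthread_cond_timedwait(cv, mtx, &abst);
  assert(status == 0 || status == ETIMEDOUT, "unexpected status");
}
#endif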
4518 
4519 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4520 // Conceptually TryPark() should be equivalent to park(0).
4521 
4522 int os::PlatformEvent::TryPark() {
4523   for (;;) {
4524     const int v = _Event;
4525     guarantee ((v == 0) || (v == 1), "invariant");
4526     if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4527   }
4528 }
4529 
4530 void os::PlatformEvent::park() {       // AKA "down()"
4531   // Invariant: Only the thread associated with the Event/PlatformEvent
4532   // may call park().
4533   // TODO: assert that _Assoc != NULL or _Assoc == Self
4534   int v;
4535   for (;;) {
4536     v = _Event;
4537     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4538   }
4539   guarantee (v >= 0, "invariant");
4540   if (v == 0) {
4541     // Do this the hard way by blocking ...
4542     int status = pthread_mutex_lock(_mutex);
4543     assert_status(status == 0, status, "mutex_lock");
4544     guarantee (_nParked == 0, "invariant");
4545     ++ _nParked;
4546     while (_Event < 0) {
4547       status = pthread_cond_wait(_cond, _mutex);
      assert_status(status == 0, status, "cond_wait");
4549     }
4550     -- _nParked;
4551 
4552     // In theory we could move the ST of 0 into _Event past the unlock(),
4553     // but then we'd need a MEMBAR after the ST.
4554     _Event = 0;
4555     status = pthread_mutex_unlock(_mutex);
4556     assert_status(status == 0, status, "mutex_unlock");
4557   }
4558   guarantee (_Event >= 0, "invariant");
4559 }
4560 
4561 int os::PlatformEvent::park(jlong millis) {
4562   guarantee (_nParked == 0, "invariant");
4563 
4564   int v;
4565   for (;;) {
4566     v = _Event;
4567     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4568   }
4569   guarantee (v >= 0, "invariant");
4570   if (v != 0) return OS_OK;
4571 
4572   // We do this the hard way, by blocking the thread.
4573   // Consider enforcing a minimum timeout value.
4574   struct timespec abst;
4575   compute_abstime(&abst, millis);
4576 
4577   int ret = OS_TIMEOUT;
4578   int status = pthread_mutex_lock(_mutex);
4579   assert_status(status == 0, status, "mutex_lock");
4580   guarantee (_nParked == 0, "invariant");
4581   ++_nParked;
4582 
4583   // Object.wait(timo) will return because of
4584   // (a) notification
4585   // (b) timeout
4586   // (c) thread.interrupt
4587   //
4588   // Thread.interrupt and object.notify{All} both call Event::set.
4589   // That is, we treat thread.interrupt as a special case of notification.
4590   // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4591   // We assume all ETIME returns are valid.
4592   //
4593   // TODO: properly differentiate simultaneous notify+interrupt.
4594   // In that case, we should propagate the notify to another waiter.
4595 
4596   while (_Event < 0) {
4597     status = pthread_cond_timedwait(_cond, _mutex, &abst);
4598     assert_status(status == 0 || status == ETIMEDOUT,
4599                   status, "cond_timedwait");
4600     if (!FilterSpuriousWakeups) break;         // previous semantics
4601     if (status == ETIMEDOUT) break;
4602     // We consume and ignore EINTR and spurious wakeups.
4603   }
4604   --_nParked;
4605   if (_Event >= 0) {
    ret = OS_OK;
4607   }
4608   _Event = 0;
4609   status = pthread_mutex_unlock(_mutex);
4610   assert_status(status == 0, status, "mutex_unlock");
4611   assert (_nParked == 0, "invariant");
4612   return ret;
4613 }
4614 
4615 void os::PlatformEvent::unpark() {
4616   int v, AnyWaiters;
4617   for (;;) {
4618     v = _Event;
4619     if (v > 0) {
4620       // The LD of _Event could have reordered or be satisfied
4621       // by a read-aside from this processor's write buffer.
4622       // To avoid problems execute a barrier and then
4623       // ratify the value.
4624       OrderAccess::fence();
4625       if (_Event == v) return;
4626       continue;
4627     }
4628     if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
4629   }
4630   if (v < 0) {
4631     // Wait for the thread associated with the event to vacate
4632     int status = pthread_mutex_lock(_mutex);
4633     assert_status(status == 0, status, "mutex_lock");
4634     AnyWaiters = _nParked;
4635 
    if (AnyWaiters != 0) {
      // Note: unlike the Linux port we signal *while still holding* the
      // mutex, consistent with the invariant established above that
      // cond_signal() is only called within critical sections protected
      // by the adjunct mutex. This forgoes the signal-after-unlock
      // optimization and may cost an extra context switch.
      status = pthread_cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
    status = pthread_mutex_unlock(_mutex);
4644     assert_status(status == 0, status, "mutex_unlock");
4645   }
4646 
  // In rare circumstances the signal can cause a thread to return
  // prematurely from cond_{timed}wait(), but such a spurious wakeup is
  // benign: the victim will simply re-test the condition and re-park
  // itself.
4652 }
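// For illustration only -- a summary of the _Event encoding assumed by
// TryPark(), park() and unpark() above:
//    1 -> a permit is available; park() consumes it and returns at once
//    0 -> neutral; park() blocks (0 -> -1), unpark() posts (0 -> 1)
//   -1 -> a thread is parked or about to park; unpark() moves the value
//         back to 0 and signals _cond under _mutex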
4653 
4654 
4655 // JSR166
4656 // -------------------------------------------------------
4657 
4658 //
// The Solaris and Linux implementations of park/unpark are fairly
// conservative for now, but can be improved. They currently use a
// mutex/condvar pair, plus a count.
4662 // Park decrements count if > 0, else does a condvar wait. Unpark
4663 // sets count to 1 and signals condvar. Only one thread ever waits
4664 // on the condvar. Contention seen when trying to park implies that someone
4665 // is unparking you, so don't wait. And spurious returns are fine, so there
4666 // is no need to track notifications.
4667 //
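// For illustration only -- the essential protocol described above, with
// safepoint, interrupt and timeout handling stripped away. The function
// names are hypothetical; _counter, _mutex and _cond are the real Parker
// fields used by the full implementations below.
#if 0
void parker_park_sketch(Parker* p) {
  pthread_mutex_lock(p->_mutex);
  if (p->_counter == 0) {
    // Spurious returns are fine (see above), so a single wait with no
    // notification tracking suffices.
    pthread_cond_wait(p->_cond, p->_mutex);
  }
  p->_counter = 0;                 // consume the permit, if any
  pthread_mutex_unlock(p->_mutex);
}

void parker_unpark_sketch(Parker* p) {
  pthread_mutex_lock(p->_mutex);
  p->_counter = 1;                 // at most one permit is ever stored
  pthread_cond_signal(p->_cond);   // only one thread ever waits
  pthread_mutex_unlock(p->_mutex);
}
#endif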
4668 
4669 #define MAX_SECS 100000000
4670 //
// This code is common to Linux and Solaris and will be moved to a
// common place in Dolphin (JDK 7).
4673 //
4674 // The passed in time value is either a relative time in nanoseconds
4675 // or an absolute time in milliseconds. Either way it has to be unpacked
4676 // into suitable seconds and nanoseconds components and stored in the
4677 // given timespec structure.
// Since the given time is a 64-bit value and the time_t used in the timespec
// is only a signed 32-bit value (except on 64-bit Linux), we have to watch
// for overflow if times far in the future are given. Further, on Solaris
// versions prior to 10 there is a restriction (see cond_timedwait) that the
// specified number of seconds, in abstime, must be less than
// current_time + 100,000,000. As it will be 28 years before
// "now + 100,000,000" overflows a 32-bit time_t, we can ignore that overflow
// and just impose a hard limit on seconds using the value of
// "now + 100,000,000". This places a limit on the timeout of about 3.17
// years from "now".
4687 //
4688 
4689 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert(time > 0, "unpackTime");
4691 
4692   struct timeval now;
4693   int status = gettimeofday(&now, NULL);
4694   assert(status == 0, "gettimeofday");
4695 
4696   time_t max_secs = now.tv_sec + MAX_SECS;
4697 
4698   if (isAbsolute) {
4699     jlong secs = time / 1000;
4700     if (secs > max_secs) {
4701       absTime->tv_sec = max_secs;
4702     }
4703     else {
4704       absTime->tv_sec = secs;
4705     }
4706     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4707   }
4708   else {
4709     jlong secs = time / NANOSECS_PER_SEC;
4710     if (secs >= MAX_SECS) {
4711       absTime->tv_sec = max_secs;
4712       absTime->tv_nsec = 0;
4713     }
4714     else {
4715       absTime->tv_sec = now.tv_sec + secs;
4716       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4717       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4718         absTime->tv_nsec -= NANOSECS_PER_SEC;
4719         ++absTime->tv_sec; // note: this must be <= max_secs
4720       }
4721     }
4722   }
4723   assert(absTime->tv_sec >= 0, "tv_sec < 0");
4724   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4725   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4726   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4727 }
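// For illustration only -- the two input conventions accepted by
// unpackTime(). The function name is hypothetical; Parker::park() below
// is the real caller.
#if 0
static void unpackTime_usage_sketch() {
  struct timespec abst;
  // Relative: a duration in nanoseconds, as passed by
  // java.util.concurrent.locks.LockSupport.parkNanos().
  unpackTime(&abst, false, 250 * (jlong)NANOSECS_PER_MILLISEC);
  // Absolute: a wall-clock deadline in milliseconds since the epoch,
  // as passed by LockSupport.parkUntil().
  unpackTime(&abst, true, ((jlong)time(NULL) + 1) * 1000);
}
#endif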
4728 
4729 void Parker::park(bool isAbsolute, jlong time) {
4730   // Optional fast-path check:
4731   // Return immediately if a permit is available.
4732   if (_counter > 0) {
4733     _counter = 0;
4734     OrderAccess::fence();
4735     return;
4736   }
4737 
4738   Thread* thread = Thread::current();
4739   assert(thread->is_Java_thread(), "Must be JavaThread");
4740   JavaThread *jt = (JavaThread *)thread;
4741 
4742   // Optional optimization -- avoid state transitions if there's an interrupt pending.
4743   // Check interrupt before trying to wait
4744   if (Thread::is_interrupted(thread, false)) {
4745     return;
4746   }
4747 
4748   // Next, demultiplex/decode time arguments
4749   timespec absTime;
4750   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
4751     return;
4752   }
4753   if (time > 0) {
4754     unpackTime(&absTime, isAbsolute, time);
4755   }
4756 
4757   // Enter safepoint region
4758   // Beware of deadlocks such as 6317397.
4759   // The per-thread Parker:: mutex is a classic leaf-lock.
4760   // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending, both the
  // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
4763   ThreadBlockInVM tbivm(jt);
4764 
  // Don't wait if we cannot get the lock, since interference arises from
  // unblocking. Also, check for a pending interrupt before trying to wait.
4767   if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4768     return;
4769   }
4770 
4771   int status;
4772   if (_counter > 0) { // no wait needed
4773     _counter = 0;
4774     status = pthread_mutex_unlock(_mutex);
4775     assert (status == 0, "invariant");
4776     OrderAccess::fence();
4777     return;
4778   }
4779 
4780 #ifdef ASSERT
4781   // Don't catch signals while blocked; let the running threads have the signals.
4782   // (This allows a debugger to break into the running thread.)
4783   sigset_t oldsigs;
4784   sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
4785   pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4786 #endif
4787 
4788   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4789   jt->set_suspend_equivalent();
4790   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4791 
4792   if (time == 0) {
4793     status = pthread_cond_wait (_cond, _mutex);
4794   } else {
4795     status = pthread_cond_timedwait (_cond, _mutex, &absTime);
4796     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4797       pthread_cond_destroy (_cond);
4798       pthread_cond_init    (_cond, NULL);
4799     }
4800   }
4801   assert_status(status == 0 || status == EINTR ||
4802                 status == ETIME || status == ETIMEDOUT,
4803                 status, "cond_timedwait");
4804 
4805 #ifdef ASSERT
4806   pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4807 #endif
4808 
4809   _counter = 0;
4810   status = pthread_mutex_unlock(_mutex);
4811   assert_status(status == 0, status, "invariant");
4812   // If externally suspended while waiting, re-suspend
4813   if (jt->handle_special_suspend_equivalent_condition()) {
4814     jt->java_suspend_self();
4815   }
4816 
4817   OrderAccess::fence();
4818 }
4819 
4820 void Parker::unpark() {
4821   int s, status;
4822   status = pthread_mutex_lock(_mutex);
4823   assert (status == 0, "invariant");
4824   s = _counter;
4825   _counter = 1;
4826   if (s < 1) {
4827     if (WorkAroundNPTLTimedWaitHang) {
4828       status = pthread_cond_signal (_cond);
4829       assert (status == 0, "invariant");
4830       status = pthread_mutex_unlock(_mutex);
4831       assert (status == 0, "invariant");
4832     } else {
4833       status = pthread_mutex_unlock(_mutex);
4834       assert (status == 0, "invariant");
4835       status = pthread_cond_signal (_cond);
4836       assert (status == 0, "invariant");
4837     }
4838   } else {
    status = pthread_mutex_unlock(_mutex);
4840     assert (status == 0, "invariant");
4841   }
4842 }
4843 
4844 extern char** environ;
4845 
4846 // Run the specified command in a separate process. Return its exit value,
4847 // or -1 on failure (e.g. can't fork a new process).
4848 // Unlike system(), this function can be called from signal handler. It
4849 // doesn't block SIGINT et al.
4850 int os::fork_and_exec(char* cmd) {
  char* argv[4] = { (char*)"sh", (char*)"-c", cmd, NULL };
4852 
4853   pid_t pid = fork();
4854 
4855   if (pid < 0) {
4856     // fork failed
4857     return -1;
4858 
4859   } else if (pid == 0) {
4860     // child process
4861 
4862     // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4863     execve("/usr/bin/sh", argv, environ);
4864 
4865     // execve failed
4866     _exit(-1);
4867 
4868   } else {
4869     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4870     // care about the actual exit code, for now.
4871 
4872     int status;
4873 
4874     // Wait for the child process to exit. This returns immediately if
    // the child has already exited.
4876     while (waitpid(pid, &status, 0) < 0) {
4877       switch (errno) {
4878         case ECHILD: return 0;
4879         case EINTR: break;
4880         default: return -1;
4881       }
4882     }
4883 
4884     if (WIFEXITED(status)) {
4885       // The child exited normally; get its exit code.
4886       return WEXITSTATUS(status);
4887     } else if (WIFSIGNALED(status)) {
4888       // The child exited because of a signal.
4889       // The best value to return is 0x80 + signal number,
4890       // because that is what all Unix shells do, and because
4891       // it allows callers to distinguish between process exit and
4892       // process death by signal.
4893       return 0x80 + WTERMSIG(status);
4894     } else {
      // Unknown wait status; pass the raw status through.
4896       return status;
4897     }
4898   }
4899   return -1;
4900 }
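// For illustration only -- typical use of os::fork_and_exec() and how to
// decode its return value; the VM uses this helper e.g. to run OnError
// commands during error reporting. The function name is hypothetical.
#if 0
static void fork_and_exec_usage_sketch() {
  char cmd[] = "ls /tmp";          // must be a writable buffer
  int ret = os::fork_and_exec(cmd);
  if (ret < 0) {
    // fork() or waitpid() failed
  } else if (ret >= 0x80) {
    // the child died from signal (ret - 0x80), per the convention above
  } else {
    // normal child exit code
  }
}
#endif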
4901 
4902 // is_headless_jre()
4903 //
4904 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
4905 // in order to report if we are running in a headless jre.
4906 //
4907 // Since JDK8 xawt/libmawt.so is moved into the same directory
4908 // as libawt.so, and renamed libawt_xawt.so
4909 bool os::is_headless_jre() {
4910   struct stat statbuf;
4911   char buf[MAXPATHLEN];
4912   char libmawtpath[MAXPATHLEN];
4913   const char *xawtstr = "/xawt/libmawt.so";
4914   const char *new_xawtstr = "/libawt_xawt.so";
4915 
4916   char *p;
4917 
4918   // Get path to libjvm.so
4919   os::jvm_path(buf, sizeof(buf));
4920 
4921   // Get rid of libjvm.so
4922   p = strrchr(buf, '/');
4923   if (p == NULL) return false;
4924   else *p = '\0';
4925 
4926   // Get rid of client or server
4927   p = strrchr(buf, '/');
4928   if (p == NULL) return false;
4929   else *p = '\0';
4930 
4931   // check xawt/libmawt.so
4932   strcpy(libmawtpath, buf);
4933   strcat(libmawtpath, xawtstr);
4934   if (::stat(libmawtpath, &statbuf) == 0) return false;
4935 
4936   // check libawt_xawt.so
4937   strcpy(libmawtpath, buf);
4938   strcat(libmawtpath, new_xawtstr);
4939   if (::stat(libmawtpath, &statbuf) == 0) return false;
4940 
4941   return true;
4942 }
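// For illustration only -- the two locations probed above, assuming a
// JRE layout in which libjvm.so lives in <jre>/lib/<arch>/<vm>/ (the
// concrete paths are hypothetical):
//   <jre>/lib/<arch>/xawt/libmawt.so   (pre-JDK8 layout)
//   <jre>/lib/<arch>/libawt_xawt.so    (JDK8 layout)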
4943 
4944 // Get the default path to the core file
4945 // Returns the length of the string
4946 int os::get_core_path(char* buffer, size_t bufferSize) {
4947   const char* p = get_current_directory(buffer, bufferSize);
4948 
4949   if (p == NULL) {
4950     assert(p != NULL, "failed to get current directory");
4951     return 0;
4952   }
4953 
  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
               p, current_process_id());
4956 
4957   return strlen(buffer);
4958 }
4959 
4960 #ifndef PRODUCT
4961 void TestReserveMemorySpecial_test() {
4962   // No tests available for this platform
4963 }
4964 #endif