1 /*
   2  * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 // According to the AIX OS doc #pragma alloca must be used
  27 // with C++ compiler before referencing the function alloca()
  28 #pragma alloca
  29 
  30 // no precompiled headers
  31 #include "classfile/classLoader.hpp"
  32 #include "classfile/systemDictionary.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "jvm_aix.h"
  39 #include "logging/log.hpp"
  40 #include "libo4.hpp"
  41 #include "libperfstat_aix.hpp"
  42 #include "libodm_aix.hpp"
  43 #include "loadlib_aix.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "memory/filemap.hpp"
  46 #include "misc_aix.hpp"
  47 #include "oops/oop.inline.hpp"
  48 #include "os_aix.inline.hpp"
  49 #include "os_share_aix.hpp"
  50 #include "porting_aix.hpp"
  51 #include "prims/jniFastGetField.hpp"
  52 #include "prims/jvm.h"
  53 #include "prims/jvm_misc.hpp"
  54 #include "runtime/arguments.hpp"
  55 #include "runtime/atomic.hpp"
  56 #include "runtime/extendedPC.hpp"
  57 #include "runtime/globals.hpp"
  58 #include "runtime/interfaceSupport.hpp"
  59 #include "runtime/java.hpp"
  60 #include "runtime/javaCalls.hpp"
  61 #include "runtime/mutexLocker.hpp"
  62 #include "runtime/objectMonitor.hpp"
  63 #include "runtime/orderAccess.inline.hpp"
  64 #include "runtime/os.hpp"
  65 #include "runtime/osThread.hpp"
  66 #include "runtime/perfMemory.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/statSampler.hpp"
  69 #include "runtime/stubRoutines.hpp"
  70 #include "runtime/thread.inline.hpp"
  71 #include "runtime/threadCritical.hpp"
  72 #include "runtime/timer.hpp"
  73 #include "runtime/vm_version.hpp"
  74 #include "services/attachListener.hpp"
  75 #include "services/runtimeService.hpp"
  76 #include "utilities/align.hpp"
  77 #include "utilities/decoder.hpp"
  78 #include "utilities/defaultStream.hpp"
  79 #include "utilities/events.hpp"
  80 #include "utilities/growableArray.hpp"
  81 #include "utilities/vmError.hpp"
  82 
  83 // put OS-includes here (sorted alphabetically)
  84 #include <errno.h>
  85 #include <fcntl.h>
  86 #include <inttypes.h>
  87 #include <poll.h>
  88 #include <procinfo.h>
  89 #include <pthread.h>
  90 #include <pwd.h>
  91 #include <semaphore.h>
  92 #include <signal.h>
  93 #include <stdint.h>
  94 #include <stdio.h>
  95 #include <string.h>
  96 #include <unistd.h>
  97 #include <sys/ioctl.h>
  98 #include <sys/ipc.h>
  99 #include <sys/mman.h>
 100 #include <sys/resource.h>
 101 #include <sys/select.h>
 102 #include <sys/shm.h>
 103 #include <sys/socket.h>
 104 #include <sys/stat.h>
 105 #include <sys/sysinfo.h>
 106 #include <sys/systemcfg.h>
 107 #include <sys/time.h>
 108 #include <sys/times.h>
 109 #include <sys/types.h>
 110 #include <sys/utsname.h>
 111 #include <sys/vminfo.h>
 112 #include <sys/wait.h>
 113 
 114 // Missing prototypes for various system APIs.
 115 extern "C"
 116 int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
 117 
 118 #if !defined(_AIXVERSION_610)
 119 extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
 120 extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
 121 extern "C" int getargs   (procsinfo*, int, char*, int);
 122 #endif
 123 
 124 #define MAX_PATH (2 * K)
 125 
 126 // for timer info max values which include all bits
 127 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 128 // for multipage initialization error analysis (in 'g_multipage_error')
 129 #define ERROR_MP_OS_TOO_OLD                          100
 130 #define ERROR_MP_EXTSHM_ACTIVE                       101
 131 #define ERROR_MP_VMGETINFO_FAILED                    102
 132 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 133 
 134 static address resolve_function_descriptor_to_code_pointer(address p);
 135 
 136 static void vmembk_print_on(outputStream* os);
 137 
 138 ////////////////////////////////////////////////////////////////////////////////
 139 // global variables (for a description see os_aix.hpp)
 140 
julong    os::Aix::_physical_memory = 0;   // Total physical memory in bytes; set in initialize_system_info().

pthread_t os::Aix::_main_thread = ((pthread_t)0);  // pthread id of the VM's main thread.
int       os::Aix::_page_size = -1;                // Base page size; -1 = uninitialized.

// -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
int       os::Aix::_on_pase = -1;

// 0 = uninitialized, otherwise 32 bit number:
//  0xVVRRTTSS
//  VV - major version
//  RR - minor version
//  TT - tech level, if known, 0 otherwise
//  SS - service pack, if known, 0 otherwise
uint32_t  os::Aix::_os_version = 0;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_xpg_sus_mode = -1;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_extshm = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static jlong    initial_time_count = 0;    // Time counter recorded at startup (set during init, not in this chunk).
static int      clock_tics_per_sec = 100;  // Clock ticks per second; default 100, presumably refreshed via sysconf at init - not visible here.
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;

// Process break recorded at startup.
static address g_brk_at_startup = NULL;
 175 
 176 // This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
 178 // therefore should not be defined in AIX class.
 179 //
 180 // AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
 181 // latter two (16M "large" resp. 16G "huge" pages) require special
 182 // setup and are normally not available.
 183 //
 184 // AIX supports multiple page sizes per process, for:
 185 //  - Stack (of the primordial thread, so not relevant for us)
 186 //  - Data - data, bss, heap, for us also pthread stacks
 187 //  - Text - text code
 188 //  - shared memory
 189 //
 190 // Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
 191 // and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
 192 //
 193 // For shared memory, page size can be set dynamically via
 194 // shmctl(). Different shared memory regions can have different page
 195 // sizes.
 196 //
// More information can be found at the IBM info center:
 198 //   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
 199 //
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  // All page sizes start out as (size_t)-1 == "not yet queried";
  // query_multipage_support() fills them in (and guards against double init).
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};
 218 
 219 // We must not accidentally allocate memory close to the BRK - even if
 220 // that would work - because then we prevent the BRK segment from
 221 // growing which may result in a malloc OOM even though there is
 222 // enough memory. The problem only arises if we shmat() or mmap() at
 223 // a specific wish address, e.g. to place the heap in a
 224 // compressed-oops-friendly way.
 225 static bool is_close_to_brk(address a) {
 226   assert0(g_brk_at_startup != NULL);
 227   if (a >= g_brk_at_startup &&
 228       a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
 229     return true;
 230   }
 231   return false;
 232 }
 233 
// Amount of free physical memory in bytes (platform-independent entry point).
julong os::available_memory() {
  return Aix::available_memory();
}
 237 
 238 julong os::Aix::available_memory() {
 239   // Avoid expensive API call here, as returned value will always be null.
 240   if (os::Aix::on_pase()) {
 241     return 0x0LL;
 242   }
 243   os::Aix::meminfo_t mi;
 244   if (os::Aix::get_meminfo(&mi)) {
 245     return mi.real_free;
 246   } else {
 247     return ULONG_MAX;
 248   }
 249 }
 250 
// Total physical memory in bytes, as cached at VM startup.
julong os::physical_memory() {
  return Aix::physical_memory();
}
 254 
 255 // Return true if user is running as root.
 256 
 257 bool os::have_special_privileges() {
 258   static bool init = false;
 259   static bool privileges = false;
 260   if (!init) {
 261     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 262     init = true;
 263   }
 264   return privileges;
 265 }
 266 
 267 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 268 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 269 static bool my_disclaim64(char* addr, size_t size) {
 270 
 271   if (size == 0) {
 272     return true;
 273   }
 274 
 275   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 276   const unsigned int maxDisclaimSize = 0x40000000;
 277 
 278   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 279   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 280 
 281   char* p = addr;
 282 
 283   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
 284     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 285       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 286       return false;
 287     }
 288     p += maxDisclaimSize;
 289   }
 290 
 291   if (lastDisclaimSize > 0) {
 292     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 293       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 294       return false;
 295     }
 296   }
 297 
 298   return true;
 299 }
 300 
// Cpu architecture string (used e.g. when composing library search paths).
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif
 309 
 310 // Wrap the function "vmgetinfo" which is not available on older OS releases.
 311 static int checked_vmgetinfo(void *out, int command, int arg) {
 312   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 313     guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
 314   }
 315   return ::vmgetinfo(out, command, arg);
 316 }
 317 
 318 // Given an address, returns the size of the page backing that address.
 319 size_t os::Aix::query_pagesize(void* addr) {
 320 
 321   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 322     // AS/400 older than V6R1: no vmgetinfo here, default to 4K
 323     return 4*K;
 324   }
 325 
 326   vm_page_info pi;
 327   pi.addr = (uint64_t)addr;
 328   if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
 329     return pi.pagesize;
 330   } else {
 331     assert(false, "vmgetinfo failed to retrieve page size");
 332     return 4*K;
 333   }
 334 }
 335 
// Initializes _processor_count and _physical_memory from the OS.
void os::Aix::initialize_system_info() {

  // Get the number of online(logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    assert(false, "os::Aix::get_meminfo failed.");
  }
  // Note: in release builds a get_meminfo() failure falls through with mi
  // zeroed (get_meminfo memsets it), leaving _physical_memory == 0.
  _physical_memory = (julong) mi.real_total;
}
 349 
 350 // Helper function for tracing page sizes.
 351 static const char* describe_pagesize(size_t pagesize) {
 352   switch (pagesize) {
 353     case 4*K : return "4K";
 354     case 64*K: return "64K";
 355     case 16*M: return "16M";
 356     case 16*G: return "16G";
 357     default:
 358       assert(false, "surprise");
 359       return "??";
 360   }
 361 }
 362 
 363 // Probe OS for multipage support.
 364 // Will fill the global g_multipage_support structure.
 365 // Must be called before calling os::large_page_init().
 366 static void query_multipage_support() {
 367 
 368   guarantee(g_multipage_support.pagesize == -1,
 369             "do not call twice");
 370 
 371   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
 372 
 373   // This really would surprise me.
 374   assert(g_multipage_support.pagesize == 4*K, "surprise!");
 375 
 376   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 377   // Default data page size is defined either by linker options (-bdatapsize)
 378   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 379   // default should be 4K.
 380   {
 381     void* p = ::malloc(16*M);
 382     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
 383     ::free(p);
 384   }
 385 
 386   // Query default shm page size (LDR_CNTRL SHMPSIZE).
 387   // Note that this is pure curiosity. We do not rely on default page size but set
 388   // our own page size after allocated.
 389   {
 390     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 391     guarantee(shmid != -1, "shmget failed");
 392     void* p = ::shmat(shmid, NULL, 0);
 393     ::shmctl(shmid, IPC_RMID, NULL);
 394     guarantee(p != (void*) -1, "shmat failed");
 395     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
 396     ::shmdt(p);
 397   }
 398 
 399   // Before querying the stack page size, make sure we are not running as primordial
 400   // thread (because primordial thread's stack may have different page size than
 401   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 402   // number of reasons so we may just as well guarantee it here.
 403   guarantee0(!os::Aix::is_primordial_thread());
 404 
 405   // Query pthread stack page size. Should be the same as data page size because
 406   // pthread stacks are allocated from C-Heap.
 407   {
 408     int dummy = 0;
 409     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
 410   }
 411 
 412   // Query default text page size (LDR_CNTRL TEXTPSIZE).
 413   {
 414     address any_function =
 415       resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
 416     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
 417   }
 418 
 419   // Now probe for support of 64K pages and 16M pages.
 420 
 421   // Before OS/400 V6R1, there is no support for pages other than 4K.
 422   if (os::Aix::on_pase_V5R4_or_older()) {
 423     trcVerbose("OS/400 < V6R1 - no large page support.");
 424     g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
 425     goto query_multipage_support_end;
 426   }
 427 
 428   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
 429   {
 430     const int MAX_PAGE_SIZES = 4;
 431     psize_t sizes[MAX_PAGE_SIZES];
 432     const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 433     if (num_psizes == -1) {
 434       trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
 435       trcVerbose("disabling multipage support.");
 436       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
 437       goto query_multipage_support_end;
 438     }
 439     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 440     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 441     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
 442     for (int i = 0; i < num_psizes; i ++) {
 443       trcVerbose(" %s ", describe_pagesize(sizes[i]));
 444     }
 445 
 446     // Can we use 64K, 16M pages?
 447     for (int i = 0; i < num_psizes; i ++) {
 448       const size_t pagesize = sizes[i];
 449       if (pagesize != 64*K && pagesize != 16*M) {
 450         continue;
 451       }
 452       bool can_use = false;
 453       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
 454       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
 455         IPC_CREAT | S_IRUSR | S_IWUSR);
 456       guarantee0(shmid != -1); // Should always work.
 457       // Try to set pagesize.
 458       struct shmid_ds shm_buf = { 0 };
 459       shm_buf.shm_pagesize = pagesize;
 460       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
 461         const int en = errno;
 462         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 463         trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%n",
 464           errno);
 465       } else {
 466         // Attach and double check pageisze.
 467         void* p = ::shmat(shmid, NULL, 0);
 468         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 469         guarantee0(p != (void*) -1); // Should always work.
 470         const size_t real_pagesize = os::Aix::query_pagesize(p);
 471         if (real_pagesize != pagesize) {
 472           trcVerbose("real page size (0x%llX) differs.", real_pagesize);
 473         } else {
 474           can_use = true;
 475         }
 476         ::shmdt(p);
 477       }
 478       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
 479       if (pagesize == 64*K) {
 480         g_multipage_support.can_use_64K_pages = can_use;
 481       } else if (pagesize == 16*M) {
 482         g_multipage_support.can_use_16M_pages = can_use;
 483       }
 484     }
 485 
 486   } // end: check which pages can be used for shared memory
 487 
 488 query_multipage_support_end:
 489 
 490   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
 491       describe_pagesize(g_multipage_support.pagesize));
 492   trcVerbose("Data page size (C-Heap, bss, etc): %s",
 493       describe_pagesize(g_multipage_support.datapsize));
 494   trcVerbose("Text page size: %s",
 495       describe_pagesize(g_multipage_support.textpsize));
 496   trcVerbose("Thread stack page size (pthread): %s",
 497       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
 498   trcVerbose("Default shared memory page size: %s",
 499       describe_pagesize(g_multipage_support.shmpsize));
 500   trcVerbose("Can use 64K pages dynamically with shared meory: %s",
 501       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
 502   trcVerbose("Can use 16M pages dynamically with shared memory: %s",
 503       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
 504   trcVerbose("Multipage error details: %d",
 505       g_multipage_support.error);
 506 
 507   // sanity checks
 508   assert0(g_multipage_support.pagesize == 4*K);
 509   assert0(g_multipage_support.datapsize == 4*K || g_multipage_support.datapsize == 64*K);
 510   assert0(g_multipage_support.textpsize == 4*K || g_multipage_support.textpsize == 64*K);
 511   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
 512   assert0(g_multipage_support.shmpsize == 4*K || g_multipage_support.shmpsize == 64*K);
 513 
 514 }
 515 
// Derives and publishes the path-related system properties:
// dll_dir and java_home (from the location of libjvm.so), the boot class
// path, the native library search path (LIBPATH + defaults), and the
// extensions directories.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/lib:/usr/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    // Strip one more component (/lib) to arrive at java_home - but only if
    // the previous strrchr actually found a slash.
    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';        // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  // Note: 'buf' (still holding java_home-sized content) is reused here;
  // bufsize was computed above to accommodate this sprintf.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}
 581 
 582 ////////////////////////////////////////////////////////////////////////////////
 583 // breakpoint support
 584 
// Triggers a debugger breakpoint (expands to the platform BREAKPOINT macro).
void os::breakpoint() {
  BREAKPOINT;
}
 588 
// Empty C-linkage function: a convenient, stable symbol on which to set a
// breakpoint from a native debugger.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
 592 
 593 ////////////////////////////////////////////////////////////////////////////////
 594 // signal support
 595 
 596 debug_only(static bool signal_sets_initialized = false);
 597 static sigset_t unblocked_sigs, vm_sigs;
 598 
 599 bool os::Aix::is_sig_ignored(int sig) {
 600   struct sigaction oact;
 601   sigaction(sig, (struct sigaction*)NULL, &oact);
 602   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
 603     : CAST_FROM_FN_PTR(void*, oact.sa_handler);
 604   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
 605     return true;
 606   } else {
 607     return false;
 608   }
 609 }
 610 
// Builds the two signal sets used by hotspot_sigmask():
// unblocked_sigs (unblocked in every thread) and vm_sigs (handled only by
// the VM thread). Must run once, before any threads apply these masks.
void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  // Synchronous error signals plus the suspend/resume signal (SR_signum).
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
   // Only take over shutdown signals the embedding application has not
   // explicitly set to SIG_IGN.
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}
 652 
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Requires signal_sets_init() to have run; returns a pointer to static data.
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
 659 
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
// Requires signal_sets_init() to have run; returns a pointer to static data.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
 666 
// Applies the VM's signal mask policy to the calling thread (which must be
// 'thread'): unblocks the error/suspend signals for everyone, and reserves
// BREAK_SIGNAL for the VM thread alone. The caller's previous mask is saved
// in the OSThread so it can be restored later.
void os::Aix::hotspot_sigmask(Thread* thread) {

  //Save caller's signal mask before setting VM signal mask
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}
 688 
// retrieve memory information.
// Returns false if something went wrong;
// content of pmi undefined in this case.
// On success, all fields of *pmi are filled in (in bytes). On PASE the
// numbers come from the libo4 porting library; on AIX proper from the
// dynamically loaded libperfstat.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  // Zero the output up front, so release-build callers see zeroes (not
  // garbage) even on failure.
  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {
    // On PASE, use the libo4 porting library.

    unsigned long long virt_total = 0;
    unsigned long long real_total = 0;
    unsigned long long real_free = 0;
    unsigned long long pgsp_total = 0;
    unsigned long long pgsp_free = 0;
    if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
      pmi->virt_total = virt_total;
      pmi->real_total = real_total;
      pmi->real_free = real_free;
      pmi->pgsp_total = pgsp_total;
      pmi->pgsp_free = pgsp_free;
      return true;
    }
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    // Request exactly one record of global memory statistics.
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    // Convert from 4K-page counts to bytes.
    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo
 756 
 757 //////////////////////////////////////////////////////////////////////////////
 758 // create new thread
 759 
// Thread start routine for all newly created threads
// Records stack geometry and ids, sets up TLS, signal mask and FPU state,
// then runs the Thread's run() method. Returns NULL when the thread exits.
static void *thread_native_entry(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    thread->set_stack_base(os::current_stack_base());
    thread->set_stack_size(os::current_stack_size());
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    log_warning(os, thread)("Thread stack not in data segment.");
  }

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  // Shift this frame by 0..7 cache-line-sized (128 byte) steps.
  alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // Thread_id is pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  os::Aix::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->run();

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  return 0;
}
 830 
// Create a new VM thread of the given type with the requested stack size
// (0 = platform default). The thread is created suspended; it is released
// by os::pd_start_thread(). Returns false (with all partially-allocated
// state rolled back) if the OSThread allocation or pthread_create fails.
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t req_stack_size) {

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object.
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Set the correct thread state.
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // Init thread attributes.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  }

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // Calculate stack size if it's not specified by caller.
  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
  int status = pthread_attr_setstacksize(&attr, stack_size);
  assert_status(status == 0, status, "pthread_attr_setstacksize");

  // Configure libc guard page.
  pthread_attr_setguardsize(&attr, os::Aix::default_guard_size(thr_type));

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);

  char buf[64];
  if (ret == 0) {
    log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
      (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  } else {
    log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.",
      ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  }

  // Attributes are no longer needed once pthread_create has been called.
  pthread_attr_destroy(&attr);

  if (ret != 0) {
    // Need to clean up stuff we've allocated so far.
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // OSThread::thread_id is the pthread id.
  // Safe to set after creation because the thread starts suspended.
  osthread->set_thread_id(tid);

  return true;
}
 898 
 899 /////////////////////////////////////////////////////////////////////////////
 900 // attach existing thread
 901 
 902 // bootstrap the main thread
 903 bool os::create_main_thread(JavaThread* thread) {
 904   assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
 905   return create_attached_thread(thread);
 906 }
 907 
 908 bool os::create_attached_thread(JavaThread* thread) {
 909 #ifdef ASSERT
 910     thread->verify_not_published();
 911 #endif
 912 
 913   // Allocate the OSThread object
 914   OSThread* osthread = new OSThread(NULL, NULL);
 915 
 916   if (osthread == NULL) {
 917     return false;
 918   }
 919 
 920   const pthread_t pthread_id = ::pthread_self();
 921   const tid_t kernel_thread_id = ::thread_self();
 922 
 923   // OSThread::thread_id is the pthread id.
 924   osthread->set_thread_id(pthread_id);
 925 
 926   // .. but keep kernel thread id too for diagnostics
 927   osthread->set_kernel_thread_id(kernel_thread_id);
 928 
 929   // initialize floating point control register
 930   os::Aix::init_thread_fpu_state();
 931 
 932   // Initial thread state is RUNNABLE
 933   osthread->set_state(RUNNABLE);
 934 
 935   thread->set_osthread(osthread);
 936 
 937   if (UseNUMA) {
 938     int lgrp_id = os::numa_get_group_id();
 939     if (lgrp_id != -1) {
 940       thread->set_lgrp_id(lgrp_id);
 941     }
 942   }
 943 
 944   // initialize signal mask for this thread
 945   // and save the caller's signal mask
 946   os::Aix::hotspot_sigmask(thread);
 947 
 948   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
 949     os::current_thread_id(), (uintx) kernel_thread_id);
 950 
 951   return true;
 952 }
 953 
 954 void os::pd_start_thread(Thread* thread) {
 955   int status = pthread_continue_np(thread->osthread()->pthread_id());
 956   assert(status == 0, "thr_continue failed");
 957 }
 958 
 959 // Free OS resources related to the OSThread
 960 void os::free_thread(OSThread* osthread) {
 961   assert(osthread != NULL, "osthread not set");
 962 
 963   // We are told to free resources of the argument thread,
 964   // but we can only really operate on the current thread.
 965   assert(Thread::current()->osthread() == osthread,
 966          "os::free_thread but not current thread");
 967 
 968   // Restore caller's signal mask
 969   sigset_t sigmask = osthread->caller_sigmask();
 970   pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
 971 
 972   delete osthread;
 973 }
 974 
 975 ////////////////////////////////////////////////////////////////////////////////
 976 // time support
 977 
 978 // Time since start-up in seconds to a fine granularity.
 979 // Used by VMSelfDestructTimer and the MemProfiler.
 980 double os::elapsedTime() {
 981   return (double)(os::elapsed_counter()) * 0.000001;
 982 }
 983 
 984 jlong os::elapsed_counter() {
 985   timeval time;
 986   int status = gettimeofday(&time, NULL);
 987   return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
 988 }
 989 
 990 jlong os::elapsed_frequency() {
 991   return (1000 * 1000);
 992 }
 993 
 994 bool os::supports_vtime() { return true; }
 995 bool os::enable_vtime()   { return false; }
 996 bool os::vtime_enabled()  { return false; }
 997 
 998 double os::elapsedVTime() {
 999   struct rusage usage;
1000   int retval = getrusage(RUSAGE_THREAD, &usage);
1001   if (retval == 0) {
1002     return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
1003   } else {
1004     // better than nothing, but not much
1005     return elapsedTime();
1006   }
1007 }
1008 
1009 jlong os::javaTimeMillis() {
1010   timeval time;
1011   int status = gettimeofday(&time, NULL);
1012   assert(status != -1, "aix error at gettimeofday()");
1013   return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1014 }
1015 
1016 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1017   timeval time;
1018   int status = gettimeofday(&time, NULL);
1019   assert(status != -1, "aix error at gettimeofday()");
1020   seconds = jlong(time.tv_sec);
1021   nanos = jlong(time.tv_usec) * 1000;
1022 }
1023 
// Monotonic(ish) nanosecond timer. On PASE it falls back to gettimeofday
// (microsecond resolution, not guaranteed monotonic); on AIX it reads the
// processor real-time clock / time base registers.
jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {

    timeval time;
    int status = gettimeofday(&time, NULL);
    assert(status != -1, "PASE error at gettimeofday()");
    jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
    return 1000 * usecs;

  } else {
    // On AIX use the precision of processors real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After convertion we have following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonic increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      // Raw time-base ticks were returned; convert them to seconds/nanos.
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}
1052 
1053 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1054   info_ptr->max_value = ALL_64_BITS;
1055   // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
1056   info_ptr->may_skip_backward = false;
1057   info_ptr->may_skip_forward = false;
1058   info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
1059 }
1060 
1061 // Return the real, user, and system times in seconds from an
1062 // arbitrary fixed point in the past.
1063 bool os::getTimesSecs(double* process_real_time,
1064                       double* process_user_time,
1065                       double* process_system_time) {
1066   struct tms ticks;
1067   clock_t real_ticks = times(&ticks);
1068 
1069   if (real_ticks == (clock_t) (-1)) {
1070     return false;
1071   } else {
1072     double ticks_per_second = (double) clock_tics_per_sec;
1073     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1074     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1075     *process_real_time = ((double) real_ticks) / ticks_per_second;
1076 
1077     return true;
1078   }
1079 }
1080 
1081 char * os::local_time_string(char *buf, size_t buflen) {
1082   struct tm t;
1083   time_t long_time;
1084   time(&long_time);
1085   localtime_r(&long_time, &t);
1086   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1087                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1088                t.tm_hour, t.tm_min, t.tm_sec);
1089   return buf;
1090 }
1091 
1092 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
1093   return localtime_r(clock, res);
1094 }
1095 
1096 ////////////////////////////////////////////////////////////////////////////////
1097 // runtime exit support
1098 
// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
// The ordering below matters: persistent resources first, then log flushing,
// then the user-registered abort hook last.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
1119 
// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
// 'siginfo' and 'context' are currently unused on AIX.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  // No core requested: plain process exit with failure status.
  ::exit(1);
}
1139 
1140 // Die immediately, no exit hook, no abort hook, no cleanup.
1141 void os::die() {
1142   ::abort();
1143 }
1144 
1145 // This method is a copy of JDK's sysGetLastErrorString
1146 // from src/solaris/hpi/src/system_md.c
1147 
1148 size_t os::lasterror(char *buf, size_t len) {
1149   if (errno == 0) return 0;
1150 
1151   const char *s = os::strerror(errno);
1152   size_t n = ::strlen(s);
1153   if (n >= len) {
1154     n = len - 1;
1155   }
1156   ::strncpy(buf, s, n);
1157   buf[n] = '\0';
1158   return n;
1159 }
1160 
1161 intx os::current_thread_id() {
1162   return (intx)pthread_self();
1163 }
1164 
1165 int os::current_process_id() {
1166   return getpid();
1167 }
1168 
1169 // DLL functions
1170 
1171 const char* os::dll_file_extension() { return ".so"; }
1172 
1173 // This must be hard coded because it's the system's temporary
1174 // directory not the java application's temp directory, ala java.io.tmpdir.
1175 const char* os::get_temp_directory() { return "/tmp"; }
1176 
1177 static bool file_exists(const char* filename) {
1178   struct stat statbuf;
1179   if (filename == NULL || strlen(filename) == 0) {
1180     return false;
1181   }
1182   return os::stat(filename, &statbuf) == 0;
1183 }
1184 
// Build the platform library file name for 'fname' into 'buffer'.
// If 'pname' is empty, produces "lib<fname>.so"; if 'pname' is a search
// path (contains the path separator), probes each element and keeps the
// first existing "<dir>/lib<fname>.so"; otherwise prepends 'pname' as a
// single directory. Returns true if a name was produced (for the search
// path case, only if the file actually exists).
bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  // (+10 covers the literal "/lib" + ".so" decoration plus slack.)
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    // split_path returns a C-heap array of C-heap strings; freed below.
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
1232 
1233 // Check if addr is inside libjvm.so.
1234 bool os::address_is_in_vm(address addr) {
1235 
1236   // Input could be a real pc or a function pointer literal. The latter
1237   // would be a function descriptor residing in the data segment of a module.
1238   loaded_module_t lm;
1239   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
1240     return lm.is_in_vm;
1241   } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
1242     return lm.is_in_vm;
1243   } else {
1244     return false;
1245   }
1246 
1247 }
1248 
1249 // Resolve an AIX function descriptor literal to a code pointer.
1250 // If the input is a valid code pointer to a text segment of a loaded module,
1251 //   it is returned unchanged.
1252 // If the input is a valid AIX function descriptor, it is resolved to the
1253 //   code entry point.
1254 // If the input is neither a valid function descriptor nor a valid code pointer,
1255 //   NULL is returned.
1256 static address resolve_function_descriptor_to_code_pointer(address p) {
1257 
1258   if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
1259     // It is a real code pointer.
1260     return p;
1261   } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
1262     // Pointer to data segment, potential function descriptor.
1263     address code_entry = (address)(((FunctionDescriptor*)p)->entry());
1264     if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
1265       // It is a function descriptor.
1266       return code_entry;
1267     }
1268   }
1269 
1270   return NULL;
1271 }
1272 
1273 bool os::dll_address_to_function_name(address addr, char *buf,
1274                                       int buflen, int *offset,
1275                                       bool demangle) {
1276   if (offset) {
1277     *offset = -1;
1278   }
1279   // Buf is not optional, but offset is optional.
1280   assert(buf != NULL, "sanity check");
1281   buf[0] = '\0';
1282 
1283   // Resolve function ptr literals first.
1284   addr = resolve_function_descriptor_to_code_pointer(addr);
1285   if (!addr) {
1286     return false;
1287   }
1288 
1289   return AixSymbols::get_function_name(addr, buf, buflen, offset, NULL, demangle);
1290 }
1291 
1292 bool os::dll_address_to_library_name(address addr, char* buf,
1293                                      int buflen, int* offset) {
1294   if (offset) {
1295     *offset = -1;
1296   }
1297   // Buf is not optional, but offset is optional.
1298   assert(buf != NULL, "sanity check");
1299   buf[0] = '\0';
1300 
1301   // Resolve function ptr literals first.
1302   addr = resolve_function_descriptor_to_code_pointer(addr);
1303   if (!addr) {
1304     return false;
1305   }
1306 
1307   return AixSymbols::get_module_name(addr, buf, buflen);
1308 }
1309 
1310 // Loads .dll/.so and in case of error it checks if .dll/.so was built
1311 // for the same architecture as Hotspot is running on.
1312 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1313 
1314   if (ebuf && ebuflen > 0) {
1315     ebuf[0] = '\0';
1316     ebuf[ebuflen - 1] = '\0';
1317   }
1318 
1319   if (!filename || strlen(filename) == 0) {
1320     ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1321     return NULL;
1322   }
1323 
1324   // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1325   void * result= ::dlopen(filename, RTLD_LAZY);
1326   if (result != NULL) {
1327     // Reload dll cache. Don't do this in signal handling.
1328     LoadedLibraries::reload();
1329     return result;
1330   } else {
1331     // error analysis when dlopen fails
1332     const char* const error_report = ::dlerror();
1333     if (error_report && ebuf && ebuflen > 0) {
1334       snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1335                filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1336     }
1337   }
1338   return NULL;
1339 }
1340 
1341 void* os::dll_lookup(void* handle, const char* name) {
1342   void* res = dlsym(handle, name);
1343   return res;
1344 }
1345 
1346 void* os::get_default_process_handle() {
1347   return (void*)::dlopen(NULL, RTLD_LAZY);
1348 }
1349 
1350 void os::print_dll_info(outputStream *st) {
1351   st->print_cr("Dynamic libraries:");
1352   LoadedLibraries::print(st);
1353 }
1354 
1355 void os::get_summary_os_info(char* buf, size_t buflen) {
1356   // There might be something more readable than uname results for AIX.
1357   struct utsname name;
1358   uname(&name);
1359   snprintf(buf, buflen, "%s %s", name.release, name.version);
1360 }
1361 
1362 void os::print_os_info(outputStream* st) {
1363   st->print("OS:");
1364 
1365   st->print("uname:");
1366   struct utsname name;
1367   uname(&name);
1368   st->print(name.sysname); st->print(" ");
1369   st->print(name.nodename); st->print(" ");
1370   st->print(name.release); st->print(" ");
1371   st->print(name.version); st->print(" ");
1372   st->print(name.machine);
1373   st->cr();
1374 
1375   uint32_t ver = os::Aix::os_version();
1376   st->print_cr("AIX kernel version %u.%u.%u.%u",
1377                (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1378 
1379   os::Posix::print_rlimit_info(st);
1380 
1381   // load average
1382   st->print("load average:");
1383   double loadavg[3] = {-1.L, -1.L, -1.L};
1384   os::loadavg(loadavg, 3);
1385   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1386   st->cr();
1387 
1388   // print wpar info
1389   libperfstat::wparinfo_t wi;
1390   if (libperfstat::get_wparinfo(&wi)) {
1391     st->print_cr("wpar info");
1392     st->print_cr("name: %s", wi.name);
1393     st->print_cr("id:   %d", wi.wpar_id);
1394     st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1395   }
1396 
1397   // print partition info
1398   libperfstat::partitioninfo_t pi;
1399   if (libperfstat::get_partitioninfo(&pi)) {
1400     st->print_cr("partition info");
1401     st->print_cr(" name: %s", pi.name);
1402   }
1403 
1404 }
1405 
1406 void os::print_memory_info(outputStream* st) {
1407 
1408   st->print_cr("Memory:");
1409 
1410   st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1411     describe_pagesize(g_multipage_support.pagesize));
1412   st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1413     describe_pagesize(g_multipage_support.datapsize));
1414   st->print_cr("  Text page size:                         %s",
1415     describe_pagesize(g_multipage_support.textpsize));
1416   st->print_cr("  Thread stack page size (pthread):       %s",
1417     describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1418   st->print_cr("  Default shared memory page size:        %s",
1419     describe_pagesize(g_multipage_support.shmpsize));
1420   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1421     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1422   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1423     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1424   st->print_cr("  Multipage error: %d",
1425     g_multipage_support.error);
1426   st->cr();
1427   st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1428 
1429   // print out LDR_CNTRL because it affects the default page sizes
1430   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1431   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1432 
1433   // Print out EXTSHM because it is an unsupported setting.
1434   const char* const extshm = ::getenv("EXTSHM");
1435   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1436   if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1437     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1438   }
1439 
1440   // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1441   const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1442   st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1443       aixthread_guardpages ? aixthread_guardpages : "<unset>");
1444 
1445   os::Aix::meminfo_t mi;
1446   if (os::Aix::get_meminfo(&mi)) {
1447     char buffer[256];
1448     if (os::Aix::on_aix()) {
1449       st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1450       st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1451       st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1452       st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1453     } else {
1454       // PASE - Numbers are result of QWCRSSTS; they mean:
1455       // real_total: Sum of all system pools
1456       // real_free: always 0
1457       // pgsp_total: we take the size of the system ASP
1458       // pgsp_free: size of system ASP times percentage of system ASP unused
1459       st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1460       st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1461       st->print_cr("%% system asp used : " SIZE_FORMAT,
1462         mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1463     }
1464     st->print_raw(buffer);
1465   }
1466   st->cr();
1467 
1468   // Print segments allocated with os::reserve_memory.
1469   st->print_cr("internal virtual memory regions used by vm:");
1470   vmembk_print_on(st);
1471 }
1472 
1473 // Get a string for the cpuinfo that is a summary of the cpu type
1474 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1475   // This looks good
1476   libperfstat::cpuinfo_t ci;
1477   if (libperfstat::get_cpuinfo(&ci)) {
1478     strncpy(buf, ci.version, buflen);
1479   } else {
1480     strncpy(buf, "AIX", buflen);
1481   }
1482 }
1483 
1484 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1485   st->print("CPU:");
1486   st->print("total %d", os::processor_count());
1487   // It's not safe to query number of active processors after crash.
1488   // st->print("(active %d)", os::active_processor_count());
1489   st->print(" %s", VM_Version::features());
1490   st->cr();
1491 }
1492 
1493 static void print_signal_handler(outputStream* st, int sig,
1494                                  char* buf, size_t buflen);
1495 
1496 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1497   st->print_cr("Signal Handlers:");
1498   print_signal_handler(st, SIGSEGV, buf, buflen);
1499   print_signal_handler(st, SIGBUS , buf, buflen);
1500   print_signal_handler(st, SIGFPE , buf, buflen);
1501   print_signal_handler(st, SIGPIPE, buf, buflen);
1502   print_signal_handler(st, SIGXFSZ, buf, buflen);
1503   print_signal_handler(st, SIGILL , buf, buflen);
1504   print_signal_handler(st, SR_signum, buf, buflen);
1505   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1506   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1507   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1508   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1509   print_signal_handler(st, SIGTRAP, buf, buflen);
1510   // We also want to know if someone else adds a SIGDANGER handler because
1511   // that will interfere with OOM killling.
1512   print_signal_handler(st, SIGDANGER, buf, buflen);
1513 }
1514 
// Cached result of os::jvm_path(); filled lazily on first call.
static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so.
// 'buf' must be at least MAXPATHLEN bytes. The path is located via
// dladdr() on this function's own address and cached. When launched via
// an -XXaltjvm launcher, the path may be rewritten to point below
// JAVA_HOME (see the comment inside).
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Ask the dynamic loader which module contains this very function.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");

  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
    // value for buf is "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.so".
    // If "/jre/lib/" appears at the right place in the string, then
    // assume we are installed in a JDK and we're done. Otherwise, check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like libjvm.so is installed there (append a fake suffix
    // hotspot/libjvm.so).
    // Walk back over the last four '/'-separated components of the path.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 4; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char* jrelib_p;
        int len;

        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        if (p == NULL) {
          return;
        }
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        // From here on, buf holds the canonicalized JAVA_HOME.
        rp = os::Posix::realpath(java_home_var, buf, buflen);
        if (rp == NULL) {
          return;
        }

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer room");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib");
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib");
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
          if (rp == NULL) {
            return;
          }
        }
      }
    }
  }

  // Cache the result for subsequent calls.
  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}
1598 
1599 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1600   // no prefix required, not even "_"
1601 }
1602 
1603 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1604   // no suffix required
1605 }
1606 
1607 ////////////////////////////////////////////////////////////////////////////////
1608 // sun.misc.Signal support
1609 
1610 static volatile jint sigint_count = 0;
1611 
1612 static void
1613 UserHandler(int sig, void *siginfo, void *context) {
1614   // 4511530 - sem_post is serialized and handled by the manager thread. When
1615   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1616   // don't want to flood the manager thread with sem_post requests.
1617   if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1618     return;
1619 
1620   // Ctrl-C is pressed during error reporting, likely because the error
1621   // handler fails to abort. Let VM die immediately.
1622   if (sig == SIGINT && VMError::is_error_reported()) {
1623     os::die();
1624   }
1625 
1626   os::signal_notify(sig);
1627 }
1628 
1629 void* os::user_handler() {
1630   return CAST_FROM_FN_PTR(void*, UserHandler);
1631 }
1632 
// Plain C signatures for the two flavors of signal handler; used for the
// void* <-> function pointer conversions in os::signal() below.
extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}
1637 
// Install 'handler' for 'signal_number' via sigaction.
// Returns the previously installed handler, or (void*)-1 if registration
// failed. The handler runs with SA_RESTART|SA_SIGINFO semantics.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  // Start from "block everything while handling" ...
  sigfillset(&(sigAct.sa_mask));

  // Do not block out synchronous signals in the signal handler.
  // Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies. Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
  // On AIX, PASE there was a case where a SIGSEGV happened, followed
  // by a SIGILL, which was blocked due to the signal mask. The process
  // just hung forever. Better to crash from a secondary signal than to hang.
  sigdelset(&(sigAct.sa_mask), SIGSEGV);
  sigdelset(&(sigAct.sa_mask), SIGBUS);
  sigdelset(&(sigAct.sa_mask), SIGILL);
  sigdelset(&(sigAct.sa_mask), SIGFPE);
  sigdelset(&(sigAct.sa_mask), SIGTRAP);

  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;

  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
1669 
// Sends <signal_number> to the current process via raise(3).
void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}
1673 
1674 //
1675 // The following code is moved from os.cpp for making this
1676 // code platform specific, which it is by its very nature.
1677 //
1678 
1679 // Will be modified when max signal is changed to be dynamic
// Platform signal number used as the "exit" marker for the signal
// dispatcher; one past the valid signal range (NSIG).
int os::sigexitnum_pd() {
  return NSIG;
}
1683 
// A counter for each possible signal value. Incremented in signal_notify()
// and claimed (decremented via cmpxchg) in check_pending_signals().
static volatile jint pending_signals[NSIG+1] = { 0 };
1686 
1687 // Wrapper functions for: sem_init(), sem_post(), sem_wait()
1688 // On AIX, we use sem_init(), sem_post(), sem_wait()
1689 // On Pase, we need to use msem_lock() and msem_unlock(), because Posix Semaphores
1690 // do not seem to work at all on PASE (unimplemented, will cause SIGILL).
1691 // Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
1692 // on AIX, msem_..() calls are suspected of causing problems.
// POSIX semaphore used on AIX (see block comment above).
static sem_t sig_sem;
// Memory semaphore used on PASE; allocated in reserved memory by
// local_sem_init().
static msemaphore* p_sig_msem = 0;
1695 
// Initializes the signal semaphore: sem_init() on AIX, msem_init() on PASE
// (the msemaphore is placed in memory reserved via os::reserve_memory).
static void local_sem_init() {
  if (os::Aix::on_aix()) {
    int rc = ::sem_init(&sig_sem, 0, 0);
    guarantee(rc != -1, "sem_init failed");
  } else {
    // Memory semaphores must live in shared mem.
    guarantee0(p_sig_msem == NULL);
    p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
    guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
    guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
  }
}
1708 
// Posts the signal semaphore (sem_post on AIX, msem_unlock on PASE).
// Failures are only logged, and only once per process, since this may run
// in signal-handling context.
static void local_sem_post() {
  static bool warn_only_once = false;
  if (os::Aix::on_aix()) {
    int rc = ::sem_post(&sig_sem);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  } else {
    guarantee0(p_sig_msem != NULL);
    int rc = ::msem_unlock(p_sig_msem, 0);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  }
}
1726 
// Waits on the signal semaphore (sem_wait on AIX, msem_lock on PASE).
// Failures are only logged, and only once per process.
static void local_sem_wait() {
  static bool warn_only_once = false;
  if (os::Aix::on_aix()) {
    int rc = ::sem_wait(&sig_sem);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  } else {
    guarantee0(p_sig_msem != NULL); // must init before use
    int rc = ::msem_lock(p_sig_msem, 0);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  }
}
1744 
// Platform-dependent part of signal support initialization: clears the
// pending-signal counters and creates the signal semaphore.
void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  local_sem_init();
}
1752 
// Marks <sig> as pending and wakes up the signal dispatcher thread.
// Async-signal-safe apart from the semaphore post (see local_sem_post()).
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  local_sem_post();
}
1757 
// Scans pending_signals[] and returns the number of a pending signal after
// atomically claiming (decrementing) it. If none is pending and <wait> is
// false, returns -1; otherwise blocks on the signal semaphore until one
// arrives, honoring the external-suspend protocol while blocked.
static int check_pending_signals(bool wait) {
  // Reset the Ctrl-C dedup counter so the next SIGINT posts again.
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // cmpxchg claims one occurrence of signal i; retry on contention.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      local_sem_wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //

        // Re-post so the wakeup is not lost while we self-suspend.
        local_sem_post();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
1797 
// Non-blocking poll for a pending signal; returns -1 if none.
int os::signal_lookup() {
  return check_pending_signals(false);
}
1801 
// Blocks until a signal is pending and returns its number.
int os::signal_wait() {
  return check_pending_signals(true);
}
1805 
1806 ////////////////////////////////////////////////////////////////////////////////
1807 // Virtual Memory
1808 
1809 // We need to keep small simple bookkeeping for os::reserve_memory and friends.
1810 
1811 #define VMEM_MAPPED  1
1812 #define VMEM_SHMATED 2
1813 
// Bookkeeping record for one memory area reserved by os::reserve_memory and
// friends. Kept in the singly-linked list `vmem.first`.
struct vmembk_t {
  int type;         // 1 - mmap, 2 - shmat
  char* addr;
  size_t size;      // Real size, may be larger than usersize.
  size_t pagesize;  // page size of area
  vmembk_t* next;

  // Returns true if p lies inside [addr, addr + size).
  bool contains_addr(char* p) const {
    return p >= addr && p < (addr + size);
  }

  // Returns true if [p, p + s) lies fully inside this block.
  bool contains_range(char* p, size_t s) const {
    return contains_addr(p) && contains_addr(p + s - 1);
  }

  void print_on(outputStream* os) const {
    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
      " bytes, %d %s pages), %s",
      addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
      (type == VMEM_SHMATED ? "shmat" : "mmap")
    );
  }

  // Check that range is a sub range of memory block (or equal to memory block);
  // also check that range is fully page aligned to the page size of the block.
  // Guarantees (fatal error) on violation.
  void assert_is_valid_subrange(char* p, size_t s) const {
    if (!contains_range(p, s)) {
      trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
              "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
              p, p + s, addr, addr + size);
      guarantee0(false);
    }
    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
      trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
              " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
      guarantee0(false);
    }
  }
};
1853 
// Global list of reserved memory areas, protected by a critical section.
static struct {
  vmembk_t* first;
  MiscUtils::CritSect cs;
} vmem;
1858 
1859 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1860   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1861   assert0(p);
1862   if (p) {
1863     MiscUtils::AutoCritSect lck(&vmem.cs);
1864     p->addr = addr; p->size = size;
1865     p->pagesize = pagesize;
1866     p->type = type;
1867     p->next = vmem.first;
1868     vmem.first = p;
1869   }
1870 }
1871 
1872 static vmembk_t* vmembk_find(char* addr) {
1873   MiscUtils::AutoCritSect lck(&vmem.cs);
1874   for (vmembk_t* p = vmem.first; p; p = p->next) {
1875     if (p->addr <= addr && (p->addr + p->size) > addr) {
1876       return p;
1877     }
1878   }
1879   return NULL;
1880 }
1881 
1882 static void vmembk_remove(vmembk_t* p0) {
1883   MiscUtils::AutoCritSect lck(&vmem.cs);
1884   assert0(p0);
1885   assert0(vmem.first); // List should not be empty.
1886   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1887     if (*pp == p0) {
1888       *pp = p0->next;
1889       ::free(p0);
1890       return;
1891     }
1892   }
1893   assert0(false); // Not found?
1894 }
1895 
1896 static void vmembk_print_on(outputStream* os) {
1897   MiscUtils::AutoCritSect lck(&vmem.cs);
1898   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1899     vmi->print_on(os);
1900     os->cr();
1901   }
1902 }
1903 
1904 // Reserve and attach a section of System V memory.
1905 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1906 // address. Failing that, it will attach the memory anywhere.
1907 // If <requested_addr> is NULL, function will attach the memory anywhere.
1908 //
1909 // <alignment_hint> is being ignored by this function. It is very probable however that the
1910 // alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
// Should this not be enough, we can put more work into it.
static char* reserve_shmated_memory (
  size_t bytes,
  char* requested_addr,
  size_t alignment_hint) {

  trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
    PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
    bytes, requested_addr, alignment_hint);

  // Either give me wish address or wish alignment but not both.
  assert0(!(requested_addr != NULL && alignment_hint != 0));

  // We must prevent anyone from attaching too close to the
  // BRK because that may cause malloc OOM.
  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
      "Will attach anywhere.", requested_addr);
    // Act like the OS refused to attach there.
    requested_addr = NULL;
  }

  // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
  // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
  if (os::Aix::on_pase_V5R4_or_older()) {
    ShouldNotReachHere();
  }

  // Align size of shm up to 64K to avoid errors if we later try to change the page size.
  const size_t size = align_up(bytes, 64*K);

  // Reserve the shared segment.
  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
  if (shmid == -1) {
    trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
    return NULL;
  }

  // Important note:
  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
  // We must right after attaching it remove it from the system. System V shm segments are global and
  // survive the process.
  // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).

  // Request 64K pages for the segment before attaching it.
  struct shmid_ds shmbuf;
  memset(&shmbuf, 0, sizeof(shmbuf));
  shmbuf.shm_pagesize = 64*K;
  if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
    trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
               size / (64*K), errno);
    // I want to know if this ever happens.
    assert(false, "failed to set page size for shmat");
  }

  // Now attach the shared segment.
  // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
  // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
  // were not a segment boundary.
  char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
  const int errno_shmat = errno;

  // (A) Right after shmat and before handing shmat errors delete the shm segment.
  // (Marking it removed means it goes away once the last attach is gone.)
  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
    trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
    assert(false, "failed to remove shared memory segment!");
  }

  // Handle shmat error. If we failed to attach, just return.
  if (addr == (char*)-1) {
    trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
    return NULL;
  }

  // Just for info: query the real page size. In case setting the page size did not
  // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
  const size_t real_pagesize = os::Aix::query_pagesize(addr);
  if (real_pagesize != shmbuf.shm_pagesize) {
    trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
  }

  if (addr) {
    trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
      addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
  } else {
    if (requested_addr != NULL) {
      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
    } else {
      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
    }
  }

  // book-keeping
  vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
  assert0(is_aligned_to(addr, os::vm_page_size()));

  return addr;
}
2008 
2009 static bool release_shmated_memory(char* addr, size_t size) {
2010 
2011   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2012     addr, addr + size - 1);
2013 
2014   bool rc = false;
2015 
2016   // TODO: is there a way to verify shm size without doing bookkeeping?
2017   if (::shmdt(addr) != 0) {
2018     trcVerbose("error (%d).", errno);
2019   } else {
2020     trcVerbose("ok.");
2021     rc = true;
2022   }
2023   return rc;
2024 }
2025 
2026 static bool uncommit_shmated_memory(char* addr, size_t size) {
2027   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2028     addr, addr + size - 1);
2029 
2030   const bool rc = my_disclaim64(addr, size);
2031 
2032   if (!rc) {
2033     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2034     return false;
2035   }
2036   return true;
2037 }
2038 
2039 ////////////////////////////////  mmap-based routines /////////////////////////////////
2040 
2041 // Reserve memory via mmap.
2042 // If <requested_addr> is given, an attempt is made to attach at the given address.
2043 // Failing that, memory is allocated at any address.
2044 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2045 // allocate at an address aligned with the given alignment. Failing that, memory
2046 // is aligned anywhere.
2047 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2048   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2049     "alignment_hint " UINTX_FORMAT "...",
2050     bytes, requested_addr, alignment_hint);
2051 
2052   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2053   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
2054     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2055     return NULL;
2056   }
2057 
2058   // We must prevent anyone from attaching too close to the
2059   // BRK because that may cause malloc OOM.
2060   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2061     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2062       "Will attach anywhere.", requested_addr);
2063     // Act like the OS refused to attach there.
2064     requested_addr = NULL;
2065   }
2066 
2067   // Specify one or the other but not both.
2068   assert0(!(requested_addr != NULL && alignment_hint > 0));
2069 
2070   // In 64K mode, we claim the global page size (os::vm_page_size())
2071   // is 64K. This is one of the few points where that illusion may
2072   // break, because mmap() will always return memory aligned to 4K. So
2073   // we must ensure we only ever return memory aligned to 64k.
2074   if (alignment_hint) {
2075     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2076   } else {
2077     alignment_hint = os::vm_page_size();
2078   }
2079 
2080   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2081   const size_t size = align_up(bytes, os::vm_page_size());
2082 
2083   // alignment: Allocate memory large enough to include an aligned range of the right size and
2084   // cut off the leading and trailing waste pages.
2085   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2086   const size_t extra_size = size + alignment_hint;
2087 
2088   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2089   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2090   int flags = MAP_ANONYMOUS | MAP_SHARED;
2091 
2092   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2093   // it means if wishaddress is given but MAP_FIXED is not set.
2094   //
2095   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2096   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2097   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2098   // get clobbered.
2099   if (requested_addr != NULL) {
2100     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2101       flags |= MAP_FIXED;
2102     }
2103   }
2104 
2105   char* addr = (char*)::mmap(requested_addr, extra_size,
2106       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2107 
2108   if (addr == MAP_FAILED) {
2109     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2110     return NULL;
2111   }
2112 
2113   // Handle alignment.
2114   char* const addr_aligned = align_up(addr, alignment_hint);
2115   const size_t waste_pre = addr_aligned - addr;
2116   char* const addr_aligned_end = addr_aligned + size;
2117   const size_t waste_post = extra_size - waste_pre - size;
2118   if (waste_pre > 0) {
2119     ::munmap(addr, waste_pre);
2120   }
2121   if (waste_post > 0) {
2122     ::munmap(addr_aligned_end, waste_post);
2123   }
2124   addr = addr_aligned;
2125 
2126   if (addr) {
2127     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2128       addr, addr + bytes, bytes);
2129   } else {
2130     if (requested_addr != NULL) {
2131       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2132     } else {
2133       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2134     }
2135   }
2136 
2137   // bookkeeping
2138   vmembk_add(addr, size, 4*K, VMEM_MAPPED);
2139 
2140   // Test alignment, see above.
2141   assert0(is_aligned_to(addr, os::vm_page_size()));
2142 
2143   return addr;
2144 }
2145 
2146 static bool release_mmaped_memory(char* addr, size_t size) {
2147   assert0(is_aligned_to(addr, os::vm_page_size()));
2148   assert0(is_aligned_to(size, os::vm_page_size()));
2149 
2150   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2151     addr, addr + size - 1);
2152   bool rc = false;
2153 
2154   if (::munmap(addr, size) != 0) {
2155     trcVerbose("failed (%d)\n", errno);
2156     rc = false;
2157   } else {
2158     trcVerbose("ok.");
2159     rc = true;
2160   }
2161 
2162   return rc;
2163 }
2164 
2165 static bool uncommit_mmaped_memory(char* addr, size_t size) {
2166 
2167   assert0(is_aligned_to(addr, os::vm_page_size()));
2168   assert0(is_aligned_to(size, os::vm_page_size()));
2169 
2170   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2171     addr, addr + size - 1);
2172   bool rc = false;
2173 
2174   // Uncommit mmap memory with msync MS_INVALIDATE.
2175   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2176     trcVerbose("failed (%d)\n", errno);
2177     rc = false;
2178   } else {
2179     trcVerbose("ok.");
2180     rc = true;
2181   }
2182 
2183   return rc;
2184 }
2185 
// Returns the (possibly virtualized, see 64K mode) global page size.
int os::vm_page_size() {
  // Seems redundant as all get out.
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}
2191 
2192 // Aix allocates memory by pages.
2193 int os::vm_allocation_granularity() {
2194   assert(os::Aix::page_size() != -1, "must call os::init");
2195   return os::Aix::page_size();
2196 }
2197 
#ifdef PRODUCT
// Emits a warning with errno details when commit_memory fails; only built
// in product mode, where it is used via PRODUCT_ONLY in
// pd_commit_memory_or_exit().
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
          os::errno_name(err), err);
}
#endif
2206 
// Commits [addr, addr + size) or terminates the VM with an out-of-memory
// error carrying <mesg> if the commit fails.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    // Add extra info in product mode for vm_exit_out_of_memory():
    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}
2216 
// Commits [addr, addr + size). The range must be page aligned and lie
// within a previously reserved area. Always returns true.
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {

  assert(is_aligned_to(addr, os::vm_page_size()),
    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    p2i(addr), os::vm_page_size());
  assert(is_aligned_to(size, os::vm_page_size()),
    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    size, os::vm_page_size());

  // The range must belong to a known reserved area.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);
  vmi->assert_is_valid_subrange(addr, size);

  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);

  if (UseExplicitCommit) {
    // AIX commits memory on touch. So, touch all pages to be committed.
    for (char* p = addr; p < (addr + size); p += 4*K) {
      *p = '\0';
    }
  }

  return true;
}
2241 
// Variant with alignment hint; the hint is ignored on AIX.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
  return pd_commit_memory(addr, size, exec);
}
2245 
// Variant with alignment hint; the hint is ignored on AIX.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // Alignment_hint is ignored on this OS.
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
2252 
// Uncommits [addr, addr + size); dispatches to the shmat or mmap flavor
// depending on how the enclosing area was reserved.
bool os::pd_uncommit_memory(char* addr, size_t size) {
  assert(is_aligned_to(addr, os::vm_page_size()),
    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    p2i(addr), os::vm_page_size());
  assert(is_aligned_to(size, os::vm_page_size()),
    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    size, os::vm_page_size());

  // Dynamically do different things for mmap/shmat.
  const vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);
  vmi->assert_is_valid_subrange(addr, size);

  if (vmi->type == VMEM_SHMATED) {
    return uncommit_shmated_memory(addr, size);
  } else {
    return uncommit_mmaped_memory(addr, size);
  }
}
2272 
// Not used on AIX (stack pages need no explicit commit); asserts if called.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
2278 
// Not used on AIX (stack pages need no explicit commit); asserts if called.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
2284 
// No-op on AIX.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2287 
// No-op on AIX.
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2290 
// NUMA is not supported on AIX in this port; no-op.
void os::numa_make_global(char *addr, size_t bytes) {
}
2293 
// NUMA is not supported on AIX in this port; no-op.
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}
2296 
// NUMA topology never changes as far as this port is concerned.
bool os::numa_topology_changed() {
  return false;
}
2300 
// This port reports a single locality group.
size_t os::numa_get_groups_num() {
  return 1;
}
2304 
// Single locality group (see numa_get_groups_num), so id is always 0.
int os::numa_get_group_id() {
  return 0;
}
2308 
2309 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2310   if (size > 0) {
2311     ids[0] = 0;
2312     return 1;
2313   }
2314   return 0;
2315 }
2316 
// Page info query is not supported on this platform.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}
2320 
// Page scanning is not supported; reports the whole range as scanned.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
2324 
2325 // Reserves and attaches a shared memory segment.
2326 // Will assert if a wish address is given and could not be obtained.
2327 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2328 
2329   // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2330   // thereby clobbering old mappings at that place. That is probably
2331   // not intended, never used and almost certainly an error were it
2332   // ever be used this way (to try attaching at a specified address
2333   // without clobbering old mappings an alternate API exists,
2334   // os::attempt_reserve_memory_at()).
2335   // Instead of mimicking the dangerous coding of the other platforms, here I
2336   // just ignore the request address (release) or assert(debug).
2337   assert0(requested_addr == NULL);
2338 
2339   // Always round to os::vm_page_size(), which may be larger than 4K.
2340   bytes = align_up(bytes, os::vm_page_size());
2341   const size_t alignment_hint0 =
2342     alignment_hint ? align_up(alignment_hint, os::vm_page_size()) : 0;
2343 
2344   // In 4K mode always use mmap.
2345   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2346   if (os::vm_page_size() == 4*K) {
2347     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2348   } else {
2349     if (bytes >= Use64KPagesThreshold) {
2350       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2351     } else {
2352       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2353     }
2354   }
2355 }
2356 
// Releases (fully or partially) a previously reserved area and updates the
// bookkeeping list if the whole area was released.
bool os::pd_release_memory(char* addr, size_t size) {

  // Dynamically do different things for mmap/shmat.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  size = align_up(size, os::vm_page_size());
  addr = align_up(addr, os::vm_page_size());

  bool rc = false;
  bool remove_bookkeeping = false;
  if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use memory anymore (but still use
    //   page table space).
    vmi->assert_is_valid_subrange(addr, size);
    if (addr == vmi->addr && size == vmi->size) {
      rc = release_shmated_memory(addr, size);
      remove_bookkeeping = true;
    } else {
      rc = uncommit_shmated_memory(addr, size);
    }
  } else {
    // User may unmap partial regions but region has to be fully contained.
#ifdef ASSERT
    vmi->assert_is_valid_subrange(addr, size);
#endif
    rc = release_mmaped_memory(addr, size);
    remove_bookkeeping = true;
  }

  // update bookkeeping
  if (rc && remove_bookkeeping) {
    vmembk_remove(vmi);
  }

  return rc;
}
2398 
// Calls mprotect() and - because AIX may report success without actually
// changing protection - verifies the result by probing the memory with
// SafeFetch32. Returns true if the protection is believed to be in effect.
static bool checked_mprotect(char* addr, size_t size, int prot) {

  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
  // not tell me if protection failed when trying to protect an un-protectable range.
  //
  // This means if the memory was allocated using shmget/shmat, protection wont work
  // but mprotect will still return 0:
  //
  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm

  bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;

  if (!rc) {
    const char* const s_errno = os::errno_name(errno);
    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
    return false;
  }

  // mprotect success check
  //
  // Mprotect said it changed the protection but can I believe it?
  //
  // To be sure I need to check the protection afterwards. Try to
  // read from protected memory and check whether that causes a segfault.
  //
  if (!os::Aix::xpg_sus_mode()) {

    if (CanUseSafeFetch32()) {

      // If both probes return their fault value, the read faulted, i.e. the
      // page is read-protected.
      const bool read_protected =
        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;

      if (prot & PROT_READ) {
        rc = !read_protected;
      } else {
        rc = read_protected;
      }

      if (!rc) {
        if (os::Aix::on_pase()) {
          // There is an issue on older PASE systems where mprotect() will return success but the
          // memory will not be protected.
          // This has nothing to do with the problem of using mproect() on SPEC1170 incompatible
          // machines; we only see it rarely, when using mprotect() to protect the guard page of
          // a stack. It is an OS error.
          //
          // A valid strategy is just to try again. This usually works. :-/

          ::usleep(1000);
          if (::mprotect(addr, size, prot) == 0) {
            // NOTE(review): read_protected_2 is computed but never consulted;
            // rc is set to true unconditionally after the retry. Confirm
            // whether the re-probe was meant to feed into rc.
            const bool read_protected_2 =
              (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
              SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
            rc = true;
          }
        }
      }
    }
  }

  assert(rc == true, "mprotect failed.");

  return rc;
}
2464 
2465 // Set protections specified
2466 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2467   unsigned int p = 0;
2468   switch (prot) {
2469   case MEM_PROT_NONE: p = PROT_NONE; break;
2470   case MEM_PROT_READ: p = PROT_READ; break;
2471   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2472   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2473   default:
2474     ShouldNotReachHere();
2475   }
2476   // is_committed is unused.
2477   return checked_mprotect(addr, size, p);
2478 }
2479 
// Makes [addr, addr + size) inaccessible (guard pages).
bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}
2483 
// Restores full access to a previously guarded range.
bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}
2487 
// Large page support

// Size of a large page in bytes. Initialized to 0 and - as far as visible
// in this file - never updated (large_page_init() is a no-op on AIX).
static size_t _large_page_size = 0;
2491 
2492 // Enable large page support if OS allows that.
2493 void os::large_page_init() {
2494   return; // Nothing to do. See query_multipage_support and friends.
2495 }
2496 
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
  // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
  // so this is not needed. Reaching this function is therefore a caller bug.
  assert(false, "should not be called on AIX");
  return NULL;
}
2504 
bool os::release_memory_special(char* base, size_t bytes) {
  // Not supported on AIX: reserve_memory_special() above is never expected to
  // be reached (it asserts), so there is nothing that could be released here.
  Unimplemented();
  return false;
}
2510 
size_t os::large_page_size() {
  // _large_page_size is initialized to 0 in this file and - as far as
  // visible here - never set, since explicit large pages are unused on AIX.
  return _large_page_size;
}
2514 
bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2519 
bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2524 
2525 // Reserve memory at an arbitrary address, only if that area is
2526 // available (and not reserved for something else).
2527 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2528   char* addr = NULL;
2529 
2530   // Always round to os::vm_page_size(), which may be larger than 4K.
2531   bytes = align_up(bytes, os::vm_page_size());
2532 
2533   // In 4K mode always use mmap.
2534   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2535   if (os::vm_page_size() == 4*K) {
2536     return reserve_mmaped_memory(bytes, requested_addr, 0);
2537   } else {
2538     if (bytes >= Use64KPagesThreshold) {
2539       return reserve_shmated_memory(bytes, requested_addr, 0);
2540     } else {
2541       return reserve_mmaped_memory(bytes, requested_addr, 0);
2542     }
2543   }
2544 
2545   return addr;
2546 }
2547 
2548 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2549   return ::read(fd, buf, nBytes);
2550 }
2551 
2552 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2553   return ::pread(fd, buf, nBytes, offset);
2554 }
2555 
2556 void os::naked_short_sleep(jlong ms) {
2557   struct timespec req;
2558 
2559   assert(ms < 1000, "Un-interruptable sleep, short time use only");
2560   req.tv_sec = 0;
2561   if (ms > 0) {
2562     req.tv_nsec = (ms % 1000) * 1000000;
2563   }
2564   else {
2565     req.tv_nsec = 1;
2566   }
2567 
2568   nanosleep(&req, NULL);
2569 
2570   return;
2571 }
2572 
2573 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2574 void os::infinite_sleep() {
2575   while (true) {    // sleep forever ...
2576     ::sleep(100);   // ... 100 seconds at a time
2577   }
2578 }
2579 
// Used to convert frequent JVM_Yield() to nops
bool os::dont_yield() {
  // Controlled by the DontYieldALot flag.
  return DontYieldALot;
}
2584 
void os::naked_yield() {
  // Plain sched_yield() with no VM state bookkeeping.
  sched_yield();
}
2588 
2589 ////////////////////////////////////////////////////////////////////////////////
2590 // thread priority support
2591 
2592 // From AIX manpage to pthread_setschedparam
2593 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2594 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2595 //
2596 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2597 // range from 40 to 80, where 40 is the least favored priority and 80
2598 // is the most favored."
2599 //
2600 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2601 // scheduling there; however, this still leaves iSeries.)
2602 //
2603 // We use the same values for AIX and PASE.
// Mapping of Java priorities (0 .. CriticalPriority) to AIX SCHED_OTHER
// priorities, within the documented 40..80 range (see comment above).
int os::java_to_os_priority[CriticalPriority + 1] = {
  54,             // 0 Entry should never be used

  55,             // 1 MinPriority
  55,             // 2
  56,             // 3

  56,             // 4
  57,             // 5 NormPriority
  57,             // 6

  58,             // 7
  58,             // 8
  59,             // 9 NearMaxPriority

  60,             // 10 MaxPriority

  60              // 11 CriticalPriority
};
2623 
2624 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2625   if (!UseThreadPriorities) return OS_OK;
2626   pthread_t thr = thread->osthread()->pthread_id();
2627   int policy = SCHED_OTHER;
2628   struct sched_param param;
2629   param.sched_priority = newpri;
2630   int ret = pthread_setschedparam(thr, policy, &param);
2631 
2632   if (ret != 0) {
2633     trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2634         (int)thr, newpri, ret, os::errno_name(ret));
2635   }
2636   return (ret == 0) ? OS_OK : OS_ERR;
2637 }
2638 
2639 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2640   if (!UseThreadPriorities) {
2641     *priority_ptr = java_to_os_priority[NormPriority];
2642     return OS_OK;
2643   }
2644   pthread_t thr = thread->osthread()->pthread_id();
2645   int policy = SCHED_OTHER;
2646   struct sched_param param;
2647   int ret = pthread_getschedparam(thr, &policy, &param);
2648   *priority_ptr = param.sched_priority;
2649 
2650   return (ret == 0) ? OS_OK : OS_ERR;
2651 }
2652 
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
// No-op on AIX.
void os::hint_no_preempt() {}
2656 
2657 ////////////////////////////////////////////////////////////////////////////////
2658 // suspend/resume support
2659 
2660 //  The low-level signal-based suspend/resume support is a remnant from the
2661 //  old VM-suspension that used to be for java-suspension, safepoints etc,
2662 //  within hotspot. Needed for fetch_frame_from_ucontext(), which is used by:
2663 //    - Forte Analyzer: AsyncGetCallTrace()
2664 //    - StackBanging: get_frame_at_stack_banging_point()
2665 //
2666 //  The remaining code is greatly simplified from the more general suspension
2667 //  code that used to be used.
2668 //
2669 //  The protocol is quite simple:
2670 //  - suspend:
2671 //      - sends a signal to the target thread
2672 //      - polls the suspend state of the osthread using a yield loop
2673 //      - target thread signal handler (SR_handler) sets suspend state
2674 //        and blocks in sigsuspend until continued
2675 //  - resume:
2676 //      - sets target osthread state to continue
2677 //      - sends signal to end the sigsuspend loop in the SR_handler
2678 //
2679 //  Note that the SR_lock plays no role in this suspend/resume protocol,
2680 //  but is checked for NULL in SR_handler as a thread termination indicator.
2681 //
2682 
2683 static void resume_clear_context(OSThread *osthread) {
2684   osthread->set_ucontext(NULL);
2685   osthread->set_siginfo(NULL);
2686 }
2687 
2688 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2689   osthread->set_ucontext(context);
2690   osthread->set_siginfo(siginfo);
2691 }
2692 
2693 //
2694 // Handler function invoked when a thread's execution is suspended or
2695 // resumed. We have to be careful that only async-safe functions are
2696 // called here (Note: most pthread functions are not async safe and
2697 // should be avoided.)
2698 //
2699 // Note: sigwait() is a more natural fit than sigsuspend() from an
2700 // interface point of view, but sigwait() prevents the signal hander
2701 // from being run. libpthread would get very confused by not having
2702 // its signal handlers run and prevents sigwait()'s use with the
2703 // mutex granting granting signal.
2704 //
2705 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
2706 //
2707 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2708   // Save and restore errno to avoid confusing native code with EINTR
2709   // after sigsuspend.
2710   int old_errno = errno;
2711 
2712   Thread* thread = Thread::current_or_null_safe();
2713   assert(thread != NULL, "Missing current thread in SR_handler");
2714 
2715   // On some systems we have seen signal delivery get "stuck" until the signal
2716   // mask is changed as part of thread termination. Check that the current thread
2717   // has not already terminated (via SR_lock()) - else the following assertion
2718   // will fail because the thread is no longer a JavaThread as the ~JavaThread
2719   // destructor has completed.
2720 
2721   if (thread->SR_lock() == NULL) {
2722     return;
2723   }
2724 
2725   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2726 
2727   OSThread* osthread = thread->osthread();
2728 
2729   os::SuspendResume::State current = osthread->sr.state();
2730   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2731     suspend_save_context(osthread, siginfo, context);
2732 
2733     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2734     os::SuspendResume::State state = osthread->sr.suspended();
2735     if (state == os::SuspendResume::SR_SUSPENDED) {
2736       sigset_t suspend_set;  // signals for sigsuspend()
2737 
2738       // get current set of blocked signals and unblock resume signal
2739       pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2740       sigdelset(&suspend_set, SR_signum);
2741 
2742       // wait here until we are resumed
2743       while (1) {
2744         sigsuspend(&suspend_set);
2745 
2746         os::SuspendResume::State result = osthread->sr.running();
2747         if (result == os::SuspendResume::SR_RUNNING) {
2748           break;
2749         }
2750       }
2751 
2752     } else if (state == os::SuspendResume::SR_RUNNING) {
2753       // request was cancelled, continue
2754     } else {
2755       ShouldNotReachHere();
2756     }
2757 
2758     resume_clear_context(osthread);
2759   } else if (current == os::SuspendResume::SR_RUNNING) {
2760     // request was cancelled, continue
2761   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2762     // ignore
2763   } else {
2764     ShouldNotReachHere();
2765   }
2766 
2767   errno = old_errno;
2768 }
2769 
// Choose the suspend/resume signal number (overridable via _JAVA_SR_SIGNUM),
// install SR_handler for it and record its flags.
// Returns 0 on success, -1 if sigaction() failed.
static int SR_initialize() {
  struct sigaction act;
  char *s;
  // Get signal number to use for suspend/resume
  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
    int sig = ::strtol(s, 0, 10);
    if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
        sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
      SR_signum = sig;
    } else {
      warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
              sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
    }
  }

  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");

  sigemptyset(&SR_sigset);
  sigaddset(&SR_sigset, SR_signum);

  // Set up signal handler for suspend/resume.
  // SA_SIGINFO is requested so that siginfo/context reach SR_handler; the
  // handler is installed through sa_handler with a cast because SR_handler
  // takes a ucontext_t* third argument.
  act.sa_flags = SA_RESTART|SA_SIGINFO;
  act.sa_handler = (void (*)(int)) SR_handler;

  // SR_signum is blocked by default.
  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);

  if (sigaction(SR_signum, &act, 0) == -1) {
    return -1;
  }

  // Save signal flag
  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
  return 0;
}
2806 
static int SR_finalize() {
  // Nothing to tear down; the SR signal handler stays installed.
  return 0;
}
2810 
2811 static int sr_notify(OSThread* osthread) {
2812   int status = pthread_kill(osthread->pthread_id(), SR_signum);
2813   assert_status(status == 0, status, "pthread_kill");
2814   return status;
2815 }
2816 
// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
// (both are counted in units of os::naked_yield() loop iterations,
// see do_suspend()/do_resume() below).
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;
2822 
// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  // Yield until the target's SR_handler marks it suspended; the inner loop
  // bounds each polling burst, the outer counter bounds the total wait.
  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::naked_yield();
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
2875 
// Wake a thread previously suspended via do_suspend(). Keeps signalling the
// target until its SR_handler observes the wakeup request and switches the
// state back to running.
static void do_resume(OSThread* osthread) {
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (!osthread->sr.is_running()) {
    if (sr_notify(osthread) == 0) {
      // Signal sent; yield for a bounded while waiting for the state change
      // before re-sending the signal in the outer loop.
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::naked_yield();
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
2899 
2900 ///////////////////////////////////////////////////////////////////////////////////
2901 // signal handling (except suspend/resume)
2902 
2903 // This routine may be used by user applications as a "hook" to catch signals.
2904 // The user-defined signal handler must pass unrecognized signals to this
2905 // routine, and if it returns true (non-zero), then the signal handler must
2906 // return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
2909 //
2910 // If this routine returns false, it is OK to call it again. This allows
2911 // the user-defined signal handler to perform checks either before or after
2912 // the VM performs its own checks. Naturally, the user code would be making
2913 // a serious error if it tried to handle an exception (such as a null check
2914 // or breakpoint) that the VM was generating for its own correct operation.
2915 //
2916 // This routine may recognize any of the following kinds of signals:
2917 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2918 // It should be consulted by handlers for any of those signals.
2919 //
2920 // The caller of this routine must pass in the three arguments supplied
2921 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
2922 // field of the structure passed to sigaction(). This routine assumes that
2923 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2924 //
2925 // Note that the VM will print warnings if it detects conflicting signal
2926 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2927 //
2928 extern "C" JNIEXPORT int
2929 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2930 
2931 // Set thread signal mask (for some reason on AIX sigthreadmask() seems
2932 // to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same).
bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  // Returns true on success. Note the differing error conventions:
  // pthread_sigmask returns the error number directly, while sigthreadmask
  // returns -1 and sets the global errno (so pthread_sigmask is the more
  // thread-safe choice for error handling). Success is 0 in both cases.
  const int rc = ::pthread_sigmask(how, set, oset);
  return rc == 0;
}
2942 
2943 // Function to unblock all signals which are, according
2944 // to POSIX, typical program error signals. If they happen while being blocked,
2945 // they typically will bring down the process immediately.
2946 bool unblock_program_error_signals() {
2947   sigset_t set;
2948   ::sigemptyset(&set);
2949   ::sigaddset(&set, SIGILL);
2950   ::sigaddset(&set, SIGBUS);
2951   ::sigaddset(&set, SIGFPE);
2952   ::sigaddset(&set, SIGSEGV);
2953   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2954 }
2955 
2956 // Renamed from 'signalHandler' to avoid collision with other shared libs.
2957 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2958   assert(info != NULL && uc != NULL, "it must be old kernel");
2959 
2960   // Never leave program error signals blocked;
2961   // on all our platforms they would bring down the process immediately when
2962   // getting raised while being blocked.
2963   unblock_program_error_signals();
2964 
2965   int orig_errno = errno;  // Preserve errno value over signal handler.
2966   JVM_handle_aix_signal(sig, info, uc, true);
2967   errno = orig_errno;
2968 }
2969 
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining
// Pre-VM sigactions saved by save_preinstalled_handler(), indexed by signal.
struct sigaction sigact[NSIG];
// Set of signals for which an entry in sigact[] is valid.
sigset_t sigs;
bool os::Aix::libjsig_is_loaded = false;
// Hook resolved from libjsig (when loaded) to query the application's handler.
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Aix::get_signal_action = NULL;
2980 
2981 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2982   struct sigaction *actp = NULL;
2983 
2984   if (libjsig_is_loaded) {
2985     // Retrieve the old signal handler from libjsig
2986     actp = (*get_signal_action)(sig);
2987   }
2988   if (actp == NULL) {
2989     // Retrieve the preinstalled signal handler from jvm
2990     actp = get_preinstalled_handler(sig);
2991   }
2992 
2993   return actp;
2994 }
2995 
// Invoke the chained (application-installed) signal action 'actp' for 'sig'.
// Returns false if the chained action is SIG_DFL (the VM should handle the
// signal itself), true otherwise (the signal counts as taken care of).
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    // Mimic SA_RESETHAND: after one delivery the action reverts to SIG_DFL.
    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}
3040 
3041 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3042   bool chained = false;
3043   // signal-chaining
3044   if (UseSignalChaining) {
3045     struct sigaction *actp = get_chained_signal_action(sig);
3046     if (actp != NULL) {
3047       chained = call_chained_handler(actp, sig, siginfo, context);
3048     }
3049   }
3050   return chained;
3051 }
3052 
3053 size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
3054   // Creating guard page is very expensive. Java thread has HotSpot
3055   // guard pages, only enable glibc guard page for non-Java threads.
3056   // (Remember: compiler thread is a Java thread, too!)
3057   //
3058   // Aix can have different page sizes for stack (4K) and heap (64K).
3059   // As Hotspot knows only one page size, we assume the stack has
3060   // the same page size as the heap. Returning page_size() here can
3061   // cause 16 guard pages which we want to avoid.  Thus we return 4K
3062   // which will be rounded to the real page size by the OS.
3063   return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : 4 * K);
3064 }
3065 
3066 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3067   if (sigismember(&sigs, sig)) {
3068     return &sigact[sig];
3069   }
3070   return NULL;
3071 }
3072 
// Remember the previously installed action for 'sig' and flag it in 'sigs'
// so get_preinstalled_handler() will return it for chaining.
void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigact[sig] = oldAct;
  sigaddset(&sigs, sig);
}
3078 
// for diagnostic
// sa_flags the VM used for each handler it installed, indexed by signal.
int sigflags[NSIG];
3081 
// Return the sa_flags the VM recorded when installing its handler for 'sig'
// (0 if none was recorded).
int os::Aix::get_our_sigflags(int sig) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  return sigflags[sig];
}
3086 
3087 void os::Aix::set_our_sigflags(int sig, int flags) {
3088   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3089   if (sig > 0 && sig < NSIG) {
3090     sigflags[sig] = flags;
3091   }
3092 }
3093 
// Install (set_installed == true) or reset to SIG_DFL (false) the VM's
// disposition for 'sig'. A pre-existing third-party handler is either left
// alone (AllowUserSignalHandlers or when resetting), saved for chaining
// (UseSignalChaining), or reported as fatal.
void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      fatal("Encountered unexpected pre-existing sigaction handler "
            "%#lx for signal %d.", (long)oldhand, sig);
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Re-read the previous handler after installation to detect a concurrent
  // installation by someone else between the query above and our sigaction().
  void* oldhand2 = oldAct.sa_sigaction
                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
3140 
// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
void os::Aix::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      // JVM_begin_signal_setting resolving means libjsig is preloaded;
      // resolve the rest of its interface as well.
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers.
      (*begin_signal_setting)();
    }

    ::sigemptyset(&sigs);
    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers.
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // Need to initialize check_signal_done.
      ::sigemptyset(&check_signal_done);
    }
  }
}
3197 
3198 static const char* get_signal_handler_name(address handler,
3199                                            char* buf, int buflen) {
3200   int offset;
3201   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3202   if (found) {
3203     // skip directory names
3204     const char *p1, *p2;
3205     p1 = buf;
3206     size_t len = strlen(os::file_separator());
3207     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3208     // The way os::dll_address_to_library_name is implemented on Aix
3209     // right now, it always returns -1 for the offset which is not
3210     // terribly informative.
3211     // Will fix that. For now, omit the offset.
3212     jio_snprintf(buf, buflen, "%s", p1);
3213   } else {
3214     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3215   }
3216   return buf;
3217 }
3218 
// Print to 'st' the currently installed handler for 'sig' together with its
// mask and flags, and warn when the flags of a VM-owned handler have been
// changed behind the VM's back.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  // Maybe the handler was reset by VMError? If so report the VMError
  // flags instead.
  address rh = VMError::get_resetted_sighandler(sig);
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  // Print textual representation of sa_flags.
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Check for flags, reset system-used one!
    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
    }
  }
  st->cr();
}
3265 
// Verify the handler installed for 'sig' unless it has already been checked
// (membership in check_signal_done marks signals not to re-check).
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Aix::check_signal_handler(sig)
3269 
3270 // This method is a periodic task to check for misbehaving JNI applications
3271 // under CheckJNI, we can add any periodic checks here
3272 
3273 void os::run_periodic_checks() {
3274 
3275   if (check_signals == false) return;
3276 
3277   // SEGV and BUS if overridden could potentially prevent
3278   // generation of hs*.log in the event of a crash, debugging
3279   // such a case can be very challenging, so we absolutely
3280   // check the following for a good measure:
3281   DO_SIGNAL_CHECK(SIGSEGV);
3282   DO_SIGNAL_CHECK(SIGILL);
3283   DO_SIGNAL_CHECK(SIGFPE);
3284   DO_SIGNAL_CHECK(SIGBUS);
3285   DO_SIGNAL_CHECK(SIGPIPE);
3286   DO_SIGNAL_CHECK(SIGXFSZ);
3287   if (UseSIGTRAP) {
3288     DO_SIGNAL_CHECK(SIGTRAP);
3289   }
3290 
3291   // ReduceSignalUsage allows the user to override these handlers
3292   // see comments at the very top and jvm_solaris.h
3293   if (!ReduceSignalUsage) {
3294     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3295     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3296     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3297     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3298   }
3299 
3300   DO_SIGNAL_CHECK(SR_signum);
3301 }
3302 
// Signature of libc sigaction(), resolved via dlsym in check_signal_handler()
// to bypass any interposing library (such as libjsig) when inspecting the
// currently installed handlers.
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

static os_sigaction_t os_sigaction = NULL;
3306 
// Compares the currently installed handler for 'sig' against the handler
// the VM expects, and prints a warning (once per signal) if a third party
// has replaced the handler or changed its sa_flags.
void os::Aix::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
    if (os_sigaction == NULL) return;
  }

  // Query-only call: new action is NULL, current action returned in 'act'.
  os_sigaction(sig, (struct sigaction*)NULL, &act);

  // The installed handler is either a plain sa_handler or, with SA_SIGINFO,
  // an extended sa_sigaction.
  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler);

  // Determine the handler the VM expects for this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      // Not a signal the VM installs a handler for; nothing to check.
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:");
    os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
    tty->cr();
    tty->print("  found:");
    os::Posix::print_sa_flags(tty, act.sa_flags);
    tty->cr();
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // If a mismatch was flagged above, dump all signal handlers for context.
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}
3378 
3379 // To install functions for atexit system call
extern "C" {
  // atexit() requires a function with C linkage; this thin wrapper forwards
  // to perfMemory_exit() (registered in os::init_2 when
  // PerfAllowAtExitRegistration is set).
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3385 
3386 // This is called _before_ the most of global arguments have been parsed.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (Shared memory boundary is supposed to be a 256M aligned.)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // Record process break at startup.
  g_brk_at_startup = (address) ::sbrk(0);
  assert(g_brk_at_startup != (address) -1, "sbrk failed");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc).
  os::Aix::scan_environment();

  // Probe multipage support.
  query_multipage_support();

  // Act like we only have one page size by eliminating corner cases which
  // we did not support very well anyway.
  // We have two input conditions:
  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
  //    setting.
  //    Data segment page size is important for us because it defines the thread stack page
  //    size, which is needed for guard page handling, stack banging etc.
  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
  //    and should be allocated with 64k pages.
  //
  // So, we do the following:
  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
  // 64k          no              --- AIX 5.2 ? ---
  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)

  // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if stack page size is 64k you cannot pretend its 4k).

  if (g_multipage_support.datapsize == 4*K) {
    // datapsize = 4K. Data segment, thread stacks are 4K paged.
    if (g_multipage_support.can_use_64K_pages) {
      // .. but we are able to use 64K pages dynamically.
      // This would be typical for java launchers which are not linked
      // with datapsize=64K (like, any other launcher but our own).
      //
      // In this case it would be smart to allocate the java heap with 64K
      // to get the performance benefit, and to fake 64k pages for the
      // data segment (when dealing with thread stacks).
      //
      // However, leave a possibility to downgrade to 4K, using
      // -XX:-Use64KPages.
      if (Use64KPages) {
        trcVerbose("64K page mode (faked for data segment)");
        Aix::_page_size = 64*K;
      } else {
        trcVerbose("4K page mode (Use64KPages=off)");
        Aix::_page_size = 4*K;
      }
    } else {
      // .. and not able to allocate 64k pages dynamically. Here, just
      // fall back to 4K paged mode and use mmap for everything.
      trcVerbose("4K page mode");
      Aix::_page_size = 4*K;
      FLAG_SET_ERGO(bool, Use64KPages, false);
    }
  } else {
    // datapsize = 64k. Data segment, thread stacks are 64k paged.
    // This normally means that we can allocate 64k pages dynamically.
    // (There is one special case where this may be false: EXTSHM=on.
    // but we decided to not support that mode).
    assert0(g_multipage_support.can_use_64K_pages);
    Aix::_page_size = 64*K;
    trcVerbose("64K page mode");
    FLAG_SET_ERGO(bool, Use64KPages, true);
  }

  // For now UseLargePages is just ignored.
  FLAG_SET_ERGO(bool, UseLargePages, false);
  _page_sizes[0] = 0;

  // debug trace
  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));

  // Next, we need to initialize libo4 and libperfstat libraries.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  init_random(1234567);

  ThreadCritical::initialize();

  // Main_thread points to the aboriginal thread.
  Aix::_main_thread = pthread_self();

  initial_time_count = os::elapsed_counter();

  os::Posix::init();
}
3501 
3502 // This is called _after_ the global arguments have been parsed.
jint os::init_2(void) {

  os::Posix::init_2();

  if (os::Aix::on_pase()) {
    trcVerbose("Running on PASE.");
  } else {
    trcVerbose("Running on AIX (not PASE).");
  }

  trcVerbose("processor count: %d", os::_processor_count);
  trcVerbose("physical memory: %lu", Aix::_physical_memory);

  // Initially build up the loaded dll map.
  LoadedLibraries::reload();
  if (Verbose) {
    trcVerbose("Loaded Libraries: ");
    LoadedLibraries::print(tty);
  }

  const int page_size = Aix::page_size();
  const int map_size = page_size;

  address map_address = (address) MAP_FAILED;
  const int prot  = PROT_READ;
  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;

  // Use optimized addresses for the polling page,
  // e.g. map it to a special 32-bit address.
  if (OptimizePollingPageLocation) {
    // architecture-specific list of address wishes:
    address address_wishes[] = {
      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
      // PPC64: all address wishes are non-negative 32 bit values where
      // the lower 16 bits are all zero. we can load these addresses
      // with a single ppc_lis instruction.
      (address) 0x30000000, (address) 0x31000000,
      (address) 0x32000000, (address) 0x33000000,
      (address) 0x40000000, (address) 0x41000000,
      (address) 0x42000000, (address) 0x43000000,
      (address) 0x50000000, (address) 0x51000000,
      (address) 0x52000000, (address) 0x53000000,
      (address) 0x60000000, (address) 0x61000000,
      (address) 0x62000000, (address) 0x63000000
    };
    int address_wishes_length = sizeof(address_wishes)/sizeof(address);

    // iterate over the list of address wishes:
    for (int i=0; i<address_wishes_length; i++) {
      // Try to map with current address wish.
      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
      // fail if the address is already mapped.
      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
                                     map_size, prot,
                                     flags | MAP_FIXED,
                                     -1, 0);
      trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",
                   address_wishes[i], map_address + (ssize_t)page_size);

      if (map_address + (ssize_t)page_size == address_wishes[i]) {
        // Map succeeded and map_address is at wished address, exit loop.
        break;
      }

      if (map_address != (address) MAP_FAILED) {
        // Map succeeded, but polling_page is not at wished address, unmap and continue.
        ::munmap(map_address, map_size);
        map_address = (address) MAP_FAILED;
      }
      // Map failed, continue loop.
    }
  } // end OptimizePollingPageLocation

  // Fall back to an arbitrary address if no wish could be satisfied.
  if (map_address == (address) MAP_FAILED) {
    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
  }
  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
  os::set_polling_page(map_address);

  if (!UseMembar) {
    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    // NOTE(review): mmap reports failure with MAP_FAILED ((void*)-1), not
    // NULL, so this guarantee would not fire on a failed mapping — confirm
    // and consider checking against MAP_FAILED as done for the polling page.
    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page(mem_serialize_page);

    trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
        mem_serialize_page, mem_serialize_page + Aix::page_size(),
        Aix::page_size(), Aix::page_size());
  }

  // initialize suspend/resume support - must do this before signal_sets_init()
  if (SR_initialize() != 0) {
    perror("SR_initialize failed");
    return JNI_ERR;
  }

  Aix::signal_sets_init();
  Aix::install_signal_handlers();

  // Check and sets minimum stack sizes against command line options
  if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
    return JNI_ERR;
  }

  if (UseNUMA) {
    UseNUMA = false;
    warning("NUMA optimizations are not available on this OS.");
  }

  if (MaxFDLimit) {
    // Set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
      }
    }
  }

  if (PerfAllowAtExitRegistration) {
    // Only register atexit functions if PerfAllowAtExitRegistration is set.
    // At exit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // Note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

  return JNI_OK;
}
3643 
3644 // Mark the polling page as unreadable
3645 void os::make_polling_page_unreadable(void) {
3646   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3647     fatal("Could not disable polling page");
3648   }
3649 };
3650 
3651 // Mark the polling page as readable
3652 void os::make_polling_page_readable(void) {
3653   // Changed according to os_linux.cpp.
3654   if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3655     fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
3656   }
3657 };
3658 
3659 int os::active_processor_count() {
3660   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3661   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3662   return online_cpus;
3663 }
3664 
3665 void os::set_native_thread_name(const char *name) {
3666   // Not yet implemented.
3667   return;
3668 }
3669 
3670 bool os::distribute_processes(uint length, uint* distribution) {
3671   // Not yet implemented.
3672   return false;
3673 }
3674 
3675 bool os::bind_to_processor(uint processor_id) {
3676   // Not yet implemented.
3677   return false;
3678 }
3679 
void os::SuspendedThreadTask::internal_do_task() {
  // Suspend the target thread; on success run the task against the thread's
  // saved ucontext and resume it afterwards. If suspension fails, the task
  // is silently skipped.
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    do_resume(_thread->osthread());
  }
}
3687 
3688 ////////////////////////////////////////////////////////////////////////////////
3689 // debug support
3690 
3691 bool os::find(address addr, outputStream* st) {
3692 
3693   st->print(PTR_FORMAT ": ", addr);
3694 
3695   loaded_module_t lm;
3696   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3697       LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3698     st->print_cr("%s", lm.path);
3699     return true;
3700   }
3701 
3702   return false;
3703 }
3704 
3705 ////////////////////////////////////////////////////////////////////////////////
3706 // misc
3707 
3708 // This does not do anything on Aix. This is basically a hook for being
3709 // able to use structured exception handling (thread-local exception filters)
3710 // on, e.g., Win32.
3711 void
3712 os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
3713                          JavaCallArguments* args, Thread* thread) {
3714   f(value, method, args, thread);
3715 }
3716 
void os::print_statistics() {
  // No AIX-specific statistics to print; intentionally empty.
}
3719 
3720 bool os::message_box(const char* title, const char* message) {
3721   int i;
3722   fdStream err(defaultStream::error_fd());
3723   for (i = 0; i < 78; i++) err.print_raw("=");
3724   err.cr();
3725   err.print_raw_cr(title);
3726   for (i = 0; i < 78; i++) err.print_raw("-");
3727   err.cr();
3728   err.print_raw_cr(message);
3729   for (i = 0; i < 78; i++) err.print_raw("=");
3730   err.cr();
3731 
3732   char buf[16];
3733   // Prevent process from exiting upon "read error" without consuming all CPU
3734   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3735 
3736   return buf[0] == 'y' || buf[0] == 'Y';
3737 }
3738 
3739 int os::stat(const char *path, struct stat *sbuf) {
3740   char pathbuf[MAX_PATH];
3741   if (strlen(path) > MAX_PATH - 1) {
3742     errno = ENAMETOOLONG;
3743     return -1;
3744   }
3745   os::native_path(strcpy(pathbuf, path));
3746   return ::stat(pathbuf, sbuf);
3747 }
3748 
3749 // Is a (classpath) directory empty?
3750 bool os::dir_is_empty(const char* path) {
3751   DIR *dir = NULL;
3752   struct dirent *ptr;
3753 
3754   dir = opendir(path);
3755   if (dir == NULL) return true;
3756 
3757   /* Scan the directory */
3758   bool result = true;
3759   char buf[sizeof(struct dirent) + MAX_PATH];
3760   while (result && (ptr = ::readdir(dir)) != NULL) {
3761     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3762       result = false;
3763     }
3764   }
3765   closedir(dir);
3766   return result;
3767 }
3768 
3769 // This code originates from JDK's sysOpen and open64_w
3770 // from src/solaris/hpi/src/system_md.c
3771 
// open() wrapper: rejects over-long paths, refuses directories, and sets
// FD_CLOEXEC on the resulting descriptor (see rationale below).
int os::open(const char *path, int oflag, int mode) {

  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;

  // Use the 64-bit open so large files work in 32-bit processes.
  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  // If the open succeeded, the file might still be a directory.
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      // fstat64 failed; do not hand out a descriptor we cannot classify.
      ::close(fd);
      return -1;
    }
  }

  // All file descriptors that are opened in the JVM and not
  // specifically destined for a subprocess should have the
  // close-on-exec flag set. If we don't set it, then careless 3rd
  // party native code might fork and exec without closing all
  // appropriate file descriptors (e.g. as we do in closeDescriptors in
  // UNIXProcess.c), and this in turn might:
  //
  // - cause end-of-file to fail to be detected on some file
  //   descriptors, resulting in mysterious hangs, or
  //
  // - might cause an fopen in the subprocess to fail on a system
  //   suffering from bug 1085341.
  //
  // (Yes, the default setting of the close-on-exec flag is a Unix
  // design flaw.)
  //
  // See:
  // 1085341: 32-bit stdio routines should support file descriptors >255
  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
#ifdef FD_CLOEXEC
  {
    // Best effort: if fcntl fails we still return the usable descriptor.
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1)
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif

  return fd;
}
3831 
3832 // create binary file, rewriting existing file if required
3833 int os::create_binary_file(const char* path, bool rewrite_existing) {
3834   int oflags = O_WRONLY | O_CREAT;
3835   if (!rewrite_existing) {
3836     oflags |= O_EXCL;
3837   }
3838   return ::open64(path, oflags, S_IREAD | S_IWRITE);
3839 }
3840 
3841 // return current position of file pointer
3842 jlong os::current_file_offset(int fd) {
3843   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3844 }
3845 
3846 // move file pointer to the specified offset
3847 jlong os::seek_to_file_offset(int fd, jlong offset) {
3848   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3849 }
3850 
3851 // This code originates from JDK's sysAvailable
3852 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3853 
// Reports the number of bytes available for reading on 'fd' without
// blocking. Returns 1 on success (result in *bytes), 0 on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      // Non-seekable descriptor: ask the driver how much is buffered.
      int n;
      if (::ioctl(fd, FIONREAD, &n) >= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  // Regular file (or FIONREAD unsupported): compute end-of-file distance by
  // seeking to the end and back, restoring the original position.
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}
3879 
3880 // Map a block of memory.
3881 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3882                         char *addr, size_t bytes, bool read_only,
3883                         bool allow_exec) {
3884   int prot;
3885   int flags = MAP_PRIVATE;
3886 
3887   if (read_only) {
3888     prot = PROT_READ;
3889     flags = MAP_SHARED;
3890   } else {
3891     prot = PROT_READ | PROT_WRITE;
3892     flags = MAP_PRIVATE;
3893   }
3894 
3895   if (allow_exec) {
3896     prot |= PROT_EXEC;
3897   }
3898 
3899   if (addr != NULL) {
3900     flags |= MAP_FIXED;
3901   }
3902 
3903   // Allow anonymous mappings if 'fd' is -1.
3904   if (fd == -1) {
3905     flags |= MAP_ANONYMOUS;
3906   }
3907 
3908   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
3909                                      fd, file_offset);
3910   if (mapped_address == MAP_FAILED) {
3911     return NULL;
3912   }
3913   return mapped_address;
3914 }
3915 
3916 // Remap a block of memory.
3917 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
3918                           char *addr, size_t bytes, bool read_only,
3919                           bool allow_exec) {
3920   // same as map_memory() on this OS
3921   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
3922                         allow_exec);
3923 }
3924 
3925 // Unmap a block of memory.
3926 bool os::pd_unmap_memory(char* addr, size_t bytes) {
3927   return munmap(addr, bytes) == 0;
3928 }
3929 
3930 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
3931 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
3932 // of a thread.
3933 //
// current_thread_cpu_time() and thread_cpu_time(Thread*) return
3935 // the fast estimate available on the platform.
3936 
3937 jlong os::current_thread_cpu_time() {
3938   // return user + sys since the cost is the same
3939   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
3940   assert(n >= 0, "negative CPU time");
3941   return n;
3942 }
3943 
3944 jlong os::thread_cpu_time(Thread* thread) {
3945   // consistent with what current_thread_cpu_time() returns
3946   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
3947   assert(n >= 0, "negative CPU time");
3948   return n;
3949 }
3950 
3951 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
3952   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
3953   assert(n >= 0, "negative CPU time");
3954   return n;
3955 }
3956 
3957 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
3958   bool error = false;
3959 
3960   jlong sys_time = 0;
3961   jlong user_time = 0;
3962 
3963   // Reimplemented using getthrds64().
3964   //
3965   // Works like this:
3966   // For the thread in question, get the kernel thread id. Then get the
3967   // kernel thread statistics using that id.
3968   //
3969   // This only works of course when no pthread scheduling is used,
3970   // i.e. there is a 1:1 relationship to kernel threads.
3971   // On AIX, see AIXTHREAD_SCOPE variable.
3972 
3973   pthread_t pthtid = thread->osthread()->pthread_id();
3974 
3975   // retrieve kernel thread id for the pthread:
3976   tid64_t tid = 0;
3977   struct __pthrdsinfo pinfo;
3978   // I just love those otherworldly IBM APIs which force me to hand down
3979   // dummy buffers for stuff I dont care for...
3980   char dummy[1];
3981   int dummy_size = sizeof(dummy);
3982   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
3983                           dummy, &dummy_size) == 0) {
3984     tid = pinfo.__pi_tid;
3985   } else {
3986     tty->print_cr("pthread_getthrds_np failed.");
3987     error = true;
3988   }
3989 
3990   // retrieve kernel timing info for that kernel thread
3991   if (!error) {
3992     struct thrdentry64 thrdentry;
3993     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
3994       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
3995       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
3996     } else {
3997       tty->print_cr("pthread_getthrds_np failed.");
3998       error = true;
3999     }
4000   }
4001 
4002   if (p_sys_time) {
4003     *p_sys_time = sys_time;
4004   }
4005 
4006   if (p_user_time) {
4007     *p_user_time = user_time;
4008   }
4009 
4010   if (error) {
4011     return false;
4012   }
4013 
4014   return true;
4015 }
4016 
4017 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4018   jlong sys_time;
4019   jlong user_time;
4020 
4021   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4022     return -1;
4023   }
4024 
4025   return user_sys_cpu_time ? sys_time + user_time : user_time;
4026 }
4027 
4028 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4029   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4030   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4031   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4032   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4033 }
4034 
4035 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4036   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4037   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4038   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4039   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4040 }
4041 
4042 bool os::is_thread_cpu_time_supported() {
4043   return true;
4044 }
4045 
4046 // System loadavg support. Returns -1 if load average cannot be obtained.
4047 // For now just return the system wide load average (no processor sets).
4048 int os::loadavg(double values[], int nelem) {
4049 
4050   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4051   guarantee(values, "argument error");
4052 
4053   if (os::Aix::on_pase()) {
4054 
4055     // AS/400 PASE: use libo4 porting library
4056     double v[3] = { 0.0, 0.0, 0.0 };
4057 
4058     if (libo4::get_load_avg(v, v + 1, v + 2)) {
4059       for (int i = 0; i < nelem; i ++) {
4060         values[i] = v[i];
4061       }
4062       return nelem;
4063     } else {
4064       return -1;
4065     }
4066 
4067   } else {
4068 
4069     // AIX: use libperfstat
4070     libperfstat::cpuinfo_t ci;
4071     if (libperfstat::get_cpuinfo(&ci)) {
4072       for (int i = 0; i < nelem; i++) {
4073         values[i] = ci.loadavg[i];
4074       }
4075     } else {
4076       return -1;
4077     }
4078     return nelem;
4079   }
4080 }
4081 
4082 void os::pause() {
4083   char filename[MAX_PATH];
4084   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4085     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4086   } else {
4087     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4088   }
4089 
4090   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4091   if (fd != -1) {
4092     struct stat buf;
4093     ::close(fd);
4094     while (::stat(filename, &buf) == 0) {
4095       (void)::poll(NULL, 0, 100);
4096     }
4097   } else {
4098     trcVerbose("Could not open pause file '%s', continuing immediately.", filename);
4099   }
4100 }
4101 
4102 bool os::Aix::is_primordial_thread() {
4103   if (pthread_self() == (pthread_t)1) {
4104     return true;
4105   } else {
4106     return false;
4107   }
4108 }
4109 
4110 // OS recognitions (PASE/AIX, OS level) call this before calling any
4111 // one of Aix::on_pase(), Aix::os_version() static
4112 void os::Aix::initialize_os_info() {
4113 
4114   assert(_on_pase == -1 && _os_version == 0, "already called.");
4115 
4116   struct utsname uts;
4117   memset(&uts, 0, sizeof(uts));
4118   strcpy(uts.sysname, "?");
4119   if (::uname(&uts) == -1) {
4120     trcVerbose("uname failed (%d)", errno);
4121     guarantee(0, "Could not determine whether we run on AIX or PASE");
4122   } else {
4123     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4124                "node \"%s\" machine \"%s\"\n",
4125                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4126     const int major = atoi(uts.version);
4127     assert(major > 0, "invalid OS version");
4128     const int minor = atoi(uts.release);
4129     assert(minor > 0, "invalid OS release");
4130     _os_version = (major << 24) | (minor << 16);
4131     char ver_str[20] = {0};
4132     char *name_str = "unknown OS";
4133     if (strcmp(uts.sysname, "OS400") == 0) {
4134       // We run on AS/400 PASE. We do not support versions older than V5R4M0.
4135       _on_pase = 1;
4136       if (os_version_short() < 0x0504) {
4137         trcVerbose("OS/400 releases older than V5R4M0 not supported.");
4138         assert(false, "OS/400 release too old.");
4139       }
4140       name_str = "OS/400 (pase)";
4141       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
4142     } else if (strcmp(uts.sysname, "AIX") == 0) {
4143       // We run on AIX. We do not support versions older than AIX 5.3.
4144       _on_pase = 0;
4145       // Determine detailed AIX version: Version, Release, Modification, Fix Level.
4146       odmWrapper::determine_os_kernel_version(&_os_version);
4147       if (os_version_short() < 0x0503) {
4148         trcVerbose("AIX release older than AIX 5.3 not supported.");
4149         assert(false, "AIX release too old.");
4150       }
4151       name_str = "AIX";
4152       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
4153                    major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
4154     } else {
4155       assert(false, name_str);
4156     }
4157     trcVerbose("We run on %s %s", name_str, ver_str);
4158   }
4159 
4160   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4161 } // end: os::Aix::initialize_os_info()
4162 
4163 // Scan environment for important settings which might effect the VM.
4164 // Trace out settings. Warn about invalid settings and/or correct them.
4165 //
// Must run after os::Aix::initialize_os_info().
4167 void os::Aix::scan_environment() {
4168 
4169   char* p;
4170   int rc;
4171 
4172   // Warn explicity if EXTSHM=ON is used. That switch changes how
4173   // System V shared memory behaves. One effect is that page size of
4174   // shared memory cannot be change dynamically, effectivly preventing
4175   // large pages from working.
4176   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4177   // recommendation is (in OSS notes) to switch it off.
4178   p = ::getenv("EXTSHM");
4179   trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
4180   if (p && strcasecmp(p, "ON") == 0) {
4181     _extshm = 1;
4182     trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
4183     if (!AllowExtshm) {
4184       // We allow under certain conditions the user to continue. However, we want this
4185       // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
4186       // that the VM is not able to allocate 64k pages for the heap.
4187       // We do not want to run with reduced performance.
4188       vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
4189     }
4190   } else {
4191     _extshm = 0;
4192   }
4193 
4194   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4195   // Not tested, not supported.
4196   //
4197   // Note that it might be worth the trouble to test and to require it, if only to
4198   // get useful return codes for mprotect.
4199   //
4200   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4201   // exec() ? before loading the libjvm ? ....)
4202   p = ::getenv("XPG_SUS_ENV");
4203   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4204   if (p && strcmp(p, "ON") == 0) {
4205     _xpg_sus_mode = 1;
4206     trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
4207     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4208     // clobber address ranges. If we ever want to support that, we have to do some
4209     // testing first.
4210     guarantee(false, "XPG_SUS_ENV=ON not supported");
4211   } else {
4212     _xpg_sus_mode = 0;
4213   }
4214 
4215   if (os::Aix::on_pase()) {
4216     p = ::getenv("QIBM_MULTI_THREADED");
4217     trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
4218   }
4219 
4220   p = ::getenv("LDR_CNTRL");
4221   trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
4222   if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
4223     if (p && ::strstr(p, "TEXTPSIZE")) {
4224       trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
4225         "you may experience hangs or crashes on OS/400 V7R1.");
4226     }
4227   }
4228 
4229   p = ::getenv("AIXTHREAD_GUARDPAGES");
4230   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4231 
4232 } // end: os::Aix::scan_environment()
4233 
4234 // PASE: initialize the libo4 library (PASE porting library).
4235 void os::Aix::initialize_libo4() {
4236   guarantee(os::Aix::on_pase(), "OS/400 only.");
4237   if (!libo4::init()) {
4238     trcVerbose("libo4 initialization failed.");
4239     assert(false, "libo4 initialization failed");
4240   } else {
4241     trcVerbose("libo4 initialized.");
4242   }
4243 }
4244 
4245 // AIX: initialize the libperfstat library.
4246 void os::Aix::initialize_libperfstat() {
4247   assert(os::Aix::on_aix(), "AIX only");
4248   if (!libperfstat::init()) {
4249     trcVerbose("libperfstat initialization failed.");
4250     assert(false, "libperfstat initialization failed");
4251   } else {
4252     trcVerbose("libperfstat initialized.");
4253   }
4254 }
4255 
4256 /////////////////////////////////////////////////////////////////////////////
4257 // thread stack
4258 
4259 // Get the current stack base from the OS (actually, the pthread library).
4260 // Note: usually not page aligned.
4261 address os::current_stack_base() {
4262   AixMisc::stackbounds_t bounds;
4263   bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4264   guarantee(rc, "Unable to retrieve stack bounds.");
4265   return bounds.base;
4266 }
4267 
4268 // Get the current stack size from the OS (actually, the pthread library).
4269 // Returned size is such that (base - size) is always aligned to page size.
4270 size_t os::current_stack_size() {
4271   AixMisc::stackbounds_t bounds;
4272   bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4273   guarantee(rc, "Unable to retrieve stack bounds.");
4274   // Align the returned stack size such that the stack low address
4275   // is aligned to page size (Note: base is usually not and we do not care).
4276   // We need to do this because caller code will assume stack low address is
4277   // page aligned and will place guard pages without checking.
4278   address low = bounds.base - bounds.size;
4279   address low_aligned = (address)align_up(low, os::vm_page_size());
4280   size_t s = bounds.base - low_aligned;
4281   return s;
4282 }
4283 
4284 extern char** environ;
4285 
4286 // Run the specified command in a separate process. Return its exit value,
4287 // or -1 on failure (e.g. can't fork a new process).
4288 // Unlike system(), this function can be called from signal handler. It
4289 // doesn't block SIGINT et al.
4290 int os::fork_and_exec(char* cmd) {
4291   char * argv[4] = {"sh", "-c", cmd, NULL};
4292 
4293   pid_t pid = fork();
4294 
4295   if (pid < 0) {
4296     // fork failed
4297     return -1;
4298 
4299   } else if (pid == 0) {
4300     // child process
4301 
4302     // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4303     execve("/usr/bin/sh", argv, environ);
4304 
4305     // execve failed
4306     _exit(-1);
4307 
4308   } else {
4309     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4310     // care about the actual exit code, for now.
4311 
4312     int status;
4313 
4314     // Wait for the child process to exit. This returns immediately if
4315     // the child has already exited. */
4316     while (waitpid(pid, &status, 0) < 0) {
4317       switch (errno) {
4318         case ECHILD: return 0;
4319         case EINTR: break;
4320         default: return -1;
4321       }
4322     }
4323 
4324     if (WIFEXITED(status)) {
4325       // The child exited normally; get its exit code.
4326       return WEXITSTATUS(status);
4327     } else if (WIFSIGNALED(status)) {
4328       // The child exited because of a signal.
4329       // The best value to return is 0x80 + signal number,
4330       // because that is what all Unix shells do, and because
4331       // it allows callers to distinguish between process exit and
4332       // process death by signal.
4333       return 0x80 + WTERMSIG(status);
4334     } else {
4335       // Unknown exit code; pass it through.
4336       return status;
4337     }
4338   }
4339   return -1;
4340 }
4341 
4342 // is_headless_jre()
4343 //
4344 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
4345 // in order to report if we are running in a headless jre.
4346 //
4347 // Since JDK8 xawt/libmawt.so is moved into the same directory
4348 // as libawt.so, and renamed libawt_xawt.so
4349 bool os::is_headless_jre() {
4350   struct stat statbuf;
4351   char buf[MAXPATHLEN];
4352   char libmawtpath[MAXPATHLEN];
4353   const char *xawtstr = "/xawt/libmawt.so";
4354   const char *new_xawtstr = "/libawt_xawt.so";
4355 
4356   char *p;
4357 
4358   // Get path to libjvm.so
4359   os::jvm_path(buf, sizeof(buf));
4360 
4361   // Get rid of libjvm.so
4362   p = strrchr(buf, '/');
4363   if (p == NULL) return false;
4364   else *p = '\0';
4365 
4366   // Get rid of client or server
4367   p = strrchr(buf, '/');
4368   if (p == NULL) return false;
4369   else *p = '\0';
4370 
4371   // check xawt/libmawt.so
4372   strcpy(libmawtpath, buf);
4373   strcat(libmawtpath, xawtstr);
4374   if (::stat(libmawtpath, &statbuf) == 0) return false;
4375 
4376   // check libawt_xawt.so
4377   strcpy(libmawtpath, buf);
4378   strcat(libmawtpath, new_xawtstr);
4379   if (::stat(libmawtpath, &statbuf) == 0) return false;
4380 
4381   return true;
4382 }
4383 
4384 // Get the default path to the core file
4385 // Returns the length of the string
4386 int os::get_core_path(char* buffer, size_t bufferSize) {
4387   const char* p = get_current_directory(buffer, bufferSize);
4388 
4389   if (p == NULL) {
4390     assert(p != NULL, "failed to get current directory");
4391     return 0;
4392   }
4393 
4394   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4395                                                p, current_process_id());
4396 
4397   return strlen(buffer);
4398 }
4399 
#ifndef PRODUCT
// Hook for the shared large-page reservation self-test; intentionally empty
// on this platform.
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
#endif
4405 
4406 bool os::start_debugging(char *buf, int buflen) {
4407   int len = (int)strlen(buf);
4408   char *p = &buf[len];
4409 
4410   jio_snprintf(p, buflen -len,
4411                  "\n\n"
4412                  "Do you want to debug the problem?\n\n"
4413                  "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
4414                  "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
4415                  "Otherwise, press RETURN to abort...",
4416                  os::current_process_id(),
4417                  os::current_thread_id(), thread_self());
4418 
4419   bool yes = os::message_box("Unexpected Error", buf);
4420 
4421   if (yes) {
4422     // yes, user asked VM to launch debugger
4423     jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
4424 
4425     os::fork_and_exec(buf);
4426     yes = false;
4427   }
4428   return yes;
4429 }
4430 
4431 static inline time_t get_mtime(const char* filename) {
4432   struct stat st;
4433   int ret = os::stat(filename, &st);
4434   assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
4435   return st.st_mtime;
4436 }
4437 
4438 int os::compare_file_modified_times(const char* file1, const char* file2) {
4439   time_t t1 = get_mtime(file1);
4440   time_t t2 = get_mtime(file2);
4441   return t1 - t2;
4442 }