1 /*
   2  * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 // According to the AIX OS doc #pragma alloca must be used
  27 // with C++ compiler before referencing the function alloca()
  28 #pragma alloca
  29 
  30 // no precompiled headers
  31 #include "classfile/classLoader.hpp"
  32 #include "classfile/systemDictionary.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "jvm_aix.h"
  39 #include "logging/log.hpp"
  40 #include "libo4.hpp"
  41 #include "libperfstat_aix.hpp"
  42 #include "libodm_aix.hpp"
  43 #include "loadlib_aix.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "memory/filemap.hpp"
  46 #include "misc_aix.hpp"
  47 #include "oops/oop.inline.hpp"
  48 #include "os_aix.inline.hpp"
  49 #include "os_share_aix.hpp"
  50 #include "porting_aix.hpp"
  51 #include "prims/jniFastGetField.hpp"
  52 #include "prims/jvm.h"
  53 #include "prims/jvm_misc.hpp"
  54 #include "runtime/arguments.hpp"
  55 #include "runtime/atomic.hpp"
  56 #include "runtime/extendedPC.hpp"
  57 #include "runtime/globals.hpp"
  58 #include "runtime/interfaceSupport.hpp"
  59 #include "runtime/java.hpp"
  60 #include "runtime/javaCalls.hpp"
  61 #include "runtime/mutexLocker.hpp"
  62 #include "runtime/objectMonitor.hpp"
  63 #include "runtime/orderAccess.inline.hpp"
  64 #include "runtime/os.hpp"
  65 #include "runtime/osThread.hpp"
  66 #include "runtime/perfMemory.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/statSampler.hpp"
  69 #include "runtime/stubRoutines.hpp"
  70 #include "runtime/thread.inline.hpp"
  71 #include "runtime/threadCritical.hpp"
  72 #include "runtime/timer.hpp"
  73 #include "runtime/vm_version.hpp"
  74 #include "services/attachListener.hpp"
  75 #include "services/runtimeService.hpp"
  76 #include "utilities/decoder.hpp"
  77 #include "utilities/defaultStream.hpp"
  78 #include "utilities/events.hpp"
  79 #include "utilities/growableArray.hpp"
  80 #include "utilities/vmError.hpp"
  81 
  82 // put OS-includes here (sorted alphabetically)
  83 #include <errno.h>
  84 #include <fcntl.h>
  85 #include <inttypes.h>
  86 #include <poll.h>
  87 #include <procinfo.h>
  88 #include <pthread.h>
  89 #include <pwd.h>
  90 #include <semaphore.h>
  91 #include <signal.h>
  92 #include <stdint.h>
  93 #include <stdio.h>
  94 #include <string.h>
  95 #include <unistd.h>
  96 #include <sys/ioctl.h>
  97 #include <sys/ipc.h>
  98 #include <sys/mman.h>
  99 #include <sys/resource.h>
 100 #include <sys/select.h>
 101 #include <sys/shm.h>
 102 #include <sys/socket.h>
 103 #include <sys/stat.h>
 104 #include <sys/sysinfo.h>
 105 #include <sys/systemcfg.h>
 106 #include <sys/time.h>
 107 #include <sys/times.h>
 108 #include <sys/types.h>
 109 #include <sys/utsname.h>
 110 #include <sys/vminfo.h>
 111 #include <sys/wait.h>
 112 
 113 // Missing prototypes for various system APIs.
 114 extern "C"
 115 int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
 116 
 117 #if !defined(_AIXVERSION_610)
 118 extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
 119 extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
 120 extern "C" int getargs   (procsinfo*, int, char*, int);
 121 #endif
 122 
 123 #define MAX_PATH (2 * K)
 124 
 125 // for timer info max values which include all bits
 126 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 127 // for multipage initialization error analysis (in 'g_multipage_error')
 128 #define ERROR_MP_OS_TOO_OLD                          100
 129 #define ERROR_MP_EXTSHM_ACTIVE                       101
 130 #define ERROR_MP_VMGETINFO_FAILED                    102
 131 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 132 
 133 // Query dimensions of the stack of the calling thread.
 134 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
 135 static address resolve_function_descriptor_to_code_pointer(address p);
 136 
 137 static void vmembk_print_on(outputStream* os);
 138 
 139 ////////////////////////////////////////////////////////////////////////////////
 140 // global variables (for a description see os_aix.hpp)
 141 
 142 julong    os::Aix::_physical_memory = 0;
 143 
 144 pthread_t os::Aix::_main_thread = ((pthread_t)0);
 145 int       os::Aix::_page_size = -1;
 146 
 147 // -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
 148 int       os::Aix::_on_pase = -1;
 149 
 150 // 0 = uninitialized, otherwise 32 bit number:
 151 //  0xVVRRTTSS
 152 //  VV - major version
 153 //  RR - minor version
 154 //  TT - tech level, if known, 0 otherwise
 155 //  SS - service pack, if known, 0 otherwise
 156 uint32_t  os::Aix::_os_version = 0;
 157 
 158 // -1 = uninitialized, 0 - no, 1 - yes
 159 int       os::Aix::_xpg_sus_mode = -1;
 160 
 161 // -1 = uninitialized, 0 - no, 1 - yes
 162 int       os::Aix::_extshm = -1;
 163 
 164 ////////////////////////////////////////////////////////////////////////////////
 165 // local variables
 166 
 167 static jlong    initial_time_count = 0;
 168 static int      clock_tics_per_sec = 100;
 169 static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
 170 static bool     check_signals      = true;
 171 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 172 static sigset_t SR_sigset;
 173 
 174 // Process break recorded at startup.
 175 static address g_brk_at_startup = NULL;
 176 
// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
// therefore should not be defined in AIX class.
 180 //
 181 // AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
 182 // latter two (16M "large" resp. 16G "huge" pages) require special
 183 // setup and are normally not available.
 184 //
 185 // AIX supports multiple page sizes per process, for:
 186 //  - Stack (of the primordial thread, so not relevant for us)
 187 //  - Data - data, bss, heap, for us also pthread stacks
 188 //  - Text - text code
 189 //  - shared memory
 190 //
 191 // Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
 192 // and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
 193 //
 194 // For shared memory, page size can be set dynamically via
 195 // shmctl(). Different shared memory regions can have different page
 196 // sizes.
 197 //
// More information can be found at the IBM info center:
//   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
 200 //
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,                // (size_t)-1 doubles as a "not yet queried" sentinel,
  (size_t) -1,                // checked by query_multipage_support().
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};
 219 
 220 // We must not accidentally allocate memory close to the BRK - even if
 221 // that would work - because then we prevent the BRK segment from
 222 // growing which may result in a malloc OOM even though there is
 223 // enough memory. The problem only arises if we shmat() or mmap() at
 224 // a specific wish address, e.g. to place the heap in a
 225 // compressed-oops-friendly way.
 226 static bool is_close_to_brk(address a) {
 227   assert0(g_brk_at_startup != NULL);
 228   if (a >= g_brk_at_startup &&
 229       a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
 230     return true;
 231   }
 232   return false;
 233 }
 234 
// Amount of memory currently available; delegates to the AIX-specific query.
julong os::available_memory() {
  return Aix::available_memory();
}
 238 
 239 julong os::Aix::available_memory() {
 240   // Avoid expensive API call here, as returned value will always be null.
 241   if (os::Aix::on_pase()) {
 242     return 0x0LL;
 243   }
 244   os::Aix::meminfo_t mi;
 245   if (os::Aix::get_meminfo(&mi)) {
 246     return mi.real_free;
 247   } else {
 248     return ULONG_MAX;
 249   }
 250 }
 251 
// Total physical memory; delegates to the cached AIX-specific value.
julong os::physical_memory() {
  return Aix::physical_memory();
}
 255 
 256 // Return true if user is running as root.
 257 
 258 bool os::have_special_privileges() {
 259   static bool init = false;
 260   static bool privileges = false;
 261   if (!init) {
 262     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 263     init = true;
 264   }
 265   return privileges;
 266 }
 267 
 268 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 269 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 270 static bool my_disclaim64(char* addr, size_t size) {
 271 
 272   if (size == 0) {
 273     return true;
 274   }
 275 
 276   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 277   const unsigned int maxDisclaimSize = 0x40000000;
 278 
 279   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 280   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 281 
 282   char* p = addr;
 283 
 284   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
 285     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 286       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 287       return false;
 288     }
 289     p += maxDisclaimSize;
 290   }
 291 
 292   if (lastDisclaimSize > 0) {
 293     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 294       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 295       return false;
 296     }
 297   }
 298 
 299   return true;
 300 }
 301 
// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
// This port only builds for Power; any other target is a build error.
#error Add appropriate cpu_arch setting
#endif
 310 
 311 // Wrap the function "vmgetinfo" which is not available on older OS releases.
 312 static int checked_vmgetinfo(void *out, int command, int arg) {
 313   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 314     guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
 315   }
 316   return ::vmgetinfo(out, command, arg);
 317 }
 318 
 319 // Given an address, returns the size of the page backing that address.
 320 size_t os::Aix::query_pagesize(void* addr) {
 321 
 322   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 323     // AS/400 older than V6R1: no vmgetinfo here, default to 4K
 324     return 4*K;
 325   }
 326 
 327   vm_page_info pi;
 328   pi.addr = (uint64_t)addr;
 329   if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
 330     return pi.pagesize;
 331   } else {
 332     assert(false, "vmgetinfo failed to retrieve page size");
 333     return 4*K;
 334   }
 335 }
 336 
 337 void os::Aix::initialize_system_info() {
 338 
 339   // Get the number of online(logical) cpus instead of configured.
 340   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
 341   assert(_processor_count > 0, "_processor_count must be > 0");
 342 
 343   // Retrieve total physical storage.
 344   os::Aix::meminfo_t mi;
 345   if (!os::Aix::get_meminfo(&mi)) {
 346     assert(false, "os::Aix::get_meminfo failed.");
 347   }
 348   _physical_memory = (julong) mi.real_total;
 349 }
 350 
 351 // Helper function for tracing page sizes.
 352 static const char* describe_pagesize(size_t pagesize) {
 353   switch (pagesize) {
 354     case 4*K : return "4K";
 355     case 64*K: return "64K";
 356     case 16*M: return "16M";
 357     case 16*G: return "16G";
 358     default:
 359       assert(false, "surprise");
 360       return "??";
 361   }
 362 }
 363 
 364 // Probe OS for multipage support.
 365 // Will fill the global g_multipage_support structure.
 366 // Must be called before calling os::large_page_init().
 367 static void query_multipage_support() {
 368 
 369   guarantee(g_multipage_support.pagesize == -1,
 370             "do not call twice");
 371 
 372   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
 373 
 374   // This really would surprise me.
 375   assert(g_multipage_support.pagesize == 4*K, "surprise!");
 376 
 377   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 378   // Default data page size is defined either by linker options (-bdatapsize)
 379   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 380   // default should be 4K.
 381   {
 382     void* p = ::malloc(16*M);
 383     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
 384     ::free(p);
 385   }
 386 
 387   // Query default shm page size (LDR_CNTRL SHMPSIZE).
 388   // Note that this is pure curiosity. We do not rely on default page size but set
 389   // our own page size after allocated.
 390   {
 391     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 392     guarantee(shmid != -1, "shmget failed");
 393     void* p = ::shmat(shmid, NULL, 0);
 394     ::shmctl(shmid, IPC_RMID, NULL);
 395     guarantee(p != (void*) -1, "shmat failed");
 396     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
 397     ::shmdt(p);
 398   }
 399 
 400   // Before querying the stack page size, make sure we are not running as primordial
 401   // thread (because primordial thread's stack may have different page size than
 402   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 403   // number of reasons so we may just as well guarantee it here.
 404   guarantee0(!os::Aix::is_primordial_thread());
 405 
 406   // Query pthread stack page size. Should be the same as data page size because
 407   // pthread stacks are allocated from C-Heap.
 408   {
 409     int dummy = 0;
 410     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
 411   }
 412 
 413   // Query default text page size (LDR_CNTRL TEXTPSIZE).
 414   {
 415     address any_function =
 416       resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
 417     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
 418   }
 419 
 420   // Now probe for support of 64K pages and 16M pages.
 421 
 422   // Before OS/400 V6R1, there is no support for pages other than 4K.
 423   if (os::Aix::on_pase_V5R4_or_older()) {
 424     trcVerbose("OS/400 < V6R1 - no large page support.");
 425     g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
 426     goto query_multipage_support_end;
 427   }
 428 
 429   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
 430   {
 431     const int MAX_PAGE_SIZES = 4;
 432     psize_t sizes[MAX_PAGE_SIZES];
 433     const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 434     if (num_psizes == -1) {
 435       trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
 436       trcVerbose("disabling multipage support.");
 437       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
 438       goto query_multipage_support_end;
 439     }
 440     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 441     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 442     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
 443     for (int i = 0; i < num_psizes; i ++) {
 444       trcVerbose(" %s ", describe_pagesize(sizes[i]));
 445     }
 446 
 447     // Can we use 64K, 16M pages?
 448     for (int i = 0; i < num_psizes; i ++) {
 449       const size_t pagesize = sizes[i];
 450       if (pagesize != 64*K && pagesize != 16*M) {
 451         continue;
 452       }
 453       bool can_use = false;
 454       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
 455       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
 456         IPC_CREAT | S_IRUSR | S_IWUSR);
 457       guarantee0(shmid != -1); // Should always work.
 458       // Try to set pagesize.
 459       struct shmid_ds shm_buf = { 0 };
 460       shm_buf.shm_pagesize = pagesize;
 461       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
 462         const int en = errno;
 463         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 464         trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%n",
 465           errno);
 466       } else {
 467         // Attach and double check pageisze.
 468         void* p = ::shmat(shmid, NULL, 0);
 469         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 470         guarantee0(p != (void*) -1); // Should always work.
 471         const size_t real_pagesize = os::Aix::query_pagesize(p);
 472         if (real_pagesize != pagesize) {
 473           trcVerbose("real page size (0x%llX) differs.", real_pagesize);
 474         } else {
 475           can_use = true;
 476         }
 477         ::shmdt(p);
 478       }
 479       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
 480       if (pagesize == 64*K) {
 481         g_multipage_support.can_use_64K_pages = can_use;
 482       } else if (pagesize == 16*M) {
 483         g_multipage_support.can_use_16M_pages = can_use;
 484       }
 485     }
 486 
 487   } // end: check which pages can be used for shared memory
 488 
 489 query_multipage_support_end:
 490 
 491   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
 492       describe_pagesize(g_multipage_support.pagesize));
 493   trcVerbose("Data page size (C-Heap, bss, etc): %s",
 494       describe_pagesize(g_multipage_support.datapsize));
 495   trcVerbose("Text page size: %s",
 496       describe_pagesize(g_multipage_support.textpsize));
 497   trcVerbose("Thread stack page size (pthread): %s",
 498       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
 499   trcVerbose("Default shared memory page size: %s",
 500       describe_pagesize(g_multipage_support.shmpsize));
 501   trcVerbose("Can use 64K pages dynamically with shared meory: %s",
 502       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
 503   trcVerbose("Can use 16M pages dynamically with shared memory: %s",
 504       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
 505   trcVerbose("Multipage error details: %d",
 506       g_multipage_support.error);
 507 
 508   // sanity checks
 509   assert0(g_multipage_support.pagesize == 4*K);
 510   assert0(g_multipage_support.datapsize == 4*K || g_multipage_support.datapsize == 64*K);
 511   assert0(g_multipage_support.textpsize == 4*K || g_multipage_support.textpsize == 64*K);
 512   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
 513   assert0(g_multipage_support.shmpsize == 4*K || g_multipage_support.shmpsize == 64*K);
 514 
 515 }
 516 
 517 void os::init_system_properties_values() {
 518 
 519 #define DEFAULT_LIBPATH "/lib:/usr/lib"
 520 #define EXTENSIONS_DIR  "/lib/ext"
 521 
 522   // Buffer that fits several sprintfs.
 523   // Note that the space for the trailing null is provided
 524   // by the nulls included by the sizeof operator.
 525   const size_t bufsize =
 526     MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
 527          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
 528   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 529 
 530   // sysclasspath, java_home, dll_dir
 531   {
 532     char *pslash;
 533     os::jvm_path(buf, bufsize);
 534 
 535     // Found the full path to libjvm.so.
 536     // Now cut the path to <java_home>/jre if we can.
 537     pslash = strrchr(buf, '/');
 538     if (pslash != NULL) {
 539       *pslash = '\0';            // Get rid of /libjvm.so.
 540     }
 541     pslash = strrchr(buf, '/');
 542     if (pslash != NULL) {
 543       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 544     }
 545     Arguments::set_dll_dir(buf);
 546 
 547     if (pslash != NULL) {
 548       pslash = strrchr(buf, '/');
 549       if (pslash != NULL) {
 550         *pslash = '\0';        // Get rid of /lib.
 551       }
 552     }
 553     Arguments::set_java_home(buf);
 554     set_boot_path('/', ':');
 555   }
 556 
 557   // Where to look for native libraries.
 558 
 559   // On Aix we get the user setting of LIBPATH.
 560   // Eventually, all the library path setting will be done here.
 561   // Get the user setting of LIBPATH.
 562   const char *v = ::getenv("LIBPATH");
 563   const char *v_colon = ":";
 564   if (v == NULL) { v = ""; v_colon = ""; }
 565 
 566   // Concatenate user and invariant part of ld_library_path.
 567   // That's +1 for the colon and +1 for the trailing '\0'.
 568   char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
 569   sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
 570   Arguments::set_library_path(ld_library_path);
 571   FREE_C_HEAP_ARRAY(char, ld_library_path);
 572 
 573   // Extensions directories.
 574   sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
 575   Arguments::set_ext_dirs(buf);
 576 
 577   FREE_C_HEAP_ARRAY(char, buf);
 578 
 579 #undef DEFAULT_LIBPATH
 580 #undef EXTENSIONS_DIR
 581 }
 582 
 583 ////////////////////////////////////////////////////////////////////////////////
 584 // breakpoint support
 585 
// Programmatic breakpoint; BREAKPOINT is a project macro defined elsewhere
// (presumably expands to a platform trap - see shared HotSpot headers).
void os::breakpoint() {
  BREAKPOINT;
}
 589 
// Deliberately empty C-linkage symbol: gives a debugger a stable name to
// set a breakpoint on.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
 593 
 594 ////////////////////////////////////////////////////////////////////////////////
 595 // signal support
 596 
// Set once by signal_sets_init(); debug-only flag backing the assertions
// in the accessors below.
debug_only(static bool signal_sets_initialized = false);
// Signal sets populated by signal_sets_init().
static sigset_t unblocked_sigs, vm_sigs;
 599 
 600 bool os::Aix::is_sig_ignored(int sig) {
 601   struct sigaction oact;
 602   sigaction(sig, (struct sigaction*)NULL, &oact);
 603   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
 604     : CAST_FROM_FN_PTR(void*, oact.sa_handler);
 605   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
 606     return true;
 607   } else {
 608     return false;
 609   }
 610 }
 611 
 612 void os::Aix::signal_sets_init() {
 613   // Should also have an assertion stating we are still single-threaded.
 614   assert(!signal_sets_initialized, "Already initialized");
 615   // Fill in signals that are necessarily unblocked for all threads in
 616   // the VM. Currently, we unblock the following signals:
 617   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
 618   //                         by -Xrs (=ReduceSignalUsage));
 619   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
 620   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
 621   // the dispositions or masks wrt these signals.
 622   // Programs embedding the VM that want to use the above signals for their
 623   // own purposes must, at this time, use the "-Xrs" option to prevent
 624   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
 625   // (See bug 4345157, and other related bugs).
 626   // In reality, though, unblocking these signals is really a nop, since
 627   // these signals are not blocked by default.
 628   sigemptyset(&unblocked_sigs);
 629   sigaddset(&unblocked_sigs, SIGILL);
 630   sigaddset(&unblocked_sigs, SIGSEGV);
 631   sigaddset(&unblocked_sigs, SIGBUS);
 632   sigaddset(&unblocked_sigs, SIGFPE);
 633   sigaddset(&unblocked_sigs, SIGTRAP);
 634   sigaddset(&unblocked_sigs, SR_signum);
 635 
 636   if (!ReduceSignalUsage) {
 637    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
 638      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
 639    }
 640    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
 641      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
 642    }
 643    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
 644      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
 645    }
 646   }
 647   // Fill in signals that are blocked by all but the VM thread.
 648   sigemptyset(&vm_sigs);
 649   if (!ReduceSignalUsage)
 650     sigaddset(&vm_sigs, BREAK_SIGNAL);
 651   debug_only(signal_sets_initialized = true);
 652 }
 653 
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  // Valid only after signal_sets_init() has populated unblocked_sigs.
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
 660 
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  // Valid only after signal_sets_init() has populated vm_sigs.
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
 667 
 668 void os::Aix::hotspot_sigmask(Thread* thread) {
 669 
 670   //Save caller's signal mask before setting VM signal mask
 671   sigset_t caller_sigmask;
 672   pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);
 673 
 674   OSThread* osthread = thread->osthread();
 675   osthread->set_caller_sigmask(caller_sigmask);
 676 
 677   pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);
 678 
 679   if (!ReduceSignalUsage) {
 680     if (thread->is_VM_thread()) {
 681       // Only the VM thread handles BREAK_SIGNAL ...
 682       pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
 683     } else {
 684       // ... all other threads block BREAK_SIGNAL
 685       pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
 686     }
 687   }
 688 }
 689 
 690 // retrieve memory information.
 691 // Returns false if something went wrong;
 692 // content of pmi undefined in this case.
 693 bool os::Aix::get_meminfo(meminfo_t* pmi) {
 694 
 695   assert(pmi, "get_meminfo: invalid parameter");
 696 
 697   memset(pmi, 0, sizeof(meminfo_t));
 698 
 699   if (os::Aix::on_pase()) {
 700     // On PASE, use the libo4 porting library.
 701 
 702     unsigned long long virt_total = 0;
 703     unsigned long long real_total = 0;
 704     unsigned long long real_free = 0;
 705     unsigned long long pgsp_total = 0;
 706     unsigned long long pgsp_free = 0;
 707     if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
 708       pmi->virt_total = virt_total;
 709       pmi->real_total = real_total;
 710       pmi->real_free = real_free;
 711       pmi->pgsp_total = pgsp_total;
 712       pmi->pgsp_free = pgsp_free;
 713       return true;
 714     }
 715     return false;
 716 
 717   } else {
 718 
 719     // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
 720     // See:
 721     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 722     //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
 723     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 724     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 725 
 726     perfstat_memory_total_t psmt;
 727     memset (&psmt, '\0', sizeof(psmt));
 728     const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
 729     if (rc == -1) {
 730       trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
 731       assert(0, "perfstat_memory_total() failed");
 732       return false;
 733     }
 734 
 735     assert(rc == 1, "perfstat_memory_total() - weird return code");
 736 
 737     // excerpt from
 738     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 739     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 740     // The fields of perfstat_memory_total_t:
 741     // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
 742     // u_longlong_t real_total         Total real memory (in 4 KB pages).
 743     // u_longlong_t real_free          Free real memory (in 4 KB pages).
 744     // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
 745     // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
 746 
 747     pmi->virt_total = psmt.virt_total * 4096;
 748     pmi->real_total = psmt.real_total * 4096;
 749     pmi->real_free = psmt.real_free * 4096;
 750     pmi->pgsp_total = psmt.pgsp_total * 4096;
 751     pmi->pgsp_free = psmt.pgsp_free * 4096;
 752 
 753     return true;
 754 
 755   }
 756 } // end os::Aix::get_meminfo
 757 
 758 //////////////////////////////////////////////////////////////////////////////
 759 // create new thread
 760 
// Thread start routine for all newly created threads
static void *thread_native_entry(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    log_warning(os, thread)("Thread stack not in data segment.");
  }

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  // (alloca() needs the "#pragma alloca" at the top of this file.)

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // Thread_id is pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  os::Aix::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->run();

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  return 0;
}
 834 
// Create a native (pthread) thread for 'thread'.
//
// Allocates the OSThread, configures pthread attributes (detached,
// 1:1 scheduling scope on AIX, suspended start, stack size, guard size)
// and starts thread_native_entry. The new thread stays suspended until
// os::pd_start_thread() resumes it.
// Returns false (with all partially allocated state cleaned up) if the
// OSThread allocation or pthread_create fails.
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t req_stack_size) {

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object.
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Set the correct thread state.
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // Init thread attributes.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  }

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // Calculate stack size if it's not specified by caller.
  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
  int status = pthread_attr_setstacksize(&attr, stack_size);
  assert_status(status == 0, status, "pthread_attr_setstacksize");

  // Configure libc guard page.
  pthread_attr_setguardsize(&attr, os::Aix::default_guard_size(thr_type));

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);

  char buf[64];
  if (ret == 0) {
    log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
      (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  } else {
    log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.",
      ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  }

  // The attr object is no longer needed once the thread exists (or failed).
  pthread_attr_destroy(&attr);

  if (ret != 0) {
    // Need to clean up stuff we've allocated so far.
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(tid);

  return true;
}
 902 
 903 /////////////////////////////////////////////////////////////////////////////
 904 // attach existing thread
 905 
 906 // bootstrap the main thread
 907 bool os::create_main_thread(JavaThread* thread) {
 908   assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
 909   return create_attached_thread(thread);
 910 }
 911 
// Attach an externally created (non-VM) thread to the VM.
//
// Allocates an OSThread for the calling thread, records its pthread and
// kernel ids, initializes FPU state and signal mask (saving the caller's
// mask for restoration in os::free_thread), and optionally binds the
// thread to its NUMA lgroup. Returns false only if the OSThread
// allocation fails.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  return true;
}
 957 
 958 void os::pd_start_thread(Thread* thread) {
 959   int status = pthread_continue_np(thread->osthread()->pthread_id());
 960   assert(status == 0, "thr_continue failed");
 961 }
 962 
 963 // Free OS resources related to the OSThread
 964 void os::free_thread(OSThread* osthread) {
 965   assert(osthread != NULL, "osthread not set");
 966 
 967   // We are told to free resources of the argument thread,
 968   // but we can only really operate on the current thread.
 969   assert(Thread::current()->osthread() == osthread,
 970          "os::free_thread but not current thread");
 971 
 972   // Restore caller's signal mask
 973   sigset_t sigmask = osthread->caller_sigmask();
 974   pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
 975 
 976   delete osthread;
 977 }
 978 
 979 ////////////////////////////////////////////////////////////////////////////////
 980 // time support
 981 
 982 // Time since start-up in seconds to a fine granularity.
 983 // Used by VMSelfDestructTimer and the MemProfiler.
 984 double os::elapsedTime() {
 985   return (double)(os::elapsed_counter()) * 0.000001;
 986 }
 987 
 988 jlong os::elapsed_counter() {
 989   timeval time;
 990   int status = gettimeofday(&time, NULL);
 991   return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
 992 }
 993 
 994 jlong os::elapsed_frequency() {
 995   return (1000 * 1000);
 996 }
 997 
// Per-thread cpu ("virtual") time is available via getrusage(RUSAGE_THREAD),
// see os::elapsedVTime(); it is always on and cannot be toggled at runtime.
bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }
1001 
1002 double os::elapsedVTime() {
1003   struct rusage usage;
1004   int retval = getrusage(RUSAGE_THREAD, &usage);
1005   if (retval == 0) {
1006     return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
1007   } else {
1008     // better than nothing, but not much
1009     return elapsedTime();
1010   }
1011 }
1012 
1013 jlong os::javaTimeMillis() {
1014   timeval time;
1015   int status = gettimeofday(&time, NULL);
1016   assert(status != -1, "aix error at gettimeofday()");
1017   return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1018 }
1019 
1020 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1021   timeval time;
1022   int status = gettimeofday(&time, NULL);
1023   assert(status != -1, "aix error at gettimeofday()");
1024   seconds = jlong(time.tv_sec);
1025   nanos = jlong(time.tv_usec) * 1000;
1026 }
1027 
// Monotonic(ish) nanosecond timer.
// On PASE: derived from gettimeofday (microsecond resolution, not
// guaranteed monotonic). On AIX: derived from the time base register via
// mread_real_time, which is monotonic.
jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {

    timeval time;
    int status = gettimeofday(&time, NULL);
    assert(status != -1, "PASE error at gettimeofday()");
    jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
    return 1000 * usecs;

  } else {
    // On AIX use the precision of processors real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonic increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}
1056 
// Describe the properties of the os::javaTimeNanos() timer for JVMTI.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()'),
  // so the timer neither jumps backward nor skips forward.
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}
1064 
1065 // Return the real, user, and system times in seconds from an
1066 // arbitrary fixed point in the past.
1067 bool os::getTimesSecs(double* process_real_time,
1068                       double* process_user_time,
1069                       double* process_system_time) {
1070   struct tms ticks;
1071   clock_t real_ticks = times(&ticks);
1072 
1073   if (real_ticks == (clock_t) (-1)) {
1074     return false;
1075   } else {
1076     double ticks_per_second = (double) clock_tics_per_sec;
1077     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1078     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1079     *process_real_time = ((double) real_ticks) / ticks_per_second;
1080 
1081     return true;
1082   }
1083 }
1084 
1085 char * os::local_time_string(char *buf, size_t buflen) {
1086   struct tm t;
1087   time_t long_time;
1088   time(&long_time);
1089   localtime_r(&long_time, &t);
1090   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1091                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1092                t.tm_hour, t.tm_min, t.tm_sec);
1093   return buf;
1094 }
1095 
1096 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
1097   return localtime_r(clock, res);
1098 }
1099 
1100 ////////////////////////////////////////////////////////////////////////////////
1101 // runtime exit support
1102 
// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
// The order below is deliberate: perf memory and the attach listener hold
// filesystem resources, streams are flushed last before the user hook runs.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
1123 
1124 // Note: os::abort() might be called very early during initialization, or
1125 // called from signal handler. Before adding something to os::abort(), make
1126 // sure it is async-safe and can handle partially initialized VM.
1127 void os::abort(bool dump_core, void* siginfo, const void* context) {
1128   os::shutdown();
1129   if (dump_core) {
1130 #ifndef PRODUCT
1131     fdStream out(defaultStream::output_fd());
1132     out.print_raw("Current thread is ");
1133     char buf[16];
1134     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1135     out.print_raw_cr(buf);
1136     out.print_raw_cr("Dumping core ...");
1137 #endif
1138     ::abort(); // dump core
1139   }
1140 
1141   ::exit(1);
1142 }
1143 
// Die immediately, no exit hook, no abort hook, no cleanup.
// Used when the VM is in a state too broken for an orderly shutdown.
void os::die() {
  ::abort();
}
1148 
1149 // This method is a copy of JDK's sysGetLastErrorString
1150 // from src/solaris/hpi/src/system_md.c
1151 
1152 size_t os::lasterror(char *buf, size_t len) {
1153   if (errno == 0) return 0;
1154 
1155   const char *s = os::strerror(errno);
1156   size_t n = ::strlen(s);
1157   if (n >= len) {
1158     n = len - 1;
1159   }
1160   ::strncpy(buf, s, n);
1161   buf[n] = '\0';
1162   return n;
1163 }
1164 
1165 intx os::current_thread_id() {
1166   return (intx)pthread_self();
1167 }
1168 
1169 int os::current_process_id() {
1170   return getpid();
1171 }
1172 
1173 // DLL functions
1174 
1175 const char* os::dll_file_extension() { return ".so"; }
1176 
1177 // This must be hard coded because it's the system's temporary
1178 // directory not the java application's temp directory, ala java.io.tmpdir.
1179 const char* os::get_temp_directory() { return "/tmp"; }
1180 
1181 static bool file_exists(const char* filename) {
1182   struct stat statbuf;
1183   if (filename == NULL || strlen(filename) == 0) {
1184     return false;
1185   }
1186   return os::stat(filename, &statbuf) == 0;
1187 }
1188 
// Build a platform library name ("lib<fname>.so") into 'buffer'.
// - pname empty: just the bare library name.
// - pname contains the path separator: probe each path element in order,
//   keep the first candidate that exists on disk.
// - otherwise: treat pname as a single directory.
// Returns true if a usable name was written (for the multi-path case,
// only if one of the candidates actually exists).
bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  // (+10 covers the literal "/lib" + ".so" decoration plus the NUL.)
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // split_path returns a C-heap array of C-heap strings; both the
    // elements and the array itself must be freed below.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
1236 
1237 // Check if addr is inside libjvm.so.
1238 bool os::address_is_in_vm(address addr) {
1239 
1240   // Input could be a real pc or a function pointer literal. The latter
1241   // would be a function descriptor residing in the data segment of a module.
1242   loaded_module_t lm;
1243   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
1244     return lm.is_in_vm;
1245   } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
1246     return lm.is_in_vm;
1247   } else {
1248     return false;
1249   }
1250 
1251 }
1252 
1253 // Resolve an AIX function descriptor literal to a code pointer.
1254 // If the input is a valid code pointer to a text segment of a loaded module,
1255 //   it is returned unchanged.
1256 // If the input is a valid AIX function descriptor, it is resolved to the
1257 //   code entry point.
1258 // If the input is neither a valid function descriptor nor a valid code pointer,
1259 //   NULL is returned.
1260 static address resolve_function_descriptor_to_code_pointer(address p) {
1261 
1262   if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
1263     // It is a real code pointer.
1264     return p;
1265   } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
1266     // Pointer to data segment, potential function descriptor.
1267     address code_entry = (address)(((FunctionDescriptor*)p)->entry());
1268     if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
1269       // It is a function descriptor.
1270       return code_entry;
1271     }
1272   }
1273 
1274   return NULL;
1275 }
1276 
1277 bool os::dll_address_to_function_name(address addr, char *buf,
1278                                       int buflen, int *offset,
1279                                       bool demangle) {
1280   if (offset) {
1281     *offset = -1;
1282   }
1283   // Buf is not optional, but offset is optional.
1284   assert(buf != NULL, "sanity check");
1285   buf[0] = '\0';
1286 
1287   // Resolve function ptr literals first.
1288   addr = resolve_function_descriptor_to_code_pointer(addr);
1289   if (!addr) {
1290     return false;
1291   }
1292 
1293   return AixSymbols::get_function_name(addr, buf, buflen, offset, NULL, demangle);
1294 }
1295 
1296 bool os::dll_address_to_library_name(address addr, char* buf,
1297                                      int buflen, int* offset) {
1298   if (offset) {
1299     *offset = -1;
1300   }
1301   // Buf is not optional, but offset is optional.
1302   assert(buf != NULL, "sanity check");
1303   buf[0] = '\0';
1304 
1305   // Resolve function ptr literals first.
1306   addr = resolve_function_descriptor_to_code_pointer(addr);
1307   if (!addr) {
1308     return false;
1309   }
1310 
1311   return AixSymbols::get_module_name(addr, buf, buflen);
1312 }
1313 
1314 // Loads .dll/.so and in case of error it checks if .dll/.so was built
1315 // for the same architecture as Hotspot is running on.
1316 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1317 
1318   if (ebuf && ebuflen > 0) {
1319     ebuf[0] = '\0';
1320     ebuf[ebuflen - 1] = '\0';
1321   }
1322 
1323   if (!filename || strlen(filename) == 0) {
1324     ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1325     return NULL;
1326   }
1327 
1328   // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1329   void * result= ::dlopen(filename, RTLD_LAZY);
1330   if (result != NULL) {
1331     // Reload dll cache. Don't do this in signal handling.
1332     LoadedLibraries::reload();
1333     return result;
1334   } else {
1335     // error analysis when dlopen fails
1336     const char* const error_report = ::dlerror();
1337     if (error_report && ebuf && ebuflen > 0) {
1338       snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1339                filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1340     }
1341   }
1342   return NULL;
1343 }
1344 
1345 void* os::dll_lookup(void* handle, const char* name) {
1346   void* res = dlsym(handle, name);
1347   return res;
1348 }
1349 
1350 void* os::get_default_process_handle() {
1351   return (void*)::dlopen(NULL, RTLD_LAZY);
1352 }
1353 
// Print the list of loaded modules (maintained by loadlib_aix) to st.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}
1358 
1359 void os::get_summary_os_info(char* buf, size_t buflen) {
1360   // There might be something more readable than uname results for AIX.
1361   struct utsname name;
1362   uname(&name);
1363   snprintf(buf, buflen, "%s %s", name.release, name.version);
1364 }
1365 
1366 void os::print_os_info(outputStream* st) {
1367   st->print("OS:");
1368 
1369   st->print("uname:");
1370   struct utsname name;
1371   uname(&name);
1372   st->print(name.sysname); st->print(" ");
1373   st->print(name.nodename); st->print(" ");
1374   st->print(name.release); st->print(" ");
1375   st->print(name.version); st->print(" ");
1376   st->print(name.machine);
1377   st->cr();
1378 
1379   uint32_t ver = os::Aix::os_version();
1380   st->print_cr("AIX kernel version %u.%u.%u.%u",
1381                (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1382 
1383   os::Posix::print_rlimit_info(st);
1384 
1385   // load average
1386   st->print("load average:");
1387   double loadavg[3] = {-1.L, -1.L, -1.L};
1388   os::loadavg(loadavg, 3);
1389   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1390   st->cr();
1391 
1392   // print wpar info
1393   libperfstat::wparinfo_t wi;
1394   if (libperfstat::get_wparinfo(&wi)) {
1395     st->print_cr("wpar info");
1396     st->print_cr("name: %s", wi.name);
1397     st->print_cr("id:   %d", wi.wpar_id);
1398     st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1399   }
1400 
1401   // print partition info
1402   libperfstat::partitioninfo_t pi;
1403   if (libperfstat::get_partitioninfo(&pi)) {
1404     st->print_cr("partition info");
1405     st->print_cr(" name: %s", pi.name);
1406   }
1407 
1408 }
1409 
1410 void os::print_memory_info(outputStream* st) {
1411 
1412   st->print_cr("Memory:");
1413 
1414   st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1415     describe_pagesize(g_multipage_support.pagesize));
1416   st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1417     describe_pagesize(g_multipage_support.datapsize));
1418   st->print_cr("  Text page size:                         %s",
1419     describe_pagesize(g_multipage_support.textpsize));
1420   st->print_cr("  Thread stack page size (pthread):       %s",
1421     describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1422   st->print_cr("  Default shared memory page size:        %s",
1423     describe_pagesize(g_multipage_support.shmpsize));
1424   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1425     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1426   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1427     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1428   st->print_cr("  Multipage error: %d",
1429     g_multipage_support.error);
1430   st->cr();
1431   st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1432 
1433   // print out LDR_CNTRL because it affects the default page sizes
1434   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1435   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1436 
1437   // Print out EXTSHM because it is an unsupported setting.
1438   const char* const extshm = ::getenv("EXTSHM");
1439   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1440   if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1441     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1442   }
1443 
1444   // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1445   const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1446   st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1447       aixthread_guardpages ? aixthread_guardpages : "<unset>");
1448 
1449   os::Aix::meminfo_t mi;
1450   if (os::Aix::get_meminfo(&mi)) {
1451     char buffer[256];
1452     if (os::Aix::on_aix()) {
1453       st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1454       st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1455       st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1456       st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1457     } else {
1458       // PASE - Numbers are result of QWCRSSTS; they mean:
1459       // real_total: Sum of all system pools
1460       // real_free: always 0
1461       // pgsp_total: we take the size of the system ASP
1462       // pgsp_free: size of system ASP times percentage of system ASP unused
1463       st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1464       st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1465       st->print_cr("%% system asp used : " SIZE_FORMAT,
1466         mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1467     }
1468     st->print_raw(buffer);
1469   }
1470   st->cr();
1471 
1472   // Print segments allocated with os::reserve_memory.
1473   st->print_cr("internal virtual memory regions used by vm:");
1474   vmembk_print_on(st);
1475 }
1476 
1477 // Get a string for the cpuinfo that is a summary of the cpu type
1478 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1479   // This looks good
1480   libperfstat::cpuinfo_t ci;
1481   if (libperfstat::get_cpuinfo(&ci)) {
1482     strncpy(buf, ci.version, buflen);
1483   } else {
1484     strncpy(buf, "AIX", buflen);
1485   }
1486 }
1487 
// Print a one-line CPU summary (count and feature string) to st.
// 'buf'/'buflen' are part of the shared platform interface but unused here.
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  st->print("CPU:");
  st->print("total %d", os::processor_count());
  // It's not safe to query number of active processors after crash.
  // st->print("(active %d)", os::active_processor_count());
  st->print(" %s", VM_Version::features());
  st->cr();
}
1496 
1497 static void print_signal_handler(outputStream* st, int sig,
1498                                  char* buf, size_t buflen);
1499 
// Print the currently installed handlers for all signals the VM cares
// about (error signals, suspend/resume, shutdown/break signals).
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, SR_signum, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SIGTRAP, buf, buflen);
  // We also want to know if someone else adds a SIGDANGER handler because
  // that will interfere with OOM killling.
  print_signal_handler(st, SIGDANGER, buf, buflen);
}
1518 
// Cached result of os::jvm_path(); filled lazily on the first call.
static char saved_jvm_path[MAXPATHLEN] = {0};
1520 
// Find the full path to the current module, libjvm.so.
// The result is cached in saved_jvm_path after the first call. For the
// -XXaltjvm launcher option the path may be rewritten to point at a fake
// <JAVA_HOME>/.../hotspot/libjvm.so location. buflen must be at least
// MAXPATHLEN; on error buf receives "".
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Locate the shared object containing this very function.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");

  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
    // value for buf is "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.so".
    // If "/jre/lib/" appears at the right place in the string, then
    // assume we are installed in a JDK and we're done. Otherwise, check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like libjvm.so is installed there (append a fake suffix
    // hotspot/libjvm.so).
    // Walk back over the last four '/'-separated components of the path.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 4; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char* jrelib_p;
        int len;

        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        if (p == NULL) {
          return;
        }
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        // Rebase onto JAVA_HOME.
        rp = os::Posix::realpath(java_home_var, buf, buflen);
        if (rp == NULL) {
          return;
        }

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer room");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib");
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib");
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
          if (rp == NULL) {
            return;
          }
        }
      }
    }
  }

  // Cache the (possibly rewritten) result; strncpy + explicit terminator
  // guards against a path that exactly fills the cache.
  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}
1602 
// JNI native method name decoration: AIX needs no prefix for native
// symbol lookup, not even a leading "_".
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

// Likewise, no suffix (such as "@<args_size>") is required on AIX.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
1610 
1611 ////////////////////////////////////////////////////////////////////////////////
1612 // sun.misc.Signal support
1613 
// Number of SIGINTs seen so far; used by UserHandler to coalesce repeats.
static volatile jint sigint_count = 0;
1615 
// Handler installed for signals forwarded to sun.misc.Signal: counts and
// coalesces SIGINT, dies immediately on Ctrl-C during error reporting,
// and otherwise forwards the signal number to the VM's notify mechanism.
static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let VM die immediately.
  if (sig == SIGINT && is_error_reported()) {
    os::die();
  }

  os::signal_notify(sig);
}
1632 
1633 void* os::user_handler() {
1634   return CAST_FROM_FN_PTR(void*, UserHandler);
1635 }
1636 
1637 extern "C" {
1638   typedef void (*sa_handler_t)(int);
1639   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1640 }
1641 
// Install 'handler' for 'signal_number' via sigaction.
// Returns the previously installed handler, or (void*)-1 if the
// registration failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  // Start from "block everything while handling", then punch holes below.
  sigfillset(&(sigAct.sa_mask));

  // Do not block out synchronous signals in the signal handler.
  // Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies. Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
  // On AIX, PASE there was a case where a SIGSEGV happened, followed
  // by a SIGILL, which was blocked due to the signal mask. The process
  // just hung forever. Better to crash from a secondary signal than to hang.
  sigdelset(&(sigAct.sa_mask), SIGSEGV);
  sigdelset(&(sigAct.sa_mask), SIGBUS);
  sigdelset(&(sigAct.sa_mask), SIGILL);
  sigdelset(&(sigAct.sa_mask), SIGFPE);
  sigdelset(&(sigAct.sa_mask), SIGTRAP);

  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;

  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
1673 
// Raises <signal_number> in the current process.
void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}
1677 
1678 //
1679 // The following code is moved from os.cpp for making this
1680 // code platform specific, which it is by its very nature.
1681 //
1682 
1683 // Will be modified when max signal is changed to be dynamic
1684 int os::sigexitnum_pd() {
1685   return NSIG;
1686 }
1687 
// a counter for each possible signal value
static volatile jint pending_signals[NSIG+1] = { 0 };

// Wrapper functions for: sem_init(), sem_post(), sem_wait()
// On AIX, we use sem_init(), sem_post(), sem_wait()
// On Pase, we need to use msem_lock() and msem_unlock(), because Posix Semaphores
// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
// Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
// on AIX, msem_..() calls are suspected of causing problems.
// Exactly one of the two is used, selected at runtime via os::Aix::on_aix().
static sem_t sig_sem;
static msemaphore* p_sig_msem = 0;
1699 
// One-time initialization of the signal semaphore: a POSIX semaphore on AIX,
// a memory semaphore on PASE. Aborts via guarantee() on failure.
static void local_sem_init() {
  if (os::Aix::on_aix()) {
    int rc = ::sem_init(&sig_sem, 0, 0);
    guarantee(rc != -1, "sem_init failed");
  } else {
    // Memory semaphores must live in shared mem.
    guarantee0(p_sig_msem == NULL);
    p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
    guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
    guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
  }
}
1712 
// Posts the signal semaphore (sem_post on AIX, msem_unlock on PASE).
// Failures are not fatal; they are logged once per process.
static void local_sem_post() {
  static bool warn_only_once = false;
  if (os::Aix::on_aix()) {
    int rc = ::sem_post(&sig_sem);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  } else {
    guarantee0(p_sig_msem != NULL);
    int rc = ::msem_unlock(p_sig_msem, 0);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  }
}
1730 
// Waits on the signal semaphore (sem_wait on AIX, msem_lock on PASE).
// Failures are not fatal; they are logged once per process.
static void local_sem_wait() {
  static bool warn_only_once = false;
  if (os::Aix::on_aix()) {
    int rc = ::sem_wait(&sig_sem);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  } else {
    guarantee0(p_sig_msem != NULL); // must init before use
    int rc = ::msem_lock(p_sig_msem, 0);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  }
}
1748 
// Platform-dependent part of signal-dispatcher initialization: clears the
// pending-signal counters and creates the signal semaphore.
void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  local_sem_init();
}
1756 
// Records one occurrence of <sig> and posts the semaphore so a thread blocked
// in os::signal_wait() (see check_pending_signals) wakes up. Called from
// UserHandler, i.e. from signal-handler context.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  local_sem_post();
}
1761 
// Returns the number of a pending signal, decrementing its counter. If none
// is pending: returns -1 when <wait> is false, otherwise blocks on the signal
// semaphore until one arrives, cooperating with the external suspend/resume
// protocol while blocked.
static int check_pending_signals(bool wait) {
  // Reset the SIGINT coalescing counter (see UserHandler).
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // cmpxchg claims one occurrence of signal i; if another thread raced us
      // for the same slot, fall through and rescan.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      local_sem_wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //

        // Re-post the token we consumed so it is not lost while we suspend.
        local_sem_post();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
1801 
// Non-blocking probe for a pending signal; returns its number or -1.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Blocks until a signal is pending, then returns its number.
int os::signal_wait() {
  return check_pending_signals(true);
}
1809 
1810 ////////////////////////////////////////////////////////////////////////////////
1811 // Virtual Memory
1812 
1813 // We need to keep small simple bookkeeping for os::reserve_memory and friends.
1814 
1815 #define VMEM_MAPPED  1
1816 #define VMEM_SHMATED 2
1817 
1818 struct vmembk_t {
1819   int type;         // 1 - mmap, 2 - shmat
1820   char* addr;
1821   size_t size;      // Real size, may be larger than usersize.
1822   size_t pagesize;  // page size of area
1823   vmembk_t* next;
1824 
1825   bool contains_addr(char* p) const {
1826     return p >= addr && p < (addr + size);
1827   }
1828 
1829   bool contains_range(char* p, size_t s) const {
1830     return contains_addr(p) && contains_addr(p + s - 1);
1831   }
1832 
1833   void print_on(outputStream* os) const {
1834     os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1835       " bytes, %d %s pages), %s",
1836       addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1837       (type == VMEM_SHMATED ? "shmat" : "mmap")
1838     );
1839   }
1840 
1841   // Check that range is a sub range of memory block (or equal to memory block);
1842   // also check that range is fully page aligned to the page size if the block.
1843   void assert_is_valid_subrange(char* p, size_t s) const {
1844     if (!contains_range(p, s)) {
1845       trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1846               "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
1847               p, p + s, addr, addr + size);
1848       guarantee0(false);
1849     }
1850     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1851       trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1852               " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
1853       guarantee0(false);
1854     }
1855   }
1856 };
1857 
1858 static struct {
1859   vmembk_t* first;
1860   MiscUtils::CritSect cs;
1861 } vmem;
1862 
1863 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1864   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1865   assert0(p);
1866   if (p) {
1867     MiscUtils::AutoCritSect lck(&vmem.cs);
1868     p->addr = addr; p->size = size;
1869     p->pagesize = pagesize;
1870     p->type = type;
1871     p->next = vmem.first;
1872     vmem.first = p;
1873   }
1874 }
1875 
1876 static vmembk_t* vmembk_find(char* addr) {
1877   MiscUtils::AutoCritSect lck(&vmem.cs);
1878   for (vmembk_t* p = vmem.first; p; p = p->next) {
1879     if (p->addr <= addr && (p->addr + p->size) > addr) {
1880       return p;
1881     }
1882   }
1883   return NULL;
1884 }
1885 
1886 static void vmembk_remove(vmembk_t* p0) {
1887   MiscUtils::AutoCritSect lck(&vmem.cs);
1888   assert0(p0);
1889   assert0(vmem.first); // List should not be empty.
1890   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1891     if (*pp == p0) {
1892       *pp = p0->next;
1893       ::free(p0);
1894       return;
1895     }
1896   }
1897   assert0(false); // Not found?
1898 }
1899 
1900 static void vmembk_print_on(outputStream* os) {
1901   MiscUtils::AutoCritSect lck(&vmem.cs);
1902   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1903     vmi->print_on(os);
1904     os->cr();
1905   }
1906 }
1907 
1908 // Reserve and attach a section of System V memory.
1909 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1910 // address. Failing that, it will attach the memory anywhere.
1911 // If <requested_addr> is NULL, function will attach the memory anywhere.
1912 //
1913 // <alignment_hint> is being ignored by this function. It is very probable however that the
1914 // alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
1915 // Should this be not enogh, we can put more work into it.
1916 static char* reserve_shmated_memory (
1917   size_t bytes,
1918   char* requested_addr,
1919   size_t alignment_hint) {
1920 
1921   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1922     PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1923     bytes, requested_addr, alignment_hint);
1924 
1925   // Either give me wish address or wish alignment but not both.
1926   assert0(!(requested_addr != NULL && alignment_hint != 0));
1927 
1928   // We must prevent anyone from attaching too close to the
1929   // BRK because that may cause malloc OOM.
1930   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1931     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1932       "Will attach anywhere.", requested_addr);
1933     // Act like the OS refused to attach there.
1934     requested_addr = NULL;
1935   }
1936 
1937   // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1938   // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1939   if (os::Aix::on_pase_V5R4_or_older()) {
1940     ShouldNotReachHere();
1941   }
1942 
1943   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1944   const size_t size = align_size_up(bytes, 64*K);
1945 
1946   // Reserve the shared segment.
1947   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1948   if (shmid == -1) {
1949     trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1950     return NULL;
1951   }
1952 
1953   // Important note:
1954   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
1955   // We must right after attaching it remove it from the system. System V shm segments are global and
1956   // survive the process.
1957   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
1958 
1959   struct shmid_ds shmbuf;
1960   memset(&shmbuf, 0, sizeof(shmbuf));
1961   shmbuf.shm_pagesize = 64*K;
1962   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
1963     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
1964                size / (64*K), errno);
1965     // I want to know if this ever happens.
1966     assert(false, "failed to set page size for shmat");
1967   }
1968 
1969   // Now attach the shared segment.
1970   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
1971   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
1972   // were not a segment boundary.
1973   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
1974   const int errno_shmat = errno;
1975 
1976   // (A) Right after shmat and before handing shmat errors delete the shm segment.
1977   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
1978     trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
1979     assert(false, "failed to remove shared memory segment!");
1980   }
1981 
1982   // Handle shmat error. If we failed to attach, just return.
1983   if (addr == (char*)-1) {
1984     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
1985     return NULL;
1986   }
1987 
1988   // Just for info: query the real page size. In case setting the page size did not
1989   // work (see above), the system may have given us something other then 4K (LDR_CNTRL).
1990   const size_t real_pagesize = os::Aix::query_pagesize(addr);
1991   if (real_pagesize != shmbuf.shm_pagesize) {
1992     trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
1993   }
1994 
1995   if (addr) {
1996     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
1997       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
1998   } else {
1999     if (requested_addr != NULL) {
2000       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
2001     } else {
2002       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
2003     }
2004   }
2005 
2006   // book-keeping
2007   vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
2008   assert0(is_aligned_to(addr, os::vm_page_size()));
2009 
2010   return addr;
2011 }
2012 
2013 static bool release_shmated_memory(char* addr, size_t size) {
2014 
2015   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2016     addr, addr + size - 1);
2017 
2018   bool rc = false;
2019 
2020   // TODO: is there a way to verify shm size without doing bookkeeping?
2021   if (::shmdt(addr) != 0) {
2022     trcVerbose("error (%d).", errno);
2023   } else {
2024     trcVerbose("ok.");
2025     rc = true;
2026   }
2027   return rc;
2028 }
2029 
2030 static bool uncommit_shmated_memory(char* addr, size_t size) {
2031   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2032     addr, addr + size - 1);
2033 
2034   const bool rc = my_disclaim64(addr, size);
2035 
2036   if (!rc) {
2037     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2038     return false;
2039   }
2040   return true;
2041 }
2042 
2043 ////////////////////////////////  mmap-based routines /////////////////////////////////
2044 
2045 // Reserve memory via mmap.
2046 // If <requested_addr> is given, an attempt is made to attach at the given address.
2047 // Failing that, memory is allocated at any address.
2048 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2049 // allocate at an address aligned with the given alignment. Failing that, memory
2050 // is aligned anywhere.
2051 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2052   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2053     "alignment_hint " UINTX_FORMAT "...",
2054     bytes, requested_addr, alignment_hint);
2055 
2056   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2057   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
2058     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2059     return NULL;
2060   }
2061 
2062   // We must prevent anyone from attaching too close to the
2063   // BRK because that may cause malloc OOM.
2064   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2065     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2066       "Will attach anywhere.", requested_addr);
2067     // Act like the OS refused to attach there.
2068     requested_addr = NULL;
2069   }
2070 
2071   // Specify one or the other but not both.
2072   assert0(!(requested_addr != NULL && alignment_hint > 0));
2073 
2074   // In 64K mode, we claim the global page size (os::vm_page_size())
2075   // is 64K. This is one of the few points where that illusion may
2076   // break, because mmap() will always return memory aligned to 4K. So
2077   // we must ensure we only ever return memory aligned to 64k.
2078   if (alignment_hint) {
2079     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2080   } else {
2081     alignment_hint = os::vm_page_size();
2082   }
2083 
2084   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2085   const size_t size = align_size_up(bytes, os::vm_page_size());
2086 
2087   // alignment: Allocate memory large enough to include an aligned range of the right size and
2088   // cut off the leading and trailing waste pages.
2089   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2090   const size_t extra_size = size + alignment_hint;
2091 
2092   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2093   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2094   int flags = MAP_ANONYMOUS | MAP_SHARED;
2095 
2096   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2097   // it means if wishaddress is given but MAP_FIXED is not set.
2098   //
2099   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2100   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2101   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2102   // get clobbered.
2103   if (requested_addr != NULL) {
2104     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2105       flags |= MAP_FIXED;
2106     }
2107   }
2108 
2109   char* addr = (char*)::mmap(requested_addr, extra_size,
2110       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2111 
2112   if (addr == MAP_FAILED) {
2113     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2114     return NULL;
2115   }
2116 
2117   // Handle alignment.
2118   char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2119   const size_t waste_pre = addr_aligned - addr;
2120   char* const addr_aligned_end = addr_aligned + size;
2121   const size_t waste_post = extra_size - waste_pre - size;
2122   if (waste_pre > 0) {
2123     ::munmap(addr, waste_pre);
2124   }
2125   if (waste_post > 0) {
2126     ::munmap(addr_aligned_end, waste_post);
2127   }
2128   addr = addr_aligned;
2129 
2130   if (addr) {
2131     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2132       addr, addr + bytes, bytes);
2133   } else {
2134     if (requested_addr != NULL) {
2135       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2136     } else {
2137       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2138     }
2139   }
2140 
2141   // bookkeeping
2142   vmembk_add(addr, size, 4*K, VMEM_MAPPED);
2143 
2144   // Test alignment, see above.
2145   assert0(is_aligned_to(addr, os::vm_page_size()));
2146 
2147   return addr;
2148 }
2149 
2150 static bool release_mmaped_memory(char* addr, size_t size) {
2151   assert0(is_aligned_to(addr, os::vm_page_size()));
2152   assert0(is_aligned_to(size, os::vm_page_size()));
2153 
2154   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2155     addr, addr + size - 1);
2156   bool rc = false;
2157 
2158   if (::munmap(addr, size) != 0) {
2159     trcVerbose("failed (%d)\n", errno);
2160     rc = false;
2161   } else {
2162     trcVerbose("ok.");
2163     rc = true;
2164   }
2165 
2166   return rc;
2167 }
2168 
2169 static bool uncommit_mmaped_memory(char* addr, size_t size) {
2170 
2171   assert0(is_aligned_to(addr, os::vm_page_size()));
2172   assert0(is_aligned_to(size, os::vm_page_size()));
2173 
2174   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2175     addr, addr + size - 1);
2176   bool rc = false;
2177 
2178   // Uncommit mmap memory with msync MS_INVALIDATE.
2179   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2180     trcVerbose("failed (%d)\n", errno);
2181     rc = false;
2182   } else {
2183     trcVerbose("ok.");
2184     rc = true;
2185   }
2186 
2187   return rc;
2188 }
2189 
// Returns the global VM page size (os::Aix::page_size(), set up in os::init).
int os::vm_page_size() {
  // Seems redundant as all get out.
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}

// Aix allocates memory by pages.
int os::vm_allocation_granularity() {
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}
2201 
#ifdef PRODUCT
// Logs details of a failed commit. Only compiled into PRODUCT builds, where
// it is invoked via PRODUCT_ONLY from pd_commit_memory_or_exit() below.
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
          os::errno_name(err), err);
}
#endif
2210 
// Like pd_commit_memory(), but exits the VM via vm_exit_out_of_memory()
// with message <mesg> instead of returning false on failure.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    // Add extra info in product mode for vm_exit_out_of_memory():
    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}
2220 
// "Commits" [addr, addr + size). AIX commits memory on first touch, so this
// only optionally pre-touches the pages (-XX:+UseExplicitCommit); <exec> is
// ignored. The range must lie within a tracked reservation. Always returns true.
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {

  assert(is_aligned_to(addr, os::vm_page_size()),
    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    p2i(addr), os::vm_page_size());
  assert(is_aligned_to(size, os::vm_page_size()),
    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    size, os::vm_page_size());

  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);
  vmi->assert_is_valid_subrange(addr, size);

  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);

  if (UseExplicitCommit) {
    // AIX commits memory on touch. So, touch all pages to be committed.
    for (char* p = addr; p < (addr + size); p += 4*K) {
      *p = '\0';
    }
  }

  return true;
}
2245 
// Commit with alignment hint; the hint is irrelevant on AIX and ignored.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
  return pd_commit_memory(addr, size, exec);
}

// Commit-or-exit with alignment hint; the hint is likewise ignored.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // Alignment_hint is ignored on this OS.
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
2256 
2257 bool os::pd_uncommit_memory(char* addr, size_t size) {
2258   assert(is_aligned_to(addr, os::vm_page_size()),
2259     "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2260     p2i(addr), os::vm_page_size());
2261   assert(is_aligned_to(size, os::vm_page_size()),
2262     "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2263     size, os::vm_page_size());
2264 
2265   // Dynamically do different things for mmap/shmat.
2266   const vmembk_t* const vmi = vmembk_find(addr);
2267   guarantee0(vmi);
2268   vmi->assert_is_valid_subrange(addr, size);
2269 
2270   if (vmi->type == VMEM_SHMATED) {
2271     return uncommit_shmated_memory(addr, size);
2272   } else {
2273     return uncommit_mmaped_memory(addr, size);
2274   }
2275 }
2276 
// Stack guard pages need no explicit commit on AIX; callers must not reach this.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}

// Counterpart of the above; likewise must not be called on AIX.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}

// No-op on AIX.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

// No-op on AIX.
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2294 
// NUMA support is not implemented on AIX; the stubs below present a single
// pseudo NUMA group to the rest of the VM.

// No-op on AIX.
void os::numa_make_global(char *addr, size_t bytes) {
}

// No-op on AIX.
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}

// The (trivial) topology never changes.
bool os::numa_topology_changed() {
  return false;
}

// Exactly one group.
size_t os::numa_get_groups_num() {
  return 1;
}

// All threads belong to group 0.
int os::numa_get_group_id() {
  return 0;
}
2312 
2313 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2314   if (size > 0) {
2315     ids[0] = 0;
2316     return 1;
2317   }
2318   return 0;
2319 }
2320 
// Page info queries are not supported on AIX.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}

// Page scanning is not supported on AIX; reports the whole range as scanned.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
2328 
2329 // Reserves and attaches a shared memory segment.
2330 // Will assert if a wish address is given and could not be obtained.
2331 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2332 
2333   // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2334   // thereby clobbering old mappings at that place. That is probably
2335   // not intended, never used and almost certainly an error were it
2336   // ever be used this way (to try attaching at a specified address
2337   // without clobbering old mappings an alternate API exists,
2338   // os::attempt_reserve_memory_at()).
2339   // Instead of mimicking the dangerous coding of the other platforms, here I
2340   // just ignore the request address (release) or assert(debug).
2341   assert0(requested_addr == NULL);
2342 
2343   // Always round to os::vm_page_size(), which may be larger than 4K.
2344   bytes = align_size_up(bytes, os::vm_page_size());
2345   const size_t alignment_hint0 =
2346     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2347 
2348   // In 4K mode always use mmap.
2349   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2350   if (os::vm_page_size() == 4*K) {
2351     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2352   } else {
2353     if (bytes >= Use64KPagesThreshold) {
2354       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2355     } else {
2356       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2357     }
2358   }
2359 }
2360 
// Releases a previously reserved range. For a shmat-based range a partial
// release only disclaims (uncommits) the pages; only a full release actually
// destroys the mapping. Removes the bookkeeping entry when the mapping was
// destroyed. Returns true on success.
bool os::pd_release_memory(char* addr, size_t size) {

  // Dynamically do different things for mmap/shmat.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  size = align_size_up(size, os::vm_page_size());
  addr = (char *)align_ptr_up(addr, os::vm_page_size());

  bool rc = false;
  bool remove_bookkeeping = false;
  if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use memory anymore (but still page
    //   table space).
    vmi->assert_is_valid_subrange(addr, size);
    if (addr == vmi->addr && size == vmi->size) {
      rc = release_shmated_memory(addr, size);
      remove_bookkeeping = true;
    } else {
      rc = uncommit_shmated_memory(addr, size);
    }
  } else {
    // User may unmap partial regions but region has to be fully contained.
#ifdef ASSERT
    vmi->assert_is_valid_subrange(addr, size);
#endif
    rc = release_mmaped_memory(addr, size);
    remove_bookkeeping = true;
  }

  // update bookkeeping
  if (rc && remove_bookkeeping) {
    vmembk_remove(vmi);
  }

  return rc;
}
2402 
2403 static bool checked_mprotect(char* addr, size_t size, int prot) {
2404 
2405   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2406   // not tell me if protection failed when trying to protect an un-protectable range.
2407   //
2408   // This means if the memory was allocated using shmget/shmat, protection wont work
2409   // but mprotect will still return 0:
2410   //
2411   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2412 
2413   bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2414 
2415   if (!rc) {
2416     const char* const s_errno = os::errno_name(errno);
2417     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2418     return false;
2419   }
2420 
2421   // mprotect success check
2422   //
2423   // Mprotect said it changed the protection but can I believe it?
2424   //
2425   // To be sure I need to check the protection afterwards. Try to
2426   // read from protected memory and check whether that causes a segfault.
2427   //
2428   if (!os::Aix::xpg_sus_mode()) {
2429 
2430     if (CanUseSafeFetch32()) {
2431 
2432       const bool read_protected =
2433         (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2434          SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2435 
2436       if (prot & PROT_READ) {
2437         rc = !read_protected;
2438       } else {
2439         rc = read_protected;
2440       }
2441 
2442       if (!rc) {
2443         if (os::Aix::on_pase()) {
2444           // There is an issue on older PASE systems where mprotect() will return success but the
2445           // memory will not be protected.
2446           // This has nothing to do with the problem of using mproect() on SPEC1170 incompatible
2447           // machines; we only see it rarely, when using mprotect() to protect the guard page of
2448           // a stack. It is an OS error.
2449           //
2450           // A valid strategy is just to try again. This usually works. :-/
2451 
2452           ::usleep(1000);
2453           if (::mprotect(addr, size, prot) == 0) {
2454             const bool read_protected_2 =
2455               (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2456               SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2457             rc = true;
2458           }
2459         }
2460       }
2461     }
2462   }
2463 
2464   assert(rc == true, "mprotect failed.");
2465 
2466   return rc;
2467 }
2468 
2469 // Set protections specified
2470 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2471   unsigned int p = 0;
2472   switch (prot) {
2473   case MEM_PROT_NONE: p = PROT_NONE; break;
2474   case MEM_PROT_READ: p = PROT_READ; break;
2475   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2476   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2477   default:
2478     ShouldNotReachHere();
2479   }
2480   // is_committed is unused.
2481   return checked_mprotect(addr, size, p);
2482 }
2483 
// Removes all access from the range (PROT_NONE); used for guard pages.
bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}

// Restores full (rwx) access to a previously guarded range.
bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}
2491 
// Large page support

// Large page size as reported by os::large_page_size(). Stays 0 on AIX
// because huge pages are not supported (see large_page_init below).
static size_t _large_page_size = 0;
2495 
2496 // Enable large page support if OS allows that.
2497 void os::large_page_init() {
2498   return; // Nothing to do. See query_multipage_support and friends.
2499 }
2500 
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
  // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
  // so this is not needed. All parameters are therefore ignored.
  assert(false, "should not be called on AIX");
  return NULL;
}
2508 
bool os::release_memory_special(char* base, size_t bytes) {
  // Never reached on AIX: reserve_memory_special() is unused here (see above),
  // so there is never special memory to release.
  Unimplemented();
  return false;
}
2514 
2515 size_t os::large_page_size() {
2516   return _large_page_size;
2517 }
2518 
2519 bool os::can_commit_large_page_memory() {
2520   // Does not matter, we do not support huge pages.
2521   return false;
2522 }
2523 
2524 bool os::can_execute_large_page_memory() {
2525   // Does not matter, we do not support huge pages.
2526   return false;
2527 }
2528 
2529 // Reserve memory at an arbitrary address, only if that area is
2530 // available (and not reserved for something else).
2531 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2532   char* addr = NULL;
2533 
2534   // Always round to os::vm_page_size(), which may be larger than 4K.
2535   bytes = align_size_up(bytes, os::vm_page_size());
2536 
2537   // In 4K mode always use mmap.
2538   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2539   if (os::vm_page_size() == 4*K) {
2540     return reserve_mmaped_memory(bytes, requested_addr, 0);
2541   } else {
2542     if (bytes >= Use64KPagesThreshold) {
2543       return reserve_shmated_memory(bytes, requested_addr, 0);
2544     } else {
2545       return reserve_mmaped_memory(bytes, requested_addr, 0);
2546     }
2547   }
2548 
2549   return addr;
2550 }
2551 
2552 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2553   return ::read(fd, buf, nBytes);
2554 }
2555 
2556 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2557   return ::pread(fd, buf, nBytes, offset);
2558 }
2559 
2560 void os::naked_short_sleep(jlong ms) {
2561   struct timespec req;
2562 
2563   assert(ms < 1000, "Un-interruptable sleep, short time use only");
2564   req.tv_sec = 0;
2565   if (ms > 0) {
2566     req.tv_nsec = (ms % 1000) * 1000000;
2567   }
2568   else {
2569     req.tv_nsec = 1;
2570   }
2571 
2572   nanosleep(&req, NULL);
2573 
2574   return;
2575 }
2576 
2577 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2578 void os::infinite_sleep() {
2579   while (true) {    // sleep forever ...
2580     ::sleep(100);   // ... 100 seconds at a time
2581   }
2582 }
2583 
2584 // Used to convert frequent JVM_Yield() to nops
2585 bool os::dont_yield() {
2586   return DontYieldALot;
2587 }
2588 
2589 void os::naked_yield() {
2590   sched_yield();
2591 }
2592 
2593 ////////////////////////////////////////////////////////////////////////////////
2594 // thread priority support
2595 
2596 // From AIX manpage to pthread_setschedparam
2597 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2598 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2599 //
2600 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2601 // range from 40 to 80, where 40 is the least favored priority and 80
2602 // is the most favored."
2603 //
2604 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2605 // scheduling there; however, this still leaves iSeries.)
2606 //
2607 // We use the same values for AIX and PASE.
// Mapping from Java priorities (index 0..CriticalPriority) to AIX
// SCHED_OTHER priorities; values lie within the documented 40..80 range
// (see the manpage excerpt above).
int os::java_to_os_priority[CriticalPriority + 1] = {
  54,             // 0 Entry should never be used

  55,             // 1 MinPriority
  55,             // 2
  56,             // 3

  56,             // 4
  57,             // 5 NormPriority
  57,             // 6

  58,             // 7
  58,             // 8
  59,             // 9 NearMaxPriority

  60,             // 10 MaxPriority

  60              // 11 CriticalPriority
};
2627 
2628 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2629   if (!UseThreadPriorities) return OS_OK;
2630   pthread_t thr = thread->osthread()->pthread_id();
2631   int policy = SCHED_OTHER;
2632   struct sched_param param;
2633   param.sched_priority = newpri;
2634   int ret = pthread_setschedparam(thr, policy, &param);
2635 
2636   if (ret != 0) {
2637     trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2638         (int)thr, newpri, ret, os::errno_name(ret));
2639   }
2640   return (ret == 0) ? OS_OK : OS_ERR;
2641 }
2642 
2643 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2644   if (!UseThreadPriorities) {
2645     *priority_ptr = java_to_os_priority[NormPriority];
2646     return OS_OK;
2647   }
2648   pthread_t thr = thread->osthread()->pthread_id();
2649   int policy = SCHED_OTHER;
2650   struct sched_param param;
2651   int ret = pthread_getschedparam(thr, &policy, &param);
2652   *priority_ptr = param.sched_priority;
2653 
2654   return (ret == 0) ? OS_OK : OS_ERR;
2655 }
2656 
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}  // No such hint available on AIX; deliberately a no-op.
2660 
2661 ////////////////////////////////////////////////////////////////////////////////
2662 // suspend/resume support
2663 
2664 //  the low-level signal-based suspend/resume support is a remnant from the
2665 //  old VM-suspension that used to be for java-suspension, safepoints etc,
2666 //  within hotspot. Now there is a single use-case for this:
2667 //    - calling get_thread_pc() on the VMThread by the flat-profiler task
2668 //      that runs in the watcher thread.
2669 //  The remaining code is greatly simplified from the more general suspension
2670 //  code that used to be used.
2671 //
2672 //  The protocol is quite simple:
2673 //  - suspend:
2674 //      - sends a signal to the target thread
2675 //      - polls the suspend state of the osthread using a yield loop
2676 //      - target thread signal handler (SR_handler) sets suspend state
2677 //        and blocks in sigsuspend until continued
2678 //  - resume:
2679 //      - sets target osthread state to continue
2680 //      - sends signal to end the sigsuspend loop in the SR_handler
2681 //
2682 //  Note that the SR_lock plays no role in this suspend/resume protocol,
2683 //  but is checked for NULL in SR_handler as a thread termination indicator.
2684 //
2685 
2686 static void resume_clear_context(OSThread *osthread) {
2687   osthread->set_ucontext(NULL);
2688   osthread->set_siginfo(NULL);
2689 }
2690 
2691 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2692   osthread->set_ucontext(context);
2693   osthread->set_siginfo(siginfo);
2694 }
2695 
2696 //
2697 // Handler function invoked when a thread's execution is suspended or
2698 // resumed. We have to be careful that only async-safe functions are
2699 // called here (Note: most pthread functions are not async safe and
2700 // should be avoided.)
2701 //
// Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal handler
// from being run. libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal.
2707 //
2708 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
2709 //
// Signal handler for SR_signum, driving the suspend/resume state machine
// described above. Must remain async-signal-safe.
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current_or_null_safe();
  assert(thread != NULL, "Missing current thread in SR_handler");

  // On some systems we have seen signal delivery get "stuck" until the signal
  // mask is changed as part of thread termination. Check that the current thread
  // has not already terminated (via SR_lock()) - else the following assertion
  // will fail because the thread is no longer a JavaThread as the ~JavaThread
  // destructor has completed.

  if (thread->SR_lock() == NULL) {
    return;
  }

  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  OSThread* osthread = thread->osthread();

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        // sigsuspend may return for unrelated signals; only leave the loop
        // once do_resume() has moved the state to SR_RUNNING.
        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    ShouldNotReachHere();
  }

  errno = old_errno;
}
2772 
// Choose and install the suspend/resume signal (overridable via the
// _JAVA_SR_SIGNUM environment variable). Returns 0 on success, -1 if the
// handler could not be installed.
static int SR_initialize() {
  struct sigaction act;
  char *s;
  // Get signal number to use for suspend/resume
  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
    int sig = ::strtol(s, 0, 10);
    if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
        sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
      SR_signum = sig;
    } else {
      warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
              sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
    }
  }

  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");

  sigemptyset(&SR_sigset);
  sigaddset(&SR_sigset, SR_signum);

  // Set up signal handler for suspend/resume.
  act.sa_flags = SA_RESTART|SA_SIGINFO;
  // NOTE(review): SA_SIGINFO is set but the handler is stored via sa_handler
  // with a cast; this presumably relies on sa_handler/sa_sigaction sharing
  // storage on AIX - confirm, sa_sigaction would express the intent better.
  act.sa_handler = (void (*)(int)) SR_handler;

  // SR_signum is blocked by default.
  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);

  if (sigaction(SR_signum, &act, 0) == -1) {
    return -1;
  }

  // Save signal flag
  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
  return 0;
}
2809 
// Counterpart to SR_initialize; there is nothing to tear down.
static int SR_finalize() {
  return 0;
}
2813 
2814 static int sr_notify(OSThread* osthread) {
2815   int status = pthread_kill(osthread->pthread_id(), SR_signum);
2816   assert_status(status == 0, status, "pthread_kill");
2817   return status;
2818 }
2819 
2820 // "Randomly" selected value for how long we want to spin
2821 // before bailing out on suspending a thread, also how often
2822 // we send a signal to a thread we want to resume
2823 static const int RANDOMLY_LARGE_INTEGER = 1000000;
2824 static const int RANDOMLY_LARGE_INTEGER2 = 100;
2825 
// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    // Yield-spin in short bursts before checking the overall bound.
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::naked_yield();
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        // Cancel won the race - the thread never suspended.
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        // Suspension completed just before the cancel - treat as success.
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
2878 
// Resume a thread suspended via do_suspend(); keeps signalling until the
// target's SR_handler confirms it is running again.
static void do_resume(OSThread* osthread) {
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  // The signal may get lost if it arrives while the target is not yet in
  // sigsuspend, hence the resend loop.
  while (!osthread->sr.is_running()) {
    if (sr_notify(osthread) == 0) {
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::naked_yield();
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
2902 
2903 ///////////////////////////////////////////////////////////////////////////////////
2904 // signal handling (except suspend/resume)
2905 
2906 // This routine may be used by user applications as a "hook" to catch signals.
2907 // The user-defined signal handler must pass unrecognized signals to this
2908 // routine, and if it returns true (non-zero), then the signal handler must
// return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
2912 //
2913 // If this routine returns false, it is OK to call it again. This allows
2914 // the user-defined signal handler to perform checks either before or after
2915 // the VM performs its own checks. Naturally, the user code would be making
2916 // a serious error if it tried to handle an exception (such as a null check
2917 // or breakpoint) that the VM was generating for its own correct operation.
2918 //
2919 // This routine may recognize any of the following kinds of signals:
2920 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2921 // It should be consulted by handlers for any of those signals.
2922 //
2923 // The caller of this routine must pass in the three arguments supplied
2924 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
2925 // field of the structure passed to sigaction(). This routine assumes that
2926 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2927 //
2928 // Note that the VM will print warnings if it detects conflicting signal
2929 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2930 //
2931 extern "C" JNIEXPORT int
2932 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2933 
// Set thread signal mask (for some reason on AIX sigthreadmask() seems
// to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same).
bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  // pthread_sigmask returns the error number directly instead of setting the
  // global errno (sigthreadmask returns -1 and sets errno), which makes it
  // the more thread-safe choice for error handling. Success is always 0.
  return ::pthread_sigmask(how, set, oset) == 0;
}
2945 
2946 // Function to unblock all signals which are, according
2947 // to POSIX, typical program error signals. If they happen while being blocked,
2948 // they typically will bring down the process immediately.
2949 bool unblock_program_error_signals() {
2950   sigset_t set;
2951   ::sigemptyset(&set);
2952   ::sigaddset(&set, SIGILL);
2953   ::sigaddset(&set, SIGBUS);
2954   ::sigaddset(&set, SIGFPE);
2955   ::sigaddset(&set, SIGSEGV);
2956   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2957 }
2958 
// Renamed from 'signalHandler' to avoid collision with other shared libs.
// Handler installed by set_signal_handler() for the error signals the VM
// cares about; delegates the actual work to JVM_handle_aix_signal with
// abort_if_unrecognized == true.
void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
  assert(info != NULL && uc != NULL, "it must be old kernel");

  // Never leave program error signals blocked;
  // on all our platforms they would bring down the process immediately when
  // getting raised while being blocked.
  unblock_program_error_signals();

  int orig_errno = errno;  // Preserve errno value over signal handler.
  JVM_handle_aix_signal(sig, info, uc, true);
  errno = orig_errno;
}
2972 
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining
// Handlers that were installed before the VM took over a signal, indexed by
// signal number; 'sigs' records which entries of 'sigact' are valid.
struct sigaction sigact[NSIG];
sigset_t sigs;
bool os::Aix::libjsig_is_loaded = false;
// Type of, and pointer to, libjsig's JVM_get_signal_action lookup function
// (resolved via dlsym in install_signal_handlers()).
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Aix::get_signal_action = NULL;
2983 
2984 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2985   struct sigaction *actp = NULL;
2986 
2987   if (libjsig_is_loaded) {
2988     // Retrieve the old signal handler from libjsig
2989     actp = (*get_signal_action)(sig);
2990   }
2991   if (actp == NULL) {
2992     // Retrieve the preinstalled signal handler from jvm
2993     actp = get_preinstalled_handler(sig);
2994   }
2995 
2996   return actp;
2997 }
2998 
// Invoke a previously installed handler 'actp' for 'sig', honoring its
// sa_flags and sa_mask as sigaction() would. Returns true if the signal was
// handled (including SIG_IGN), false if the disposition was SIG_DFL.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    // Emulate SA_RESETHAND: disposition reverts to default after one delivery.
    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}
3043 
3044 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3045   bool chained = false;
3046   // signal-chaining
3047   if (UseSignalChaining) {
3048     struct sigaction *actp = get_chained_signal_action(sig);
3049     if (actp != NULL) {
3050       chained = call_chained_handler(actp, sig, siginfo, context);
3051     }
3052   }
3053   return chained;
3054 }
3055 
3056 size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
3057   // Creating guard page is very expensive. Java thread has HotSpot
3058   // guard pages, only enable glibc guard page for non-Java threads.
3059   // (Remember: compiler thread is a Java thread, too!)
3060   //
3061   // Aix can have different page sizes for stack (4K) and heap (64K).
3062   // As Hotspot knows only one page size, we assume the stack has
3063   // the same page size as the heap. Returning page_size() here can
3064   // cause 16 guard pages which we want to avoid.  Thus we return 4K
3065   // which will be rounded to the real page size by the OS.
3066   return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : 4 * K);
3067 }
3068 
3069 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3070   if (sigismember(&sigs, sig)) {
3071     return &sigact[sig];
3072   }
3073   return NULL;
3074 }
3075 
3076 void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3077   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3078   sigact[sig] = oldAct;
3079   sigaddset(&sigs, sig);
3080 }
3081 
// for diagnostic
// sa_flags the VM used when installing its handler for each signal; compared
// later (run_periodic_checks) to detect foreign modification.
int sigflags[NSIG];
3084 
3085 int os::Aix::get_our_sigflags(int sig) {
3086   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3087   return sigflags[sig];
3088 }
3089 
3090 void os::Aix::set_our_sigflags(int sig, int flags) {
3091   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3092   if (sig > 0 && sig < NSIG) {
3093     sigflags[sig] = flags;
3094   }
3095 }
3096 
// Install the VM's handler for 'sig' (set_installed == true) or restore the
// default disposition (false), respecting pre-existing user handlers per
// AllowUserSignalHandlers/UseSignalChaining.
void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  // A handler other than default/ignore/ours means someone else got there first.
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      fatal("Encountered unexpected pre-existing sigaction handler "
            "%#lx for signal %d.", (long)oldhand, sig);
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Verify nobody raced us between the initial query and the install.
  void* oldhand2 = oldAct.sa_sigaction
                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
3143 
// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
// Idempotent: does nothing after the first call. Also detects libjsig (via
// its exported JVM_begin/end_signal_setting symbols) and brackets the
// installation with its begin/end protocol when present.
void os::Aix::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      // Symbol present => libjsig is preloaded; resolve its companions.
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers.
      (*begin_signal_setting)();
    }

    ::sigemptyset(&sigs);
    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers.
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // Need to initialize check_signal_done.
      ::sigemptyset(&check_signal_done);
    }
  }
}
3200 
// Resolve 'handler' to a printable name: the basename of the containing
// library if known, otherwise the raw address. 'buf' is used as scratch
// space and also holds the returned string.
static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    // The way os::dll_address_to_library_name is implemented on Aix
    // right now, it always returns -1 for the offset which is not
    // terribly informative.
    // Will fix that. For now, omit the offset.
    // NOTE(review): p1 points into buf, so source and destination of this
    // snprintf overlap - appears to work for a left-shifting copy, but confirm.
    jio_snprintf(buf, buflen, "%s", p1);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}
3221 
// Print a one-line description of the handler currently installed for 'sig':
// handler name, sa_mask, sa_flags, and a warning if the flags differ from
// what the VM installed. 'buf' is scratch space of size 'buflen'.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // Maybe the handler was reset by VMError?
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  // Print textual representation of sa_flags.
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Check for flags, reset system-used one!
    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
    }
  }
  st->cr();
}
3268 
// Verify the handler installed for 'sig' unless it has already been checked
// (and recorded in check_signal_done by check_signal_handler).
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Aix::check_signal_handler(sig)
3272 
3273 // This method is a periodic task to check for misbehaving JNI applications
3274 // under CheckJNI, we can add any periodic checks here
3275 
3276 void os::run_periodic_checks() {
3277 
3278   if (check_signals == false) return;
3279 
3280   // SEGV and BUS if overridden could potentially prevent
3281   // generation of hs*.log in the event of a crash, debugging
3282   // such a case can be very challenging, so we absolutely
3283   // check the following for a good measure:
3284   DO_SIGNAL_CHECK(SIGSEGV);
3285   DO_SIGNAL_CHECK(SIGILL);
3286   DO_SIGNAL_CHECK(SIGFPE);
3287   DO_SIGNAL_CHECK(SIGBUS);
3288   DO_SIGNAL_CHECK(SIGPIPE);
3289   DO_SIGNAL_CHECK(SIGXFSZ);
3290   if (UseSIGTRAP) {
3291     DO_SIGNAL_CHECK(SIGTRAP);
3292   }
3293 
3294   // ReduceSignalUsage allows the user to override these handlers
3295   // see comments at the very top and jvm_solaris.h
3296   if (!ReduceSignalUsage) {
3297     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3298     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3299     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3300     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3301   }
3302 
3303   DO_SIGNAL_CHECK(SR_signum);
3304 }
3305 
// Signature of the libc sigaction(2) entry point.
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// Resolved lazily in check_signal_handler() via dlsym, so that the check
// bypasses any interposed sigaction (e.g. from libjsig).
static os_sigaction_t os_sigaction = NULL;
3309 
3310 void os::Aix::check_signal_handler(int sig) {
3311   char buf[O_BUFLEN];
3312   address jvmHandler = NULL;
3313 
3314   struct sigaction act;
3315   if (os_sigaction == NULL) {
3316     // only trust the default sigaction, in case it has been interposed
3317     os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
3318     if (os_sigaction == NULL) return;
3319   }
3320 
3321   os_sigaction(sig, (struct sigaction*)NULL, &act);
3322 
3323   address thisHandler = (act.sa_flags & SA_SIGINFO)
3324     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3325     : CAST_FROM_FN_PTR(address, act.sa_handler);
3326 
3327   switch(sig) {
3328   case SIGSEGV:
3329   case SIGBUS:
3330   case SIGFPE:
3331   case SIGPIPE:
3332   case SIGILL:
3333   case SIGXFSZ:
3334     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3335     break;
3336 
3337   case SHUTDOWN1_SIGNAL:
3338   case SHUTDOWN2_SIGNAL:
3339   case SHUTDOWN3_SIGNAL:
3340   case BREAK_SIGNAL:
3341     jvmHandler = (address)user_handler();
3342     break;
3343 
3344   default:
3345     if (sig == SR_signum) {
3346       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3347     } else {
3348       return;
3349     }
3350     break;
3351   }
3352 
3353   if (thisHandler != jvmHandler) {
3354     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3355     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3356     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3357     // No need to check this sig any longer
3358     sigaddset(&check_signal_done, sig);
3359     // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
3360     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3361       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3362                     exception_name(sig, buf, O_BUFLEN));
3363     }
3364   } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3365     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3366     tty->print("expected:");
3367     os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
3368     tty->cr();
3369     tty->print("  found:");
3370     os::Posix::print_sa_flags(tty, act.sa_flags);
3371     tty->cr();
3372     // No need to check this sig any longer
3373     sigaddset(&check_signal_done, sig);
3374   }
3375 
3376   // Dump all the signal
3377   if (sigismember(&check_signal_done, sig)) {
3378     print_signal_handlers(tty, buf, O_BUFLEN);
3379   }
3380 }
3381 
// To install functions for atexit system call
extern "C" {
  // atexit() requires a plain C function pointer; this forwards to the VM's
  // perfMemory shutdown (registered in os::init_2 when
  // PerfAllowAtExitRegistration is set).
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3388 
// This is called _before_ the most of global arguments have been parsed.
// Performs the OS-level setup that must not depend on VM flags:
// OS/PASE detection, environment scanning, page size selection, and
// basic system property initialization.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (Shared memory boundary is supposed to be a 256M aligned.)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // Record process break at startup.
  g_brk_at_startup = (address) ::sbrk(0);
  assert(g_brk_at_startup != (address) -1, "sbrk failed");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc).
  os::Aix::scan_environment();

  // Probe multipage support.
  query_multipage_support();

  // Act like we only have one page size by eliminating corner cases which
  // we did not support very well anyway.
  // We have two input conditions:
  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
  //    setting.
  //    Data segment page size is important for us because it defines the thread stack page
  //    size, which is needed for guard page handling, stack banging etc.
  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
  //    and should be allocated with 64k pages.
  //
  // So, we do the following:
  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
  // 64k          no              --- AIX 5.2 ? ---
  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)

  // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if stack page size is 64k you cannot pretend its 4k).

  if (g_multipage_support.datapsize == 4*K) {
    // datapsize = 4K. Data segment, thread stacks are 4K paged.
    if (g_multipage_support.can_use_64K_pages) {
      // .. but we are able to use 64K pages dynamically.
      // This would be typical for java launchers which are not linked
      // with datapsize=64K (like, any other launcher but our own).
      //
      // In this case it would be smart to allocate the java heap with 64K
      // to get the performance benefit, and to fake 64k pages for the
      // data segment (when dealing with thread stacks).
      //
      // However, leave a possibility to downgrade to 4K, using
      // -XX:-Use64KPages.
      if (Use64KPages) {
        trcVerbose("64K page mode (faked for data segment)");
        Aix::_page_size = 64*K;
      } else {
        trcVerbose("4K page mode (Use64KPages=off)");
        Aix::_page_size = 4*K;
      }
    } else {
      // .. and not able to allocate 64k pages dynamically. Here, just
      // fall back to 4K paged mode and use mmap for everything.
      trcVerbose("4K page mode");
      Aix::_page_size = 4*K;
      FLAG_SET_ERGO(bool, Use64KPages, false);
    }
  } else {
    // datapsize = 64k. Data segment, thread stacks are 64k paged.
    // This normally means that we can allocate 64k pages dynamically.
    // (There is one special case where this may be false: EXTSHM=on.
    // but we decided to not support that mode).
    assert0(g_multipage_support.can_use_64K_pages);
    Aix::_page_size = 64*K;
    trcVerbose("64K page mode");
    FLAG_SET_ERGO(bool, Use64KPages, true);
  }

  // For now UseLargePages is just ignored.
  FLAG_SET_ERGO(bool, UseLargePages, false);
  _page_sizes[0] = 0;

  // debug trace
  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));

  // Next, we need to initialize libo4 and libperfstat libraries.
  // libo4 is the PASE porting library; libperfstat is AIX-only.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialze basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  init_random(1234567);

  ThreadCritical::initialize();

  // Main_thread points to the aboriginal thread.
  Aix::_main_thread = pthread_self();

  initial_time_count = os::elapsed_counter();

  os::Posix::init();
}
3504 
3505 // This is called _after_ the global arguments have been parsed.
3506 jint os::init_2(void) {
3507 
3508   os::Posix::init_2();
3509 
3510   if (os::Aix::on_pase()) {
3511     trcVerbose("Running on PASE.");
3512   } else {
3513     trcVerbose("Running on AIX (not PASE).");
3514   }
3515 
3516   trcVerbose("processor count: %d", os::_processor_count);
3517   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3518 
3519   // Initially build up the loaded dll map.
3520   LoadedLibraries::reload();
3521   if (Verbose) {
3522     trcVerbose("Loaded Libraries: ");
3523     LoadedLibraries::print(tty);
3524   }
3525 
3526   const int page_size = Aix::page_size();
3527   const int map_size = page_size;
3528 
3529   address map_address = (address) MAP_FAILED;
3530   const int prot  = PROT_READ;
3531   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3532 
3533   // Use optimized addresses for the polling page,
3534   // e.g. map it to a special 32-bit address.
3535   if (OptimizePollingPageLocation) {
3536     // architecture-specific list of address wishes:
3537     address address_wishes[] = {
3538       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3539       // PPC64: all address wishes are non-negative 32 bit values where
3540       // the lower 16 bits are all zero. we can load these addresses
3541       // with a single ppc_lis instruction.
3542       (address) 0x30000000, (address) 0x31000000,
3543       (address) 0x32000000, (address) 0x33000000,
3544       (address) 0x40000000, (address) 0x41000000,
3545       (address) 0x42000000, (address) 0x43000000,
3546       (address) 0x50000000, (address) 0x51000000,
3547       (address) 0x52000000, (address) 0x53000000,
3548       (address) 0x60000000, (address) 0x61000000,
3549       (address) 0x62000000, (address) 0x63000000
3550     };
3551     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3552 
3553     // iterate over the list of address wishes:
3554     for (int i=0; i<address_wishes_length; i++) {
3555       // Try to map with current address wish.
3556       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3557       // fail if the address is already mapped.
3558       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3559                                      map_size, prot,
3560                                      flags | MAP_FIXED,
3561                                      -1, 0);
3562       trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",
3563                    address_wishes[i], map_address + (ssize_t)page_size);
3564 
3565       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3566         // Map succeeded and map_address is at wished address, exit loop.
3567         break;
3568       }
3569 
3570       if (map_address != (address) MAP_FAILED) {
3571         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3572         ::munmap(map_address, map_size);
3573         map_address = (address) MAP_FAILED;
3574       }
3575       // Map failed, continue loop.
3576     }
3577   } // end OptimizePollingPageLocation
3578 
3579   if (map_address == (address) MAP_FAILED) {
3580     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3581   }
3582   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3583   os::set_polling_page(map_address);
3584 
3585   if (!UseMembar) {
3586     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3587     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3588     os::set_memory_serialize_page(mem_serialize_page);
3589 
3590     trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
3591         mem_serialize_page, mem_serialize_page + Aix::page_size(),
3592         Aix::page_size(), Aix::page_size());
3593   }
3594 
3595   // initialize suspend/resume support - must do this before signal_sets_init()
3596   if (SR_initialize() != 0) {
3597     perror("SR_initialize failed");
3598     return JNI_ERR;
3599   }
3600 
3601   Aix::signal_sets_init();
3602   Aix::install_signal_handlers();
3603 
3604   // Check and sets minimum stack sizes against command line options
3605   if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
3606     return JNI_ERR;
3607   }
3608 
3609   if (UseNUMA) {
3610     UseNUMA = false;
3611     warning("NUMA optimizations are not available on this OS.");
3612   }
3613 
3614   if (MaxFDLimit) {
3615     // Set the number of file descriptors to max. print out error
3616     // if getrlimit/setrlimit fails but continue regardless.
3617     struct rlimit nbr_files;
3618     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3619     if (status != 0) {
3620       log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
3621     } else {
3622       nbr_files.rlim_cur = nbr_files.rlim_max;
3623       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3624       if (status != 0) {
3625         log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
3626       }
3627     }
3628   }
3629 
3630   if (PerfAllowAtExitRegistration) {
3631     // Only register atexit functions if PerfAllowAtExitRegistration is set.
3632     // At exit functions can be delayed until process exit time, which
3633     // can be problematic for embedded VM situations. Embedded VMs should
3634     // call DestroyJavaVM() to assure that VM resources are released.
3635 
3636     // Note: perfMemory_exit_helper atexit function may be removed in
3637     // the future if the appropriate cleanup code can be added to the
3638     // VM_Exit VMOperation's doit method.
3639     if (atexit(perfMemory_exit_helper) != 0) {
3640       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3641     }
3642   }
3643 
3644   return JNI_OK;
3645 }
3646 
3647 // Mark the polling page as unreadable
3648 void os::make_polling_page_unreadable(void) {
3649   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3650     fatal("Could not disable polling page");
3651   }
3652 };
3653 
3654 // Mark the polling page as readable
3655 void os::make_polling_page_readable(void) {
3656   // Changed according to os_linux.cpp.
3657   if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3658     fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
3659   }
3660 };
3661 
3662 int os::active_processor_count() {
3663   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3664   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3665   return online_cpus;
3666 }
3667 
3668 void os::set_native_thread_name(const char *name) {
3669   // Not yet implemented.
3670   return;
3671 }
3672 
3673 bool os::distribute_processes(uint length, uint* distribution) {
3674   // Not yet implemented.
3675   return false;
3676 }
3677 
3678 bool os::bind_to_processor(uint processor_id) {
3679   // Not yet implemented.
3680   return false;
3681 }
3682 
// Suspend the target thread, run do_task() on its captured context, then
// resume it. If the suspend fails, the task is silently skipped.
void os::SuspendedThreadTask::internal_do_task() {
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    // Resume only after a successful suspend.
    do_resume(_thread->osthread());
  }
}
3690 
// Helper task that suspends a thread and records its program counter.
// Used by os::get_thread_pc() below.
class PcFetcher : public os::SuspendedThreadTask {
public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  // Returns the captured PC; only valid after the task has run (see result()).
  ExtendedPC result();
protected:
  // Invoked while the target thread is suspended; captures its PC.
  void do_task(const os::SuspendedThreadTaskContext& context);
private:
  ExtendedPC _epc;  // PC captured by do_task()
};
3700 
// Return the PC captured by do_task(); must only be called once the
// suspended-thread task has completed.
ExtendedPC PcFetcher::result() {
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}
3705 
// Runs while the target thread is suspended: extract the PC from the
// thread's saved ucontext.
void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  Thread* thread = context.thread();
  OSThread* osthread = thread->osthread();
  if (osthread->ucontext() != NULL) {
    _epc = os::Aix::ucontext_get_pc((const ucontext_t *) context.ucontext());
  } else {
    // NULL context is unexpected, double-check this is the VMThread.
    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  }
}
3716 
3717 // Suspends the target using the signal mechanism and then grabs the PC before
3718 // resuming the target. Used by the flat-profiler only
3719 ExtendedPC os::get_thread_pc(Thread* thread) {
3720   // Make sure that it is called by the watcher for the VMThread.
3721   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3722   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3723 
3724   PcFetcher fetcher(thread);
3725   fetcher.run();
3726   return fetcher.result();
3727 }
3728 
3729 ////////////////////////////////////////////////////////////////////////////////
3730 // debug support
3731 
3732 bool os::find(address addr, outputStream* st) {
3733 
3734   st->print(PTR_FORMAT ": ", addr);
3735 
3736   loaded_module_t lm;
3737   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3738       LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3739     st->print_cr("%s", lm.path);
3740     return true;
3741   }
3742 
3743   return false;
3744 }
3745 
3746 ////////////////////////////////////////////////////////////////////////////////
3747 // misc
3748 
3749 // This does not do anything on Aix. This is basically a hook for being
3750 // able to use structured exception handling (thread-local exception filters)
3751 // on, e.g., Win32.
3752 void
3753 os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
3754                          JavaCallArguments* args, Thread* thread) {
3755   f(value, method, args, thread);
3756 }
3757 
// No OS-level statistics are collected on AIX; intentionally empty.
void os::print_statistics() {
}
3760 
3761 bool os::message_box(const char* title, const char* message) {
3762   int i;
3763   fdStream err(defaultStream::error_fd());
3764   for (i = 0; i < 78; i++) err.print_raw("=");
3765   err.cr();
3766   err.print_raw_cr(title);
3767   for (i = 0; i < 78; i++) err.print_raw("-");
3768   err.cr();
3769   err.print_raw_cr(message);
3770   for (i = 0; i < 78; i++) err.print_raw("=");
3771   err.cr();
3772 
3773   char buf[16];
3774   // Prevent process from exiting upon "read error" without consuming all CPU
3775   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3776 
3777   return buf[0] == 'y' || buf[0] == 'Y';
3778 }
3779 
3780 int os::stat(const char *path, struct stat *sbuf) {
3781   char pathbuf[MAX_PATH];
3782   if (strlen(path) > MAX_PATH - 1) {
3783     errno = ENAMETOOLONG;
3784     return -1;
3785   }
3786   os::native_path(strcpy(pathbuf, path));
3787   return ::stat(pathbuf, sbuf);
3788 }
3789 
3790 // Is a (classpath) directory empty?
3791 bool os::dir_is_empty(const char* path) {
3792   DIR *dir = NULL;
3793   struct dirent *ptr;
3794 
3795   dir = opendir(path);
3796   if (dir == NULL) return true;
3797 
3798   /* Scan the directory */
3799   bool result = true;
3800   char buf[sizeof(struct dirent) + MAX_PATH];
3801   while (result && (ptr = ::readdir(dir)) != NULL) {
3802     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3803       result = false;
3804     }
3805   }
3806   closedir(dir);
3807   return result;
3808 }
3809 
3810 // This code originates from JDK's sysOpen and open64_w
3811 // from src/solaris/hpi/src/system_md.c
3812 
// Open a file for the VM. Rejects overlong paths (ENAMETOOLONG) and
// directories (EISDIR), and marks the descriptor close-on-exec.
// Returns the file descriptor, or -1 with errno set.
int os::open(const char *path, int oflag, int mode) {

  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;

  // 64-bit capable open, required for files > 2G.
  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  // If the open succeeded, the file might still be a directory.
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      // fstat64 itself failed - give up on this descriptor.
      ::close(fd);
      return -1;
    }
  }

  // All file descriptors that are opened in the JVM and not
  // specifically destined for a subprocess should have the
  // close-on-exec flag set. If we don't set it, then careless 3rd
  // party native code might fork and exec without closing all
  // appropriate file descriptors (e.g. as we do in closeDescriptors in
  // UNIXProcess.c), and this in turn might:
  //
  // - cause end-of-file to fail to be detected on some file
  //   descriptors, resulting in mysterious hangs, or
  //
  // - might cause an fopen in the subprocess to fail on a system
  //   suffering from bug 1085341.
  //
  // (Yes, the default setting of the close-on-exec flag is a Unix
  // design flaw.)
  //
  // See:
  // 1085341: 32-bit stdio routines should support file descriptors >255
  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
#ifdef FD_CLOEXEC
  {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1)
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif

  return fd;
}
3872 
3873 // create binary file, rewriting existing file if required
3874 int os::create_binary_file(const char* path, bool rewrite_existing) {
3875   int oflags = O_WRONLY | O_CREAT;
3876   if (!rewrite_existing) {
3877     oflags |= O_EXCL;
3878   }
3879   return ::open64(path, oflags, S_IREAD | S_IWRITE);
3880 }
3881 
3882 // return current position of file pointer
3883 jlong os::current_file_offset(int fd) {
3884   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3885 }
3886 
3887 // move file pointer to the specified offset
3888 jlong os::seek_to_file_offset(int fd, jlong offset) {
3889   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3890 }
3891 
3892 // This code originates from JDK's sysAvailable
3893 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3894 
3895 int os::available(int fd, jlong *bytes) {
3896   jlong cur, end;
3897   int mode;
3898   struct stat64 buf64;
3899 
3900   if (::fstat64(fd, &buf64) >= 0) {
3901     mode = buf64.st_mode;
3902     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
3903       int n;
3904       if (::ioctl(fd, FIONREAD, &n) >= 0) {
3905         *bytes = n;
3906         return 1;
3907       }
3908     }
3909   }
3910   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
3911     return 0;
3912   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
3913     return 0;
3914   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
3915     return 0;
3916   }
3917   *bytes = end - cur;
3918   return 1;
3919 }
3920 
3921 // Map a block of memory.
3922 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3923                         char *addr, size_t bytes, bool read_only,
3924                         bool allow_exec) {
3925   int prot;
3926   int flags = MAP_PRIVATE;
3927 
3928   if (read_only) {
3929     prot = PROT_READ;
3930     flags = MAP_SHARED;
3931   } else {
3932     prot = PROT_READ | PROT_WRITE;
3933     flags = MAP_PRIVATE;
3934   }
3935 
3936   if (allow_exec) {
3937     prot |= PROT_EXEC;
3938   }
3939 
3940   if (addr != NULL) {
3941     flags |= MAP_FIXED;
3942   }
3943 
3944   // Allow anonymous mappings if 'fd' is -1.
3945   if (fd == -1) {
3946     flags |= MAP_ANONYMOUS;
3947   }
3948 
3949   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
3950                                      fd, file_offset);
3951   if (mapped_address == MAP_FAILED) {
3952     return NULL;
3953   }
3954   return mapped_address;
3955 }
3956 
3957 // Remap a block of memory.
3958 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
3959                           char *addr, size_t bytes, bool read_only,
3960                           bool allow_exec) {
3961   // same as map_memory() on this OS
3962   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
3963                         allow_exec);
3964 }
3965 
3966 // Unmap a block of memory.
3967 bool os::pd_unmap_memory(char* addr, size_t bytes) {
3968   return munmap(addr, bytes) == 0;
3969 }
3970 
3971 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
3972 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
3973 // of a thread.
3974 //
3975 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
3976 // the fast estimate available on the platform.
3977 
3978 jlong os::current_thread_cpu_time() {
3979   // return user + sys since the cost is the same
3980   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
3981   assert(n >= 0, "negative CPU time");
3982   return n;
3983 }
3984 
3985 jlong os::thread_cpu_time(Thread* thread) {
3986   // consistent with what current_thread_cpu_time() returns
3987   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
3988   assert(n >= 0, "negative CPU time");
3989   return n;
3990 }
3991 
3992 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
3993   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
3994   assert(n >= 0, "negative CPU time");
3995   return n;
3996 }
3997 
3998 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
3999   bool error = false;
4000 
4001   jlong sys_time = 0;
4002   jlong user_time = 0;
4003 
4004   // Reimplemented using getthrds64().
4005   //
4006   // Works like this:
4007   // For the thread in question, get the kernel thread id. Then get the
4008   // kernel thread statistics using that id.
4009   //
4010   // This only works of course when no pthread scheduling is used,
4011   // i.e. there is a 1:1 relationship to kernel threads.
4012   // On AIX, see AIXTHREAD_SCOPE variable.
4013 
4014   pthread_t pthtid = thread->osthread()->pthread_id();
4015 
4016   // retrieve kernel thread id for the pthread:
4017   tid64_t tid = 0;
4018   struct __pthrdsinfo pinfo;
4019   // I just love those otherworldly IBM APIs which force me to hand down
4020   // dummy buffers for stuff I dont care for...
4021   char dummy[1];
4022   int dummy_size = sizeof(dummy);
4023   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4024                           dummy, &dummy_size) == 0) {
4025     tid = pinfo.__pi_tid;
4026   } else {
4027     tty->print_cr("pthread_getthrds_np failed.");
4028     error = true;
4029   }
4030 
4031   // retrieve kernel timing info for that kernel thread
4032   if (!error) {
4033     struct thrdentry64 thrdentry;
4034     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4035       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4036       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4037     } else {
4038       tty->print_cr("pthread_getthrds_np failed.");
4039       error = true;
4040     }
4041   }
4042 
4043   if (p_sys_time) {
4044     *p_sys_time = sys_time;
4045   }
4046 
4047   if (p_user_time) {
4048     *p_user_time = user_time;
4049   }
4050 
4051   if (error) {
4052     return false;
4053   }
4054 
4055   return true;
4056 }
4057 
4058 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4059   jlong sys_time;
4060   jlong user_time;
4061 
4062   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4063     return -1;
4064   }
4065 
4066   return user_sys_cpu_time ? sys_time + user_time : user_time;
4067 }
4068 
4069 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4070   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4071   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4072   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4073   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4074 }
4075 
4076 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4077   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4078   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4079   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4080   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4081 }
4082 
// Per-thread CPU time is available on this platform (see
// thread_cpu_time_unchecked() above).
bool os::is_thread_cpu_time_supported() {
  return true;
}
4086 
4087 // System loadavg support. Returns -1 if load average cannot be obtained.
4088 // For now just return the system wide load average (no processor sets).
4089 int os::loadavg(double values[], int nelem) {
4090 
4091   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4092   guarantee(values, "argument error");
4093 
4094   if (os::Aix::on_pase()) {
4095 
4096     // AS/400 PASE: use libo4 porting library
4097     double v[3] = { 0.0, 0.0, 0.0 };
4098 
4099     if (libo4::get_load_avg(v, v + 1, v + 2)) {
4100       for (int i = 0; i < nelem; i ++) {
4101         values[i] = v[i];
4102       }
4103       return nelem;
4104     } else {
4105       return -1;
4106     }
4107 
4108   } else {
4109 
4110     // AIX: use libperfstat
4111     libperfstat::cpuinfo_t ci;
4112     if (libperfstat::get_cpuinfo(&ci)) {
4113       for (int i = 0; i < nelem; i++) {
4114         values[i] = ci.loadavg[i];
4115       }
4116     } else {
4117       return -1;
4118     }
4119     return nelem;
4120   }
4121 }
4122 
4123 void os::pause() {
4124   char filename[MAX_PATH];
4125   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4126     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4127   } else {
4128     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4129   }
4130 
4131   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4132   if (fd != -1) {
4133     struct stat buf;
4134     ::close(fd);
4135     while (::stat(filename, &buf) == 0) {
4136       (void)::poll(NULL, 0, 100);
4137     }
4138   } else {
4139     trcVerbose("Could not open pause file '%s', continuing immediately.", filename);
4140   }
4141 }
4142 
4143 bool os::Aix::is_primordial_thread() {
4144   if (pthread_self() == (pthread_t)1) {
4145     return true;
4146   } else {
4147     return false;
4148   }
4149 }
4150 
4151 // OS recognitions (PASE/AIX, OS level) call this before calling any
4152 // one of Aix::on_pase(), Aix::os_version() static
4153 void os::Aix::initialize_os_info() {
4154 
4155   assert(_on_pase == -1 && _os_version == 0, "already called.");
4156 
4157   struct utsname uts;
4158   memset(&uts, 0, sizeof(uts));
4159   strcpy(uts.sysname, "?");
4160   if (::uname(&uts) == -1) {
4161     trcVerbose("uname failed (%d)", errno);
4162     guarantee(0, "Could not determine whether we run on AIX or PASE");
4163   } else {
4164     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4165                "node \"%s\" machine \"%s\"\n",
4166                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4167     const int major = atoi(uts.version);
4168     assert(major > 0, "invalid OS version");
4169     const int minor = atoi(uts.release);
4170     assert(minor > 0, "invalid OS release");
4171     _os_version = (major << 24) | (minor << 16);
4172     char ver_str[20] = {0};
4173     char *name_str = "unknown OS";
4174     if (strcmp(uts.sysname, "OS400") == 0) {
4175       // We run on AS/400 PASE. We do not support versions older than V5R4M0.
4176       _on_pase = 1;
4177       if (os_version_short() < 0x0504) {
4178         trcVerbose("OS/400 releases older than V5R4M0 not supported.");
4179         assert(false, "OS/400 release too old.");
4180       }
4181       name_str = "OS/400 (pase)";
4182       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
4183     } else if (strcmp(uts.sysname, "AIX") == 0) {
4184       // We run on AIX. We do not support versions older than AIX 5.3.
4185       _on_pase = 0;
4186       // Determine detailed AIX version: Version, Release, Modification, Fix Level.
4187       odmWrapper::determine_os_kernel_version(&_os_version);
4188       if (os_version_short() < 0x0503) {
4189         trcVerbose("AIX release older than AIX 5.3 not supported.");
4190         assert(false, "AIX release too old.");
4191       }
4192       name_str = "AIX";
4193       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
4194                    major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
4195     } else {
4196       assert(false, name_str);
4197     }
4198     trcVerbose("We run on %s %s", name_str, ver_str);
4199   }
4200 
4201   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4202 } // end: os::Aix::initialize_os_info()
4203 
4204 // Scan environment for important settings which might effect the VM.
4205 // Trace out settings. Warn about invalid settings and/or correct them.
4206 //
4207 // Must run after os::Aix::initialue_os_info().
4208 void os::Aix::scan_environment() {
4209 
4210   char* p;
4211   int rc;
4212 
4213   // Warn explicity if EXTSHM=ON is used. That switch changes how
4214   // System V shared memory behaves. One effect is that page size of
4215   // shared memory cannot be change dynamically, effectivly preventing
4216   // large pages from working.
4217   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4218   // recommendation is (in OSS notes) to switch it off.
4219   p = ::getenv("EXTSHM");
4220   trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
4221   if (p && strcasecmp(p, "ON") == 0) {
4222     _extshm = 1;
4223     trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
4224     if (!AllowExtshm) {
4225       // We allow under certain conditions the user to continue. However, we want this
4226       // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
4227       // that the VM is not able to allocate 64k pages for the heap.
4228       // We do not want to run with reduced performance.
4229       vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
4230     }
4231   } else {
4232     _extshm = 0;
4233   }
4234 
4235   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4236   // Not tested, not supported.
4237   //
4238   // Note that it might be worth the trouble to test and to require it, if only to
4239   // get useful return codes for mprotect.
4240   //
4241   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4242   // exec() ? before loading the libjvm ? ....)
4243   p = ::getenv("XPG_SUS_ENV");
4244   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4245   if (p && strcmp(p, "ON") == 0) {
4246     _xpg_sus_mode = 1;
4247     trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
4248     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4249     // clobber address ranges. If we ever want to support that, we have to do some
4250     // testing first.
4251     guarantee(false, "XPG_SUS_ENV=ON not supported");
4252   } else {
4253     _xpg_sus_mode = 0;
4254   }
4255 
4256   if (os::Aix::on_pase()) {
4257     p = ::getenv("QIBM_MULTI_THREADED");
4258     trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
4259   }
4260 
4261   p = ::getenv("LDR_CNTRL");
4262   trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
4263   if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
4264     if (p && ::strstr(p, "TEXTPSIZE")) {
4265       trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
4266         "you may experience hangs or crashes on OS/400 V7R1.");
4267     }
4268   }
4269 
4270   p = ::getenv("AIXTHREAD_GUARDPAGES");
4271   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4272 
4273 } // end: os::Aix::scan_environment()
4274 
4275 // PASE: initialize the libo4 library (PASE porting library).
4276 void os::Aix::initialize_libo4() {
4277   guarantee(os::Aix::on_pase(), "OS/400 only.");
4278   if (!libo4::init()) {
4279     trcVerbose("libo4 initialization failed.");
4280     assert(false, "libo4 initialization failed");
4281   } else {
4282     trcVerbose("libo4 initialized.");
4283   }
4284 }
4285 
4286 // AIX: initialize the libperfstat library.
4287 void os::Aix::initialize_libperfstat() {
4288   assert(os::Aix::on_aix(), "AIX only");
4289   if (!libperfstat::init()) {
4290     trcVerbose("libperfstat initialization failed.");
4291     assert(false, "libperfstat initialization failed");
4292   } else {
4293     trcVerbose("libperfstat initialized.");
4294   }
4295 }
4296 
4297 /////////////////////////////////////////////////////////////////////////////
4298 // thread stack
4299 
// Function to query the current thread's stack base and size using
// pthread_getthrds_np. Returns true on success, false if the pthread
// library query failed. Either output parameter may be NULL if the
// caller is only interested in the other value.
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {

  // Information about this api can be found (a) in the pthread.h header and
  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
  //
  // The use of this API to find out the current stack is kind of undefined.
  // But after a lot of tries and asking IBM about it, I concluded that it is safe
  // enough for cases where I let the pthread library create its stacks. For cases
  // where I create an own stack and pass this to pthread_create, it seems not to
  // work (the returned stack size in that case is 0).

  pthread_t tid = pthread_self();
  struct __pthrdsinfo pinfo;
  char dummy[1]; // Just needed to satisfy pthread_getthrds_np.
  int dummy_size = sizeof(dummy);

  memset(&pinfo, 0, sizeof(pinfo));

  const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
                                     sizeof(pinfo), dummy, &dummy_size);

  if (rc != 0) {
    trcVerbose("pthread_getthrds_np failed (%d)", rc);
    return false;
  }
  guarantee0(pinfo.__pi_stackend);

  // The following may happen when invoking pthread_getthrds_np on a pthread
  // running on a user provided stack (when handing down a stack to pthread
  // create, see pthread_attr_setstackaddr).
  // Not sure what to do then.

  guarantee0(pinfo.__pi_stacksize);

  // Note: we get three values from pthread_getthrds_np:
  //       __pi_stackaddr, __pi_stacksize, __pi_stackend
  //
  // high addr    ---------------------
  //
  //    |         pthread internal data, like ~2K
  //    |
  //    |         ---------------------   __pi_stackend   (usually not page aligned, (xxxxF890))
  //    |
  //    |
  //    |
  //    |
  //    |
  //    |
  //    |          ---------------------   (__pi_stackend - __pi_stacksize)
  //    |
  //    |          padding to align the following AIX guard pages, if enabled.
  //    |
  //    V          ---------------------   __pi_stackaddr
  //
  // low addr      AIX guard pages, if enabled (AIXTHREAD_GUARDPAGES > 0)
  //

  // Reported base is the usable top of the stack; the low end is rounded up
  // to a page boundary so the returned size covers whole pages only.
  address stack_base = (address)(pinfo.__pi_stackend);
  address stack_low_addr = (address)align_ptr_up(pinfo.__pi_stackaddr,
    os::vm_page_size());
  size_t stack_size = stack_base - stack_low_addr;

  if (p_stack_base) {
    *p_stack_base = stack_base;
  }

  if (p_stack_size) {
    *p_stack_size = stack_size;
  }

  return true;
}
4373 
4374 // Get the current stack base from the OS (actually, the pthread library).
4375 address os::current_stack_base() {
4376   address p;
4377   query_stack_dimensions(&p, 0);
4378   return p;
4379 }
4380 
4381 // Get the current stack size from the OS (actually, the pthread library).
4382 size_t os::current_stack_size() {
4383   size_t s;
4384   query_stack_dimensions(0, &s);
4385   return s;
4386 }
4387 
4388 extern char** environ;
4389 
4390 // Run the specified command in a separate process. Return its exit value,
4391 // or -1 on failure (e.g. can't fork a new process).
4392 // Unlike system(), this function can be called from signal handler. It
4393 // doesn't block SIGINT et al.
4394 int os::fork_and_exec(char* cmd) {
4395   char * argv[4] = {"sh", "-c", cmd, NULL};
4396 
4397   pid_t pid = fork();
4398 
4399   if (pid < 0) {
4400     // fork failed
4401     return -1;
4402 
4403   } else if (pid == 0) {
4404     // child process
4405 
4406     // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4407     execve("/usr/bin/sh", argv, environ);
4408 
4409     // execve failed
4410     _exit(-1);
4411 
4412   } else {
4413     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4414     // care about the actual exit code, for now.
4415 
4416     int status;
4417 
4418     // Wait for the child process to exit. This returns immediately if
4419     // the child has already exited. */
4420     while (waitpid(pid, &status, 0) < 0) {
4421       switch (errno) {
4422         case ECHILD: return 0;
4423         case EINTR: break;
4424         default: return -1;
4425       }
4426     }
4427 
4428     if (WIFEXITED(status)) {
4429       // The child exited normally; get its exit code.
4430       return WEXITSTATUS(status);
4431     } else if (WIFSIGNALED(status)) {
4432       // The child exited because of a signal.
4433       // The best value to return is 0x80 + signal number,
4434       // because that is what all Unix shells do, and because
4435       // it allows callers to distinguish between process exit and
4436       // process death by signal.
4437       return 0x80 + WTERMSIG(status);
4438     } else {
4439       // Unknown exit code; pass it through.
4440       return status;
4441     }
4442   }
4443   return -1;
4444 }
4445 
4446 // is_headless_jre()
4447 //
4448 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
4449 // in order to report if we are running in a headless jre.
4450 //
4451 // Since JDK8 xawt/libmawt.so is moved into the same directory
4452 // as libawt.so, and renamed libawt_xawt.so
4453 bool os::is_headless_jre() {
4454   struct stat statbuf;
4455   char buf[MAXPATHLEN];
4456   char libmawtpath[MAXPATHLEN];
4457   const char *xawtstr = "/xawt/libmawt.so";
4458   const char *new_xawtstr = "/libawt_xawt.so";
4459 
4460   char *p;
4461 
4462   // Get path to libjvm.so
4463   os::jvm_path(buf, sizeof(buf));
4464 
4465   // Get rid of libjvm.so
4466   p = strrchr(buf, '/');
4467   if (p == NULL) return false;
4468   else *p = '\0';
4469 
4470   // Get rid of client or server
4471   p = strrchr(buf, '/');
4472   if (p == NULL) return false;
4473   else *p = '\0';
4474 
4475   // check xawt/libmawt.so
4476   strcpy(libmawtpath, buf);
4477   strcat(libmawtpath, xawtstr);
4478   if (::stat(libmawtpath, &statbuf) == 0) return false;
4479 
4480   // check libawt_xawt.so
4481   strcpy(libmawtpath, buf);
4482   strcat(libmawtpath, new_xawtstr);
4483   if (::stat(libmawtpath, &statbuf) == 0) return false;
4484 
4485   return true;
4486 }
4487 
4488 // Get the default path to the core file
4489 // Returns the length of the string
4490 int os::get_core_path(char* buffer, size_t bufferSize) {
4491   const char* p = get_current_directory(buffer, bufferSize);
4492 
4493   if (p == NULL) {
4494     assert(p != NULL, "failed to get current directory");
4495     return 0;
4496   }
4497 
4498   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4499                                                p, current_process_id());
4500 
4501   return strlen(buffer);
4502 }
4503 
#ifndef PRODUCT
// Hook invoked by the internal VM tests; intentionally empty because this
// platform provides no special memory reservation implementation to test.
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
#endif
4509 
4510 bool os::start_debugging(char *buf, int buflen) {
4511   int len = (int)strlen(buf);
4512   char *p = &buf[len];
4513 
4514   jio_snprintf(p, buflen -len,
4515                  "\n\n"
4516                  "Do you want to debug the problem?\n\n"
4517                  "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
4518                  "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
4519                  "Otherwise, press RETURN to abort...",
4520                  os::current_process_id(),
4521                  os::current_thread_id(), thread_self());
4522 
4523   bool yes = os::message_box("Unexpected Error", buf);
4524 
4525   if (yes) {
4526     // yes, user asked VM to launch debugger
4527     jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
4528 
4529     os::fork_and_exec(buf);
4530     yes = false;
4531   }
4532   return yes;
4533 }
4534 
4535 static inline time_t get_mtime(const char* filename) {
4536   struct stat st;
4537   int ret = os::stat(filename, &st);
4538   assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
4539   return st.st_mtime;
4540 }
4541 
4542 int os::compare_file_modified_times(const char* file1, const char* file2) {
4543   time_t t1 = get_mtime(file1);
4544   time_t t2 = get_mtime(file2);
4545   return t1 - t2;
4546 }