1 /*
   2  * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 // According to the AIX OS doc #pragma alloca must be used
  27 // with C++ compiler before referencing the function alloca()
  28 #pragma alloca
  29 
  30 // no precompiled headers
  31 #include "classfile/classLoader.hpp"
  32 #include "classfile/systemDictionary.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "jvm_aix.h"
  39 #include "logging/log.hpp"
  40 #include "libo4.hpp"
  41 #include "libperfstat_aix.hpp"
  42 #include "libodm_aix.hpp"
  43 #include "loadlib_aix.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "memory/filemap.hpp"
  46 #include "misc_aix.hpp"
  47 #include "oops/oop.inline.hpp"
  48 #include "os_aix.inline.hpp"
  49 #include "os_share_aix.hpp"
  50 #include "porting_aix.hpp"
  51 #include "prims/jniFastGetField.hpp"
  52 #include "prims/jvm.h"
  53 #include "prims/jvm_misc.hpp"
  54 #include "runtime/arguments.hpp"
  55 #include "runtime/atomic.hpp"
  56 #include "runtime/extendedPC.hpp"
  57 #include "runtime/globals.hpp"
  58 #include "runtime/interfaceSupport.hpp"
  59 #include "runtime/java.hpp"
  60 #include "runtime/javaCalls.hpp"
  61 #include "runtime/mutexLocker.hpp"
  62 #include "runtime/objectMonitor.hpp"
  63 #include "runtime/orderAccess.inline.hpp"
  64 #include "runtime/os.hpp"
  65 #include "runtime/osThread.hpp"
  66 #include "runtime/perfMemory.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/statSampler.hpp"
  69 #include "runtime/stubRoutines.hpp"
  70 #include "runtime/thread.inline.hpp"
  71 #include "runtime/threadCritical.hpp"
  72 #include "runtime/timer.hpp"
  73 #include "runtime/vm_version.hpp"
  74 #include "services/attachListener.hpp"
  75 #include "services/runtimeService.hpp"
  76 #include "utilities/align.hpp"
  77 #include "utilities/decoder.hpp"
  78 #include "utilities/defaultStream.hpp"
  79 #include "utilities/events.hpp"
  80 #include "utilities/growableArray.hpp"
  81 #include "utilities/vmError.hpp"
  82 
  83 // put OS-includes here (sorted alphabetically)
  84 #include <errno.h>
  85 #include <fcntl.h>
  86 #include <inttypes.h>
  87 #include <poll.h>
  88 #include <procinfo.h>
  89 #include <pthread.h>
  90 #include <pwd.h>
  91 #include <semaphore.h>
  92 #include <signal.h>
  93 #include <stdint.h>
  94 #include <stdio.h>
  95 #include <string.h>
  96 #include <unistd.h>
  97 #include <sys/ioctl.h>
  98 #include <sys/ipc.h>
  99 #include <sys/mman.h>
 100 #include <sys/resource.h>
 101 #include <sys/select.h>
 102 #include <sys/shm.h>
 103 #include <sys/socket.h>
 104 #include <sys/stat.h>
 105 #include <sys/sysinfo.h>
 106 #include <sys/systemcfg.h>
 107 #include <sys/time.h>
 108 #include <sys/times.h>
 109 #include <sys/types.h>
 110 #include <sys/utsname.h>
 111 #include <sys/vminfo.h>
 112 #include <sys/wait.h>
 113 
 114 // Missing prototypes for various system APIs.
 115 extern "C"
 116 int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
 117 
 118 #if !defined(_AIXVERSION_610)
 119 extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
 120 extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
 121 extern "C" int getargs   (procsinfo*, int, char*, int);
 122 #endif
 123 
 124 #define MAX_PATH (2 * K)
 125 
 126 // for timer info max values which include all bits
 127 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 128 // for multipage initialization error analysis (in 'g_multipage_error')
 129 #define ERROR_MP_OS_TOO_OLD                          100
 130 #define ERROR_MP_EXTSHM_ACTIVE                       101
 131 #define ERROR_MP_VMGETINFO_FAILED                    102
 132 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 133 
 134 static address resolve_function_descriptor_to_code_pointer(address p);
 135 
 136 static void vmembk_print_on(outputStream* os);
 137 
 138 ////////////////////////////////////////////////////////////////////////////////
 139 // global variables (for a description see os_aix.hpp)
 140 
// Total physical memory in bytes; filled in by initialize_system_info().
julong    os::Aix::_physical_memory = 0;

// pthread id of the initial VM thread; 0 until set during startup.
pthread_t os::Aix::_main_thread = ((pthread_t)0);
// Base page size; -1 = uninitialized (presumably set from sysconf during
// init - confirm at the initialization site, which is outside this chunk).
int       os::Aix::_page_size = -1;

// -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
int       os::Aix::_on_pase = -1;

// 0 = uninitialized, otherwise 32 bit number:
//  0xVVRRTTSS
//  VV - major version
//  RR - minor version
//  TT - tech level, if known, 0 otherwise
//  SS - service pack, if known, 0 otherwise
uint32_t  os::Aix::_os_version = 0;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_xpg_sus_mode = -1;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_extshm = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;

// Process break recorded at startup.
static address g_brk_at_startup = NULL;
 175 
 176 // This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
 178 // therefore should not be defined in AIX class.
 179 //
 180 // AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
 181 // latter two (16M "large" resp. 16G "huge" pages) require special
 182 // setup and are normally not available.
 183 //
 184 // AIX supports multiple page sizes per process, for:
 185 //  - Stack (of the primordial thread, so not relevant for us)
 186 //  - Data - data, bss, heap, for us also pthread stacks
 187 //  - Text - text code
 188 //  - shared memory
 189 //
 190 // Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
 191 // and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
 192 //
 193 // For shared memory, page size can be set dynamically via
 194 // shmctl(). Different shared memory regions can have different page
 195 // sizes.
 196 //
// More information can be found at the IBM info center:
 198 //   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
 199 //
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,                // All sizes start at -1 ("uninitialized") so that
  (size_t) -1,                // query_multipage_support() can assert it runs once.
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};
 218 
 219 // We must not accidentally allocate memory close to the BRK - even if
 220 // that would work - because then we prevent the BRK segment from
 221 // growing which may result in a malloc OOM even though there is
 222 // enough memory. The problem only arises if we shmat() or mmap() at
 223 // a specific wish address, e.g. to place the heap in a
 224 // compressed-oops-friendly way.
 225 static bool is_close_to_brk(address a) {
 226   assert0(g_brk_at_startup != NULL);
 227   if (a >= g_brk_at_startup &&
 228       a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
 229     return true;
 230   }
 231   return false;
 232 }
 233 
 234 julong os::available_memory() {
 235   return Aix::available_memory();
 236 }
 237 
 238 julong os::Aix::available_memory() {
 239   // Avoid expensive API call here, as returned value will always be null.
 240   if (os::Aix::on_pase()) {
 241     return 0x0LL;
 242   }
 243   os::Aix::meminfo_t mi;
 244   if (os::Aix::get_meminfo(&mi)) {
 245     return mi.real_free;
 246   } else {
 247     return ULONG_MAX;
 248   }
 249 }
 250 
 251 julong os::physical_memory() {
 252   return Aix::physical_memory();
 253 }
 254 
// Return true if the process runs with elevated privileges, i.e. the real
// and effective user (or group) ids differ, which is the case for
// setuid/setgid executables. Note that this does NOT test for uid 0: a
// process that is plainly root (uid == euid == 0) returns false here.
bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}
 266 
 267 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 268 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 269 static bool my_disclaim64(char* addr, size_t size) {
 270 
 271   if (size == 0) {
 272     return true;
 273   }
 274 
 275   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 276   const unsigned int maxDisclaimSize = 0x40000000;
 277 
 278   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 279   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 280 
 281   char* p = addr;
 282 
 283   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
 284     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 285       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 286       return false;
 287     }
 288     p += maxDisclaimSize;
 289   }
 290 
 291   if (lastDisclaimSize > 0) {
 292     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 293       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 294       return false;
 295     }
 296   }
 297 
 298   return true;
 299 }
 300 
// Cpu architecture string. Presumably used when composing
// platform-dependent names/paths - confirm at the usage sites.
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif
 309 
 310 // Wrap the function "vmgetinfo" which is not available on older OS releases.
 311 static int checked_vmgetinfo(void *out, int command, int arg) {
 312   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 313     guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
 314   }
 315   return ::vmgetinfo(out, command, arg);
 316 }
 317 
 318 // Given an address, returns the size of the page backing that address.
 319 size_t os::Aix::query_pagesize(void* addr) {
 320 
 321   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 322     // AS/400 older than V6R1: no vmgetinfo here, default to 4K
 323     return 4*K;
 324   }
 325 
 326   vm_page_info pi;
 327   pi.addr = (uint64_t)addr;
 328   if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
 329     return pi.pagesize;
 330   } else {
 331     assert(false, "vmgetinfo failed to retrieve page size");
 332     return 4*K;
 333   }
 334 }
 335 
 336 void os::Aix::initialize_system_info() {
 337 
 338   // Get the number of online(logical) cpus instead of configured.
 339   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
 340   assert(_processor_count > 0, "_processor_count must be > 0");
 341 
 342   // Retrieve total physical storage.
 343   os::Aix::meminfo_t mi;
 344   if (!os::Aix::get_meminfo(&mi)) {
 345     assert(false, "os::Aix::get_meminfo failed.");
 346   }
 347   _physical_memory = (julong) mi.real_total;
 348 }
 349 
 350 // Helper function for tracing page sizes.
 351 static const char* describe_pagesize(size_t pagesize) {
 352   switch (pagesize) {
 353     case 4*K : return "4K";
 354     case 64*K: return "64K";
 355     case 16*M: return "16M";
 356     case 16*G: return "16G";
 357     default:
 358       assert(false, "surprise");
 359       return "??";
 360   }
 361 }
 362 
 363 // Probe OS for multipage support.
 364 // Will fill the global g_multipage_support structure.
 365 // Must be called before calling os::large_page_init().
 366 static void query_multipage_support() {
 367 
 368   guarantee(g_multipage_support.pagesize == -1,
 369             "do not call twice");
 370 
 371   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
 372 
 373   // This really would surprise me.
 374   assert(g_multipage_support.pagesize == 4*K, "surprise!");
 375 
 376   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 377   // Default data page size is defined either by linker options (-bdatapsize)
 378   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 379   // default should be 4K.
 380   {
 381     void* p = ::malloc(16*M);
 382     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
 383     ::free(p);
 384   }
 385 
 386   // Query default shm page size (LDR_CNTRL SHMPSIZE).
 387   // Note that this is pure curiosity. We do not rely on default page size but set
 388   // our own page size after allocated.
 389   {
 390     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 391     guarantee(shmid != -1, "shmget failed");
 392     void* p = ::shmat(shmid, NULL, 0);
 393     ::shmctl(shmid, IPC_RMID, NULL);
 394     guarantee(p != (void*) -1, "shmat failed");
 395     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
 396     ::shmdt(p);
 397   }
 398 
 399   // Before querying the stack page size, make sure we are not running as primordial
 400   // thread (because primordial thread's stack may have different page size than
 401   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 402   // number of reasons so we may just as well guarantee it here.
 403   guarantee0(!os::Aix::is_primordial_thread());
 404 
 405   // Query pthread stack page size. Should be the same as data page size because
 406   // pthread stacks are allocated from C-Heap.
 407   {
 408     int dummy = 0;
 409     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
 410   }
 411 
 412   // Query default text page size (LDR_CNTRL TEXTPSIZE).
 413   {
 414     address any_function =
 415       resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
 416     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
 417   }
 418 
 419   // Now probe for support of 64K pages and 16M pages.
 420 
 421   // Before OS/400 V6R1, there is no support for pages other than 4K.
 422   if (os::Aix::on_pase_V5R4_or_older()) {
 423     trcVerbose("OS/400 < V6R1 - no large page support.");
 424     g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
 425     goto query_multipage_support_end;
 426   }
 427 
 428   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
 429   {
 430     const int MAX_PAGE_SIZES = 4;
 431     psize_t sizes[MAX_PAGE_SIZES];
 432     const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 433     if (num_psizes == -1) {
 434       trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
 435       trcVerbose("disabling multipage support.");
 436       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
 437       goto query_multipage_support_end;
 438     }
 439     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 440     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 441     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
 442     for (int i = 0; i < num_psizes; i ++) {
 443       trcVerbose(" %s ", describe_pagesize(sizes[i]));
 444     }
 445 
 446     // Can we use 64K, 16M pages?
 447     for (int i = 0; i < num_psizes; i ++) {
 448       const size_t pagesize = sizes[i];
 449       if (pagesize != 64*K && pagesize != 16*M) {
 450         continue;
 451       }
 452       bool can_use = false;
 453       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
 454       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
 455         IPC_CREAT | S_IRUSR | S_IWUSR);
 456       guarantee0(shmid != -1); // Should always work.
 457       // Try to set pagesize.
 458       struct shmid_ds shm_buf = { 0 };
 459       shm_buf.shm_pagesize = pagesize;
 460       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
 461         const int en = errno;
 462         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 463         trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%n",
 464           errno);
 465       } else {
 466         // Attach and double check pageisze.
 467         void* p = ::shmat(shmid, NULL, 0);
 468         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 469         guarantee0(p != (void*) -1); // Should always work.
 470         const size_t real_pagesize = os::Aix::query_pagesize(p);
 471         if (real_pagesize != pagesize) {
 472           trcVerbose("real page size (0x%llX) differs.", real_pagesize);
 473         } else {
 474           can_use = true;
 475         }
 476         ::shmdt(p);
 477       }
 478       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
 479       if (pagesize == 64*K) {
 480         g_multipage_support.can_use_64K_pages = can_use;
 481       } else if (pagesize == 16*M) {
 482         g_multipage_support.can_use_16M_pages = can_use;
 483       }
 484     }
 485 
 486   } // end: check which pages can be used for shared memory
 487 
 488 query_multipage_support_end:
 489 
 490   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
 491       describe_pagesize(g_multipage_support.pagesize));
 492   trcVerbose("Data page size (C-Heap, bss, etc): %s",
 493       describe_pagesize(g_multipage_support.datapsize));
 494   trcVerbose("Text page size: %s",
 495       describe_pagesize(g_multipage_support.textpsize));
 496   trcVerbose("Thread stack page size (pthread): %s",
 497       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
 498   trcVerbose("Default shared memory page size: %s",
 499       describe_pagesize(g_multipage_support.shmpsize));
 500   trcVerbose("Can use 64K pages dynamically with shared meory: %s",
 501       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
 502   trcVerbose("Can use 16M pages dynamically with shared memory: %s",
 503       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
 504   trcVerbose("Multipage error details: %d",
 505       g_multipage_support.error);
 506 
 507   // sanity checks
 508   assert0(g_multipage_support.pagesize == 4*K);
 509   assert0(g_multipage_support.datapsize == 4*K || g_multipage_support.datapsize == 64*K);
 510   assert0(g_multipage_support.textpsize == 4*K || g_multipage_support.textpsize == 64*K);
 511   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
 512   assert0(g_multipage_support.shmpsize == 4*K || g_multipage_support.shmpsize == 64*K);
 513 
 514 }
 515 
// Derives and registers the system properties that depend on the JVM's
// install location: dll_dir, java_home, boot path, library path and the
// extensions directories. Works by taking the full path of libjvm and
// stripping trailing components.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/lib:/usr/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    // Strip one more component (/lib) to arrive at java_home - but only
    // if the previous strip succeeded (pslash still non-NULL).
    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';        // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  // buf still holds java_home-relative content; it is reused here, which is
  // safe because bufsize reserved room for EXTENSIONS_DIR above.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}
 581 
 582 ////////////////////////////////////////////////////////////////////////////////
 583 // breakpoint support
 584 
// Programmatic breakpoint hook for the VM itself; expands to the
// platform's BREAKPOINT macro.
void os::breakpoint() {
  BREAKPOINT;
}
 588 
// Deliberately empty C-linkage symbol: attach a debugger and set a
// breakpoint on this function by name.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
 592 
 593 ////////////////////////////////////////////////////////////////////////////////
 594 // signal support
 595 
// Tracks whether signal_sets_init() has run (debug builds only).
debug_only(static bool signal_sets_initialized = false);
// Filled in by signal_sets_init(); accessed via unblocked_signals()/vm_signals().
static sigset_t unblocked_sigs, vm_sigs;
 598 
 599 bool os::Aix::is_sig_ignored(int sig) {
 600   struct sigaction oact;
 601   sigaction(sig, (struct sigaction*)NULL, &oact);
 602   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
 603     : CAST_FROM_FN_PTR(void*, oact.sa_handler);
 604   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
 605     return true;
 606   } else {
 607     return false;
 608   }
 609 }
 610 
 611 void os::Aix::signal_sets_init() {
 612   // Should also have an assertion stating we are still single-threaded.
 613   assert(!signal_sets_initialized, "Already initialized");
 614   // Fill in signals that are necessarily unblocked for all threads in
 615   // the VM. Currently, we unblock the following signals:
 616   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
 617   //                         by -Xrs (=ReduceSignalUsage));
 618   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
 619   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
 620   // the dispositions or masks wrt these signals.
 621   // Programs embedding the VM that want to use the above signals for their
 622   // own purposes must, at this time, use the "-Xrs" option to prevent
 623   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
 624   // (See bug 4345157, and other related bugs).
 625   // In reality, though, unblocking these signals is really a nop, since
 626   // these signals are not blocked by default.
 627   sigemptyset(&unblocked_sigs);
 628   sigaddset(&unblocked_sigs, SIGILL);
 629   sigaddset(&unblocked_sigs, SIGSEGV);
 630   sigaddset(&unblocked_sigs, SIGBUS);
 631   sigaddset(&unblocked_sigs, SIGFPE);
 632   sigaddset(&unblocked_sigs, SIGTRAP);
 633   sigaddset(&unblocked_sigs, SR_signum);
 634 
 635   if (!ReduceSignalUsage) {
 636    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
 637      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
 638    }
 639    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
 640      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
 641    }
 642    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
 643      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
 644    }
 645   }
 646   // Fill in signals that are blocked by all but the VM thread.
 647   sigemptyset(&vm_sigs);
 648   if (!ReduceSignalUsage)
 649     sigaddset(&vm_sigs, BREAK_SIGNAL);
 650   debug_only(signal_sets_initialized = true);
 651 }
 652 
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Valid only after signal_sets_init() has run (asserted in debug builds).
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
 659 
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
// Valid only after signal_sets_init() has run (asserted in debug builds).
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
 666 
 667 void os::Aix::hotspot_sigmask(Thread* thread) {
 668 
 669   //Save caller's signal mask before setting VM signal mask
 670   sigset_t caller_sigmask;
 671   pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);
 672 
 673   OSThread* osthread = thread->osthread();
 674   osthread->set_caller_sigmask(caller_sigmask);
 675 
 676   pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);
 677 
 678   if (!ReduceSignalUsage) {
 679     if (thread->is_VM_thread()) {
 680       // Only the VM thread handles BREAK_SIGNAL ...
 681       pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
 682     } else {
 683       // ... all other threads block BREAK_SIGNAL
 684       pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
 685     }
 686   }
 687 }
 688 
 689 // retrieve memory information.
 690 // Returns false if something went wrong;
 691 // content of pmi undefined in this case.
 692 bool os::Aix::get_meminfo(meminfo_t* pmi) {
 693 
 694   assert(pmi, "get_meminfo: invalid parameter");
 695 
 696   memset(pmi, 0, sizeof(meminfo_t));
 697 
 698   if (os::Aix::on_pase()) {
 699     // On PASE, use the libo4 porting library.
 700 
 701     unsigned long long virt_total = 0;
 702     unsigned long long real_total = 0;
 703     unsigned long long real_free = 0;
 704     unsigned long long pgsp_total = 0;
 705     unsigned long long pgsp_free = 0;
 706     if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
 707       pmi->virt_total = virt_total;
 708       pmi->real_total = real_total;
 709       pmi->real_free = real_free;
 710       pmi->pgsp_total = pgsp_total;
 711       pmi->pgsp_free = pgsp_free;
 712       return true;
 713     }
 714     return false;
 715 
 716   } else {
 717 
 718     // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
 719     // See:
 720     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 721     //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
 722     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 723     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 724 
 725     perfstat_memory_total_t psmt;
 726     memset (&psmt, '\0', sizeof(psmt));
 727     const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
 728     if (rc == -1) {
 729       trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
 730       assert(0, "perfstat_memory_total() failed");
 731       return false;
 732     }
 733 
 734     assert(rc == 1, "perfstat_memory_total() - weird return code");
 735 
 736     // excerpt from
 737     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 738     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 739     // The fields of perfstat_memory_total_t:
 740     // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
 741     // u_longlong_t real_total         Total real memory (in 4 KB pages).
 742     // u_longlong_t real_free          Free real memory (in 4 KB pages).
 743     // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
 744     // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
 745 
 746     pmi->virt_total = psmt.virt_total * 4096;
 747     pmi->real_total = psmt.real_total * 4096;
 748     pmi->real_free = psmt.real_free * 4096;
 749     pmi->pgsp_total = psmt.pgsp_total * 4096;
 750     pmi->pgsp_free = psmt.pgsp_free * 4096;
 751 
 752     return true;
 753 
 754   }
 755 } // end os::Aix::get_meminfo
 756 
 757 //////////////////////////////////////////////////////////////////////////////
 758 // create new thread
 759 
// Thread start routine for all newly created threads.
// Performs the per-thread bootstrap (stack bounds, TLS, signal mask, FPU
// state) in a fixed order, runs the thread's Java-level entry point, and
// tears down thread-local storage on exit.
static void *thread_native_entry(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    thread->set_stack_base(os::current_stack_base());
    thread->set_stack_size(os::current_stack_size());
  }

  // Keep both ids: the pthread id is the canonical thread id, the kernel
  // thread id is retained for diagnostics (see below).
  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  LogTarget(Info, os, thread) lt;
  if (lt.is_enabled()) {
    address low_address = thread->stack_end();
    address high_address = thread->stack_base();
    lt.print("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT
             ", stack [" PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "k using %uk pages)).",
             os::current_thread_id(), (uintx) kernel_thread_id, low_address, high_address,
             (high_address - low_address) / K, os::Aix::query_pagesize(low_address) / K);
  }

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    log_warning(os, thread)("Thread stack not in data segment.");
  }

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  // (The unsynchronized counter++ is racy, but only feeds the
  // randomization - any value works.)
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  // Publish this Thread* via thread-local storage before anything below
  // relies on Thread::current().
  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // Thread_id is pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  os::Aix::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->run();

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  return 0;
}
 837 
 838 bool os::create_thread(Thread* thread, ThreadType thr_type,
 839                        size_t req_stack_size) {
 840 
 841   assert(thread->osthread() == NULL, "caller responsible");
 842 
 843   // Allocate the OSThread object.
 844   OSThread* osthread = new OSThread(NULL, NULL);
 845   if (osthread == NULL) {
 846     return false;
 847   }
 848 
 849   // Set the correct thread state.
 850   osthread->set_thread_type(thr_type);
 851 
 852   // Initial state is ALLOCATED but not INITIALIZED
 853   osthread->set_state(ALLOCATED);
 854 
 855   thread->set_osthread(osthread);
 856 
 857   // Init thread attributes.
 858   pthread_attr_t attr;
 859   pthread_attr_init(&attr);
 860   guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");
 861 
 862   // Make sure we run in 1:1 kernel-user-thread mode.
 863   if (os::Aix::on_aix()) {
 864     guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
 865     guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
 866   }
 867 
 868   // Start in suspended state, and in os::thread_start, wake the thread up.
 869   guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");
 870 
 871   // Calculate stack size if it's not specified by caller.
 872   size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
 873 
 874   // JDK-8187028: It was observed that on some configurations (4K backed thread stacks)
 875   // the real thread stack size may be smaller than the requested stack size, by as much as 64K.
 876   // This very much looks like a pthread lib error. As a workaround, increase the stack size
 877   // by 64K for small thread stacks (arbitrarily choosen to be < 4MB)
 878   if (stack_size < 4096 * K) {
 879     stack_size += 64 * K;
 880   }
 881 
 882   // On Aix, pthread_attr_setstacksize fails with huge values and leaves the
 883   // thread size in attr unchanged. If this is the minimal stack size as set
 884   // by pthread_attr_init this leads to crashes after thread creation. E.g. the
 885   // guard pages might not fit on the tiny stack created.
 886   int ret = pthread_attr_setstacksize(&attr, stack_size);
 887   if (ret != 0) {
 888     log_warning(os, thread)("The thread stack size specified is invalid: " SIZE_FORMAT "k",
 889                             stack_size / K);
 890   }
 891 
 892   // Save some cycles and a page by disabling OS guard pages where we have our own
 893   // VM guard pages (in java threads). For other threads, keep system default guard
 894   // pages in place.
 895   if (thr_type == java_thread || thr_type == compiler_thread) {
 896     ret = pthread_attr_setguardsize(&attr, 0);
 897   }
 898 
 899   pthread_t tid = 0;
 900   if (ret == 0) {
 901     ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);
 902   }
 903 
 904   if (ret == 0) {
 905     char buf[64];
 906     log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
 907       (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
 908   } else {
 909     char buf[64];
 910     log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.",
 911       ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
 912   }
 913 
 914   pthread_attr_destroy(&attr);
 915 
 916   if (ret != 0) {
 917     // Need to clean up stuff we've allocated so far.
 918     thread->set_osthread(NULL);
 919     delete osthread;
 920     return false;
 921   }
 922 
 923   // OSThread::thread_id is the pthread id.
 924   osthread->set_thread_id(tid);
 925 
 926   return true;
 927 }
 928 
 929 /////////////////////////////////////////////////////////////////////////////
 930 // attach existing thread
 931 
 932 // bootstrap the main thread
 933 bool os::create_main_thread(JavaThread* thread) {
 934   assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
 935   return create_attached_thread(thread);
 936 }
 937 
 938 bool os::create_attached_thread(JavaThread* thread) {
 939 #ifdef ASSERT
 940     thread->verify_not_published();
 941 #endif
 942 
 943   // Allocate the OSThread object
 944   OSThread* osthread = new OSThread(NULL, NULL);
 945 
 946   if (osthread == NULL) {
 947     return false;
 948   }
 949 
 950   const pthread_t pthread_id = ::pthread_self();
 951   const tid_t kernel_thread_id = ::thread_self();
 952 
 953   // OSThread::thread_id is the pthread id.
 954   osthread->set_thread_id(pthread_id);
 955 
 956   // .. but keep kernel thread id too for diagnostics
 957   osthread->set_kernel_thread_id(kernel_thread_id);
 958 
 959   // initialize floating point control register
 960   os::Aix::init_thread_fpu_state();
 961 
 962   // Initial thread state is RUNNABLE
 963   osthread->set_state(RUNNABLE);
 964 
 965   thread->set_osthread(osthread);
 966 
 967   if (UseNUMA) {
 968     int lgrp_id = os::numa_get_group_id();
 969     if (lgrp_id != -1) {
 970       thread->set_lgrp_id(lgrp_id);
 971     }
 972   }
 973 
 974   // initialize signal mask for this thread
 975   // and save the caller's signal mask
 976   os::Aix::hotspot_sigmask(thread);
 977 
 978   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
 979     os::current_thread_id(), (uintx) kernel_thread_id);
 980 
 981   return true;
 982 }
 983 
 984 void os::pd_start_thread(Thread* thread) {
 985   int status = pthread_continue_np(thread->osthread()->pthread_id());
 986   assert(status == 0, "thr_continue failed");
 987 }
 988 
 989 // Free OS resources related to the OSThread
 990 void os::free_thread(OSThread* osthread) {
 991   assert(osthread != NULL, "osthread not set");
 992 
 993   // We are told to free resources of the argument thread,
 994   // but we can only really operate on the current thread.
 995   assert(Thread::current()->osthread() == osthread,
 996          "os::free_thread but not current thread");
 997 
 998   // Restore caller's signal mask
 999   sigset_t sigmask = osthread->caller_sigmask();
1000   pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
1001 
1002   delete osthread;
1003 }
1004 
1005 ////////////////////////////////////////////////////////////////////////////////
1006 // time support
1007 
1008 // Time since start-up in seconds to a fine granularity.
1009 // Used by VMSelfDestructTimer and the MemProfiler.
1010 double os::elapsedTime() {
1011   return (double)(os::elapsed_counter()) * 0.000001;
1012 }
1013 
1014 jlong os::elapsed_counter() {
1015   timeval time;
1016   int status = gettimeofday(&time, NULL);
1017   return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
1018 }
1019 
1020 jlong os::elapsed_frequency() {
1021   return (1000 * 1000);
1022 }
1023 
1024 bool os::supports_vtime() { return true; }
1025 bool os::enable_vtime()   { return false; }
1026 bool os::vtime_enabled()  { return false; }
1027 
1028 double os::elapsedVTime() {
1029   struct rusage usage;
1030   int retval = getrusage(RUSAGE_THREAD, &usage);
1031   if (retval == 0) {
1032     return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
1033   } else {
1034     // better than nothing, but not much
1035     return elapsedTime();
1036   }
1037 }
1038 
1039 jlong os::javaTimeMillis() {
1040   timeval time;
1041   int status = gettimeofday(&time, NULL);
1042   assert(status != -1, "aix error at gettimeofday()");
1043   return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1044 }
1045 
1046 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1047   timeval time;
1048   int status = gettimeofday(&time, NULL);
1049   assert(status != -1, "aix error at gettimeofday()");
1050   seconds = jlong(time.tv_sec);
1051   nanos = jlong(time.tv_usec) * 1000;
1052 }
1053 
// Monotonic nanosecond timestamp. On AIX proper this uses the time base
// registers via mread_real_time(); on PASE it falls back to gettimeofday()
// (microsecond resolution, so the low three digits are always zero there).
jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {

    timeval time;
    int status = gettimeofday(&time, NULL);
    assert(status != -1, "PASE error at gettimeofday()");
    jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
    return 1000 * usecs;

  } else {
    // On AIX use the precision of processors real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonic increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}
1082 
1083 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1084   info_ptr->max_value = ALL_64_BITS;
1085   // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
1086   info_ptr->may_skip_backward = false;
1087   info_ptr->may_skip_forward = false;
1088   info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
1089 }
1090 
1091 // Return the real, user, and system times in seconds from an
1092 // arbitrary fixed point in the past.
1093 bool os::getTimesSecs(double* process_real_time,
1094                       double* process_user_time,
1095                       double* process_system_time) {
1096   struct tms ticks;
1097   clock_t real_ticks = times(&ticks);
1098 
1099   if (real_ticks == (clock_t) (-1)) {
1100     return false;
1101   } else {
1102     double ticks_per_second = (double) clock_tics_per_sec;
1103     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1104     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1105     *process_real_time = ((double) real_ticks) / ticks_per_second;
1106 
1107     return true;
1108   }
1109 }
1110 
1111 char * os::local_time_string(char *buf, size_t buflen) {
1112   struct tm t;
1113   time_t long_time;
1114   time(&long_time);
1115   localtime_r(&long_time, &t);
1116   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1117                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1118                t.tm_hour, t.tm_min, t.tm_sec);
1119   return buf;
1120 }
1121 
1122 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
1123   return localtime_r(clock, res);
1124 }
1125 
1126 ////////////////////////////////////////////////////////////////////////////////
1127 // runtime exit support
1128 
// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook; run it last, after all VM-side cleanup above.
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
1149 
1150 // Note: os::abort() might be called very early during initialization, or
1151 // called from signal handler. Before adding something to os::abort(), make
1152 // sure it is async-safe and can handle partially initialized VM.
1153 void os::abort(bool dump_core, void* siginfo, const void* context) {
1154   os::shutdown();
1155   if (dump_core) {
1156 #ifndef PRODUCT
1157     fdStream out(defaultStream::output_fd());
1158     out.print_raw("Current thread is ");
1159     char buf[16];
1160     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1161     out.print_raw_cr(buf);
1162     out.print_raw_cr("Dumping core ...");
1163 #endif
1164     ::abort(); // dump core
1165   }
1166 
1167   ::exit(1);
1168 }
1169 
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  // ::abort() raises SIGABRT, which normally produces a core dump.
  ::abort();
}
1174 
1175 // This method is a copy of JDK's sysGetLastErrorString
1176 // from src/solaris/hpi/src/system_md.c
1177 
1178 size_t os::lasterror(char *buf, size_t len) {
1179   if (errno == 0) return 0;
1180 
1181   const char *s = os::strerror(errno);
1182   size_t n = ::strlen(s);
1183   if (n >= len) {
1184     n = len - 1;
1185   }
1186   ::strncpy(buf, s, n);
1187   buf[n] = '\0';
1188   return n;
1189 }
1190 
1191 intx os::current_thread_id() {
1192   return (intx)pthread_self();
1193 }
1194 
1195 int os::current_process_id() {
1196   return getpid();
1197 }
1198 
1199 // DLL functions
1200 
1201 const char* os::dll_file_extension() { return ".so"; }
1202 
1203 // This must be hard coded because it's the system's temporary
1204 // directory not the java application's temp directory, ala java.io.tmpdir.
1205 const char* os::get_temp_directory() { return "/tmp"; }
1206 
1207 // Check if addr is inside libjvm.so.
1208 bool os::address_is_in_vm(address addr) {
1209 
1210   // Input could be a real pc or a function pointer literal. The latter
1211   // would be a function descriptor residing in the data segment of a module.
1212   loaded_module_t lm;
1213   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
1214     return lm.is_in_vm;
1215   } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
1216     return lm.is_in_vm;
1217   } else {
1218     return false;
1219   }
1220 
1221 }
1222 
1223 // Resolve an AIX function descriptor literal to a code pointer.
1224 // If the input is a valid code pointer to a text segment of a loaded module,
1225 //   it is returned unchanged.
1226 // If the input is a valid AIX function descriptor, it is resolved to the
1227 //   code entry point.
1228 // If the input is neither a valid function descriptor nor a valid code pointer,
1229 //   NULL is returned.
1230 static address resolve_function_descriptor_to_code_pointer(address p) {
1231 
1232   if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
1233     // It is a real code pointer.
1234     return p;
1235   } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
1236     // Pointer to data segment, potential function descriptor.
1237     address code_entry = (address)(((FunctionDescriptor*)p)->entry());
1238     if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
1239       // It is a function descriptor.
1240       return code_entry;
1241     }
1242   }
1243 
1244   return NULL;
1245 }
1246 
1247 bool os::dll_address_to_function_name(address addr, char *buf,
1248                                       int buflen, int *offset,
1249                                       bool demangle) {
1250   if (offset) {
1251     *offset = -1;
1252   }
1253   // Buf is not optional, but offset is optional.
1254   assert(buf != NULL, "sanity check");
1255   buf[0] = '\0';
1256 
1257   // Resolve function ptr literals first.
1258   addr = resolve_function_descriptor_to_code_pointer(addr);
1259   if (!addr) {
1260     return false;
1261   }
1262 
1263   return AixSymbols::get_function_name(addr, buf, buflen, offset, NULL, demangle);
1264 }
1265 
1266 bool os::dll_address_to_library_name(address addr, char* buf,
1267                                      int buflen, int* offset) {
1268   if (offset) {
1269     *offset = -1;
1270   }
1271   // Buf is not optional, but offset is optional.
1272   assert(buf != NULL, "sanity check");
1273   buf[0] = '\0';
1274 
1275   // Resolve function ptr literals first.
1276   addr = resolve_function_descriptor_to_code_pointer(addr);
1277   if (!addr) {
1278     return false;
1279   }
1280 
1281   return AixSymbols::get_module_name(addr, buf, buflen);
1282 }
1283 
1284 // Loads .dll/.so and in case of error it checks if .dll/.so was built
1285 // for the same architecture as Hotspot is running on.
1286 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1287 
1288   if (ebuf && ebuflen > 0) {
1289     ebuf[0] = '\0';
1290     ebuf[ebuflen - 1] = '\0';
1291   }
1292 
1293   if (!filename || strlen(filename) == 0) {
1294     ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1295     return NULL;
1296   }
1297 
1298   // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1299   void * result= ::dlopen(filename, RTLD_LAZY);
1300   if (result != NULL) {
1301     // Reload dll cache. Don't do this in signal handling.
1302     LoadedLibraries::reload();
1303     return result;
1304   } else {
1305     // error analysis when dlopen fails
1306     const char* const error_report = ::dlerror();
1307     if (error_report && ebuf && ebuflen > 0) {
1308       snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1309                filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1310     }
1311   }
1312   return NULL;
1313 }
1314 
1315 void* os::dll_lookup(void* handle, const char* name) {
1316   void* res = dlsym(handle, name);
1317   return res;
1318 }
1319 
1320 void* os::get_default_process_handle() {
1321   return (void*)::dlopen(NULL, RTLD_LAZY);
1322 }
1323 
// Print the list of loaded modules (as tracked by the LoadedLibraries cache).
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}
1328 
1329 void os::get_summary_os_info(char* buf, size_t buflen) {
1330   // There might be something more readable than uname results for AIX.
1331   struct utsname name;
1332   uname(&name);
1333   snprintf(buf, buflen, "%s %s", name.release, name.version);
1334 }
1335 
1336 void os::print_os_info(outputStream* st) {
1337   st->print("OS:");
1338 
1339   st->print("uname:");
1340   struct utsname name;
1341   uname(&name);
1342   st->print(name.sysname); st->print(" ");
1343   st->print(name.nodename); st->print(" ");
1344   st->print(name.release); st->print(" ");
1345   st->print(name.version); st->print(" ");
1346   st->print(name.machine);
1347   st->cr();
1348 
1349   uint32_t ver = os::Aix::os_version();
1350   st->print_cr("AIX kernel version %u.%u.%u.%u",
1351                (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1352 
1353   os::Posix::print_rlimit_info(st);
1354 
1355   // load average
1356   st->print("load average:");
1357   double loadavg[3] = {-1.L, -1.L, -1.L};
1358   os::loadavg(loadavg, 3);
1359   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1360   st->cr();
1361 
1362   // print wpar info
1363   libperfstat::wparinfo_t wi;
1364   if (libperfstat::get_wparinfo(&wi)) {
1365     st->print_cr("wpar info");
1366     st->print_cr("name: %s", wi.name);
1367     st->print_cr("id:   %d", wi.wpar_id);
1368     st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1369   }
1370 
1371   // print partition info
1372   libperfstat::partitioninfo_t pi;
1373   if (libperfstat::get_partitioninfo(&pi)) {
1374     st->print_cr("partition info");
1375     st->print_cr(" name: %s", pi.name);
1376   }
1377 
1378 }
1379 
1380 void os::print_memory_info(outputStream* st) {
1381 
1382   st->print_cr("Memory:");
1383 
1384   st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1385     describe_pagesize(g_multipage_support.pagesize));
1386   st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1387     describe_pagesize(g_multipage_support.datapsize));
1388   st->print_cr("  Text page size:                         %s",
1389     describe_pagesize(g_multipage_support.textpsize));
1390   st->print_cr("  Thread stack page size (pthread):       %s",
1391     describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1392   st->print_cr("  Default shared memory page size:        %s",
1393     describe_pagesize(g_multipage_support.shmpsize));
1394   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1395     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1396   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1397     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1398   st->print_cr("  Multipage error: %d",
1399     g_multipage_support.error);
1400   st->cr();
1401   st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1402 
1403   // print out LDR_CNTRL because it affects the default page sizes
1404   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1405   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1406 
1407   // Print out EXTSHM because it is an unsupported setting.
1408   const char* const extshm = ::getenv("EXTSHM");
1409   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1410   if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1411     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1412   }
1413 
1414   // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1415   const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1416   st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1417       aixthread_guardpages ? aixthread_guardpages : "<unset>");
1418 
1419   os::Aix::meminfo_t mi;
1420   if (os::Aix::get_meminfo(&mi)) {
1421     char buffer[256];
1422     if (os::Aix::on_aix()) {
1423       st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1424       st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1425       st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1426       st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1427     } else {
1428       // PASE - Numbers are result of QWCRSSTS; they mean:
1429       // real_total: Sum of all system pools
1430       // real_free: always 0
1431       // pgsp_total: we take the size of the system ASP
1432       // pgsp_free: size of system ASP times percentage of system ASP unused
1433       st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1434       st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1435       st->print_cr("%% system asp used : " SIZE_FORMAT,
1436         mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1437     }
1438     st->print_raw(buffer);
1439   }
1440   st->cr();
1441 
1442   // Print segments allocated with os::reserve_memory.
1443   st->print_cr("internal virtual memory regions used by vm:");
1444   vmembk_print_on(st);
1445 }
1446 
1447 // Get a string for the cpuinfo that is a summary of the cpu type
1448 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1449   // This looks good
1450   libperfstat::cpuinfo_t ci;
1451   if (libperfstat::get_cpuinfo(&ci)) {
1452     strncpy(buf, ci.version, buflen);
1453   } else {
1454     strncpy(buf, "AIX", buflen);
1455   }
1456 }
1457 
// Platform hook for extra CPU details; intentionally empty on AIX.
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // Nothing to do beyond what os::print_cpu_info() does.
}
1461 
1462 static void print_signal_handler(outputStream* st, int sig,
1463                                  char* buf, size_t buflen);
1464 
1465 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1466   st->print_cr("Signal Handlers:");
1467   print_signal_handler(st, SIGSEGV, buf, buflen);
1468   print_signal_handler(st, SIGBUS , buf, buflen);
1469   print_signal_handler(st, SIGFPE , buf, buflen);
1470   print_signal_handler(st, SIGPIPE, buf, buflen);
1471   print_signal_handler(st, SIGXFSZ, buf, buflen);
1472   print_signal_handler(st, SIGILL , buf, buflen);
1473   print_signal_handler(st, SR_signum, buf, buflen);
1474   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1475   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1476   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1477   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1478   print_signal_handler(st, SIGTRAP, buf, buflen);
1479   // We also want to know if someone else adds a SIGDANGER handler because
1480   // that will interfere with OOM killling.
1481   print_signal_handler(st, SIGDANGER, buf, buflen);
1482 }
1483 
// Cache for the result of the first jvm_path() call.
static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so.
// 'buf' must be at least MAXPATHLEN bytes; the result is also cached in
// saved_jvm_path so subsequent calls are cheap.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Locate the shared object containing this very function.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");

  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
    // value for buf is "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.so".
    // If "/jre/lib/" appears at the right place in the string, then
    // assume we are installed in a JDK and we're done. Otherwise, check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like libjvm.so is installed there (append a fake suffix
    // hotspot/libjvm.so).

    // Walk back over the last four '/'-separated path components.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 4; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char* jrelib_p;
        int len;

        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        if (p == NULL) {
          return;
        }
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        // Note: from here on, 'buf' is reused to hold the JAVA_HOME-based path.
        rp = os::Posix::realpath(java_home_var, buf, buflen);
        if (rp == NULL) {
          return;
        }

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer room");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib");
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib");
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
          if (rp == NULL) {
            return;
          }
        }
      }
    }
  }

  // Cache the result for subsequent calls; keep it NUL-terminated.
  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}
1567 
// Intentionally empty: this platform adds no decoration to JNI symbol names.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
1575 
1576 ////////////////////////////////////////////////////////////////////////////////
1577 // sun.misc.Signal support
1578 
1579 static volatile jint sigint_count = 0;
1580 
1581 static void
1582 UserHandler(int sig, void *siginfo, void *context) {
1583   // 4511530 - sem_post is serialized and handled by the manager thread. When
1584   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1585   // don't want to flood the manager thread with sem_post requests.
1586   if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1587     return;
1588 
1589   // Ctrl-C is pressed during error reporting, likely because the error
1590   // handler fails to abort. Let VM die immediately.
1591   if (sig == SIGINT && VMError::is_error_reported()) {
1592     os::die();
1593   }
1594 
1595   os::signal_notify(sig);
1596 }
1597 
// Address of the UserHandler above, for registration via os::signal().
void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}
1601 
extern "C" {
  // C-linkage signatures for the two sigaction handler styles
  // (plain sa_handler vs. three-argument sa_sigaction).
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}
1606 
// Installs <handler> for <signal_number> via sigaction().
// Returns the previously installed handler, or (void*)-1 if registration failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  // Start with all signals blocked during handler execution ...
  sigfillset(&(sigAct.sa_mask));

  // Do not block out synchronous signals in the signal handler.
  // Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies. Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
  // On AIX, PASE there was a case where a SIGSEGV happened, followed
  // by a SIGILL, which was blocked due to the signal mask. The process
  // just hung forever. Better to crash from a secondary signal than to hang.
  sigdelset(&(sigAct.sa_mask), SIGSEGV);
  sigdelset(&(sigAct.sa_mask), SIGBUS);
  sigdelset(&(sigAct.sa_mask), SIGILL);
  sigdelset(&(sigAct.sa_mask), SIGFPE);
  sigdelset(&(sigAct.sa_mask), SIGTRAP);

  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;

  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  // Hand back the previous handler so callers can restore it later.
  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
1638 
// Raises <signal_number> in the current process (delivered to this thread).
void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}
1642 
1643 //
1644 // The following code is moved from os.cpp for making this
1645 // code platform specific, which it is by its very nature.
1646 //
1647 
1648 // Will be modified when max signal is changed to be dynamic
// Will be modified when max signal is changed to be dynamic
// Reserved pseudo-signal number used by the signal dispatcher to terminate.
int os::sigexitnum_pd() {
  return NSIG;
}
1652 
// A counter per possible signal value, incremented by signal_notify() and
// drained by check_pending_signals(). Index NSIG is the exit pseudo-signal.
static volatile jint pending_signals[NSIG+1] = { 0 };
1655 
// Wrapper functions for: sem_init(), sem_post(), sem_wait()
// On AIX, we use sem_init(), sem_post(), sem_wait()
// On Pase, we need to use msem_lock() and msem_unlock(), because Posix Semaphores
// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
// Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
// on AIX, msem_..() calls are suspected of causing problems.
static sem_t sig_sem;               // used on AIX proper
static msemaphore* p_sig_msem = 0;  // used on PASE; must live in shared memory
1664 
// Initializes the signal semaphore; picks the AIX (POSIX sem) or
// PASE (msem) implementation. Aborts the VM if initialization fails.
static void local_sem_init() {
  if (os::Aix::on_aix()) {
    int rc = ::sem_init(&sig_sem, 0, 0);
    guarantee(rc != -1, "sem_init failed");
  } else {
    // Memory semaphores must live in shared mem.
    guarantee0(p_sig_msem == NULL);
    p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
    guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
    guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
  }
}
1677 
1678 static void local_sem_post() {
1679   static bool warn_only_once = false;
1680   if (os::Aix::on_aix()) {
1681     int rc = ::sem_post(&sig_sem);
1682     if (rc == -1 && !warn_only_once) {
1683       trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno));
1684       warn_only_once = true;
1685     }
1686   } else {
1687     guarantee0(p_sig_msem != NULL);
1688     int rc = ::msem_unlock(p_sig_msem, 0);
1689     if (rc == -1 && !warn_only_once) {
1690       trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno));
1691       warn_only_once = true;
1692     }
1693   }
1694 }
1695 
1696 static void local_sem_wait() {
1697   static bool warn_only_once = false;
1698   if (os::Aix::on_aix()) {
1699     int rc = ::sem_wait(&sig_sem);
1700     if (rc == -1 && !warn_only_once) {
1701       trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno));
1702       warn_only_once = true;
1703     }
1704   } else {
1705     guarantee0(p_sig_msem != NULL); // must init before use
1706     int rc = ::msem_lock(p_sig_msem, 0);
1707     if (rc == -1 && !warn_only_once) {
1708       trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno));
1709       warn_only_once = true;
1710     }
1711   }
1712 }
1713 
// Platform-dependent part of signal subsystem initialization:
// clears the pending-signal counters and creates the signal semaphore.
void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  local_sem_init();
}
1721 
// Records that <sig> occurred and wakes the signal dispatcher thread.
// Async-signal-safe: only an atomic increment plus a semaphore post.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  local_sem_post();
}
1726 
// Scans pending_signals for a recorded signal and returns its number,
// consuming one occurrence. If <wait> is false and nothing is pending,
// returns -1; otherwise blocks on the signal semaphore until a signal
// arrives. Must be called from a JavaThread (it enters blocked state
// and participates in the external-suspend protocol).
static int check_pending_signals(bool wait) {
  // Reset the SIGINT coalescing counter (see UserHandler).
  Atomic::store(0, &sigint_count);
  for (;;) {
    // Try to consume one pending occurrence of any signal.
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      local_sem_wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //

        // Re-post so the wakeup is not lost, then self-suspend.
        local_sem_post();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
1766 
// Non-blocking check for a pending signal; returns -1 if none.
int os::signal_lookup() {
  return check_pending_signals(false);
}
1770 
// Blocks until a signal is pending and returns its number.
int os::signal_wait() {
  return check_pending_signals(true);
}
1774 
1775 ////////////////////////////////////////////////////////////////////////////////
1776 // Virtual Memory
1777 
// We need to keep small simple bookkeeping for os::reserve_memory and friends.

// Tags recording how a vmembk_t range was reserved (see vmembk_t::type).
#define VMEM_MAPPED  1
#define VMEM_SHMATED 2
1782 
// Bookkeeping node for one reserved memory range. Kept in a singly linked
// list (see the vmem struct below) so release/uncommit can tell whether a
// range was reserved with mmap or shmat.
struct vmembk_t {
  int type;         // 1 - mmap, 2 - shmat
  char* addr;
  size_t size;      // Real size, may be larger than usersize.
  size_t pagesize;  // page size of area
  vmembk_t* next;

  // True if p lies inside [addr, addr + size).
  bool contains_addr(char* p) const {
    return p >= addr && p < (addr + size);
  }

  // True if [p, p + s) lies fully inside this block.
  bool contains_range(char* p, size_t s) const {
    return contains_addr(p) && contains_addr(p + s - 1);
  }

  void print_on(outputStream* os) const {
    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
      " bytes, %d %s pages), %s",
      addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
      (type == VMEM_SHMATED ? "shmat" : "mmap")
    );
  }

  // Check that range is a sub range of memory block (or equal to memory block);
  // also check that range is fully page aligned to the page size of the block.
  // Guarantees (aborts the VM) on violation, in all build types.
  void assert_is_valid_subrange(char* p, size_t s) const {
    if (!contains_range(p, s)) {
      trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
              "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
              p, p + s, addr, addr + size);
      guarantee0(false);
    }
    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
      trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
              " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
      guarantee0(false);
    }
  }
};
1822 
// Global list of reserved ranges, protected by its own critical section.
static struct {
  vmembk_t* first;
  MiscUtils::CritSect cs;
} vmem;
1827 
1828 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1829   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1830   assert0(p);
1831   if (p) {
1832     MiscUtils::AutoCritSect lck(&vmem.cs);
1833     p->addr = addr; p->size = size;
1834     p->pagesize = pagesize;
1835     p->type = type;
1836     p->next = vmem.first;
1837     vmem.first = p;
1838   }
1839 }
1840 
1841 static vmembk_t* vmembk_find(char* addr) {
1842   MiscUtils::AutoCritSect lck(&vmem.cs);
1843   for (vmembk_t* p = vmem.first; p; p = p->next) {
1844     if (p->addr <= addr && (p->addr + p->size) > addr) {
1845       return p;
1846     }
1847   }
1848   return NULL;
1849 }
1850 
1851 static void vmembk_remove(vmembk_t* p0) {
1852   MiscUtils::AutoCritSect lck(&vmem.cs);
1853   assert0(p0);
1854   assert0(vmem.first); // List should not be empty.
1855   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1856     if (*pp == p0) {
1857       *pp = p0->next;
1858       ::free(p0);
1859       return;
1860     }
1861   }
1862   assert0(false); // Not found?
1863 }
1864 
// Prints all tracked reserved ranges, one per line, to <os>.
static void vmembk_print_on(outputStream* os) {
  MiscUtils::AutoCritSect lck(&vmem.cs);
  for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
    vmi->print_on(os);
    os->cr();
  }
}
1872 
1873 // Reserve and attach a section of System V memory.
1874 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1875 // address. Failing that, it will attach the memory anywhere.
1876 // If <requested_addr> is NULL, function will attach the memory anywhere.
1877 //
1878 // <alignment_hint> is being ignored by this function. It is very probable however that the
1879 // alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
// Should this not be enough, we can put more work into it.
1881 static char* reserve_shmated_memory (
1882   size_t bytes,
1883   char* requested_addr,
1884   size_t alignment_hint) {
1885 
1886   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1887     PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1888     bytes, requested_addr, alignment_hint);
1889 
1890   // Either give me wish address or wish alignment but not both.
1891   assert0(!(requested_addr != NULL && alignment_hint != 0));
1892 
1893   // We must prevent anyone from attaching too close to the
1894   // BRK because that may cause malloc OOM.
1895   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1896     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1897       "Will attach anywhere.", requested_addr);
1898     // Act like the OS refused to attach there.
1899     requested_addr = NULL;
1900   }
1901 
1902   // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1903   // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1904   if (os::Aix::on_pase_V5R4_or_older()) {
1905     ShouldNotReachHere();
1906   }
1907 
1908   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1909   const size_t size = align_up(bytes, 64*K);
1910 
1911   // Reserve the shared segment.
1912   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1913   if (shmid == -1) {
1914     trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1915     return NULL;
1916   }
1917 
1918   // Important note:
1919   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
1920   // We must right after attaching it remove it from the system. System V shm segments are global and
1921   // survive the process.
1922   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
1923 
1924   struct shmid_ds shmbuf;
1925   memset(&shmbuf, 0, sizeof(shmbuf));
1926   shmbuf.shm_pagesize = 64*K;
1927   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
1928     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
1929                size / (64*K), errno);
1930     // I want to know if this ever happens.
1931     assert(false, "failed to set page size for shmat");
1932   }
1933 
1934   // Now attach the shared segment.
1935   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
1936   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
1937   // were not a segment boundary.
1938   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
1939   const int errno_shmat = errno;
1940 
1941   // (A) Right after shmat and before handing shmat errors delete the shm segment.
1942   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
1943     trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
1944     assert(false, "failed to remove shared memory segment!");
1945   }
1946 
1947   // Handle shmat error. If we failed to attach, just return.
1948   if (addr == (char*)-1) {
1949     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
1950     return NULL;
1951   }
1952 
1953   // Just for info: query the real page size. In case setting the page size did not
1954   // work (see above), the system may have given us something other then 4K (LDR_CNTRL).
1955   const size_t real_pagesize = os::Aix::query_pagesize(addr);
1956   if (real_pagesize != shmbuf.shm_pagesize) {
1957     trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
1958   }
1959 
1960   if (addr) {
1961     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
1962       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
1963   } else {
1964     if (requested_addr != NULL) {
1965       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
1966     } else {
1967       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
1968     }
1969   }
1970 
1971   // book-keeping
1972   vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
1973   assert0(is_aligned_to(addr, os::vm_page_size()));
1974 
1975   return addr;
1976 }
1977 
1978 static bool release_shmated_memory(char* addr, size_t size) {
1979 
1980   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1981     addr, addr + size - 1);
1982 
1983   bool rc = false;
1984 
1985   // TODO: is there a way to verify shm size without doing bookkeeping?
1986   if (::shmdt(addr) != 0) {
1987     trcVerbose("error (%d).", errno);
1988   } else {
1989     trcVerbose("ok.");
1990     rc = true;
1991   }
1992   return rc;
1993 }
1994 
1995 static bool uncommit_shmated_memory(char* addr, size_t size) {
1996   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1997     addr, addr + size - 1);
1998 
1999   const bool rc = my_disclaim64(addr, size);
2000 
2001   if (!rc) {
2002     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2003     return false;
2004   }
2005   return true;
2006 }
2007 
2008 ////////////////////////////////  mmap-based routines /////////////////////////////////
2009 
2010 // Reserve memory via mmap.
2011 // If <requested_addr> is given, an attempt is made to attach at the given address.
2012 // Failing that, memory is allocated at any address.
2013 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
// allocate at an address aligned with the given alignment. Failing that, memory
// is allocated at any address.
2016 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2017   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2018     "alignment_hint " UINTX_FORMAT "...",
2019     bytes, requested_addr, alignment_hint);
2020 
2021   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2022   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
2023     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2024     return NULL;
2025   }
2026 
2027   // We must prevent anyone from attaching too close to the
2028   // BRK because that may cause malloc OOM.
2029   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2030     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2031       "Will attach anywhere.", requested_addr);
2032     // Act like the OS refused to attach there.
2033     requested_addr = NULL;
2034   }
2035 
2036   // Specify one or the other but not both.
2037   assert0(!(requested_addr != NULL && alignment_hint > 0));
2038 
2039   // In 64K mode, we claim the global page size (os::vm_page_size())
2040   // is 64K. This is one of the few points where that illusion may
2041   // break, because mmap() will always return memory aligned to 4K. So
2042   // we must ensure we only ever return memory aligned to 64k.
2043   if (alignment_hint) {
2044     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2045   } else {
2046     alignment_hint = os::vm_page_size();
2047   }
2048 
2049   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2050   const size_t size = align_up(bytes, os::vm_page_size());
2051 
2052   // alignment: Allocate memory large enough to include an aligned range of the right size and
2053   // cut off the leading and trailing waste pages.
2054   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2055   const size_t extra_size = size + alignment_hint;
2056 
2057   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2058   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2059   int flags = MAP_ANONYMOUS | MAP_SHARED;
2060 
2061   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2062   // it means if wishaddress is given but MAP_FIXED is not set.
2063   //
2064   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2065   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2066   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2067   // get clobbered.
2068   if (requested_addr != NULL) {
2069     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2070       flags |= MAP_FIXED;
2071     }
2072   }
2073 
2074   char* addr = (char*)::mmap(requested_addr, extra_size,
2075       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2076 
2077   if (addr == MAP_FAILED) {
2078     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2079     return NULL;
2080   }
2081 
2082   // Handle alignment.
2083   char* const addr_aligned = align_up(addr, alignment_hint);
2084   const size_t waste_pre = addr_aligned - addr;
2085   char* const addr_aligned_end = addr_aligned + size;
2086   const size_t waste_post = extra_size - waste_pre - size;
2087   if (waste_pre > 0) {
2088     ::munmap(addr, waste_pre);
2089   }
2090   if (waste_post > 0) {
2091     ::munmap(addr_aligned_end, waste_post);
2092   }
2093   addr = addr_aligned;
2094 
2095   if (addr) {
2096     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2097       addr, addr + bytes, bytes);
2098   } else {
2099     if (requested_addr != NULL) {
2100       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2101     } else {
2102       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2103     }
2104   }
2105 
2106   // bookkeeping
2107   vmembk_add(addr, size, 4*K, VMEM_MAPPED);
2108 
2109   // Test alignment, see above.
2110   assert0(is_aligned_to(addr, os::vm_page_size()));
2111 
2112   return addr;
2113 }
2114 
2115 static bool release_mmaped_memory(char* addr, size_t size) {
2116   assert0(is_aligned_to(addr, os::vm_page_size()));
2117   assert0(is_aligned_to(size, os::vm_page_size()));
2118 
2119   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2120     addr, addr + size - 1);
2121   bool rc = false;
2122 
2123   if (::munmap(addr, size) != 0) {
2124     trcVerbose("failed (%d)\n", errno);
2125     rc = false;
2126   } else {
2127     trcVerbose("ok.");
2128     rc = true;
2129   }
2130 
2131   return rc;
2132 }
2133 
2134 static bool uncommit_mmaped_memory(char* addr, size_t size) {
2135 
2136   assert0(is_aligned_to(addr, os::vm_page_size()));
2137   assert0(is_aligned_to(size, os::vm_page_size()));
2138 
2139   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2140     addr, addr + size - 1);
2141   bool rc = false;
2142 
2143   // Uncommit mmap memory with msync MS_INVALIDATE.
2144   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2145     trcVerbose("failed (%d)\n", errno);
2146     rc = false;
2147   } else {
2148     trcVerbose("ok.");
2149     rc = true;
2150   }
2151 
2152   return rc;
2153 }
2154 
// Returns the VM's global page size (4K or 64K on AIX, fixed at os::init).
int os::vm_page_size() {
  // Seems redundant as all get out.
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}
2160 
// Aix allocates memory by pages, so the allocation granularity equals
// the page size.
int os::vm_allocation_granularity() {
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}
2166 
#ifdef PRODUCT
// Emits a warning describing a failed commit_memory call. Only compiled in
// product builds, where it is used via PRODUCT_ONLY before vm_exit_out_of_memory
// (debug builds get the information from the error report instead).
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
          os::errno_name(err), err);
}
#endif
2175 
// Commits [addr, addr+size); on failure exits the VM with an OOM error
// carrying <mesg>.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    // Add extra info in product mode for vm_exit_out_of_memory():
    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}
2185 
// Commits a page-aligned sub range of an already reserved area.
// <exec> is ignored: the reservation paths already map PROT_EXEC.
// Always returns true; the range must be tracked in the vmem list.
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {

  assert(is_aligned_to(addr, os::vm_page_size()),
    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    p2i(addr), os::vm_page_size());
  assert(is_aligned_to(size, os::vm_page_size()),
    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    size, os::vm_page_size());

  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);
  vmi->assert_is_valid_subrange(addr, size);

  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);

  if (UseExplicitCommit) {
    // AIX commits memory on touch. So, touch all pages to be committed.
    // Touch in 4K steps (the smallest possible backing page size).
    for (char* p = addr; p < (addr + size); p += 4*K) {
      *p = '\0';
    }
  }

  return true;
}
2210 
// Commit with alignment hint; the hint is irrelevant on AIX and ignored.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
  return pd_commit_memory(addr, size, exec);
}
2214 
// Commit-or-exit with alignment hint; delegates to the hint-less variant.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // Alignment_hint is ignored on this OS.
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
2221 
2222 bool os::pd_uncommit_memory(char* addr, size_t size) {
2223   assert(is_aligned_to(addr, os::vm_page_size()),
2224     "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2225     p2i(addr), os::vm_page_size());
2226   assert(is_aligned_to(size, os::vm_page_size()),
2227     "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2228     size, os::vm_page_size());
2229 
2230   // Dynamically do different things for mmap/shmat.
2231   const vmembk_t* const vmi = vmembk_find(addr);
2232   guarantee0(vmi);
2233   vmi->assert_is_valid_subrange(addr, size);
2234 
2235   if (vmi->type == VMEM_SHMATED) {
2236     return uncommit_shmated_memory(addr, size);
2237   } else {
2238     return uncommit_mmaped_memory(addr, size);
2239   }
2240 }
2241 
// Not used on AIX; stack guard pages need no explicit commit here.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
2247 
// Not used on AIX; see pd_create_stack_guard_pages.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
2253 
// No-op on AIX: page-size realignment of a range is not supported/needed.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2256 
// No-op on AIX: freeing (madvise-style) of a committed range is not done here.
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2259 
// NUMA stub: no NUMA support on AIX, nothing to do.
void os::numa_make_global(char *addr, size_t bytes) {
}
2262 
// NUMA stub: no NUMA support on AIX, nothing to do.
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}
2265 
// NUMA stub: topology is treated as static.
bool os::numa_topology_changed() {
  return false;
}
2269 
// NUMA stub: report a single locality group.
size_t os::numa_get_groups_num() {
  return 1;
}
2273 
// NUMA stub: every thread belongs to group 0.
int os::numa_get_group_id() {
  return 0;
}
2277 
2278 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2279   if (size > 0) {
2280     ids[0] = 0;
2281     return 1;
2282   }
2283   return 0;
2284 }
2285 
// Page-info query is not supported on AIX.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}
2289 
// Page scanning is not supported on AIX; report the whole range as scanned.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
2293 
2294 // Reserves and attaches a shared memory segment.
2295 // Will assert if a wish address is given and could not be obtained.
2296 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2297 
2298   // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2299   // thereby clobbering old mappings at that place. That is probably
2300   // not intended, never used and almost certainly an error were it
2301   // ever be used this way (to try attaching at a specified address
2302   // without clobbering old mappings an alternate API exists,
2303   // os::attempt_reserve_memory_at()).
2304   // Instead of mimicking the dangerous coding of the other platforms, here I
2305   // just ignore the request address (release) or assert(debug).
2306   assert0(requested_addr == NULL);
2307 
2308   // Always round to os::vm_page_size(), which may be larger than 4K.
2309   bytes = align_up(bytes, os::vm_page_size());
2310   const size_t alignment_hint0 =
2311     alignment_hint ? align_up(alignment_hint, os::vm_page_size()) : 0;
2312 
2313   // In 4K mode always use mmap.
2314   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2315   if (os::vm_page_size() == 4*K) {
2316     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2317   } else {
2318     if (bytes >= Use64KPagesThreshold) {
2319       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2320     } else {
2321       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2322     }
2323   }
2324 }
2325 
// Releases memory previously reserved by pd_reserve_memory (or friends).
// For shmat-ed areas, a full-range release detaches the segment; a partial
// range is only disclaimed (uncommitted). For mmap-ed areas the range is
// unmapped. Bookkeeping is removed when the reservation itself goes away.
bool os::pd_release_memory(char* addr, size_t size) {

  // Dynamically do different things for mmap/shmat.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  size = align_up(size, os::vm_page_size());
  addr = align_up(addr, os::vm_page_size());

  bool rc = false;
  bool remove_bookkeeping = false;
  if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use memory anymore (but still page
    //   table space).
    vmi->assert_is_valid_subrange(addr, size);
    if (addr == vmi->addr && size == vmi->size) {
      rc = release_shmated_memory(addr, size);
      remove_bookkeeping = true;
    } else {
      rc = uncommit_shmated_memory(addr, size);
    }
  } else {
    // User may unmap partial regions but region has to be fully contained.
    // NOTE(review): in the shmat branch above this check runs in all builds,
    // but here it is ASSERT-only - looks inconsistent; confirm intent before
    // unifying, since assert_is_valid_subrange guarantees (aborts) on failure.
#ifdef ASSERT
    vmi->assert_is_valid_subrange(addr, size);
#endif
    rc = release_mmaped_memory(addr, size);
    remove_bookkeeping = true;
  }

  // update bookkeeping
  if (rc && remove_bookkeeping) {
    vmembk_remove(vmi);
  }

  return rc;
}
2367 
// Calls mprotect() and - because AIX may report success without actually
// changing the protection (non-SPEC1170 mode, shmat-ed memory) - verifies
// the result by probing the first word with SafeFetch32. Returns true if
// the protection is believed to be in effect.
static bool checked_mprotect(char* addr, size_t size, int prot) {

  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
  // not tell me if protection failed when trying to protect an un-protectable range.
  //
  // This means if the memory was allocated using shmget/shmat, protection wont work
  // but mprotect will still return 0:
  //
  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm

  bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;

  if (!rc) {
    const char* const s_errno = os::errno_name(errno);
    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
    return false;
  }

  // mprotect success check
  //
  // Mprotect said it changed the protection but can I believe it?
  //
  // To be sure I need to check the protection afterwards. Try to
  // read from protected memory and check whether that causes a segfault.
  //
  if (!os::Aix::xpg_sus_mode()) {

    if (CanUseSafeFetch32()) {

      // Probe: if both SafeFetch32 calls return their fault-value defaults,
      // the page is considered read-protected.
      const bool read_protected =
        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;

      if (prot & PROT_READ) {
        rc = !read_protected;
      } else {
        rc = read_protected;
      }

      if (!rc) {
        if (os::Aix::on_pase()) {
          // There is an issue on older PASE systems where mprotect() will return success but the
          // memory will not be protected.
          // This has nothing to do with the problem of using mprotect() on SPEC1170 incompatible
          // machines; we only see it rarely, when using mprotect() to protect the guard page of
          // a stack. It is an OS error.
          //
          // A valid strategy is just to try again. This usually works. :-/

          ::usleep(1000);
          if (::mprotect(addr, size, prot) == 0) {
            // NOTE(review): read_protected_2 is computed but never used - rc is
            // set to true unconditionally, so the retry result is not actually
            // re-verified. Looks like an oversight; confirm before changing.
            const bool read_protected_2 =
              (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
              SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
            rc = true;
          }
        }
      }
    }
  }

  assert(rc == true, "mprotect failed.");

  return rc;
}
2433 
2434 // Set protections specified
2435 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2436   unsigned int p = 0;
2437   switch (prot) {
2438   case MEM_PROT_NONE: p = PROT_NONE; break;
2439   case MEM_PROT_READ: p = PROT_READ; break;
2440   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2441   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2442   default:
2443     ShouldNotReachHere();
2444   }
2445   // is_committed is unused.
2446   return checked_mprotect(addr, size, p);
2447 }
2448 
// Makes the range inaccessible (guard page semantics).
bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}
2452 
// Restores full access to a previously guarded range.
bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}
2456 
2457 // Large page support
2458 
// Stays 0 on AIX: huge pages are not used (64K pages come from the normal
// reservation paths instead).
static size_t _large_page_size = 0;
2460 
2461 // Enable large page support if OS allows that.
2462 void os::large_page_init() {
2463   return; // Nothing to do. See query_multipage_support and friends.
2464 }
2465 
// Not supported on AIX - large (64K) paged memory comes from the normal
// reserve paths, so this must never be called.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
  // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
  // so this is not needed.
  assert(false, "should not be called on AIX");
  return NULL;
}
2473 
2474 bool os::release_memory_special(char* base, size_t bytes) {
2475   // Detaching the SHM segment will also delete it, see reserve_memory_special().
2476   Unimplemented();
2477   return false;
2478 }
2479 
2480 size_t os::large_page_size() {
2481   return _large_page_size;
2482 }
2483 
2484 bool os::can_commit_large_page_memory() {
2485   // Does not matter, we do not support huge pages.
2486   return false;
2487 }
2488 
2489 bool os::can_execute_large_page_memory() {
2490   // Does not matter, we do not support huge pages.
2491   return false;
2492 }
2493 
2494 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
2495   assert(file_desc >= 0, "file_desc is not valid");
2496   char* result = NULL;
2497 
2498   // Always round to os::vm_page_size(), which may be larger than 4K.
2499   bytes = align_up(bytes, os::vm_page_size());
2500   result = reserve_mmaped_memory(bytes, requested_addr, 0);
2501   
2502   if (result != NULL) {
2503     if (replace_existing_mapping_with_dax_file_mapping(result, bytes, file_desc) == NULL) {
2504       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
2505     }
2506   }
2507   return result;
2508 }
2509 
2510 // Reserve memory at an arbitrary address, only if that area is
2511 // available (and not reserved for something else).
2512 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2513   char* addr = NULL;
2514 
2515   // Always round to os::vm_page_size(), which may be larger than 4K.
2516   bytes = align_up(bytes, os::vm_page_size());
2517 
2518   // In 4K mode always use mmap.
2519   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2520   if (os::vm_page_size() == 4*K) {
2521     return reserve_mmaped_memory(bytes, requested_addr, 0);
2522   } else {
2523     if (bytes >= Use64KPagesThreshold) {
2524       return reserve_shmated_memory(bytes, requested_addr, 0);
2525     } else {
2526       return reserve_mmaped_memory(bytes, requested_addr, 0);
2527     }
2528   }
2529 
2530   return addr;
2531 }
2532 
2533 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2534   return ::read(fd, buf, nBytes);
2535 }
2536 
2537 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2538   return ::pread(fd, buf, nBytes, offset);
2539 }
2540 
2541 void os::naked_short_sleep(jlong ms) {
2542   struct timespec req;
2543 
2544   assert(ms < 1000, "Un-interruptable sleep, short time use only");
2545   req.tv_sec = 0;
2546   if (ms > 0) {
2547     req.tv_nsec = (ms % 1000) * 1000000;
2548   }
2549   else {
2550     req.tv_nsec = 1;
2551   }
2552 
2553   nanosleep(&req, NULL);
2554 
2555   return;
2556 }
2557 
2558 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2559 void os::infinite_sleep() {
2560   while (true) {    // sleep forever ...
2561     ::sleep(100);   // ... 100 seconds at a time
2562   }
2563 }
2564 
2565 // Used to convert frequent JVM_Yield() to nops
2566 bool os::dont_yield() {
2567   return DontYieldALot;
2568 }
2569 
2570 void os::naked_yield() {
2571   sched_yield();
2572 }
2573 
2574 ////////////////////////////////////////////////////////////////////////////////
2575 // thread priority support
2576 
2577 // From AIX manpage to pthread_setschedparam
2578 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2579 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2580 //
2581 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2582 // range from 40 to 80, where 40 is the least favored priority and 80
2583 // is the most favored."
2584 //
2585 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2586 // scheduling there; however, this still leaves iSeries.)
2587 //
2588 // We use the same values for AIX and PASE.
2589 int os::java_to_os_priority[CriticalPriority + 1] = {
2590   54,             // 0 Entry should never be used
2591 
2592   55,             // 1 MinPriority
2593   55,             // 2
2594   56,             // 3
2595 
2596   56,             // 4
2597   57,             // 5 NormPriority
2598   57,             // 6
2599 
2600   58,             // 7
2601   58,             // 8
2602   59,             // 9 NearMaxPriority
2603 
2604   60,             // 10 MaxPriority
2605 
2606   60              // 11 CriticalPriority
2607 };
2608 
2609 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2610   if (!UseThreadPriorities) return OS_OK;
2611   pthread_t thr = thread->osthread()->pthread_id();
2612   int policy = SCHED_OTHER;
2613   struct sched_param param;
2614   param.sched_priority = newpri;
2615   int ret = pthread_setschedparam(thr, policy, &param);
2616 
2617   if (ret != 0) {
2618     trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2619         (int)thr, newpri, ret, os::errno_name(ret));
2620   }
2621   return (ret == 0) ? OS_OK : OS_ERR;
2622 }
2623 
2624 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2625   if (!UseThreadPriorities) {
2626     *priority_ptr = java_to_os_priority[NormPriority];
2627     return OS_OK;
2628   }
2629   pthread_t thr = thread->osthread()->pthread_id();
2630   int policy = SCHED_OTHER;
2631   struct sched_param param;
2632   int ret = pthread_getschedparam(thr, &policy, &param);
2633   *priority_ptr = param.sched_priority;
2634 
2635   return (ret == 0) ? OS_OK : OS_ERR;
2636 }
2637 
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
// Intentionally a no-op on AIX.
void os::hint_no_preempt() {}
2641 
2642 ////////////////////////////////////////////////////////////////////////////////
2643 // suspend/resume support
2644 
2645 //  The low-level signal-based suspend/resume support is a remnant from the
2646 //  old VM-suspension that used to be for java-suspension, safepoints etc,
2647 //  within hotspot. Currently used by JFR's OSThreadSampler
2648 //
2649 //  The remaining code is greatly simplified from the more general suspension
2650 //  code that used to be used.
2651 //
2652 //  The protocol is quite simple:
2653 //  - suspend:
2654 //      - sends a signal to the target thread
2655 //      - polls the suspend state of the osthread using a yield loop
2656 //      - target thread signal handler (SR_handler) sets suspend state
2657 //        and blocks in sigsuspend until continued
2658 //  - resume:
2659 //      - sets target osthread state to continue
2660 //      - sends signal to end the sigsuspend loop in the SR_handler
2661 //
2662 //  Note that the SR_lock plays no role in this suspend/resume protocol,
2663 //  but is checked for NULL in SR_handler as a thread termination indicator.
2664 //  The SR_lock is, however, used by JavaThread::java_suspend()/java_resume() APIs.
2665 //
2666 //  Note that resume_clear_context() and suspend_save_context() are needed
2667 //  by SR_handler(), so that fetch_frame_from_ucontext() works,
2668 //  which in part is used by:
2669 //    - Forte Analyzer: AsyncGetCallTrace()
2670 //    - StackBanging: get_frame_at_stack_banging_point()
2671 
2672 static void resume_clear_context(OSThread *osthread) {
2673   osthread->set_ucontext(NULL);
2674   osthread->set_siginfo(NULL);
2675 }
2676 
2677 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2678   osthread->set_ucontext(context);
2679   osthread->set_siginfo(siginfo);
2680 }
2681 
2682 //
2683 // Handler function invoked when a thread's execution is suspended or
2684 // resumed. We have to be careful that only async-safe functions are
2685 // called here (Note: most pthread functions are not async safe and
2686 // should be avoided.)
2687 //
2688 // Note: sigwait() is a more natural fit than sigsuspend() from an
2689 // interface point of view, but sigwait() prevents the signal hander
2690 // from being run. libpthread would get very confused by not having
2691 // its signal handlers run and prevents sigwait()'s use with the
2692 // mutex granting granting signal.
2693 //
2694 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
2695 //
2696 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2697   // Save and restore errno to avoid confusing native code with EINTR
2698   // after sigsuspend.
2699   int old_errno = errno;
2700 
2701   Thread* thread = Thread::current_or_null_safe();
2702   assert(thread != NULL, "Missing current thread in SR_handler");
2703 
2704   // On some systems we have seen signal delivery get "stuck" until the signal
2705   // mask is changed as part of thread termination. Check that the current thread
2706   // has not already terminated (via SR_lock()) - else the following assertion
2707   // will fail because the thread is no longer a JavaThread as the ~JavaThread
2708   // destructor has completed.
2709 
2710   if (thread->SR_lock() == NULL) {
2711     return;
2712   }
2713 
2714   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2715 
2716   OSThread* osthread = thread->osthread();
2717 
2718   os::SuspendResume::State current = osthread->sr.state();
2719   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2720     suspend_save_context(osthread, siginfo, context);
2721 
2722     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2723     os::SuspendResume::State state = osthread->sr.suspended();
2724     if (state == os::SuspendResume::SR_SUSPENDED) {
2725       sigset_t suspend_set;  // signals for sigsuspend()
2726 
2727       // get current set of blocked signals and unblock resume signal
2728       pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2729       sigdelset(&suspend_set, SR_signum);
2730 
2731       // wait here until we are resumed
2732       while (1) {
2733         sigsuspend(&suspend_set);
2734 
2735         os::SuspendResume::State result = osthread->sr.running();
2736         if (result == os::SuspendResume::SR_RUNNING) {
2737           break;
2738         }
2739       }
2740 
2741     } else if (state == os::SuspendResume::SR_RUNNING) {
2742       // request was cancelled, continue
2743     } else {
2744       ShouldNotReachHere();
2745     }
2746 
2747     resume_clear_context(osthread);
2748   } else if (current == os::SuspendResume::SR_RUNNING) {
2749     // request was cancelled, continue
2750   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2751     // ignore
2752   } else {
2753     ShouldNotReachHere();
2754   }
2755 
2756   errno = old_errno;
2757 }
2758 
// Select the suspend/resume signal (default SR_signum, overridable through
// the _JAVA_SR_SIGNUM environment variable) and install SR_handler for it.
// Returns 0 on success, -1 if sigaction() failed.
static int SR_initialize() {
  struct sigaction act;
  char *s;
  // Get signal number to use for suspend/resume
  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
    int sig = ::strtol(s, 0, 10);
    if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
        sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
      SR_signum = sig;
    } else {
      warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
              sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
    }
  }

  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");

  sigemptyset(&SR_sigset);
  sigaddset(&SR_sigset, SR_signum);

  // Set up signal handler for suspend/resume.
  act.sa_flags = SA_RESTART|SA_SIGINFO;
  // NOTE(review): SA_SIGINFO is set but the three-argument handler is stored
  // through sa_handler with a cast - this relies on sa_handler/sa_sigaction
  // sharing storage in struct sigaction; confirm that holds on AIX.
  act.sa_handler = (void (*)(int)) SR_handler;

  // SR_signum is blocked by default.
  // (Query the current mask into act.sa_mask; SIG_BLOCK with a NULL set
  // changes nothing.)
  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);

  if (sigaction(SR_signum, &act, 0) == -1) {
    return -1;
  }

  // Save signal flag
  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
  return 0;
}
2795 
// Counterpart to SR_initialize(); nothing needs tearing down.
static int SR_finalize() {
  return 0;  // always succeeds
}
2799 
2800 static int sr_notify(OSThread* osthread) {
2801   int status = pthread_kill(osthread->pthread_id(), SR_signum);
2802   assert_status(status == 0, status, "pthread_kill");
2803   return status;
2804 }
2805 
// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
// Outer poll budget in do_suspend()/do_resume().
static const int RANDOMLY_LARGE_INTEGER = 1000000;
// Inner yield budget per outer iteration in do_suspend().
static const int RANDOMLY_LARGE_INTEGER2 = 100;
2811 
// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  // Poll with yields until the target's SR_handler reports SR_SUSPENDED;
  // the inner loop yields up to RANDOMLY_LARGE_INTEGER2 times per round.
  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::naked_yield();
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        // Cancel won the race: the target never saw the request.
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        // Target suspended just as we timed out - count it as success.
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
2864 
// Wake a thread stopped by do_suspend(). Failure to switch the state
// machine or to signal the target is treated as fatal.
static void do_resume(OSThread* osthread) {
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  // Re-send the resume signal and poll (with yields) until the target's
  // SR_handler has moved the state back to running.
  while (!osthread->sr.is_running()) {
    if (sr_notify(osthread) == 0) {
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::naked_yield();
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
2888 
2889 ///////////////////////////////////////////////////////////////////////////////////
2890 // signal handling (except suspend/resume)
2891 
2892 // This routine may be used by user applications as a "hook" to catch signals.
2893 // The user-defined signal handler must pass unrecognized signals to this
2894 // routine, and if it returns true (non-zero), then the signal handler must
2895 // return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
2898 //
2899 // If this routine returns false, it is OK to call it again. This allows
2900 // the user-defined signal handler to perform checks either before or after
2901 // the VM performs its own checks. Naturally, the user code would be making
2902 // a serious error if it tried to handle an exception (such as a null check
2903 // or breakpoint) that the VM was generating for its own correct operation.
2904 //
2905 // This routine may recognize any of the following kinds of signals:
2906 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2907 // It should be consulted by handlers for any of those signals.
2908 //
2909 // The caller of this routine must pass in the three arguments supplied
2910 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
2911 // field of the structure passed to sigaction(). This routine assumes that
2912 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2913 //
2914 // Note that the VM will print warnings if it detects conflicting signal
2915 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2916 //
2917 extern "C" JNIEXPORT int
2918 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2919 
// Set thread signal mask (for some reason on AIX sigthreadmask() seems
// to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same).
bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  // Error semantics differ between the two candidates: pthread_sigmask
  // returns the error number directly, while sigthreadmask returns -1 and
  // sets the global errno (which makes pthread_sigmask the more thread-safe
  // choice for error handling). Success is 0 either way.
  return ::pthread_sigmask(how, set, oset) == 0;
}
2931 
2932 // Function to unblock all signals which are, according
2933 // to POSIX, typical program error signals. If they happen while being blocked,
2934 // they typically will bring down the process immediately.
2935 bool unblock_program_error_signals() {
2936   sigset_t set;
2937   ::sigemptyset(&set);
2938   ::sigaddset(&set, SIGILL);
2939   ::sigaddset(&set, SIGBUS);
2940   ::sigaddset(&set, SIGFPE);
2941   ::sigaddset(&set, SIGSEGV);
2942   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2943 }
2944 
2945 // Renamed from 'signalHandler' to avoid collision with other shared libs.
2946 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2947   assert(info != NULL && uc != NULL, "it must be old kernel");
2948 
2949   // Never leave program error signals blocked;
2950   // on all our platforms they would bring down the process immediately when
2951   // getting raised while being blocked.
2952   unblock_program_error_signals();
2953 
2954   int orig_errno = errno;  // Preserve errno value over signal handler.
2955   JVM_handle_aix_signal(sig, info, uc, true);
2956   errno = orig_errno;
2957 }
2958 
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining
// Handlers that were installed before the VM replaced them; an entry is
// valid only if its signal is recorded in 'sigs'
// (see save_preinstalled_handler() / get_preinstalled_handler()).
struct sigaction sigact[NSIG];
sigset_t sigs;
bool os::Aix::libjsig_is_loaded = false;
// JVM_get_signal_action from libjsig, resolved in install_signal_handlers().
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Aix::get_signal_action = NULL;
2969 
2970 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2971   struct sigaction *actp = NULL;
2972 
2973   if (libjsig_is_loaded) {
2974     // Retrieve the old signal handler from libjsig
2975     actp = (*get_signal_action)(sig);
2976   }
2977   if (actp == NULL) {
2978     // Retrieve the preinstalled signal handler from jvm
2979     actp = get_preinstalled_handler(sig);
2980   }
2981 
2982   return actp;
2983 }
2984 
// Invoke the saved pre-VM handler 'actp' for 'sig', honoring its sa_mask,
// SA_NODEFER, SA_RESETHAND and SA_SIGINFO semantics. Returns true if the
// signal is considered handled, false if the saved disposition was SIG_DFL
// (then the VM should treat it as an unexpected exception itself).
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      // One-shot handler: emulate SA_RESETHAND by resetting our saved copy.
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}
3029 
3030 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3031   bool chained = false;
3032   // signal-chaining
3033   if (UseSignalChaining) {
3034     struct sigaction *actp = get_chained_signal_action(sig);
3035     if (actp != NULL) {
3036       chained = call_chained_handler(actp, sig, siginfo, context);
3037     }
3038   }
3039   return chained;
3040 }
3041 
3042 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3043   if (sigismember(&sigs, sig)) {
3044     return &sigact[sig];
3045   }
3046   return NULL;
3047 }
3048 
3049 void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3050   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3051   sigact[sig] = oldAct;
3052   sigaddset(&sigs, sig);
3053 }
3054 
// for diagnostic
// Per-signal copy of the sa_flags the VM installed; compared against the
// live flags by check_signal_handler()/print_signal_handler() to detect
// foreign modification.
int sigflags[NSIG];
3057 
3058 int os::Aix::get_our_sigflags(int sig) {
3059   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3060   return sigflags[sig];
3061 }
3062 
3063 void os::Aix::set_our_sigflags(int sig, int flags) {
3064   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3065   if (sig > 0 && sig < NSIG) {
3066     sigflags[sig] = flags;
3067   }
3068 }
3069 
// Install the VM's handler for 'sig' (set_installed == true) or restore the
// default disposition (false). If a foreign handler is already present,
// either leave it alone (AllowUserSignalHandlers), save it for chaining
// (UseSignalChaining), or abort.
void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  // Normalize whichever union member is in use to a plain address.
  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal("Encountered unexpected pre-existing sigaction handler "
            "%#lx for signal %d.", (long)oldhand, sig);
    }
  }

  struct sigaction sigAct;
  // Block all signals while our handler runs.
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  void* oldhand2 = oldAct.sa_sigaction
                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
3116 
// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
void os::Aix::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    // Probe for libjsig's bracketing functions; their presence means libjsig
    // is interposing sigaction() and must be told when the VM installs
    // its handlers.
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      // NOTE(review): end_signal_setting/get_signal_action are assumed to
      // resolve whenever begin_signal_setting does - confirm libjsig always
      // exports all three.
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers.
      (*begin_signal_setting)();
    }

    ::sigemptyset(&sigs);
    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers.
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // Need to initialize check_signal_done.
      ::sigemptyset(&check_signal_done);
    }
  }
}
3173 
// Render a human-readable name for 'handler' into 'buf': the base name of
// the library containing it, or the raw address if the lookup fails.
// Returns 'buf'.
static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    // The way os::dll_address_to_library_name is implemented on Aix
    // right now, it always returns -1 for the offset which is not
    // terribly informative.
    // Will fix that. For now, omit the offset.
    // Note: p1 points into buf itself, so this snprintf copies within the
    // same buffer (shifting the base name to the front).
    jio_snprintf(buf, buflen, "%s", p1);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}
3194 
// Print the currently installed disposition for 'sig' to 'st': handler name
// or address, sa_mask, sa_flags, and - if the handler is one of ours - a
// warning when the flags differ from what the VM installed.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  // Pick the active union member depending on SA_SIGINFO.
  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // Maybe the handler was reset by VMError?
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  // Print textual representation of sa_flags.
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Check for flags, reset system-used one!
    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
    }
  }
  st->cr();
}
3241 
// Run check_signal_handler(sig) unless the signal is already recorded in
// check_signal_done (signals are added there once a mismatch was reported,
// so each problem is reported only once).
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Aix::check_signal_handler(sig)
3245 
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here
// (no-op unless signal checking is enabled, see install_signal_handlers()).

void os::run_periodic_checks() {

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);
  if (UseSIGTRAP) {
    DO_SIGNAL_CHECK(SIGTRAP);
  }

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  DO_SIGNAL_CHECK(SR_signum);
}
3278 
// Signature of the real sigaction(2).
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// Lazily resolved via dlsym in check_signal_handler() so we query the
// non-interposed sigaction (libjsig wraps the normal symbol).
static os_sigaction_t os_sigaction = NULL;
3282 
// Compare the handler currently installed for 'sig' (queried through the
// dlsym-resolved, non-interposed sigaction) with the handler the VM expects,
// and print a warning if either the handler or its flags were replaced by
// foreign code. Checked signals are added to check_signal_done so each
// problem is reported only once.
void os::Aix::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  // Pick the active union member depending on SA_SIGINFO.
  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler);

  // Determine which handler the VM expects for this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:");
    os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
    tty->cr();
    tty->print("  found:");
    os::Posix::print_sa_flags(tty, act.sa_flags);
    tty->cr();
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}
3354 
// To install functions for atexit system call
extern "C" {
  // C-linkage wrapper so perfMemory_exit() can be registered via ::atexit().
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3361 
// This is called _before_ the most of global arguments have been parsed.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (Shared memory boundary is supposed to be a 256M aligned.)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // Record process break at startup.
  g_brk_at_startup = (address) ::sbrk(0);
  assert(g_brk_at_startup != (address) -1, "sbrk failed");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc).
  os::Aix::scan_environment();

  // Probe multipage support.
  query_multipage_support();

  // Act like we only have one page size by eliminating corner cases which
  // we did not support very well anyway.
  // We have two input conditions:
  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
  //    setting.
  //    Data segment page size is important for us because it defines the thread stack page
  //    size, which is needed for guard page handling, stack banging etc.
  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
  //    and should be allocated with 64k pages.
  //
  // So, we do the following:
  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
  // 64k          no              --- AIX 5.2 ? ---
  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)

  // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if stack page size is 64k you cannot pretend its 4k).

  if (g_multipage_support.datapsize == 4*K) {
    // datapsize = 4K. Data segment, thread stacks are 4K paged.
    if (g_multipage_support.can_use_64K_pages) {
      // .. but we are able to use 64K pages dynamically.
      // This would be typical for java launchers which are not linked
      // with datapsize=64K (like, any other launcher but our own).
      //
      // In this case it would be smart to allocate the java heap with 64K
      // to get the performance benefit, and to fake 64k pages for the
      // data segment (when dealing with thread stacks).
      //
      // However, leave a possibility to downgrade to 4K, using
      // -XX:-Use64KPages.
      if (Use64KPages) {
        trcVerbose("64K page mode (faked for data segment)");
        Aix::_page_size = 64*K;
      } else {
        trcVerbose("4K page mode (Use64KPages=off)");
        Aix::_page_size = 4*K;
      }
    } else {
      // .. and not able to allocate 64k pages dynamically. Here, just
      // fall back to 4K paged mode and use mmap for everything.
      trcVerbose("4K page mode");
      Aix::_page_size = 4*K;
      FLAG_SET_ERGO(bool, Use64KPages, false);
    }
  } else {
    // datapsize = 64k. Data segment, thread stacks are 64k paged.
    // This normally means that we can allocate 64k pages dynamically.
    // (There is one special case where this may be false: EXTSHM=on.
    // but we decided to not support that mode).
    assert0(g_multipage_support.can_use_64K_pages);
    Aix::_page_size = 64*K;
    trcVerbose("64K page mode");
    FLAG_SET_ERGO(bool, Use64KPages, true);
  }

  // For now UseLargePages is just ignored.
  FLAG_SET_ERGO(bool, UseLargePages, false);
  _page_sizes[0] = 0;

  // debug trace
  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));

  // Next, we need to initialize libo4 and libperfstat libraries.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  init_random(1234567);

  // Main_thread points to the aboriginal thread.
  Aix::_main_thread = pthread_self();

  initial_time_count = os::elapsed_counter();

  os::Posix::init();
}
3475 
3476 // This is called _after_ the global arguments have been parsed.
3477 jint os::init_2(void) {
3478 
3479   os::Posix::init_2();
3480 
3481   if (os::Aix::on_pase()) {
3482     trcVerbose("Running on PASE.");
3483   } else {
3484     trcVerbose("Running on AIX (not PASE).");
3485   }
3486 
3487   trcVerbose("processor count: %d", os::_processor_count);
3488   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3489 
3490   // Initially build up the loaded dll map.
3491   LoadedLibraries::reload();
3492   if (Verbose) {
3493     trcVerbose("Loaded Libraries: ");
3494     LoadedLibraries::print(tty);
3495   }
3496 
3497   const int page_size = Aix::page_size();
3498   const int map_size = page_size;
3499 
3500   address map_address = (address) MAP_FAILED;
3501   const int prot  = PROT_READ;
3502   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3503 
3504   // Use optimized addresses for the polling page,
3505   // e.g. map it to a special 32-bit address.
3506   if (OptimizePollingPageLocation) {
3507     // architecture-specific list of address wishes:
3508     address address_wishes[] = {
3509       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3510       // PPC64: all address wishes are non-negative 32 bit values where
3511       // the lower 16 bits are all zero. we can load these addresses
3512       // with a single ppc_lis instruction.
3513       (address) 0x30000000, (address) 0x31000000,
3514       (address) 0x32000000, (address) 0x33000000,
3515       (address) 0x40000000, (address) 0x41000000,
3516       (address) 0x42000000, (address) 0x43000000,
3517       (address) 0x50000000, (address) 0x51000000,
3518       (address) 0x52000000, (address) 0x53000000,
3519       (address) 0x60000000, (address) 0x61000000,
3520       (address) 0x62000000, (address) 0x63000000
3521     };
3522     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3523 
3524     // iterate over the list of address wishes:
3525     for (int i=0; i<address_wishes_length; i++) {
3526       // Try to map with current address wish.
3527       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3528       // fail if the address is already mapped.
3529       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3530                                      map_size, prot,
3531                                      flags | MAP_FIXED,
3532                                      -1, 0);
3533       trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",
3534                    address_wishes[i], map_address + (ssize_t)page_size);
3535 
3536       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3537         // Map succeeded and map_address is at wished address, exit loop.
3538         break;
3539       }
3540 
3541       if (map_address != (address) MAP_FAILED) {
3542         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3543         ::munmap(map_address, map_size);
3544         map_address = (address) MAP_FAILED;
3545       }
3546       // Map failed, continue loop.
3547     }
3548   } // end OptimizePollingPageLocation
3549 
3550   if (map_address == (address) MAP_FAILED) {
3551     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3552   }
3553   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3554   os::set_polling_page(map_address);
3555 
3556   if (!UseMembar) {
3557     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3558     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3559     os::set_memory_serialize_page(mem_serialize_page);
3560 
3561     trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
3562         mem_serialize_page, mem_serialize_page + Aix::page_size(),
3563         Aix::page_size(), Aix::page_size());
3564   }
3565 
3566   // initialize suspend/resume support - must do this before signal_sets_init()
3567   if (SR_initialize() != 0) {
3568     perror("SR_initialize failed");
3569     return JNI_ERR;
3570   }
3571 
3572   Aix::signal_sets_init();
3573   Aix::install_signal_handlers();
3574 
3575   // Check and sets minimum stack sizes against command line options
3576   if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
3577     return JNI_ERR;
3578   }
3579 
3580   if (UseNUMA) {
3581     UseNUMA = false;
3582     warning("NUMA optimizations are not available on this OS.");
3583   }
3584 
3585   if (MaxFDLimit) {
3586     // Set the number of file descriptors to max. print out error
3587     // if getrlimit/setrlimit fails but continue regardless.
3588     struct rlimit nbr_files;
3589     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3590     if (status != 0) {
3591       log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
3592     } else {
3593       nbr_files.rlim_cur = nbr_files.rlim_max;
3594       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3595       if (status != 0) {
3596         log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
3597       }
3598     }
3599   }
3600 
3601   if (PerfAllowAtExitRegistration) {
3602     // Only register atexit functions if PerfAllowAtExitRegistration is set.
3603     // At exit functions can be delayed until process exit time, which
3604     // can be problematic for embedded VM situations. Embedded VMs should
3605     // call DestroyJavaVM() to assure that VM resources are released.
3606 
3607     // Note: perfMemory_exit_helper atexit function may be removed in
3608     // the future if the appropriate cleanup code can be added to the
3609     // VM_Exit VMOperation's doit method.
3610     if (atexit(perfMemory_exit_helper) != 0) {
3611       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3612     }
3613   }
3614 
3615   return JNI_OK;
3616 }
3617 
3618 // Mark the polling page as unreadable
3619 void os::make_polling_page_unreadable(void) {
3620   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3621     fatal("Could not disable polling page");
3622   }
3623 };
3624 
3625 // Mark the polling page as readable
3626 void os::make_polling_page_readable(void) {
3627   // Changed according to os_linux.cpp.
3628   if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3629     fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
3630   }
3631 };
3632 
3633 int os::active_processor_count() {
3634   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3635   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3636   return online_cpus;
3637 }
3638 
3639 void os::set_native_thread_name(const char *name) {
3640   // Not yet implemented.
3641   return;
3642 }
3643 
3644 bool os::distribute_processes(uint length, uint* distribution) {
3645   // Not yet implemented.
3646   return false;
3647 }
3648 
3649 bool os::bind_to_processor(uint processor_id) {
3650   // Not yet implemented.
3651   return false;
3652 }
3653 
// Suspend the target thread, run the task against its captured context,
// then resume it. If suspension fails the task is skipped entirely.
void os::SuspendedThreadTask::internal_do_task() {
  if (do_suspend(_thread->osthread())) {
    // Thread is now stopped; its saved ucontext is safe to inspect.
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    do_resume(_thread->osthread());
  }
}
3661 
3662 ////////////////////////////////////////////////////////////////////////////////
3663 // debug support
3664 
3665 bool os::find(address addr, outputStream* st) {
3666 
3667   st->print(PTR_FORMAT ": ", addr);
3668 
3669   loaded_module_t lm;
3670   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3671       LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3672     st->print_cr("%s", lm.path);
3673     return true;
3674   }
3675 
3676   return false;
3677 }
3678 
3679 ////////////////////////////////////////////////////////////////////////////////
3680 // misc
3681 
3682 // This does not do anything on Aix. This is basically a hook for being
3683 // able to use structured exception handling (thread-local exception filters)
3684 // on, e.g., Win32.
3685 void
3686 os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
3687                          JavaCallArguments* args, Thread* thread) {
3688   f(value, method, args, thread);
3689 }
3690 
// No OS-specific statistics to print on AIX; intentionally empty hook.
void os::print_statistics() {
}
3693 
3694 bool os::message_box(const char* title, const char* message) {
3695   int i;
3696   fdStream err(defaultStream::error_fd());
3697   for (i = 0; i < 78; i++) err.print_raw("=");
3698   err.cr();
3699   err.print_raw_cr(title);
3700   for (i = 0; i < 78; i++) err.print_raw("-");
3701   err.cr();
3702   err.print_raw_cr(message);
3703   for (i = 0; i < 78; i++) err.print_raw("=");
3704   err.cr();
3705 
3706   char buf[16];
3707   // Prevent process from exiting upon "read error" without consuming all CPU
3708   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3709 
3710   return buf[0] == 'y' || buf[0] == 'Y';
3711 }
3712 
3713 int os::stat(const char *path, struct stat *sbuf) {
3714   char pathbuf[MAX_PATH];
3715   if (strlen(path) > MAX_PATH - 1) {
3716     errno = ENAMETOOLONG;
3717     return -1;
3718   }
3719   os::native_path(strcpy(pathbuf, path));
3720   return ::stat(pathbuf, sbuf);
3721 }
3722 
3723 // Is a (classpath) directory empty?
3724 bool os::dir_is_empty(const char* path) {
3725   DIR *dir = NULL;
3726   struct dirent *ptr;
3727 
3728   dir = opendir(path);
3729   if (dir == NULL) return true;
3730 
3731   /* Scan the directory */
3732   bool result = true;
3733   char buf[sizeof(struct dirent) + MAX_PATH];
3734   while (result && (ptr = ::readdir(dir)) != NULL) {
3735     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3736       result = false;
3737     }
3738   }
3739   closedir(dir);
3740   return result;
3741 }
3742 
// This code originates from JDK's sysOpen and open64_w
// from src/solaris/hpi/src/system_md.c

// Open 'path' with the given flags and mode. Rejects over-long paths
// (ENAMETOOLONG) and directories (EISDIR), and sets the close-on-exec flag
// on the resulting descriptor. Returns the fd, or -1 with errno set.
int os::open(const char *path, int oflag, int mode) {

  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;

  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  // If the open succeeded, the file might still be a directory.
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        // Opening a directory for data access is an error for us.
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      ::close(fd);
      return -1;
    }
  }

  // All file descriptors that are opened in the JVM and not
  // specifically destined for a subprocess should have the
  // close-on-exec flag set. If we don't set it, then careless 3rd
  // party native code might fork and exec without closing all
  // appropriate file descriptors (e.g. as we do in closeDescriptors in
  // UNIXProcess.c), and this in turn might:
  //
  // - cause end-of-file to fail to be detected on some file
  //   descriptors, resulting in mysterious hangs, or
  //
  // - might cause an fopen in the subprocess to fail on a system
  //   suffering from bug 1085341.
  //
  // (Yes, the default setting of the close-on-exec flag is a Unix
  // design flaw.)
  //
  // See:
  // 1085341: 32-bit stdio routines should support file descriptors >255
  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
#ifdef FD_CLOEXEC
  {
    // Best effort: ignore fcntl failures, the fd is still usable.
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1)
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif

  return fd;
}
3805 
3806 // create binary file, rewriting existing file if required
3807 int os::create_binary_file(const char* path, bool rewrite_existing) {
3808   int oflags = O_WRONLY | O_CREAT;
3809   if (!rewrite_existing) {
3810     oflags |= O_EXCL;
3811   }
3812   return ::open64(path, oflags, S_IREAD | S_IWRITE);
3813 }
3814 
3815 // return current position of file pointer
3816 jlong os::current_file_offset(int fd) {
3817   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3818 }
3819 
3820 // move file pointer to the specified offset
3821 jlong os::seek_to_file_offset(int fd, jlong offset) {
3822   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3823 }
3824 
// This code originates from JDK's sysAvailable
// from src/solaris/hpi/src/native_threads/src/sys_api_td.c

// Report the number of bytes available for reading on 'fd' in *bytes.
// Returns 1 on success, 0 on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      // Non-seekable stream: ask the driver how much is buffered.
      int n;
      if (::ioctl(fd, FIONREAD, &n) >= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  // Regular file (or FIONREAD unsupported): compute remaining bytes by
  // seeking to the end and restoring the original position afterwards.
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}
3853 
3854 // Map a block of memory.
3855 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3856                         char *addr, size_t bytes, bool read_only,
3857                         bool allow_exec) {
3858   int prot;
3859   int flags = MAP_PRIVATE;
3860 
3861   if (read_only) {
3862     prot = PROT_READ;
3863     flags = MAP_SHARED;
3864   } else {
3865     prot = PROT_READ | PROT_WRITE;
3866     flags = MAP_PRIVATE;
3867   }
3868 
3869   if (allow_exec) {
3870     prot |= PROT_EXEC;
3871   }
3872 
3873   if (addr != NULL) {
3874     flags |= MAP_FIXED;
3875   }
3876 
3877   // Allow anonymous mappings if 'fd' is -1.
3878   if (fd == -1) {
3879     flags |= MAP_ANONYMOUS;
3880   }
3881 
3882   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
3883                                      fd, file_offset);
3884   if (mapped_address == MAP_FAILED) {
3885     return NULL;
3886   }
3887   return mapped_address;
3888 }
3889 
3890 // Remap a block of memory.
3891 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
3892                           char *addr, size_t bytes, bool read_only,
3893                           bool allow_exec) {
3894   // same as map_memory() on this OS
3895   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
3896                         allow_exec);
3897 }
3898 
3899 // Unmap a block of memory.
3900 bool os::pd_unmap_memory(char* addr, size_t bytes) {
3901   return munmap(addr, bytes) == 0;
3902 }
3903 
3904 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
3905 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
3906 // of a thread.
3907 //
3908 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
3909 // the fast estimate available on the platform.
3910 
3911 jlong os::current_thread_cpu_time() {
3912   // return user + sys since the cost is the same
3913   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
3914   assert(n >= 0, "negative CPU time");
3915   return n;
3916 }
3917 
3918 jlong os::thread_cpu_time(Thread* thread) {
3919   // consistent with what current_thread_cpu_time() returns
3920   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
3921   assert(n >= 0, "negative CPU time");
3922   return n;
3923 }
3924 
3925 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
3926   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
3927   assert(n >= 0, "negative CPU time");
3928   return n;
3929 }
3930 
3931 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
3932   bool error = false;
3933 
3934   jlong sys_time = 0;
3935   jlong user_time = 0;
3936 
3937   // Reimplemented using getthrds64().
3938   //
3939   // Works like this:
3940   // For the thread in question, get the kernel thread id. Then get the
3941   // kernel thread statistics using that id.
3942   //
3943   // This only works of course when no pthread scheduling is used,
3944   // i.e. there is a 1:1 relationship to kernel threads.
3945   // On AIX, see AIXTHREAD_SCOPE variable.
3946 
3947   pthread_t pthtid = thread->osthread()->pthread_id();
3948 
3949   // retrieve kernel thread id for the pthread:
3950   tid64_t tid = 0;
3951   struct __pthrdsinfo pinfo;
3952   // I just love those otherworldly IBM APIs which force me to hand down
3953   // dummy buffers for stuff I dont care for...
3954   char dummy[1];
3955   int dummy_size = sizeof(dummy);
3956   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
3957                           dummy, &dummy_size) == 0) {
3958     tid = pinfo.__pi_tid;
3959   } else {
3960     tty->print_cr("pthread_getthrds_np failed.");
3961     error = true;
3962   }
3963 
3964   // retrieve kernel timing info for that kernel thread
3965   if (!error) {
3966     struct thrdentry64 thrdentry;
3967     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
3968       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
3969       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
3970     } else {
3971       tty->print_cr("pthread_getthrds_np failed.");
3972       error = true;
3973     }
3974   }
3975 
3976   if (p_sys_time) {
3977     *p_sys_time = sys_time;
3978   }
3979 
3980   if (p_user_time) {
3981     *p_user_time = user_time;
3982   }
3983 
3984   if (error) {
3985     return false;
3986   }
3987 
3988   return true;
3989 }
3990 
3991 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
3992   jlong sys_time;
3993   jlong user_time;
3994 
3995   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
3996     return -1;
3997   }
3998 
3999   return user_sys_cpu_time ? sys_time + user_time : user_time;
4000 }
4001 
4002 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4003   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4004   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4005   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4006   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4007 }
4008 
4009 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4010   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4011   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4012   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4013   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4014 }
4015 
4016 bool os::is_thread_cpu_time_supported() {
4017   return true;
4018 }
4019 
4020 // System loadavg support. Returns -1 if load average cannot be obtained.
4021 // For now just return the system wide load average (no processor sets).
4022 int os::loadavg(double values[], int nelem) {
4023 
4024   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4025   guarantee(values, "argument error");
4026 
4027   if (os::Aix::on_pase()) {
4028 
4029     // AS/400 PASE: use libo4 porting library
4030     double v[3] = { 0.0, 0.0, 0.0 };
4031 
4032     if (libo4::get_load_avg(v, v + 1, v + 2)) {
4033       for (int i = 0; i < nelem; i ++) {
4034         values[i] = v[i];
4035       }
4036       return nelem;
4037     } else {
4038       return -1;
4039     }
4040 
4041   } else {
4042 
4043     // AIX: use libperfstat
4044     libperfstat::cpuinfo_t ci;
4045     if (libperfstat::get_cpuinfo(&ci)) {
4046       for (int i = 0; i < nelem; i++) {
4047         values[i] = ci.loadavg[i];
4048       }
4049     } else {
4050       return -1;
4051     }
4052     return nelem;
4053   }
4054 }
4055 
4056 void os::pause() {
4057   char filename[MAX_PATH];
4058   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4059     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4060   } else {
4061     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4062   }
4063 
4064   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4065   if (fd != -1) {
4066     struct stat buf;
4067     ::close(fd);
4068     while (::stat(filename, &buf) == 0) {
4069       (void)::poll(NULL, 0, 100);
4070     }
4071   } else {
4072     trcVerbose("Could not open pause file '%s', continuing immediately.", filename);
4073   }
4074 }
4075 
4076 bool os::Aix::is_primordial_thread() {
4077   if (pthread_self() == (pthread_t)1) {
4078     return true;
4079   } else {
4080     return false;
4081   }
4082 }
4083 
4084 // OS recognitions (PASE/AIX, OS level) call this before calling any
4085 // one of Aix::on_pase(), Aix::os_version() static
4086 void os::Aix::initialize_os_info() {
4087 
4088   assert(_on_pase == -1 && _os_version == 0, "already called.");
4089 
4090   struct utsname uts;
4091   memset(&uts, 0, sizeof(uts));
4092   strcpy(uts.sysname, "?");
4093   if (::uname(&uts) == -1) {
4094     trcVerbose("uname failed (%d)", errno);
4095     guarantee(0, "Could not determine whether we run on AIX or PASE");
4096   } else {
4097     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4098                "node \"%s\" machine \"%s\"\n",
4099                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4100     const int major = atoi(uts.version);
4101     assert(major > 0, "invalid OS version");
4102     const int minor = atoi(uts.release);
4103     assert(minor > 0, "invalid OS release");
4104     _os_version = (major << 24) | (minor << 16);
4105     char ver_str[20] = {0};
4106     char *name_str = "unknown OS";
4107     if (strcmp(uts.sysname, "OS400") == 0) {
4108       // We run on AS/400 PASE. We do not support versions older than V5R4M0.
4109       _on_pase = 1;
4110       if (os_version_short() < 0x0504) {
4111         trcVerbose("OS/400 releases older than V5R4M0 not supported.");
4112         assert(false, "OS/400 release too old.");
4113       }
4114       name_str = "OS/400 (pase)";
4115       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
4116     } else if (strcmp(uts.sysname, "AIX") == 0) {
4117       // We run on AIX. We do not support versions older than AIX 5.3.
4118       _on_pase = 0;
4119       // Determine detailed AIX version: Version, Release, Modification, Fix Level.
4120       odmWrapper::determine_os_kernel_version(&_os_version);
4121       if (os_version_short() < 0x0503) {
4122         trcVerbose("AIX release older than AIX 5.3 not supported.");
4123         assert(false, "AIX release too old.");
4124       }
4125       name_str = "AIX";
4126       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
4127                    major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
4128     } else {
4129       assert(false, name_str);
4130     }
4131     trcVerbose("We run on %s %s", name_str, ver_str);
4132   }
4133 
4134   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4135 } // end: os::Aix::initialize_os_info()
4136 
4137 // Scan environment for important settings which might effect the VM.
4138 // Trace out settings. Warn about invalid settings and/or correct them.
4139 //
4140 // Must run after os::Aix::initialue_os_info().
4141 void os::Aix::scan_environment() {
4142 
4143   char* p;
4144   int rc;
4145 
4146   // Warn explicity if EXTSHM=ON is used. That switch changes how
4147   // System V shared memory behaves. One effect is that page size of
4148   // shared memory cannot be change dynamically, effectivly preventing
4149   // large pages from working.
4150   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4151   // recommendation is (in OSS notes) to switch it off.
4152   p = ::getenv("EXTSHM");
4153   trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
4154   if (p && strcasecmp(p, "ON") == 0) {
4155     _extshm = 1;
4156     trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
4157     if (!AllowExtshm) {
4158       // We allow under certain conditions the user to continue. However, we want this
4159       // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
4160       // that the VM is not able to allocate 64k pages for the heap.
4161       // We do not want to run with reduced performance.
4162       vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
4163     }
4164   } else {
4165     _extshm = 0;
4166   }
4167 
4168   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4169   // Not tested, not supported.
4170   //
4171   // Note that it might be worth the trouble to test and to require it, if only to
4172   // get useful return codes for mprotect.
4173   //
4174   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4175   // exec() ? before loading the libjvm ? ....)
4176   p = ::getenv("XPG_SUS_ENV");
4177   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4178   if (p && strcmp(p, "ON") == 0) {
4179     _xpg_sus_mode = 1;
4180     trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
4181     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4182     // clobber address ranges. If we ever want to support that, we have to do some
4183     // testing first.
4184     guarantee(false, "XPG_SUS_ENV=ON not supported");
4185   } else {
4186     _xpg_sus_mode = 0;
4187   }
4188 
4189   if (os::Aix::on_pase()) {
4190     p = ::getenv("QIBM_MULTI_THREADED");
4191     trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
4192   }
4193 
4194   p = ::getenv("LDR_CNTRL");
4195   trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
4196   if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
4197     if (p && ::strstr(p, "TEXTPSIZE")) {
4198       trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
4199         "you may experience hangs or crashes on OS/400 V7R1.");
4200     }
4201   }
4202 
4203   p = ::getenv("AIXTHREAD_GUARDPAGES");
4204   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4205 
4206 } // end: os::Aix::scan_environment()
4207 
4208 // PASE: initialize the libo4 library (PASE porting library).
4209 void os::Aix::initialize_libo4() {
4210   guarantee(os::Aix::on_pase(), "OS/400 only.");
4211   if (!libo4::init()) {
4212     trcVerbose("libo4 initialization failed.");
4213     assert(false, "libo4 initialization failed");
4214   } else {
4215     trcVerbose("libo4 initialized.");
4216   }
4217 }
4218 
4219 // AIX: initialize the libperfstat library.
4220 void os::Aix::initialize_libperfstat() {
4221   assert(os::Aix::on_aix(), "AIX only");
4222   if (!libperfstat::init()) {
4223     trcVerbose("libperfstat initialization failed.");
4224     assert(false, "libperfstat initialization failed");
4225   } else {
4226     trcVerbose("libperfstat initialized.");
4227   }
4228 }
4229 
4230 /////////////////////////////////////////////////////////////////////////////
4231 // thread stack
4232 
4233 // Get the current stack base from the OS (actually, the pthread library).
4234 // Note: usually not page aligned.
4235 address os::current_stack_base() {
4236   AixMisc::stackbounds_t bounds;
4237   bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4238   guarantee(rc, "Unable to retrieve stack bounds.");
4239   return bounds.base;
4240 }
4241 
4242 // Get the current stack size from the OS (actually, the pthread library).
4243 // Returned size is such that (base - size) is always aligned to page size.
4244 size_t os::current_stack_size() {
4245   AixMisc::stackbounds_t bounds;
4246   bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4247   guarantee(rc, "Unable to retrieve stack bounds.");
4248   // Align the returned stack size such that the stack low address
4249   // is aligned to page size (Note: base is usually not and we do not care).
4250   // We need to do this because caller code will assume stack low address is
4251   // page aligned and will place guard pages without checking.
4252   address low = bounds.base - bounds.size;
4253   address low_aligned = (address)align_up(low, os::vm_page_size());
4254   size_t s = bounds.base - low_aligned;
4255   return s;
4256 }
4257 
4258 extern char** environ;
4259 
4260 // Run the specified command in a separate process. Return its exit value,
4261 // or -1 on failure (e.g. can't fork a new process).
4262 // Unlike system(), this function can be called from signal handler. It
4263 // doesn't block SIGINT et al.
4264 int os::fork_and_exec(char* cmd) {
4265   char * argv[4] = {"sh", "-c", cmd, NULL};
4266 
4267   pid_t pid = fork();
4268 
4269   if (pid < 0) {
4270     // fork failed
4271     return -1;
4272 
4273   } else if (pid == 0) {
4274     // child process
4275 
4276     // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4277     execve("/usr/bin/sh", argv, environ);
4278 
4279     // execve failed
4280     _exit(-1);
4281 
4282   } else {
4283     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4284     // care about the actual exit code, for now.
4285 
4286     int status;
4287 
4288     // Wait for the child process to exit. This returns immediately if
4289     // the child has already exited. */
4290     while (waitpid(pid, &status, 0) < 0) {
4291       switch (errno) {
4292         case ECHILD: return 0;
4293         case EINTR: break;
4294         default: return -1;
4295       }
4296     }
4297 
4298     if (WIFEXITED(status)) {
4299       // The child exited normally; get its exit code.
4300       return WEXITSTATUS(status);
4301     } else if (WIFSIGNALED(status)) {
4302       // The child exited because of a signal.
4303       // The best value to return is 0x80 + signal number,
4304       // because that is what all Unix shells do, and because
4305       // it allows callers to distinguish between process exit and
4306       // process death by signal.
4307       return 0x80 + WTERMSIG(status);
4308     } else {
4309       // Unknown exit code; pass it through.
4310       return status;
4311     }
4312   }
4313   return -1;
4314 }
4315 
4316 // is_headless_jre()
4317 //
4318 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
4319 // in order to report if we are running in a headless jre.
4320 //
4321 // Since JDK8 xawt/libmawt.so is moved into the same directory
4322 // as libawt.so, and renamed libawt_xawt.so
4323 bool os::is_headless_jre() {
4324   struct stat statbuf;
4325   char buf[MAXPATHLEN];
4326   char libmawtpath[MAXPATHLEN];
4327   const char *xawtstr = "/xawt/libmawt.so";
4328   const char *new_xawtstr = "/libawt_xawt.so";
4329 
4330   char *p;
4331 
4332   // Get path to libjvm.so
4333   os::jvm_path(buf, sizeof(buf));
4334 
4335   // Get rid of libjvm.so
4336   p = strrchr(buf, '/');
4337   if (p == NULL) return false;
4338   else *p = '\0';
4339 
4340   // Get rid of client or server
4341   p = strrchr(buf, '/');
4342   if (p == NULL) return false;
4343   else *p = '\0';
4344 
4345   // check xawt/libmawt.so
4346   strcpy(libmawtpath, buf);
4347   strcat(libmawtpath, xawtstr);
4348   if (::stat(libmawtpath, &statbuf) == 0) return false;
4349 
4350   // check libawt_xawt.so
4351   strcpy(libmawtpath, buf);
4352   strcat(libmawtpath, new_xawtstr);
4353   if (::stat(libmawtpath, &statbuf) == 0) return false;
4354 
4355   return true;
4356 }
4357 
4358 // Get the default path to the core file
4359 // Returns the length of the string
4360 int os::get_core_path(char* buffer, size_t bufferSize) {
4361   const char* p = get_current_directory(buffer, bufferSize);
4362 
4363   if (p == NULL) {
4364     assert(p != NULL, "failed to get current directory");
4365     return 0;
4366   }
4367 
4368   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4369                                                p, current_process_id());
4370 
4371   return strlen(buffer);
4372 }
4373 
#ifndef PRODUCT
void TestReserveMemorySpecial_test() {
  // This platform provides no special memory-reservation path to exercise.
}
#endif
4379 
4380 bool os::start_debugging(char *buf, int buflen) {
4381   int len = (int)strlen(buf);
4382   char *p = &buf[len];
4383 
4384   jio_snprintf(p, buflen -len,
4385                  "\n\n"
4386                  "Do you want to debug the problem?\n\n"
4387                  "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
4388                  "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
4389                  "Otherwise, press RETURN to abort...",
4390                  os::current_process_id(),
4391                  os::current_thread_id(), thread_self());
4392 
4393   bool yes = os::message_box("Unexpected Error", buf);
4394 
4395   if (yes) {
4396     // yes, user asked VM to launch debugger
4397     jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
4398 
4399     os::fork_and_exec(buf);
4400     yes = false;
4401   }
4402   return yes;
4403 }
4404 
4405 static inline time_t get_mtime(const char* filename) {
4406   struct stat st;
4407   int ret = os::stat(filename, &st);
4408   assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
4409   return st.st_mtime;
4410 }
4411 
4412 int os::compare_file_modified_times(const char* file1, const char* file2) {
4413   time_t t1 = get_mtime(file1);
4414   time_t t2 = get_mtime(file2);
4415   return t1 - t2;
4416 }