1 /*
   2  * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 // According to the AIX OS doc #pragma alloca must be used
  27 // with C++ compiler before referencing the function alloca()
  28 #pragma alloca
  29 
  30 // no precompiled headers
  31 #include "classfile/classLoader.hpp"
  32 #include "classfile/systemDictionary.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "jvm_aix.h"
  39 #include "logging/log.hpp"
  40 #include "libo4.hpp"
  41 #include "libperfstat_aix.hpp"
  42 #include "libodm_aix.hpp"
  43 #include "loadlib_aix.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "memory/filemap.hpp"
  46 #include "misc_aix.hpp"
  47 #include "oops/oop.inline.hpp"
  48 #include "os_aix.inline.hpp"
  49 #include "os_share_aix.hpp"
  50 #include "porting_aix.hpp"
  51 #include "prims/jniFastGetField.hpp"
  52 #include "prims/jvm.h"
  53 #include "prims/jvm_misc.hpp"
  54 #include "runtime/arguments.hpp"
  55 #include "runtime/atomic.hpp"
  56 #include "runtime/extendedPC.hpp"
  57 #include "runtime/globals.hpp"
  58 #include "runtime/interfaceSupport.hpp"
  59 #include "runtime/java.hpp"
  60 #include "runtime/javaCalls.hpp"
  61 #include "runtime/mutexLocker.hpp"
  62 #include "runtime/objectMonitor.hpp"
  63 #include "runtime/orderAccess.inline.hpp"
  64 #include "runtime/os.hpp"
  65 #include "runtime/osThread.hpp"
  66 #include "runtime/perfMemory.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/statSampler.hpp"
  69 #include "runtime/stubRoutines.hpp"
  70 #include "runtime/thread.inline.hpp"
  71 #include "runtime/threadCritical.hpp"
  72 #include "runtime/timer.hpp"
  73 #include "runtime/vm_version.hpp"
  74 #include "services/attachListener.hpp"
  75 #include "services/runtimeService.hpp"
  76 #include "utilities/align.hpp"
  77 #include "utilities/decoder.hpp"
  78 #include "utilities/defaultStream.hpp"
  79 #include "utilities/events.hpp"
  80 #include "utilities/growableArray.hpp"
  81 #include "utilities/vmError.hpp"
  82 
  83 // put OS-includes here (sorted alphabetically)
  84 #include <errno.h>
  85 #include <fcntl.h>
  86 #include <inttypes.h>
  87 #include <poll.h>
  88 #include <procinfo.h>
  89 #include <pthread.h>
  90 #include <pwd.h>
  91 #include <semaphore.h>
  92 #include <signal.h>
  93 #include <stdint.h>
  94 #include <stdio.h>
  95 #include <string.h>
  96 #include <unistd.h>
  97 #include <sys/ioctl.h>
  98 #include <sys/ipc.h>
  99 #include <sys/mman.h>
 100 #include <sys/resource.h>
 101 #include <sys/select.h>
 102 #include <sys/shm.h>
 103 #include <sys/socket.h>
 104 #include <sys/stat.h>
 105 #include <sys/sysinfo.h>
 106 #include <sys/systemcfg.h>
 107 #include <sys/time.h>
 108 #include <sys/times.h>
 109 #include <sys/types.h>
 110 #include <sys/utsname.h>
 111 #include <sys/vminfo.h>
 112 #include <sys/wait.h>
 113 
 114 // Missing prototypes for various system APIs.
 115 extern "C"
 116 int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
 117 
 118 #if !defined(_AIXVERSION_610)
 119 extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
 120 extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
 121 extern "C" int getargs   (procsinfo*, int, char*, int);
 122 #endif
 123 
 124 #define MAX_PATH (2 * K)
 125 
 126 // for timer info max values which include all bits
 127 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 128 // for multipage initialization error analysis (in 'g_multipage_error')
 129 #define ERROR_MP_OS_TOO_OLD                          100
 130 #define ERROR_MP_EXTSHM_ACTIVE                       101
 131 #define ERROR_MP_VMGETINFO_FAILED                    102
 132 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 133 
 134 static address resolve_function_descriptor_to_code_pointer(address p);
 135 
 136 static void vmembk_print_on(outputStream* os);
 137 
 138 ////////////////////////////////////////////////////////////////////////////////
 139 // global variables (for a description see os_aix.hpp)
 140 
 141 julong    os::Aix::_physical_memory = 0;
 142 
 143 pthread_t os::Aix::_main_thread = ((pthread_t)0);
 144 int       os::Aix::_page_size = -1;
 145 
 146 // -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
 147 int       os::Aix::_on_pase = -1;
 148 
 149 // 0 = uninitialized, otherwise 32 bit number:
 150 //  0xVVRRTTSS
 151 //  VV - major version
 152 //  RR - minor version
 153 //  TT - tech level, if known, 0 otherwise
 154 //  SS - service pack, if known, 0 otherwise
 155 uint32_t  os::Aix::_os_version = 0;
 156 
 157 // -1 = uninitialized, 0 - no, 1 - yes
 158 int       os::Aix::_xpg_sus_mode = -1;
 159 
 160 // -1 = uninitialized, 0 - no, 1 - yes
 161 int       os::Aix::_extshm = -1;
 162 
 163 ////////////////////////////////////////////////////////////////////////////////
 164 // local variables
 165 
 166 static jlong    initial_time_count = 0;
 167 static int      clock_tics_per_sec = 100;
 168 static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
 169 static bool     check_signals      = true;
 170 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 171 static sigset_t SR_sigset;
 172 
 173 // Process break recorded at startup.
 174 static address g_brk_at_startup = NULL;
 175 
 176 // This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
 178 // therefore should not be defined in AIX class.
 179 //
 180 // AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
 181 // latter two (16M "large" resp. 16G "huge" pages) require special
 182 // setup and are normally not available.
 183 //
 184 // AIX supports multiple page sizes per process, for:
 185 //  - Stack (of the primordial thread, so not relevant for us)
 186 //  - Data - data, bss, heap, for us also pthread stacks
 187 //  - Text - text code
 188 //  - shared memory
 189 //
 190 // Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
 191 // and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
 192 //
 193 // For shared memory, page size can be set dynamically via
 194 // shmctl(). Different shared memory regions can have different page
 195 // sizes.
 196 //
// More information can be found at the IBM info center:
 198 //   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
 199 //
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  // (size_t)-1 in the size fields means "not yet determined"; filled in
  // once at startup by query_multipage_support().
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};
 218 
 219 // We must not accidentally allocate memory close to the BRK - even if
 220 // that would work - because then we prevent the BRK segment from
 221 // growing which may result in a malloc OOM even though there is
 222 // enough memory. The problem only arises if we shmat() or mmap() at
 223 // a specific wish address, e.g. to place the heap in a
 224 // compressed-oops-friendly way.
 225 static bool is_close_to_brk(address a) {
 226   assert0(g_brk_at_startup != NULL);
 227   if (a >= g_brk_at_startup &&
 228       a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
 229     return true;
 230   }
 231   return false;
 232 }
 233 
// Platform-independent entry point: amount of currently available
// physical memory. Delegates to the AIX-specific implementation.
julong os::available_memory() {
  return Aix::available_memory();
}
 237 
julong os::Aix::available_memory() {
  // Avoid expensive API call here, as returned value will always be null.
  if (os::Aix::on_pase()) {
    return 0x0LL;
  }
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    // Free real (physical) memory in bytes, as reported by perfstat.
    return mi.real_free;
  } else {
    // NOTE(review): on query failure this reports ULONG_MAX, presumably
    // meaning "unknown/unlimited" rather than 0 — confirm callers treat
    // it that way.
    return ULONG_MAX;
  }
}
 250 
// Platform-independent entry point: total physical memory.
// Delegates to the AIX-specific accessor (value is cached at startup
// by initialize_system_info(), see below).
julong os::physical_memory() {
  return Aix::physical_memory();
}
 254 
 255 // Return true if user is running as root.
 256 
 257 bool os::have_special_privileges() {
 258   static bool init = false;
 259   static bool privileges = false;
 260   if (!init) {
 261     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 262     init = true;
 263   }
 264   return privileges;
 265 }
 266 
 267 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 268 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 269 static bool my_disclaim64(char* addr, size_t size) {
 270 
 271   if (size == 0) {
 272     return true;
 273   }
 274 
 275   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 276   const unsigned int maxDisclaimSize = 0x40000000;
 277 
 278   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 279   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 280 
 281   char* p = addr;
 282 
 283   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
 284     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 285       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 286       return false;
 287     }
 288     p += maxDisclaimSize;
 289   }
 290 
 291   if (lastDisclaimSize > 0) {
 292     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 293       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 294       return false;
 295     }
 296   }
 297 
 298   return true;
 299 }
 300 
 301 // Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
// Fail the build on unexpected targets rather than silently reporting a wrong arch.
#error Add appropriate cpu_arch setting
#endif
 309 
 310 // Wrap the function "vmgetinfo" which is not available on older OS releases.
static int checked_vmgetinfo(void *out, int command, int arg) {
  // vmgetinfo() is not available on OS/400 (PASE) releases older than
  // V6R1; guarantee against calling it there instead of failing obscurely.
  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
    guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
  }
  return ::vmgetinfo(out, command, arg);
}
 317 
 318 // Given an address, returns the size of the page backing that address.
 319 size_t os::Aix::query_pagesize(void* addr) {
 320 
 321   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 322     // AS/400 older than V6R1: no vmgetinfo here, default to 4K
 323     return 4*K;
 324   }
 325 
 326   vm_page_info pi;
 327   pi.addr = (uint64_t)addr;
 328   if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
 329     return pi.pagesize;
 330   } else {
 331     assert(false, "vmgetinfo failed to retrieve page size");
 332     return 4*K;
 333   }
 334 }
 335 
// Fills in _processor_count and _physical_memory. Called once at startup.
void os::Aix::initialize_system_info() {

  // Get the number of online(logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    assert(false, "os::Aix::get_meminfo failed.");
  }
  // In release builds the assert above is compiled out; get_meminfo()
  // zeroes mi on entry, so a failed query yields _physical_memory == 0.
  _physical_memory = (julong) mi.real_total;
}
 349 
 350 // Helper function for tracing page sizes.
 351 static const char* describe_pagesize(size_t pagesize) {
 352   switch (pagesize) {
 353     case 4*K : return "4K";
 354     case 64*K: return "64K";
 355     case 16*M: return "16M";
 356     case 16*G: return "16G";
 357     default:
 358       assert(false, "surprise");
 359       return "??";
 360   }
 361 }
 362 
 363 // Probe OS for multipage support.
 364 // Will fill the global g_multipage_support structure.
 365 // Must be called before calling os::large_page_init().
 366 static void query_multipage_support() {
 367 
 368   guarantee(g_multipage_support.pagesize == -1,
 369             "do not call twice");
 370 
 371   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
 372 
 373   // This really would surprise me.
 374   assert(g_multipage_support.pagesize == 4*K, "surprise!");
 375 
 376   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 377   // Default data page size is defined either by linker options (-bdatapsize)
 378   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 379   // default should be 4K.
 380   {
 381     void* p = ::malloc(16*M);
 382     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
 383     ::free(p);
 384   }
 385 
 386   // Query default shm page size (LDR_CNTRL SHMPSIZE).
 387   // Note that this is pure curiosity. We do not rely on default page size but set
 388   // our own page size after allocated.
 389   {
 390     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 391     guarantee(shmid != -1, "shmget failed");
 392     void* p = ::shmat(shmid, NULL, 0);
 393     ::shmctl(shmid, IPC_RMID, NULL);
 394     guarantee(p != (void*) -1, "shmat failed");
 395     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
 396     ::shmdt(p);
 397   }
 398 
 399   // Before querying the stack page size, make sure we are not running as primordial
 400   // thread (because primordial thread's stack may have different page size than
 401   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 402   // number of reasons so we may just as well guarantee it here.
 403   guarantee0(!os::Aix::is_primordial_thread());
 404 
 405   // Query pthread stack page size. Should be the same as data page size because
 406   // pthread stacks are allocated from C-Heap.
 407   {
 408     int dummy = 0;
 409     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
 410   }
 411 
 412   // Query default text page size (LDR_CNTRL TEXTPSIZE).
 413   {
 414     address any_function =
 415       resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
 416     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
 417   }
 418 
 419   // Now probe for support of 64K pages and 16M pages.
 420 
 421   // Before OS/400 V6R1, there is no support for pages other than 4K.
 422   if (os::Aix::on_pase_V5R4_or_older()) {
 423     trcVerbose("OS/400 < V6R1 - no large page support.");
 424     g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
 425     goto query_multipage_support_end;
 426   }
 427 
 428   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
 429   {
 430     const int MAX_PAGE_SIZES = 4;
 431     psize_t sizes[MAX_PAGE_SIZES];
 432     const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 433     if (num_psizes == -1) {
 434       trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
 435       trcVerbose("disabling multipage support.");
 436       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
 437       goto query_multipage_support_end;
 438     }
 439     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 440     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 441     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
 442     for (int i = 0; i < num_psizes; i ++) {
 443       trcVerbose(" %s ", describe_pagesize(sizes[i]));
 444     }
 445 
 446     // Can we use 64K, 16M pages?
 447     for (int i = 0; i < num_psizes; i ++) {
 448       const size_t pagesize = sizes[i];
 449       if (pagesize != 64*K && pagesize != 16*M) {
 450         continue;
 451       }
 452       bool can_use = false;
 453       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
 454       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
 455         IPC_CREAT | S_IRUSR | S_IWUSR);
 456       guarantee0(shmid != -1); // Should always work.
 457       // Try to set pagesize.
 458       struct shmid_ds shm_buf = { 0 };
 459       shm_buf.shm_pagesize = pagesize;
 460       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
 461         const int en = errno;
 462         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 463         trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%n",
 464           errno);
 465       } else {
 466         // Attach and double check pageisze.
 467         void* p = ::shmat(shmid, NULL, 0);
 468         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 469         guarantee0(p != (void*) -1); // Should always work.
 470         const size_t real_pagesize = os::Aix::query_pagesize(p);
 471         if (real_pagesize != pagesize) {
 472           trcVerbose("real page size (0x%llX) differs.", real_pagesize);
 473         } else {
 474           can_use = true;
 475         }
 476         ::shmdt(p);
 477       }
 478       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
 479       if (pagesize == 64*K) {
 480         g_multipage_support.can_use_64K_pages = can_use;
 481       } else if (pagesize == 16*M) {
 482         g_multipage_support.can_use_16M_pages = can_use;
 483       }
 484     }
 485 
 486   } // end: check which pages can be used for shared memory
 487 
 488 query_multipage_support_end:
 489 
 490   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
 491       describe_pagesize(g_multipage_support.pagesize));
 492   trcVerbose("Data page size (C-Heap, bss, etc): %s",
 493       describe_pagesize(g_multipage_support.datapsize));
 494   trcVerbose("Text page size: %s",
 495       describe_pagesize(g_multipage_support.textpsize));
 496   trcVerbose("Thread stack page size (pthread): %s",
 497       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
 498   trcVerbose("Default shared memory page size: %s",
 499       describe_pagesize(g_multipage_support.shmpsize));
 500   trcVerbose("Can use 64K pages dynamically with shared meory: %s",
 501       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
 502   trcVerbose("Can use 16M pages dynamically with shared memory: %s",
 503       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
 504   trcVerbose("Multipage error details: %d",
 505       g_multipage_support.error);
 506 
 507   // sanity checks
 508   assert0(g_multipage_support.pagesize == 4*K);
 509   assert0(g_multipage_support.datapsize == 4*K || g_multipage_support.datapsize == 64*K);
 510   assert0(g_multipage_support.textpsize == 4*K || g_multipage_support.textpsize == 64*K);
 511   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
 512   assert0(g_multipage_support.shmpsize == 4*K || g_multipage_support.shmpsize == 64*K);
 513 
 514 }
 515 
// Computes and publishes the system properties derived from the JVM's own
// location and the environment: dll dir, java home, library path, ext dirs.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/lib:/usr/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    // Only strip one more component if the previous strrchr succeeded
    // (pslash still holds the result of the second search above).
    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';        // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  // If LIBPATH is unset, the result is just the invariant default path
  // (no leading colon).
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}
 581 
 582 ////////////////////////////////////////////////////////////////////////////////
 583 // breakpoint support
 584 
// Programmatic breakpoint: expands to the platform BREAKPOINT macro so a
// debugger stops here when called.
void os::breakpoint() {
  BREAKPOINT;
}
 588 
// Intentionally empty C-linkage function; exists only as a convenient
// symbol on which to set a debugger breakpoint.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
 592 
 593 ////////////////////////////////////////////////////////////////////////////////
 594 // signal support
 595 
 596 debug_only(static bool signal_sets_initialized = false);
 597 static sigset_t unblocked_sigs, vm_sigs;
 598 
 599 bool os::Aix::is_sig_ignored(int sig) {
 600   struct sigaction oact;
 601   sigaction(sig, (struct sigaction*)NULL, &oact);
 602   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
 603     : CAST_FROM_FN_PTR(void*, oact.sa_handler);
 604   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
 605     return true;
 606   } else {
 607     return false;
 608   }
 609 }
 610 
// Builds the two signal sets used by hotspot_sigmask(): signals that must
// be unblocked in every thread, and signals only the VM thread handles.
void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
   // Only unblock a shutdown signal if the application has not set it
   // to SIG_IGN (in which case it wants it left alone).
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}
 652 
 653 // These are signals that are unblocked while a thread is running Java.
 654 // (For some reason, they get blocked by default.)
// Accessor for the set built by signal_sets_init(); must only be called
// after initialization.
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
 659 
 660 // These are the signals that are blocked while a (non-VM) thread is
 661 // running Java. Only the VM thread handles these signals.
// Accessor for the VM-thread-only signal set built by signal_sets_init();
// must only be called after initialization.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
 666 
// Installs the HotSpot signal mask on the current thread and remembers the
// caller's original mask in the OSThread.
void os::Aix::hotspot_sigmask(Thread* thread) {

  //Save caller's signal mask before setting VM signal mask
  sigset_t caller_sigmask;
  // SIG_BLOCK with a NULL new set leaves the mask unchanged and merely
  // retrieves the current mask into caller_sigmask.
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}
 688 
 689 // retrieve memory information.
 690 // Returns false if something went wrong;
 691 // content of pmi undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  // Zero the output up front so callers see all-zero values on failure.
  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {
    // On PASE, use the libo4 porting library.

    unsigned long long virt_total = 0;
    unsigned long long real_total = 0;
    unsigned long long real_free = 0;
    unsigned long long pgsp_total = 0;
    unsigned long long pgsp_free = 0;
    if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
      pmi->virt_total = virt_total;
      pmi->real_total = real_total;
      pmi->real_free = real_free;
      pmi->pgsp_total = pgsp_total;
      pmi->pgsp_free = pgsp_free;
      return true;
    }
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    // Request exactly one record of the system-wide memory statistics.
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    // Convert from 4 KB pages (see excerpt above) to bytes.
    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo
 756 
 757 //////////////////////////////////////////////////////////////////////////////
 758 // create new thread
 759 
 760 // Thread start routine for all newly created threads
// Thread start routine for all newly created threads.
// Sets up per-thread state (stack bounds, ids, signal mask, FPU state),
// runs the Thread's run() method, then tears down thread-local storage.
static void *thread_native_entry(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    thread->set_stack_base(os::current_stack_base());
    thread->set_stack_size(os::current_stack_size());
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  LogTarget(Info, os, thread) lt;
  if (lt.is_enabled()) {
    address low_address = thread->stack_end();
    address high_address = thread->stack_base();
    lt.print("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT
             ", stack [" PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "k using %uk pages)).",
             os::current_thread_id(), (uintx) kernel_thread_id, low_address, high_address,
             (high_address - low_address) / K, os::Aix::query_pagesize(low_address) / K);
  }

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    log_warning(os, thread)("Thread stack not in data segment.");
  }

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  // Shift this thread's stack by a pseudo-random multiple of 128 bytes
  // (0..7 * 128); alloca() moves the stack pointer for the whole lifetime
  // of this frame and thus of the thread.
  alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // Thread_id is pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  os::Aix::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->run();

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  return 0;
}
 837 
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t req_stack_size) {

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object.
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Set the correct thread state.
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // Init thread attributes.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  }

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // Calculate stack size if it's not specified by caller.
  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);

  // JDK-8187028: It was observed that on some configurations (4K backed thread stacks)
  // the real thread stack size may be smaller than the requested stack size, by as much as 64K.
  // This very much looks like a pthread lib error. As a workaround, increase the stack size
  // by 64K for small thread stacks (arbitrarily choosen to be < 4MB)
  if (stack_size < 4096 * K) {
    stack_size += 64 * K;
  }

  // On Aix, pthread_attr_setstacksize fails with huge values and leaves the
  // thread size in attr unchanged. If this is the minimal stack size as set
  // by pthread_attr_init this leads to crashes after thread creation. E.g. the
  // guard pages might not fit on the tiny stack created.
  int ret = pthread_attr_setstacksize(&attr, stack_size);
  if (ret != 0) {
    // Only warn here; the failure is reported after pthread_create below.
    log_warning(os, thread)("The thread stack size specified is invalid: " SIZE_FORMAT "k",
                            stack_size / K);
  }

  // Configure libc guard page.
  ret = pthread_attr_setguardsize(&attr, os::Aix::default_guard_size(thr_type));

  pthread_t tid = 0;
  if (ret == 0) {
    // The thread starts suspended (see setsuspendstate above) and is resumed
    // later via pthread_continue_np() in os::pd_start_thread().
    ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);
  }

  if (ret == 0) {
    char buf[64];
    log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
      (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  } else {
    char buf[64];
    log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.",
      ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  }

  pthread_attr_destroy(&attr);

  if (ret != 0) {
    // Need to clean up stuff we've allocated so far.
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(tid);

  return true;
}
 924 
 925 /////////////////////////////////////////////////////////////////////////////
 926 // attach existing thread
 927 
 928 // bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  // Must run on the primordial thread; after the check it simply attaches
  // like any other externally created thread.
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}
 933 
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Record both the pthread id (canonical) and the AIX kernel thread id
  // (diagnostics only) of the already-running caller thread.
  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  return true;
}
 979 
 980 void os::pd_start_thread(Thread* thread) {
 981   int status = pthread_continue_np(thread->osthread()->pthread_id());
 982   assert(status == 0, "thr_continue failed");
 983 }
 984 
 985 // Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

  // Restore caller's signal mask
  // (saved earlier by os::Aix::hotspot_sigmask() when the thread was attached/started).
  sigset_t sigmask = osthread->caller_sigmask();
  pthread_sigmask(SIG_SETMASK, &sigmask, NULL);

  delete osthread;
}
1000 
1001 ////////////////////////////////////////////////////////////////////////////////
1002 // time support
1003 
1004 // Time since start-up in seconds to a fine granularity.
1005 // Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  // elapsed_counter() ticks in microseconds (see below); scale to seconds.
  return (double)(os::elapsed_counter()) * 0.000001;
}
1009 
1010 jlong os::elapsed_counter() {
1011   timeval time;
1012   int status = gettimeofday(&time, NULL);
1013   return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
1014 }
1015 
1016 jlong os::elapsed_frequency() {
1017   return (1000 * 1000);
1018 }
1019 
bool os::supports_vtime() { return true; }
// Virtual time needs no explicit enabling here; elapsedVTime() below reads
// thread CPU time directly via getrusage.
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }
1023 
1024 double os::elapsedVTime() {
1025   struct rusage usage;
1026   int retval = getrusage(RUSAGE_THREAD, &usage);
1027   if (retval == 0) {
1028     return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
1029   } else {
1030     // better than nothing, but not much
1031     return elapsedTime();
1032   }
1033 }
1034 
1035 jlong os::javaTimeMillis() {
1036   timeval time;
1037   int status = gettimeofday(&time, NULL);
1038   assert(status != -1, "aix error at gettimeofday()");
1039   return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1040 }
1041 
1042 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1043   timeval time;
1044   int status = gettimeofday(&time, NULL);
1045   assert(status != -1, "aix error at gettimeofday()");
1046   seconds = jlong(time.tv_sec);
1047   nanos = jlong(time.tv_usec) * 1000;
1048 }
1049 
jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {

    // PASE has no usable time base register; fall back to gettimeofday,
    // so the effective resolution here is microseconds.
    timeval time;
    int status = gettimeofday(&time, NULL);
    assert(status != -1, "PASE error at gettimeofday()");
    jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
    return 1000 * usecs;

  } else {
    // On AIX use the precision of processors real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After convertion we have following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonic increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}
1078 
1079 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1080   info_ptr->max_value = ALL_64_BITS;
1081   // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
1082   info_ptr->may_skip_backward = false;
1083   info_ptr->may_skip_forward = false;
1084   info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
1085 }
1086 
1087 // Return the real, user, and system times in seconds from an
1088 // arbitrary fixed point in the past.
1089 bool os::getTimesSecs(double* process_real_time,
1090                       double* process_user_time,
1091                       double* process_system_time) {
1092   struct tms ticks;
1093   clock_t real_ticks = times(&ticks);
1094 
1095   if (real_ticks == (clock_t) (-1)) {
1096     return false;
1097   } else {
1098     double ticks_per_second = (double) clock_tics_per_sec;
1099     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1100     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1101     *process_real_time = ((double) real_ticks) / ticks_per_second;
1102 
1103     return true;
1104   }
1105 }
1106 
1107 char * os::local_time_string(char *buf, size_t buflen) {
1108   struct tm t;
1109   time_t long_time;
1110   time(&long_time);
1111   localtime_r(&long_time, &t);
1112   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1113                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1114                t.tm_hour, t.tm_min, t.tm_sec);
1115   return buf;
1116 }
1117 
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  // Thread-safe localtime variant; the result is written into the
  // caller-provided 'res' buffer.
  return localtime_r(clock, res);
}
1121 
1122 ////////////////////////////////////////////////////////////////////////////////
1123 // runtime exit support
1124 
1125 // Note: os::shutdown() might be called very early during initialization, or
1126 // called from signal handler. Before adding something to os::shutdown(), make
1127 // sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  // (user-registered hook runs last, after all VM-side cleanup).
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
1145 
1146 // Note: os::abort() might be called very early during initialization, or
1147 // called from signal handler. Before adding something to os::abort(), make
1148 // sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  // No core requested: terminate with a plain nonzero exit code.
  ::exit(1);
}
1165 
1166 // Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  // ::abort() terminates the process at once - no hooks, no stream flushing.
  ::abort();
}
1170 
1171 // This method is a copy of JDK's sysGetLastErrorString
1172 // from src/solaris/hpi/src/system_md.c
1173 
1174 size_t os::lasterror(char *buf, size_t len) {
1175   if (errno == 0) return 0;
1176 
1177   const char *s = os::strerror(errno);
1178   size_t n = ::strlen(s);
1179   if (n >= len) {
1180     n = len - 1;
1181   }
1182   ::strncpy(buf, s, n);
1183   buf[n] = '\0';
1184   return n;
1185 }
1186 
intx os::current_thread_id() {
  // The pthread id (not the AIX kernel thread id) is the canonical thread id.
  return (intx)pthread_self();
}

int os::current_process_id() {
  return getpid();
}
1194 
1195 // DLL functions
1196 
1197 const char* os::dll_file_extension() { return ".so"; }
1198 
1199 // This must be hard coded because it's the system's temporary
1200 // directory not the java application's temp directory, ala java.io.tmpdir.
1201 const char* os::get_temp_directory() { return "/tmp"; }
1202 
1203 // Check if addr is inside libjvm.so.
1204 bool os::address_is_in_vm(address addr) {
1205 
1206   // Input could be a real pc or a function pointer literal. The latter
1207   // would be a function descriptor residing in the data segment of a module.
1208   loaded_module_t lm;
1209   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
1210     return lm.is_in_vm;
1211   } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
1212     return lm.is_in_vm;
1213   } else {
1214     return false;
1215   }
1216 
1217 }
1218 
1219 // Resolve an AIX function descriptor literal to a code pointer.
1220 // If the input is a valid code pointer to a text segment of a loaded module,
1221 //   it is returned unchanged.
1222 // If the input is a valid AIX function descriptor, it is resolved to the
1223 //   code entry point.
1224 // If the input is neither a valid function descriptor nor a valid code pointer,
1225 //   NULL is returned.
1226 static address resolve_function_descriptor_to_code_pointer(address p) {
1227 
1228   if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
1229     // It is a real code pointer.
1230     return p;
1231   } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
1232     // Pointer to data segment, potential function descriptor.
1233     address code_entry = (address)(((FunctionDescriptor*)p)->entry());
1234     if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
1235       // It is a function descriptor.
1236       return code_entry;
1237     }
1238   }
1239 
1240   return NULL;
1241 }
1242 
1243 bool os::dll_address_to_function_name(address addr, char *buf,
1244                                       int buflen, int *offset,
1245                                       bool demangle) {
1246   if (offset) {
1247     *offset = -1;
1248   }
1249   // Buf is not optional, but offset is optional.
1250   assert(buf != NULL, "sanity check");
1251   buf[0] = '\0';
1252 
1253   // Resolve function ptr literals first.
1254   addr = resolve_function_descriptor_to_code_pointer(addr);
1255   if (!addr) {
1256     return false;
1257   }
1258 
1259   return AixSymbols::get_function_name(addr, buf, buflen, offset, NULL, demangle);
1260 }
1261 
1262 bool os::dll_address_to_library_name(address addr, char* buf,
1263                                      int buflen, int* offset) {
1264   if (offset) {
1265     *offset = -1;
1266   }
1267   // Buf is not optional, but offset is optional.
1268   assert(buf != NULL, "sanity check");
1269   buf[0] = '\0';
1270 
1271   // Resolve function ptr literals first.
1272   addr = resolve_function_descriptor_to_code_pointer(addr);
1273   if (!addr) {
1274     return false;
1275   }
1276 
1277   return AixSymbols::get_module_name(addr, buf, buflen);
1278 }
1279 
1280 // Loads .dll/.so and in case of error it checks if .dll/.so was built
1281 // for the same architecture as Hotspot is running on.
1282 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1283 
1284   if (ebuf && ebuflen > 0) {
1285     ebuf[0] = '\0';
1286     ebuf[ebuflen - 1] = '\0';
1287   }
1288 
1289   if (!filename || strlen(filename) == 0) {
1290     ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1291     return NULL;
1292   }
1293 
1294   // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1295   void * result= ::dlopen(filename, RTLD_LAZY);
1296   if (result != NULL) {
1297     // Reload dll cache. Don't do this in signal handling.
1298     LoadedLibraries::reload();
1299     return result;
1300   } else {
1301     // error analysis when dlopen fails
1302     const char* const error_report = ::dlerror();
1303     if (error_report && ebuf && ebuflen > 0) {
1304       snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1305                filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1306     }
1307   }
1308   return NULL;
1309 }
1310 
1311 void* os::dll_lookup(void* handle, const char* name) {
1312   void* res = dlsym(handle, name);
1313   return res;
1314 }
1315 
void* os::get_default_process_handle() {
  // dlopen(NULL, ...) yields a handle for the main program itself.
  return (void*)::dlopen(NULL, RTLD_LAZY);
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  // Uses the AIX loaded-module bookkeeping (see loadlib_aix.hpp).
  LoadedLibraries::print(st);
}
1324 
1325 void os::get_summary_os_info(char* buf, size_t buflen) {
1326   // There might be something more readable than uname results for AIX.
1327   struct utsname name;
1328   uname(&name);
1329   snprintf(buf, buflen, "%s %s", name.release, name.version);
1330 }
1331 
1332 void os::print_os_info(outputStream* st) {
1333   st->print("OS:");
1334 
1335   st->print("uname:");
1336   struct utsname name;
1337   uname(&name);
1338   st->print(name.sysname); st->print(" ");
1339   st->print(name.nodename); st->print(" ");
1340   st->print(name.release); st->print(" ");
1341   st->print(name.version); st->print(" ");
1342   st->print(name.machine);
1343   st->cr();
1344 
1345   uint32_t ver = os::Aix::os_version();
1346   st->print_cr("AIX kernel version %u.%u.%u.%u",
1347                (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1348 
1349   os::Posix::print_rlimit_info(st);
1350 
1351   // load average
1352   st->print("load average:");
1353   double loadavg[3] = {-1.L, -1.L, -1.L};
1354   os::loadavg(loadavg, 3);
1355   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1356   st->cr();
1357 
1358   // print wpar info
1359   libperfstat::wparinfo_t wi;
1360   if (libperfstat::get_wparinfo(&wi)) {
1361     st->print_cr("wpar info");
1362     st->print_cr("name: %s", wi.name);
1363     st->print_cr("id:   %d", wi.wpar_id);
1364     st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1365   }
1366 
1367   // print partition info
1368   libperfstat::partitioninfo_t pi;
1369   if (libperfstat::get_partitioninfo(&pi)) {
1370     st->print_cr("partition info");
1371     st->print_cr(" name: %s", pi.name);
1372   }
1373 
1374 }
1375 
1376 void os::print_memory_info(outputStream* st) {
1377 
1378   st->print_cr("Memory:");
1379 
1380   st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1381     describe_pagesize(g_multipage_support.pagesize));
1382   st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1383     describe_pagesize(g_multipage_support.datapsize));
1384   st->print_cr("  Text page size:                         %s",
1385     describe_pagesize(g_multipage_support.textpsize));
1386   st->print_cr("  Thread stack page size (pthread):       %s",
1387     describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1388   st->print_cr("  Default shared memory page size:        %s",
1389     describe_pagesize(g_multipage_support.shmpsize));
1390   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1391     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1392   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1393     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1394   st->print_cr("  Multipage error: %d",
1395     g_multipage_support.error);
1396   st->cr();
1397   st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1398 
1399   // print out LDR_CNTRL because it affects the default page sizes
1400   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1401   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1402 
1403   // Print out EXTSHM because it is an unsupported setting.
1404   const char* const extshm = ::getenv("EXTSHM");
1405   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1406   if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1407     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1408   }
1409 
1410   // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1411   const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1412   st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1413       aixthread_guardpages ? aixthread_guardpages : "<unset>");
1414 
1415   os::Aix::meminfo_t mi;
1416   if (os::Aix::get_meminfo(&mi)) {
1417     char buffer[256];
1418     if (os::Aix::on_aix()) {
1419       st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1420       st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1421       st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1422       st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1423     } else {
1424       // PASE - Numbers are result of QWCRSSTS; they mean:
1425       // real_total: Sum of all system pools
1426       // real_free: always 0
1427       // pgsp_total: we take the size of the system ASP
1428       // pgsp_free: size of system ASP times percentage of system ASP unused
1429       st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1430       st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1431       st->print_cr("%% system asp used : " SIZE_FORMAT,
1432         mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1433     }
1434     st->print_raw(buffer);
1435   }
1436   st->cr();
1437 
1438   // Print segments allocated with os::reserve_memory.
1439   st->print_cr("internal virtual memory regions used by vm:");
1440   vmembk_print_on(st);
1441 }
1442 
1443 // Get a string for the cpuinfo that is a summary of the cpu type
1444 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1445   // This looks good
1446   libperfstat::cpuinfo_t ci;
1447   if (libperfstat::get_cpuinfo(&ci)) {
1448     strncpy(buf, ci.version, buflen);
1449   } else {
1450     strncpy(buf, "AIX", buflen);
1451   }
1452 }
1453 
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // Nothing to do beyond what os::print_cpu_info() does.
  // (st, buf and buflen are deliberately unused on AIX.)
}
1457 
1458 static void print_signal_handler(outputStream* st, int sig,
1459                                  char* buf, size_t buflen);
1460 
1461 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1462   st->print_cr("Signal Handlers:");
1463   print_signal_handler(st, SIGSEGV, buf, buflen);
1464   print_signal_handler(st, SIGBUS , buf, buflen);
1465   print_signal_handler(st, SIGFPE , buf, buflen);
1466   print_signal_handler(st, SIGPIPE, buf, buflen);
1467   print_signal_handler(st, SIGXFSZ, buf, buflen);
1468   print_signal_handler(st, SIGILL , buf, buflen);
1469   print_signal_handler(st, SR_signum, buf, buflen);
1470   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1471   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1472   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1473   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1474   print_signal_handler(st, SIGTRAP, buf, buflen);
1475   // We also want to know if someone else adds a SIGDANGER handler because
1476   // that will interfere with OOM killling.
1477   print_signal_handler(st, SIGDANGER, buf, buflen);
1478 }
1479 
// Cached result of os::jvm_path(); filled in lazily on first call.
static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Locate the shared object containing this very function via dladdr.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");

  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
    // value for buf is "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.so".
    // If "/jre/lib/" appears at the right place in the string, then
    // assume we are installed in a JDK and we're done. Otherwise, check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like libjvm.so is installed there (append a fake suffix
    // hotspot/libjvm.so).
    // Walk p back across the last four '/' separators of the path.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 4; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char* jrelib_p;
        int len;

        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        if (p == NULL) {
          return;
        }
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        rp = os::Posix::realpath(java_home_var, buf, buflen);
        if (rp == NULL) {
          return;
        }

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer room");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib");
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib");
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
          if (rp == NULL) {
            return;
          }
        }
      }
    }
  }

  // Cache the result for subsequent calls (truncation-safe copy).
  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}
1563 
// JNI symbol names on AIX carry no platform-specific decoration.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
1571 
1572 ////////////////////////////////////////////////////////////////////////////////
1573 // sun.misc.Signal support
1574 
// Number of SIGINTs seen so far; only the first one is forwarded (see below).
static volatile jint sigint_count = 0;

// Handler registered for signals forwarded to the Java-level signal
// dispatcher (sun.misc.Signal support).
static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let VM die immediately.
  if (sig == SIGINT && VMError::is_error_reported()) {
    os::die();
  }

  // Hand the signal number off for asynchronous Java-level dispatch.
  os::signal_notify(sig);
}
1593 
void* os::user_handler() {
  // Address of the UserHandler function defined above.
  return CAST_FROM_FN_PTR(void*, UserHandler);
}
1597 
1598 extern "C" {
1599   typedef void (*sa_handler_t)(int);
1600   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1601 }
1602 
// Install 'handler' for 'signal_number' via sigaction.
// Returns the previously installed handler, or (void*)-1 on failure.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  sigfillset(&(sigAct.sa_mask));

  // Do not block out synchronous signals in the signal handler.
  // Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies. Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
  // On AIX, PASE there was a case where a SIGSEGV happened, followed
  // by a SIGILL, which was blocked due to the signal mask. The process
  // just hung forever. Better to crash from a secondary signal than to hang.
  sigdelset(&(sigAct.sa_mask), SIGSEGV);
  sigdelset(&(sigAct.sa_mask), SIGBUS);
  sigdelset(&(sigAct.sa_mask), SIGILL);
  sigdelset(&(sigAct.sa_mask), SIGFPE);
  sigdelset(&(sigAct.sa_mask), SIGTRAP);

  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;

  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  // Return the previous handler so the caller can restore it later.
  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
1634 
// Sends <signal_number> to the current process via raise(3).
void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}
1638 
1639 //
1640 // The following code is moved from os.cpp for making this
1641 // code platform specific, which it is by its very nature.
1642 //
1643 
1644 // Will be modified when max signal is changed to be dynamic
1645 int os::sigexitnum_pd() {
1646   return NSIG;
1647 }
1648 
// A counter for each possible signal value; the extra slot (index NSIG)
// apparently corresponds to the exit pseudo-signal, see os::sigexitnum_pd().
static volatile jint pending_signals[NSIG+1] = { 0 };

// Wrapper functions for: sem_init(), sem_post(), sem_wait()
// On AIX, we use sem_init(), sem_post(), sem_wait()
// On Pase, we need to use msem_lock() and msem_unlock(), because Posix Semaphores
// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
// Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
// on AIX, msem_..() calls are suspected of causing problems.
static sem_t sig_sem;               // AIX: POSIX semaphore
static msemaphore* p_sig_msem = 0;  // PASE: memory semaphore (allocated lazily)
1660 
1661 static void local_sem_init() {
1662   if (os::Aix::on_aix()) {
1663     int rc = ::sem_init(&sig_sem, 0, 0);
1664     guarantee(rc != -1, "sem_init failed");
1665   } else {
1666     // Memory semaphores must live in shared mem.
1667     guarantee0(p_sig_msem == NULL);
1668     p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
1669     guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
1670     guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
1671   }
1672 }
1673 
1674 static void local_sem_post() {
1675   static bool warn_only_once = false;
1676   if (os::Aix::on_aix()) {
1677     int rc = ::sem_post(&sig_sem);
1678     if (rc == -1 && !warn_only_once) {
1679       trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno));
1680       warn_only_once = true;
1681     }
1682   } else {
1683     guarantee0(p_sig_msem != NULL);
1684     int rc = ::msem_unlock(p_sig_msem, 0);
1685     if (rc == -1 && !warn_only_once) {
1686       trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno));
1687       warn_only_once = true;
1688     }
1689   }
1690 }
1691 
1692 static void local_sem_wait() {
1693   static bool warn_only_once = false;
1694   if (os::Aix::on_aix()) {
1695     int rc = ::sem_wait(&sig_sem);
1696     if (rc == -1 && !warn_only_once) {
1697       trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno));
1698       warn_only_once = true;
1699     }
1700   } else {
1701     guarantee0(p_sig_msem != NULL); // must init before use
1702     int rc = ::msem_lock(p_sig_msem, 0);
1703     if (rc == -1 && !warn_only_once) {
1704       trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno));
1705       warn_only_once = true;
1706     }
1707   }
1708 }
1709 
// Platform-dependent part of signal subsystem initialization: clears the
// pending-signal counters and creates the wakeup semaphore used by
// os::signal_wait() / os::signal_notify().
void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  local_sem_init();
}
1717 
// Records one occurrence of <sig> and wakes the dispatcher thread blocked
// in os::signal_wait(). Called e.g. from UserHandler, i.e. potentially in
// signal context, hence the minimal atomic-increment + semaphore-post body.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  local_sem_post();
}
1722 
// Scans the pending-signal counters for a signal to dispatch. With
// wait == true, blocks on the signal semaphore until one arrives; with
// wait == false, returns -1 immediately if none is pending.
static int check_pending_signals(bool wait) {
  // We are (about to be) dispatching; reset the Ctrl-C flood guard used
  // by UserHandler.
  Atomic::store(0, &sigint_count);
  for (;;) {
    // Claim one occurrence of any pending signal by CAS-decrementing its
    // counter; the CAS guards against concurrent dispatchers.
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      // Block until os::signal_notify() posts the semaphore.
      local_sem_wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //

        // Re-post so the wakeup is not lost while we self-suspend.
        local_sem_post();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
1762 
// Non-blocking probe for a pending signal; returns -1 if none is pending.
int os::signal_lookup() {
  return check_pending_signals(false);
}
1766 
// Blocks until a signal is pending, then returns its number.
int os::signal_wait() {
  return check_pending_signals(true);
}
1770 
1771 ////////////////////////////////////////////////////////////////////////////////
1772 // Virtual Memory
1773 
// We need to keep small simple bookkeeping for os::reserve_memory and friends.

// Discriminator values for vmembk_t::type: how the range was reserved.
#define VMEM_MAPPED  1
#define VMEM_SHMATED 2
1778 
// Bookkeeping record for one memory range handed out by os::reserve_memory
// and friends; kept in a singly linked list (see vmem below).
struct vmembk_t {
  int type;         // 1 - mmap, 2 - shmat
  char* addr;       // start of range
  size_t size;      // Real size, may be larger than usersize.
  size_t pagesize;  // page size of area
  vmembk_t* next;   // next entry in the global list

  // True if p lies inside [addr, addr + size).
  bool contains_addr(char* p) const {
    return p >= addr && p < (addr + size);
  }

  // True if the whole range [p, p + s) lies inside this block.
  bool contains_range(char* p, size_t s) const {
    return contains_addr(p) && contains_addr(p + s - 1);
  }

  void print_on(outputStream* os) const {
    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
      " bytes, %d %s pages), %s",
      addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
      (type == VMEM_SHMATED ? "shmat" : "mmap")
    );
  }

  // Check that range is a sub range of memory block (or equal to memory block);
  // also check that range is fully page aligned to the page size of the block.
  // Fires guarantee0 (also in product builds) on violation.
  void assert_is_valid_subrange(char* p, size_t s) const {
    if (!contains_range(p, s)) {
      trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
              "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
              p, p + s, addr, addr + size);
      guarantee0(false);
    }
    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
      trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
              " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
      guarantee0(false);
    }
  }
};
1818 
// Global list of all reserved memory ranges, protected by its own
// critical section.
static struct {
  vmembk_t* first;
  MiscUtils::CritSect cs;
} vmem;
1823 
1824 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1825   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1826   assert0(p);
1827   if (p) {
1828     MiscUtils::AutoCritSect lck(&vmem.cs);
1829     p->addr = addr; p->size = size;
1830     p->pagesize = pagesize;
1831     p->type = type;
1832     p->next = vmem.first;
1833     vmem.first = p;
1834   }
1835 }
1836 
1837 static vmembk_t* vmembk_find(char* addr) {
1838   MiscUtils::AutoCritSect lck(&vmem.cs);
1839   for (vmembk_t* p = vmem.first; p; p = p->next) {
1840     if (p->addr <= addr && (p->addr + p->size) > addr) {
1841       return p;
1842     }
1843   }
1844   return NULL;
1845 }
1846 
1847 static void vmembk_remove(vmembk_t* p0) {
1848   MiscUtils::AutoCritSect lck(&vmem.cs);
1849   assert0(p0);
1850   assert0(vmem.first); // List should not be empty.
1851   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1852     if (*pp == p0) {
1853       *pp = p0->next;
1854       ::free(p0);
1855       return;
1856     }
1857   }
1858   assert0(false); // Not found?
1859 }
1860 
1861 static void vmembk_print_on(outputStream* os) {
1862   MiscUtils::AutoCritSect lck(&vmem.cs);
1863   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1864     vmi->print_on(os);
1865     os->cr();
1866   }
1867 }
1868 
1869 // Reserve and attach a section of System V memory.
1870 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1871 // address. Failing that, it will attach the memory anywhere.
1872 // If <requested_addr> is NULL, function will attach the memory anywhere.
1873 //
1874 // <alignment_hint> is being ignored by this function. It is very probable however that the
1875 // alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
1876 // Should this be not enogh, we can put more work into it.
1877 static char* reserve_shmated_memory (
1878   size_t bytes,
1879   char* requested_addr,
1880   size_t alignment_hint) {
1881 
1882   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1883     PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1884     bytes, requested_addr, alignment_hint);
1885 
1886   // Either give me wish address or wish alignment but not both.
1887   assert0(!(requested_addr != NULL && alignment_hint != 0));
1888 
1889   // We must prevent anyone from attaching too close to the
1890   // BRK because that may cause malloc OOM.
1891   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1892     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1893       "Will attach anywhere.", requested_addr);
1894     // Act like the OS refused to attach there.
1895     requested_addr = NULL;
1896   }
1897 
1898   // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1899   // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1900   if (os::Aix::on_pase_V5R4_or_older()) {
1901     ShouldNotReachHere();
1902   }
1903 
1904   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1905   const size_t size = align_up(bytes, 64*K);
1906 
1907   // Reserve the shared segment.
1908   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1909   if (shmid == -1) {
1910     trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1911     return NULL;
1912   }
1913 
1914   // Important note:
1915   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
1916   // We must right after attaching it remove it from the system. System V shm segments are global and
1917   // survive the process.
1918   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
1919 
1920   struct shmid_ds shmbuf;
1921   memset(&shmbuf, 0, sizeof(shmbuf));
1922   shmbuf.shm_pagesize = 64*K;
1923   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
1924     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
1925                size / (64*K), errno);
1926     // I want to know if this ever happens.
1927     assert(false, "failed to set page size for shmat");
1928   }
1929 
1930   // Now attach the shared segment.
1931   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
1932   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
1933   // were not a segment boundary.
1934   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
1935   const int errno_shmat = errno;
1936 
1937   // (A) Right after shmat and before handing shmat errors delete the shm segment.
1938   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
1939     trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
1940     assert(false, "failed to remove shared memory segment!");
1941   }
1942 
1943   // Handle shmat error. If we failed to attach, just return.
1944   if (addr == (char*)-1) {
1945     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
1946     return NULL;
1947   }
1948 
1949   // Just for info: query the real page size. In case setting the page size did not
1950   // work (see above), the system may have given us something other then 4K (LDR_CNTRL).
1951   const size_t real_pagesize = os::Aix::query_pagesize(addr);
1952   if (real_pagesize != shmbuf.shm_pagesize) {
1953     trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
1954   }
1955 
1956   if (addr) {
1957     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
1958       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
1959   } else {
1960     if (requested_addr != NULL) {
1961       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
1962     } else {
1963       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
1964     }
1965   }
1966 
1967   // book-keeping
1968   vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
1969   assert0(is_aligned_to(addr, os::vm_page_size()));
1970 
1971   return addr;
1972 }
1973 
1974 static bool release_shmated_memory(char* addr, size_t size) {
1975 
1976   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1977     addr, addr + size - 1);
1978 
1979   bool rc = false;
1980 
1981   // TODO: is there a way to verify shm size without doing bookkeeping?
1982   if (::shmdt(addr) != 0) {
1983     trcVerbose("error (%d).", errno);
1984   } else {
1985     trcVerbose("ok.");
1986     rc = true;
1987   }
1988   return rc;
1989 }
1990 
1991 static bool uncommit_shmated_memory(char* addr, size_t size) {
1992   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1993     addr, addr + size - 1);
1994 
1995   const bool rc = my_disclaim64(addr, size);
1996 
1997   if (!rc) {
1998     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
1999     return false;
2000   }
2001   return true;
2002 }
2003 
2004 ////////////////////////////////  mmap-based routines /////////////////////////////////
2005 
2006 // Reserve memory via mmap.
2007 // If <requested_addr> is given, an attempt is made to attach at the given address.
2008 // Failing that, memory is allocated at any address.
2009 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2010 // allocate at an address aligned with the given alignment. Failing that, memory
2011 // is aligned anywhere.
2012 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2013   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2014     "alignment_hint " UINTX_FORMAT "...",
2015     bytes, requested_addr, alignment_hint);
2016 
2017   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2018   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
2019     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2020     return NULL;
2021   }
2022 
2023   // We must prevent anyone from attaching too close to the
2024   // BRK because that may cause malloc OOM.
2025   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2026     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2027       "Will attach anywhere.", requested_addr);
2028     // Act like the OS refused to attach there.
2029     requested_addr = NULL;
2030   }
2031 
2032   // Specify one or the other but not both.
2033   assert0(!(requested_addr != NULL && alignment_hint > 0));
2034 
2035   // In 64K mode, we claim the global page size (os::vm_page_size())
2036   // is 64K. This is one of the few points where that illusion may
2037   // break, because mmap() will always return memory aligned to 4K. So
2038   // we must ensure we only ever return memory aligned to 64k.
2039   if (alignment_hint) {
2040     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2041   } else {
2042     alignment_hint = os::vm_page_size();
2043   }
2044 
2045   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2046   const size_t size = align_up(bytes, os::vm_page_size());
2047 
2048   // alignment: Allocate memory large enough to include an aligned range of the right size and
2049   // cut off the leading and trailing waste pages.
2050   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2051   const size_t extra_size = size + alignment_hint;
2052 
2053   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2054   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2055   int flags = MAP_ANONYMOUS | MAP_SHARED;
2056 
2057   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2058   // it means if wishaddress is given but MAP_FIXED is not set.
2059   //
2060   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2061   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2062   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2063   // get clobbered.
2064   if (requested_addr != NULL) {
2065     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2066       flags |= MAP_FIXED;
2067     }
2068   }
2069 
2070   char* addr = (char*)::mmap(requested_addr, extra_size,
2071       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2072 
2073   if (addr == MAP_FAILED) {
2074     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2075     return NULL;
2076   }
2077 
2078   // Handle alignment.
2079   char* const addr_aligned = align_up(addr, alignment_hint);
2080   const size_t waste_pre = addr_aligned - addr;
2081   char* const addr_aligned_end = addr_aligned + size;
2082   const size_t waste_post = extra_size - waste_pre - size;
2083   if (waste_pre > 0) {
2084     ::munmap(addr, waste_pre);
2085   }
2086   if (waste_post > 0) {
2087     ::munmap(addr_aligned_end, waste_post);
2088   }
2089   addr = addr_aligned;
2090 
2091   if (addr) {
2092     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2093       addr, addr + bytes, bytes);
2094   } else {
2095     if (requested_addr != NULL) {
2096       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2097     } else {
2098       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2099     }
2100   }
2101 
2102   // bookkeeping
2103   vmembk_add(addr, size, 4*K, VMEM_MAPPED);
2104 
2105   // Test alignment, see above.
2106   assert0(is_aligned_to(addr, os::vm_page_size()));
2107 
2108   return addr;
2109 }
2110 
2111 static bool release_mmaped_memory(char* addr, size_t size) {
2112   assert0(is_aligned_to(addr, os::vm_page_size()));
2113   assert0(is_aligned_to(size, os::vm_page_size()));
2114 
2115   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2116     addr, addr + size - 1);
2117   bool rc = false;
2118 
2119   if (::munmap(addr, size) != 0) {
2120     trcVerbose("failed (%d)\n", errno);
2121     rc = false;
2122   } else {
2123     trcVerbose("ok.");
2124     rc = true;
2125   }
2126 
2127   return rc;
2128 }
2129 
2130 static bool uncommit_mmaped_memory(char* addr, size_t size) {
2131 
2132   assert0(is_aligned_to(addr, os::vm_page_size()));
2133   assert0(is_aligned_to(size, os::vm_page_size()));
2134 
2135   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2136     addr, addr + size - 1);
2137   bool rc = false;
2138 
2139   // Uncommit mmap memory with msync MS_INVALIDATE.
2140   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2141     trcVerbose("failed (%d)\n", errno);
2142     rc = false;
2143   } else {
2144     trcVerbose("ok.");
2145     rc = true;
2146   }
2147 
2148   return rc;
2149 }
2150 
// Returns the global VM page size determined during os::init().
int os::vm_page_size() {
  // Seems redundant as all get out.
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}
2156 
// Aix allocates memory by pages, so the allocation granularity equals the
// page size.
int os::vm_allocation_granularity() {
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}
2162 
#ifdef PRODUCT
// Emits a warning describing a failed commit_memory call. Only compiled
// into product builds; callers invoke it via PRODUCT_ONLY(...).
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
          os::errno_name(err), err);
}
#endif
2171 
// Commits the given range or terminates the VM with an out-of-memory error
// carrying <mesg> if the commit fails.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    // Add extra info in product mode for vm_exit_out_of_memory():
    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}
2181 
2182 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2183 
2184   assert(is_aligned_to(addr, os::vm_page_size()),
2185     "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2186     p2i(addr), os::vm_page_size());
2187   assert(is_aligned_to(size, os::vm_page_size()),
2188     "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2189     size, os::vm_page_size());
2190 
2191   vmembk_t* const vmi = vmembk_find(addr);
2192   guarantee0(vmi);
2193   vmi->assert_is_valid_subrange(addr, size);
2194 
2195   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2196 
2197   if (UseExplicitCommit) {
2198     // AIX commits memory on touch. So, touch all pages to be committed.
2199     for (char* p = addr; p < (addr + size); p += 4*K) {
2200       *p = '\0';
2201     }
2202   }
2203 
2204   return true;
2205 }
2206 
// Alignment hint is ignored on AIX; delegates to the plain overload.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
  return pd_commit_memory(addr, size, exec);
}
2210 
// Alignment-hinted variant; the hint is ignored on AIX.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // Alignment_hint is ignored on this OS.
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
2217 
2218 bool os::pd_uncommit_memory(char* addr, size_t size) {
2219   assert(is_aligned_to(addr, os::vm_page_size()),
2220     "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2221     p2i(addr), os::vm_page_size());
2222   assert(is_aligned_to(size, os::vm_page_size()),
2223     "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2224     size, os::vm_page_size());
2225 
2226   // Dynamically do different things for mmap/shmat.
2227   const vmembk_t* const vmi = vmembk_find(addr);
2228   guarantee0(vmi);
2229   vmi->assert_is_valid_subrange(addr, size);
2230 
2231   if (vmi->type == VMEM_SHMATED) {
2232     return uncommit_shmated_memory(addr, size);
2233   } else {
2234     return uncommit_mmaped_memory(addr, size);
2235   }
2236 }
2237 
// Not used on AIX; stack pages need no explicit commit.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
2243 
// Not used on AIX; see pd_create_stack_guard_pages.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
2249 
// No-op on AIX.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2252 
// No-op on AIX.
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2255 
// NUMA is not supported by this port; no-op.
void os::numa_make_global(char *addr, size_t bytes) {
}
2258 
// NUMA is not supported by this port; no-op.
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}
2261 
// NUMA is not supported by this port; the topology never changes.
bool os::numa_topology_changed() {
  return false;
}
2265 
// NUMA is not supported by this port; report a single locality group.
size_t os::numa_get_groups_num() {
  return 1;
}
2269 
// NUMA is not supported by this port; all threads belong to group 0.
int os::numa_get_group_id() {
  return 0;
}
2273 
2274 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2275   if (size > 0) {
2276     ids[0] = 0;
2277     return 1;
2278   }
2279   return 0;
2280 }
2281 
// Page info queries are not supported on AIX; always reports failure.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}
2285 
// Page scanning is not supported on AIX; behaves as if the whole range
// matched by returning <end>.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
2289 
2290 // Reserves and attaches a shared memory segment.
2291 // Will assert if a wish address is given and could not be obtained.
2292 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2293 
2294   // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2295   // thereby clobbering old mappings at that place. That is probably
2296   // not intended, never used and almost certainly an error were it
2297   // ever be used this way (to try attaching at a specified address
2298   // without clobbering old mappings an alternate API exists,
2299   // os::attempt_reserve_memory_at()).
2300   // Instead of mimicking the dangerous coding of the other platforms, here I
2301   // just ignore the request address (release) or assert(debug).
2302   assert0(requested_addr == NULL);
2303 
2304   // Always round to os::vm_page_size(), which may be larger than 4K.
2305   bytes = align_up(bytes, os::vm_page_size());
2306   const size_t alignment_hint0 =
2307     alignment_hint ? align_up(alignment_hint, os::vm_page_size()) : 0;
2308 
2309   // In 4K mode always use mmap.
2310   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2311   if (os::vm_page_size() == 4*K) {
2312     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2313   } else {
2314     if (bytes >= Use64KPagesThreshold) {
2315       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2316     } else {
2317       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2318     }
2319   }
2320 }
2321 
// Releases (or, for partial shmat ranges, merely uncommits) a previously
// reserved range and updates the bookkeeping list accordingly.
bool os::pd_release_memory(char* addr, size_t size) {

  // Dynamically do different things for mmap/shmat.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  // NOTE(review): addr is rounded *up* here, which would skip leading bytes
  // if a caller ever passed an unaligned address - callers are presumably
  // expected to pass page-aligned addresses. TODO confirm.
  size = align_up(size, os::vm_page_size());
  addr = align_up(addr, os::vm_page_size());

  bool rc = false;
  bool remove_bookkeeping = false;
  if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use memory anymore (but it still
    //   occupies page table space).
    vmi->assert_is_valid_subrange(addr, size);
    if (addr == vmi->addr && size == vmi->size) {
      rc = release_shmated_memory(addr, size);
      remove_bookkeeping = true;
    } else {
      rc = uncommit_shmated_memory(addr, size);
    }
  } else {
    // User may unmap partial regions but region has to be fully contained.
#ifdef ASSERT
    vmi->assert_is_valid_subrange(addr, size);
#endif
    rc = release_mmaped_memory(addr, size);
    remove_bookkeeping = true;
  }

  // update bookkeeping
  if (rc && remove_bookkeeping) {
    vmembk_remove(vmi);
  }

  return rc;
}
2363 
2364 static bool checked_mprotect(char* addr, size_t size, int prot) {
2365 
2366   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2367   // not tell me if protection failed when trying to protect an un-protectable range.
2368   //
2369   // This means if the memory was allocated using shmget/shmat, protection wont work
2370   // but mprotect will still return 0:
2371   //
2372   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2373 
2374   bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2375 
2376   if (!rc) {
2377     const char* const s_errno = os::errno_name(errno);
2378     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2379     return false;
2380   }
2381 
2382   // mprotect success check
2383   //
2384   // Mprotect said it changed the protection but can I believe it?
2385   //
2386   // To be sure I need to check the protection afterwards. Try to
2387   // read from protected memory and check whether that causes a segfault.
2388   //
2389   if (!os::Aix::xpg_sus_mode()) {
2390 
2391     if (CanUseSafeFetch32()) {
2392 
2393       const bool read_protected =
2394         (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2395          SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2396 
2397       if (prot & PROT_READ) {
2398         rc = !read_protected;
2399       } else {
2400         rc = read_protected;
2401       }
2402 
2403       if (!rc) {
2404         if (os::Aix::on_pase()) {
2405           // There is an issue on older PASE systems where mprotect() will return success but the
2406           // memory will not be protected.
2407           // This has nothing to do with the problem of using mproect() on SPEC1170 incompatible
2408           // machines; we only see it rarely, when using mprotect() to protect the guard page of
2409           // a stack. It is an OS error.
2410           //
2411           // A valid strategy is just to try again. This usually works. :-/
2412 
2413           ::usleep(1000);
2414           if (::mprotect(addr, size, prot) == 0) {
2415             const bool read_protected_2 =
2416               (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2417               SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2418             rc = true;
2419           }
2420         }
2421       }
2422     }
2423   }
2424 
2425   assert(rc == true, "mprotect failed.");
2426 
2427   return rc;
2428 }
2429 
2430 // Set protections specified
2431 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2432   unsigned int p = 0;
2433   switch (prot) {
2434   case MEM_PROT_NONE: p = PROT_NONE; break;
2435   case MEM_PROT_READ: p = PROT_READ; break;
2436   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2437   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2438   default:
2439     ShouldNotReachHere();
2440   }
2441   // is_committed is unused.
2442   return checked_mprotect(addr, size, p);
2443 }
2444 
// Makes the range inaccessible (guard pages).
bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}
2448 
// Restores full (rwx) access to a previously guarded range.
bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}
2452 
// Large page support

// Never set on AIX (large pages are handled via the normal 64K paths),
// so os::large_page_size() reports 0.
static size_t _large_page_size = 0;
2456 
// Enable large page support if OS allows that.
void os::large_page_init() {
  return; // Nothing to do. See query_multipage_support and friends.
}
2461 
// Not used on AIX; see comment in the body.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
  // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
  // so this is not needed.
  assert(false, "should not be called on AIX");
  return NULL;
}
2469 
// Not used on AIX; see reserve_memory_special().
bool os::release_memory_special(char* base, size_t bytes) {
  // Detaching the SHM segment will also delete it, see reserve_memory_special().
  Unimplemented();
  return false;
}
2475 
// Returns the large page size; always 0 on AIX since _large_page_size is
// never set (huge pages are not supported here).
size_t os::large_page_size() {
  return _large_page_size;
}
2479 
// Whether large pages can be committed incrementally; irrelevant on AIX.
bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2484 
// Whether code can be executed from large pages; irrelevant on AIX.
bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2489 
2490 // Reserve memory at an arbitrary address, only if that area is
2491 // available (and not reserved for something else).
2492 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2493   char* addr = NULL;
2494 
2495   // Always round to os::vm_page_size(), which may be larger than 4K.
2496   bytes = align_up(bytes, os::vm_page_size());
2497 
2498   // In 4K mode always use mmap.
2499   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2500   if (os::vm_page_size() == 4*K) {
2501     return reserve_mmaped_memory(bytes, requested_addr, 0);
2502   } else {
2503     if (bytes >= Use64KPagesThreshold) {
2504       return reserve_shmated_memory(bytes, requested_addr, 0);
2505     } else {
2506       return reserve_mmaped_memory(bytes, requested_addr, 0);
2507     }
2508   }
2509 
2510   return addr;
2511 }
2512 
// Thin wrapper around read(2).
// NOTE(review): ::read returns ssize_t; an error return of -1 wraps to a
// very large size_t here — callers presumably compare against the expected
// byte count. Confirm before relying on the return value for error checks.
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  return ::read(fd, buf, nBytes);
}
2516 
// Positioned read via pread(2); does not modify the file offset of fd.
// Same ssize_t-to-size_t wrapping caveat as os::read() above applies here.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  return ::pread(fd, buf, nBytes, offset);
}
2520 
2521 void os::naked_short_sleep(jlong ms) {
2522   struct timespec req;
2523 
2524   assert(ms < 1000, "Un-interruptable sleep, short time use only");
2525   req.tv_sec = 0;
2526   if (ms > 0) {
2527     req.tv_nsec = (ms % 1000) * 1000000;
2528   }
2529   else {
2530     req.tv_nsec = 1;
2531   }
2532 
2533   nanosleep(&req, NULL);
2534 
2535   return;
2536 }
2537 
// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    ::sleep(100);   // ... 100 seconds at a time
  }
}
2544 
// Used to convert frequent JVM_Yield() to nops
// Controlled by the DontYieldALot flag.
bool os::dont_yield() {
  return DontYieldALot;
}
2549 
// Yield the CPU to another runnable thread via sched_yield(2), with no
// JVM state transitions.
void os::naked_yield() {
  sched_yield();
}
2553 
2554 ////////////////////////////////////////////////////////////////////////////////
2555 // thread priority support
2556 
2557 // From AIX manpage to pthread_setschedparam
2558 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2559 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2560 //
2561 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2562 // range from 40 to 80, where 40 is the least favored priority and 80
2563 // is the most favored."
2564 //
2565 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2566 // scheduling there; however, this still leaves iSeries.)
2567 //
2568 // We use the same values for AIX and PASE.
// Maps each Java thread priority (index) to the AIX/PASE SCHED_OTHER
// priority used with pthread_setschedparam (valid range 40..80, larger
// is more favored — see the manpage excerpt above).
int os::java_to_os_priority[CriticalPriority + 1] = {
  54,             // 0 Entry should never be used

  55,             // 1 MinPriority
  55,             // 2
  56,             // 3

  56,             // 4
  57,             // 5 NormPriority
  57,             // 6

  58,             // 7
  58,             // 8
  59,             // 9 NearMaxPriority

  60,             // 10 MaxPriority

  60              // 11 CriticalPriority
};
2588 
2589 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2590   if (!UseThreadPriorities) return OS_OK;
2591   pthread_t thr = thread->osthread()->pthread_id();
2592   int policy = SCHED_OTHER;
2593   struct sched_param param;
2594   param.sched_priority = newpri;
2595   int ret = pthread_setschedparam(thr, policy, &param);
2596 
2597   if (ret != 0) {
2598     trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2599         (int)thr, newpri, ret, os::errno_name(ret));
2600   }
2601   return (ret == 0) ? OS_OK : OS_ERR;
2602 }
2603 
2604 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2605   if (!UseThreadPriorities) {
2606     *priority_ptr = java_to_os_priority[NormPriority];
2607     return OS_OK;
2608   }
2609   pthread_t thr = thread->osthread()->pthread_id();
2610   int policy = SCHED_OTHER;
2611   struct sched_param param;
2612   int ret = pthread_getschedparam(thr, &policy, &param);
2613   *priority_ptr = param.sched_priority;
2614 
2615   return (ret == 0) ? OS_OK : OS_ERR;
2616 }
2617 
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
// Intentionally empty on AIX: no such hint is issued.
void os::hint_no_preempt() {}
2621 
2622 ////////////////////////////////////////////////////////////////////////////////
2623 // suspend/resume support
2624 
2625 //  The low-level signal-based suspend/resume support is a remnant from the
2626 //  old VM-suspension that used to be for java-suspension, safepoints etc,
2627 //  within hotspot. Currently used by JFR's OSThreadSampler
2628 //
2629 //  The remaining code is greatly simplified from the more general suspension
2630 //  code that used to be used.
2631 //
2632 //  The protocol is quite simple:
2633 //  - suspend:
2634 //      - sends a signal to the target thread
2635 //      - polls the suspend state of the osthread using a yield loop
2636 //      - target thread signal handler (SR_handler) sets suspend state
2637 //        and blocks in sigsuspend until continued
2638 //  - resume:
2639 //      - sets target osthread state to continue
2640 //      - sends signal to end the sigsuspend loop in the SR_handler
2641 //
2642 //  Note that the SR_lock plays no role in this suspend/resume protocol,
2643 //  but is checked for NULL in SR_handler as a thread termination indicator.
2644 //  The SR_lock is, however, used by JavaThread::java_suspend()/java_resume() APIs.
2645 //
2646 //  Note that resume_clear_context() and suspend_save_context() are needed
2647 //  by SR_handler(), so that fetch_frame_from_ucontext() works,
2648 //  which in part is used by:
2649 //    - Forte Analyzer: AsyncGetCallTrace()
2650 //    - StackBanging: get_frame_at_stack_banging_point()
2651 
2652 static void resume_clear_context(OSThread *osthread) {
2653   osthread->set_ucontext(NULL);
2654   osthread->set_siginfo(NULL);
2655 }
2656 
2657 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2658   osthread->set_ucontext(context);
2659   osthread->set_siginfo(siginfo);
2660 }
2661 
2662 //
2663 // Handler function invoked when a thread's execution is suspended or
2664 // resumed. We have to be careful that only async-safe functions are
2665 // called here (Note: most pthread functions are not async safe and
2666 // should be avoided.)
2667 //
// Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal handler
// from being run. libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal.
2673 //
2674 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
2675 //
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current_or_null_safe();
  assert(thread != NULL, "Missing current thread in SR_handler");

  // On some systems we have seen signal delivery get "stuck" until the signal
  // mask is changed as part of thread termination. Check that the current thread
  // has not already terminated (via SR_lock()) - else the following assertion
  // will fail because the thread is no longer a JavaThread as the ~JavaThread
  // destructor has completed.

  if (thread->SR_lock() == NULL) {
    return;
  }

  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  OSThread* osthread = thread->osthread();

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    // A suspend was requested: save the context for frame inspection, then
    // park in sigsuspend() until the resume signal arrives.
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        // Loop guards against spurious wakeups: only leave once the state
        // machine has actually transitioned back to running.
        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    ShouldNotReachHere();
  }

  errno = old_errno;
}
2738 
// One-time setup of the suspend/resume signal machinery: picks SR_signum
// (overridable via _JAVA_SR_SIGNUM), installs SR_handler for it, and
// records the chosen sa_flags. Returns 0 on success, -1 if sigaction fails.
static int SR_initialize() {
  struct sigaction act;
  char *s;
  // Get signal number to use for suspend/resume
  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
    int sig = ::strtol(s, 0, 10);
    if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
        sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
      SR_signum = sig;
    } else {
      warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
              sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
    }
  }

  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");

  sigemptyset(&SR_sigset);
  sigaddset(&SR_sigset, SR_signum);

  // Set up signal handler for suspend/resume.
  act.sa_flags = SA_RESTART|SA_SIGINFO;
  // NOTE(review): SA_SIGINFO is set but the handler is assigned through
  // sa_handler via a cast rather than sa_sigaction; this presumably relies
  // on the two members sharing storage on AIX — confirm.
  act.sa_handler = (void (*)(int)) SR_handler;

  // SR_signum is blocked by default.
  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);

  if (sigaction(SR_signum, &act, 0) == -1) {
    return -1;
  }

  // Save signal flag
  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
  return 0;
}
2775 
// Counterpart to SR_initialize(); nothing to tear down. Always returns 0.
static int SR_finalize() {
  return 0;
}
2779 
// Deliver the suspend/resume signal to the target thread.
// Returns the pthread_kill status (0 on success).
static int sr_notify(OSThread* osthread) {
  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  assert_status(status == 0, status, "pthread_kill");
  return status;
}
2785 
// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
// RANDOMLY_LARGE_INTEGER bounds the outer retry loops in do_suspend/do_resume;
// RANDOMLY_LARGE_INTEGER2 bounds the inner yield bursts between state checks.
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;
2791 
// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    // Yield in bursts of RANDOMLY_LARGE_INTEGER2 between suspended-state checks.
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::naked_yield();
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        // Cancellation won the race; the thread never suspended.
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        // The thread suspended just as we tried to cancel.
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
2844 
// Resume a thread suspended via do_suspend(): switch the state machine to
// WAKEUP_REQUEST, then signal and spin-yield until the target's SR_handler
// reports the thread running again.
static void do_resume(OSThread* osthread) {
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (!osthread->sr.is_running()) {
    // Re-send the signal each outer iteration in case a previous one was lost.
    if (sr_notify(osthread) == 0) {
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::naked_yield();
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
2868 
2869 ///////////////////////////////////////////////////////////////////////////////////
2870 // signal handling (except suspend/resume)
2871 
2872 // This routine may be used by user applications as a "hook" to catch signals.
2873 // The user-defined signal handler must pass unrecognized signals to this
2874 // routine, and if it returns true (non-zero), then the signal handler must
2875 // return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
2878 //
2879 // If this routine returns false, it is OK to call it again. This allows
2880 // the user-defined signal handler to perform checks either before or after
2881 // the VM performs its own checks. Naturally, the user code would be making
2882 // a serious error if it tried to handle an exception (such as a null check
2883 // or breakpoint) that the VM was generating for its own correct operation.
2884 //
2885 // This routine may recognize any of the following kinds of signals:
2886 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2887 // It should be consulted by handlers for any of those signals.
2888 //
2889 // The caller of this routine must pass in the three arguments supplied
2890 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
2891 // field of the structure passed to sigaction(). This routine assumes that
2892 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2893 //
2894 // Note that the VM will print warnings if it detects conflicting signal
2895 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2896 //
2897 extern "C" JNIEXPORT int
2898 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2899 
// Set the calling thread's signal mask.
// (For some reason on AIX sigthreadmask() seems to be the documented call;
// the documentation is not terribly clear about whether pthread_sigmask
// also works, and if it does, whether it does the same.)
//
// how:  SIG_BLOCK, SIG_UNBLOCK or SIG_SETMASK.
// set:  signals to apply (may be NULL to only query).
// oset: receives the previous mask (may be NULL).
// Returns true on success, false on error.
bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  // Error reporting differs between the two candidates: pthread_sigmask
  // returns the error number directly, while sigthreadmask returns -1 and
  // sets the global errno — so pthread_sigmask is the more thread-safe
  // choice. Success is 0 for both.
  const int rc = ::pthread_sigmask(how, set, oset);
  return rc == 0;
}
2911 
2912 // Function to unblock all signals which are, according
2913 // to POSIX, typical program error signals. If they happen while being blocked,
2914 // they typically will bring down the process immediately.
2915 bool unblock_program_error_signals() {
2916   sigset_t set;
2917   ::sigemptyset(&set);
2918   ::sigaddset(&set, SIGILL);
2919   ::sigaddset(&set, SIGBUS);
2920   ::sigaddset(&set, SIGFPE);
2921   ::sigaddset(&set, SIGSEGV);
2922   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2923 }
2924 
// Renamed from 'signalHandler' to avoid collision with other shared libs.
// The VM's central signal handler: unblocks program-error signals, then
// forwards the signal to JVM_handle_aix_signal with errno preserved.
void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
  assert(info != NULL && uc != NULL, "it must be old kernel");

  // Never leave program error signals blocked;
  // on all our platforms they would bring down the process immediately when
  // getting raised while being blocked.
  unblock_program_error_signals();

  int orig_errno = errno;  // Preserve errno value over signal handler.
  JVM_handle_aix_signal(sig, info, uc, true);
  errno = orig_errno;
}
2938 
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining
// Pre-installed handlers saved by set_signal_handler(), indexed by signal
// number; 'sigs' records which entries of 'sigact' are valid.
struct sigaction sigact[NSIG];
sigset_t sigs;
// Set when libjsig exports its signal-setting hooks (see install_signal_handlers).
bool os::Aix::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Aix::get_signal_action = NULL;
2949 
2950 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2951   struct sigaction *actp = NULL;
2952 
2953   if (libjsig_is_loaded) {
2954     // Retrieve the old signal handler from libjsig
2955     actp = (*get_signal_action)(sig);
2956   }
2957   if (actp == NULL) {
2958     // Retrieve the preinstalled signal handler from jvm
2959     actp = get_preinstalled_handler(sig);
2960   }
2961 
2962   return actp;
2963 }
2964 
// Invoke a previously installed (chained) handler 'actp' for signal 'sig',
// honoring its SA_SIGINFO/SA_NODEFER/SA_RESETHAND flags and signal mask.
// Returns true if the signal was handled by the chained handler, false if
// the VM should treat it as an unexpected exception (SIG_DFL case).
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automaticlly block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    // Emulate one-shot semantics: the handler deregisters itself after use.
    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  // (SIG_IGN also lands here: ignoring counts as handled.)
  return true;
}
3009 
3010 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3011   bool chained = false;
3012   // signal-chaining
3013   if (UseSignalChaining) {
3014     struct sigaction *actp = get_chained_signal_action(sig);
3015     if (actp != NULL) {
3016       chained = call_chained_handler(actp, sig, siginfo, context);
3017     }
3018   }
3019   return chained;
3020 }
3021 
3022 size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
3023   // Creating guard page is very expensive. Java thread has HotSpot
3024   // guard pages, only enable glibc guard page for non-Java threads.
3025   // (Remember: compiler thread is a Java thread, too!)
3026   //
3027   // Aix can have different page sizes for stack (4K) and heap (64K).
3028   // As Hotspot knows only one page size, we assume the stack has
3029   // the same page size as the heap. Returning page_size() here can
3030   // cause 16 guard pages which we want to avoid.  Thus we return 4K
3031   // which will be rounded to the real page size by the OS.
3032   return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : 4 * K);
3033 }
3034 
3035 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3036   if (sigismember(&sigs, sig)) {
3037     return &sigact[sig];
3038   }
3039   return NULL;
3040 }
3041 
// Remember a pre-existing handler for 'sig' so it can be chained to later;
// marks the sigact[] slot valid by adding 'sig' to the 'sigs' set.
void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigact[sig] = oldAct;
  sigaddset(&sigs, sig);
}
3047 
// for diagnostic
// sa_flags the VM installed for each signal, used by run_periodic_checks
// and crash reporting to detect handlers changed behind the VM's back.
int sigflags[NSIG];
3050 
// Return the sa_flags the VM recorded for 'sig' (0 if never set).
int os::Aix::get_our_sigflags(int sig) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  return sigflags[sig];
}
3055 
// Record the sa_flags the VM installed for 'sig'.
// The range is asserted in debug builds and additionally guarded at
// runtime so an out-of-range signal cannot corrupt memory in product builds.
void os::Aix::set_our_sigflags(int sig, int flags) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  if (sig > 0 && sig < NSIG) {
    sigflags[sig] = flags;
  }
}
3062 
// Install (set_installed == true) or reset to SIG_DFL (false) the VM's
// handler for 'sig'. A pre-existing foreign handler is either left alone
// (AllowUserSignalHandlers), saved for chaining (UseSignalChaining), or
// treated as fatal.
void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal("Encountered unexpected pre-existing sigaction handler "
            "%#lx for signal %d.", (long)oldhand, sig);
    }
  }

  struct sigaction sigAct;
  // Block all signals while our handler runs.
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Re-read the old handler to detect a race with concurrent installation.
  void* oldhand2 = oldAct.sa_sigaction
                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
3109 
// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
// Idempotent: subsequent calls are no-ops. Also wires up libjsig's
// signal-setting hooks when that library is preloaded.
void os::Aix::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers.
      (*begin_signal_setting)();
    }

    ::sigemptyset(&sigs);
    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers.
      // NOTE(review): end_signal_setting is resolved separately from
      // begin_signal_setting and is not NULL-checked here — presumably a
      // libjsig that exports one always exports both; confirm.
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // Need to initialize check_signal_done.
      ::sigemptyset(&check_signal_done);
    }
  }
}
3166 
3167 static const char* get_signal_handler_name(address handler,
3168                                            char* buf, int buflen) {
3169   int offset;
3170   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3171   if (found) {
3172     // skip directory names
3173     const char *p1, *p2;
3174     p1 = buf;
3175     size_t len = strlen(os::file_separator());
3176     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3177     // The way os::dll_address_to_library_name is implemented on Aix
3178     // right now, it always returns -1 for the offset which is not
3179     // terribly informative.
3180     // Will fix that. For now, omit the offset.
3181     jio_snprintf(buf, buflen, "%s", p1);
3182   } else {
3183     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3184   }
3185   return buf;
3186 }
3187 
// Print a one-line description of the currently installed handler for
// 'sig' to 'st': handler name, sa_mask, sa_flags, plus a warning if the
// VM's own handler flags have been tampered with.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  // If VMError reset the handler during error reporting, show the original.
  address rh = VMError::get_resetted_sighandler(sig);
  // May be, handler was resetted by VMError?
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  // Print textual representation of sa_flags.
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Check for flags, reset system-used one!
    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
    }
  }
  st->cr();
}
3234 
// Check the handler for 'sig' once; after a mismatch is reported the signal
// is added to check_signal_done and skipped on subsequent periodic runs.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Aix::check_signal_handler(sig)
3238 
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here
// No-op unless check_signals is enabled (see install_signal_handlers).

void os::run_periodic_checks() {

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);
  if (UseSIGTRAP) {
    DO_SIGNAL_CHECK(SIGTRAP);
  }

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  DO_SIGNAL_CHECK(SR_signum);
}
3271 
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// Lazily resolved pointer to the system's real sigaction(), looked up via
// dlsym to bypass any interposition (e.g. by libjsig).
static os_sigaction_t os_sigaction = NULL;
3275 
// Verify that the handler currently installed for 'sig' is still the one
// the VM expects; warn (and stop re-checking that signal) if a JNI library
// or shell replaced it or changed its flags.
void os::Aix::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler);

  // Determine which handler the VM expects for this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      // Not a signal the VM monitors.
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
    // Same handler, but the flags were modified behind the VM's back.
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:");
    os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
    tty->cr();
    tty->print("  found:");
    os::Posix::print_sa_flags(tty, act.sa_flags);
    tty->cr();
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}
3347 
// To install functions for atexit system call
extern "C" {
  // atexit() requires a function with C linkage, hence this thin
  // extern "C" wrapper around perfMemory_exit().
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3354 
// This is called _before_ the most of global arguments have been parsed.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (Shared memory boundary is supposed to be a 256M aligned.)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // Record process break at startup.
  g_brk_at_startup = (address) ::sbrk(0);
  assert(g_brk_at_startup != (address) -1, "sbrk failed");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc).
  os::Aix::scan_environment();

  // Probe multipage support.
  query_multipage_support();

  // Act like we only have one page size by eliminating corner cases which
  // we did not support very well anyway.
  // We have two input conditions:
  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
  //    setting.
  //    Data segment page size is important for us because it defines the thread stack page
  //    size, which is needed for guard page handling, stack banging etc.
  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
  //    and should be allocated with 64k pages.
  //
  // So, we do the following:
  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
  // 64k          no              --- AIX 5.2 ? ---
  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)

  // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if stack page size is 64k you cannot pretend its 4k).

  if (g_multipage_support.datapsize == 4*K) {
    // datapsize = 4K. Data segment, thread stacks are 4K paged.
    if (g_multipage_support.can_use_64K_pages) {
      // .. but we are able to use 64K pages dynamically.
      // This would be typical for java launchers which are not linked
      // with datapsize=64K (like, any other launcher but our own).
      //
      // In this case it would be smart to allocate the java heap with 64K
      // to get the performance benefit, and to fake 64k pages for the
      // data segment (when dealing with thread stacks).
      //
      // However, leave a possibility to downgrade to 4K, using
      // -XX:-Use64KPages.
      if (Use64KPages) {
        trcVerbose("64K page mode (faked for data segment)");
        Aix::_page_size = 64*K;
      } else {
        trcVerbose("4K page mode (Use64KPages=off)");
        Aix::_page_size = 4*K;
      }
    } else {
      // .. and not able to allocate 64k pages dynamically. Here, just
      // fall back to 4K paged mode and use mmap for everything.
      trcVerbose("4K page mode");
      Aix::_page_size = 4*K;
      FLAG_SET_ERGO(bool, Use64KPages, false);
    }
  } else {
    // datapsize = 64k. Data segment, thread stacks are 64k paged.
    // This normally means that we can allocate 64k pages dynamically.
    // (There is one special case where this may be false: EXTSHM=on.
    // but we decided to not support that mode).
    assert0(g_multipage_support.can_use_64K_pages);
    Aix::_page_size = 64*K;
    trcVerbose("64K page mode");
    FLAG_SET_ERGO(bool, Use64KPages, true);
  }

  // For now UseLargePages is just ignored.
  FLAG_SET_ERGO(bool, UseLargePages, false);
  _page_sizes[0] = 0;

  // debug trace
  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));

  // Next, we need to initialize libo4 and libperfstat libraries.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialze basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  // Clock ticks per second, needed for CPU time conversions.
  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  init_random(1234567);

  // Main_thread points to the aboriginal thread.
  Aix::_main_thread = pthread_self();

  initial_time_count = os::elapsed_counter();

  os::Posix::init();
}
3468 
3469 // This is called _after_ the global arguments have been parsed.
3470 jint os::init_2(void) {
3471 
3472   os::Posix::init_2();
3473 
3474   if (os::Aix::on_pase()) {
3475     trcVerbose("Running on PASE.");
3476   } else {
3477     trcVerbose("Running on AIX (not PASE).");
3478   }
3479 
3480   trcVerbose("processor count: %d", os::_processor_count);
3481   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3482 
3483   // Initially build up the loaded dll map.
3484   LoadedLibraries::reload();
3485   if (Verbose) {
3486     trcVerbose("Loaded Libraries: ");
3487     LoadedLibraries::print(tty);
3488   }
3489 
3490   const int page_size = Aix::page_size();
3491   const int map_size = page_size;
3492 
3493   address map_address = (address) MAP_FAILED;
3494   const int prot  = PROT_READ;
3495   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3496 
3497   // Use optimized addresses for the polling page,
3498   // e.g. map it to a special 32-bit address.
3499   if (OptimizePollingPageLocation) {
3500     // architecture-specific list of address wishes:
3501     address address_wishes[] = {
3502       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3503       // PPC64: all address wishes are non-negative 32 bit values where
3504       // the lower 16 bits are all zero. we can load these addresses
3505       // with a single ppc_lis instruction.
3506       (address) 0x30000000, (address) 0x31000000,
3507       (address) 0x32000000, (address) 0x33000000,
3508       (address) 0x40000000, (address) 0x41000000,
3509       (address) 0x42000000, (address) 0x43000000,
3510       (address) 0x50000000, (address) 0x51000000,
3511       (address) 0x52000000, (address) 0x53000000,
3512       (address) 0x60000000, (address) 0x61000000,
3513       (address) 0x62000000, (address) 0x63000000
3514     };
3515     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3516 
3517     // iterate over the list of address wishes:
3518     for (int i=0; i<address_wishes_length; i++) {
3519       // Try to map with current address wish.
3520       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3521       // fail if the address is already mapped.
3522       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3523                                      map_size, prot,
3524                                      flags | MAP_FIXED,
3525                                      -1, 0);
3526       trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",
3527                    address_wishes[i], map_address + (ssize_t)page_size);
3528 
3529       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3530         // Map succeeded and map_address is at wished address, exit loop.
3531         break;
3532       }
3533 
3534       if (map_address != (address) MAP_FAILED) {
3535         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3536         ::munmap(map_address, map_size);
3537         map_address = (address) MAP_FAILED;
3538       }
3539       // Map failed, continue loop.
3540     }
3541   } // end OptimizePollingPageLocation
3542 
3543   if (map_address == (address) MAP_FAILED) {
3544     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3545   }
3546   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3547   os::set_polling_page(map_address);
3548 
3549   if (!UseMembar) {
3550     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3551     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3552     os::set_memory_serialize_page(mem_serialize_page);
3553 
3554     trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
3555         mem_serialize_page, mem_serialize_page + Aix::page_size(),
3556         Aix::page_size(), Aix::page_size());
3557   }
3558 
3559   // initialize suspend/resume support - must do this before signal_sets_init()
3560   if (SR_initialize() != 0) {
3561     perror("SR_initialize failed");
3562     return JNI_ERR;
3563   }
3564 
3565   Aix::signal_sets_init();
3566   Aix::install_signal_handlers();
3567 
3568   // Check and sets minimum stack sizes against command line options
3569   if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
3570     return JNI_ERR;
3571   }
3572 
3573   if (UseNUMA) {
3574     UseNUMA = false;
3575     warning("NUMA optimizations are not available on this OS.");
3576   }
3577 
3578   if (MaxFDLimit) {
3579     // Set the number of file descriptors to max. print out error
3580     // if getrlimit/setrlimit fails but continue regardless.
3581     struct rlimit nbr_files;
3582     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3583     if (status != 0) {
3584       log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
3585     } else {
3586       nbr_files.rlim_cur = nbr_files.rlim_max;
3587       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3588       if (status != 0) {
3589         log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
3590       }
3591     }
3592   }
3593 
3594   if (PerfAllowAtExitRegistration) {
3595     // Only register atexit functions if PerfAllowAtExitRegistration is set.
3596     // At exit functions can be delayed until process exit time, which
3597     // can be problematic for embedded VM situations. Embedded VMs should
3598     // call DestroyJavaVM() to assure that VM resources are released.
3599 
3600     // Note: perfMemory_exit_helper atexit function may be removed in
3601     // the future if the appropriate cleanup code can be added to the
3602     // VM_Exit VMOperation's doit method.
3603     if (atexit(perfMemory_exit_helper) != 0) {
3604       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3605     }
3606   }
3607 
3608   return JNI_OK;
3609 }
3610 
3611 // Mark the polling page as unreadable
3612 void os::make_polling_page_unreadable(void) {
3613   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3614     fatal("Could not disable polling page");
3615   }
3616 };
3617 
3618 // Mark the polling page as readable
3619 void os::make_polling_page_readable(void) {
3620   // Changed according to os_linux.cpp.
3621   if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3622     fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
3623   }
3624 };
3625 
3626 int os::active_processor_count() {
3627   // User has overridden the number of active processors
3628   if (ActiveProcessorCount > 0) {
3629     log_trace(os)("active_processor_count: "
3630                   "active processor count set by user : %d",
3631                   (int)ActiveProcessorCount);
3632     return ActiveProcessorCount;
3633   }
3634 
3635   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3636   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3637   return online_cpus;
3638 }
3639 
3640 void os::set_native_thread_name(const char *name) {
3641   // Not yet implemented.
3642   return;
3643 }
3644 
3645 bool os::distribute_processes(uint length, uint* distribution) {
3646   // Not yet implemented.
3647   return false;
3648 }
3649 
3650 bool os::bind_to_processor(uint processor_id) {
3651   // Not yet implemented.
3652   return false;
3653 }
3654 
// Suspend the target thread, run the task against its captured context,
// then resume it. If suspension fails, the task is skipped entirely.
void os::SuspendedThreadTask::internal_do_task() {
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    // Resume only after the task has completed, while we still hold the
    // thread suspended.
    do_resume(_thread->osthread());
  }
}
3662 
3663 ////////////////////////////////////////////////////////////////////////////////
3664 // debug support
3665 
3666 bool os::find(address addr, outputStream* st) {
3667 
3668   st->print(PTR_FORMAT ": ", addr);
3669 
3670   loaded_module_t lm;
3671   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3672       LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3673     st->print_cr("%s", lm.path);
3674     return true;
3675   }
3676 
3677   return false;
3678 }
3679 
3680 ////////////////////////////////////////////////////////////////////////////////
3681 // misc
3682 
3683 // This does not do anything on Aix. This is basically a hook for being
3684 // able to use structured exception handling (thread-local exception filters)
3685 // on, e.g., Win32.
// Plain pass-through: invoke the Java call stub directly. No structured
// exception handling wrapper is needed on AIX (unlike Win32).
void
os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
                         JavaCallArguments* args, Thread* thread) {
  f(value, method, args, thread);
}
3691 
// Hook for platform-specific statistics output; intentionally empty on AIX.
void os::print_statistics() {
}
3694 
3695 bool os::message_box(const char* title, const char* message) {
3696   int i;
3697   fdStream err(defaultStream::error_fd());
3698   for (i = 0; i < 78; i++) err.print_raw("=");
3699   err.cr();
3700   err.print_raw_cr(title);
3701   for (i = 0; i < 78; i++) err.print_raw("-");
3702   err.cr();
3703   err.print_raw_cr(message);
3704   for (i = 0; i < 78; i++) err.print_raw("=");
3705   err.cr();
3706 
3707   char buf[16];
3708   // Prevent process from exiting upon "read error" without consuming all CPU
3709   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3710 
3711   return buf[0] == 'y' || buf[0] == 'Y';
3712 }
3713 
3714 int os::stat(const char *path, struct stat *sbuf) {
3715   char pathbuf[MAX_PATH];
3716   if (strlen(path) > MAX_PATH - 1) {
3717     errno = ENAMETOOLONG;
3718     return -1;
3719   }
3720   os::native_path(strcpy(pathbuf, path));
3721   return ::stat(pathbuf, sbuf);
3722 }
3723 
3724 // Is a (classpath) directory empty?
3725 bool os::dir_is_empty(const char* path) {
3726   DIR *dir = NULL;
3727   struct dirent *ptr;
3728 
3729   dir = opendir(path);
3730   if (dir == NULL) return true;
3731 
3732   /* Scan the directory */
3733   bool result = true;
3734   char buf[sizeof(struct dirent) + MAX_PATH];
3735   while (result && (ptr = ::readdir(dir)) != NULL) {
3736     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3737       result = false;
3738     }
3739   }
3740   closedir(dir);
3741   return result;
3742 }
3743 
3744 // This code originates from JDK's sysOpen and open64_w
3745 // from src/solaris/hpi/src/system_md.c
3746 
// open() wrapper: opens 'path' with open64(), rejects directories (EISDIR)
// and over-long paths (ENAMETOOLONG), and sets FD_CLOEXEC on the result.
// Returns the file descriptor or -1 with errno set.
int os::open(const char *path, int oflag, int mode) {

  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;

  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  // If the open succeeded, the file might still be a directory.
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        // Directories must not be opened through this API.
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      // fstat64 failed - treat as open failure.
      ::close(fd);
      return -1;
    }
  }

  // All file descriptors that are opened in the JVM and not
  // specifically destined for a subprocess should have the
  // close-on-exec flag set. If we don't set it, then careless 3rd
  // party native code might fork and exec without closing all
  // appropriate file descriptors (e.g. as we do in closeDescriptors in
  // UNIXProcess.c), and this in turn might:
  //
  // - cause end-of-file to fail to be detected on some file
  //   descriptors, resulting in mysterious hangs, or
  //
  // - might cause an fopen in the subprocess to fail on a system
  //   suffering from bug 1085341.
  //
  // (Yes, the default setting of the close-on-exec flag is a Unix
  // design flaw.)
  //
  // See:
  // 1085341: 32-bit stdio routines should support file descriptors >255
  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
#ifdef FD_CLOEXEC
  {
    // Best effort: a failing fcntl leaves the flag unset but the fd usable.
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1)
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif

  return fd;
}
3806 
3807 // create binary file, rewriting existing file if required
3808 int os::create_binary_file(const char* path, bool rewrite_existing) {
3809   int oflags = O_WRONLY | O_CREAT;
3810   if (!rewrite_existing) {
3811     oflags |= O_EXCL;
3812   }
3813   return ::open64(path, oflags, S_IREAD | S_IWRITE);
3814 }
3815 
3816 // return current position of file pointer
3817 jlong os::current_file_offset(int fd) {
3818   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3819 }
3820 
3821 // move file pointer to the specified offset
3822 jlong os::seek_to_file_offset(int fd, jlong offset) {
3823   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3824 }
3825 
3826 // This code originates from JDK's sysAvailable
3827 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3828 
// Report in '*bytes' how many bytes can be read from 'fd' without blocking.
// For character devices, FIFOs and sockets FIONREAD is used; for regular
// files the distance from the current position to EOF is computed via
// lseek64. Returns 1 on success, 0 on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      int n;
      if (::ioctl(fd, FIONREAD, &n) >= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  // Regular file (or FIONREAD unavailable): measure remaining bytes by
  // seeking to EOF and back, restoring the original position.
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}
3854 
3855 // Map a block of memory.
3856 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3857                         char *addr, size_t bytes, bool read_only,
3858                         bool allow_exec) {
3859   int prot;
3860   int flags = MAP_PRIVATE;
3861 
3862   if (read_only) {
3863     prot = PROT_READ;
3864     flags = MAP_SHARED;
3865   } else {
3866     prot = PROT_READ | PROT_WRITE;
3867     flags = MAP_PRIVATE;
3868   }
3869 
3870   if (allow_exec) {
3871     prot |= PROT_EXEC;
3872   }
3873 
3874   if (addr != NULL) {
3875     flags |= MAP_FIXED;
3876   }
3877 
3878   // Allow anonymous mappings if 'fd' is -1.
3879   if (fd == -1) {
3880     flags |= MAP_ANONYMOUS;
3881   }
3882 
3883   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
3884                                      fd, file_offset);
3885   if (mapped_address == MAP_FAILED) {
3886     return NULL;
3887   }
3888   return mapped_address;
3889 }
3890 
3891 // Remap a block of memory.
3892 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
3893                           char *addr, size_t bytes, bool read_only,
3894                           bool allow_exec) {
3895   // same as map_memory() on this OS
3896   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
3897                         allow_exec);
3898 }
3899 
3900 // Unmap a block of memory.
3901 bool os::pd_unmap_memory(char* addr, size_t bytes) {
3902   return munmap(addr, bytes) == 0;
3903 }
3904 
3905 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
3906 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
3907 // of a thread.
3908 //
3909 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
3910 // the fast estimate available on the platform.
3911 
3912 jlong os::current_thread_cpu_time() {
3913   // return user + sys since the cost is the same
3914   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
3915   assert(n >= 0, "negative CPU time");
3916   return n;
3917 }
3918 
3919 jlong os::thread_cpu_time(Thread* thread) {
3920   // consistent with what current_thread_cpu_time() returns
3921   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
3922   assert(n >= 0, "negative CPU time");
3923   return n;
3924 }
3925 
3926 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
3927   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
3928   assert(n >= 0, "negative CPU time");
3929   return n;
3930 }
3931 
3932 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
3933   bool error = false;
3934 
3935   jlong sys_time = 0;
3936   jlong user_time = 0;
3937 
3938   // Reimplemented using getthrds64().
3939   //
3940   // Works like this:
3941   // For the thread in question, get the kernel thread id. Then get the
3942   // kernel thread statistics using that id.
3943   //
3944   // This only works of course when no pthread scheduling is used,
3945   // i.e. there is a 1:1 relationship to kernel threads.
3946   // On AIX, see AIXTHREAD_SCOPE variable.
3947 
3948   pthread_t pthtid = thread->osthread()->pthread_id();
3949 
3950   // retrieve kernel thread id for the pthread:
3951   tid64_t tid = 0;
3952   struct __pthrdsinfo pinfo;
3953   // I just love those otherworldly IBM APIs which force me to hand down
3954   // dummy buffers for stuff I dont care for...
3955   char dummy[1];
3956   int dummy_size = sizeof(dummy);
3957   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
3958                           dummy, &dummy_size) == 0) {
3959     tid = pinfo.__pi_tid;
3960   } else {
3961     tty->print_cr("pthread_getthrds_np failed.");
3962     error = true;
3963   }
3964 
3965   // retrieve kernel timing info for that kernel thread
3966   if (!error) {
3967     struct thrdentry64 thrdentry;
3968     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
3969       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
3970       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
3971     } else {
3972       tty->print_cr("pthread_getthrds_np failed.");
3973       error = true;
3974     }
3975   }
3976 
3977   if (p_sys_time) {
3978     *p_sys_time = sys_time;
3979   }
3980 
3981   if (p_user_time) {
3982     *p_user_time = user_time;
3983   }
3984 
3985   if (error) {
3986     return false;
3987   }
3988 
3989   return true;
3990 }
3991 
3992 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
3993   jlong sys_time;
3994   jlong user_time;
3995 
3996   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
3997     return -1;
3998   }
3999 
4000   return user_sys_cpu_time ? sys_time + user_time : user_time;
4001 }
4002 
4003 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4004   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4005   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4006   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4007   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4008 }
4009 
4010 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4011   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4012   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4013   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4014   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4015 }
4016 
4017 bool os::is_thread_cpu_time_supported() {
4018   return true;
4019 }
4020 
4021 // System loadavg support. Returns -1 if load average cannot be obtained.
4022 // For now just return the system wide load average (no processor sets).
4023 int os::loadavg(double values[], int nelem) {
4024 
4025   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4026   guarantee(values, "argument error");
4027 
4028   if (os::Aix::on_pase()) {
4029 
4030     // AS/400 PASE: use libo4 porting library
4031     double v[3] = { 0.0, 0.0, 0.0 };
4032 
4033     if (libo4::get_load_avg(v, v + 1, v + 2)) {
4034       for (int i = 0; i < nelem; i ++) {
4035         values[i] = v[i];
4036       }
4037       return nelem;
4038     } else {
4039       return -1;
4040     }
4041 
4042   } else {
4043 
4044     // AIX: use libperfstat
4045     libperfstat::cpuinfo_t ci;
4046     if (libperfstat::get_cpuinfo(&ci)) {
4047       for (int i = 0; i < nelem; i++) {
4048         values[i] = ci.loadavg[i];
4049       }
4050     } else {
4051       return -1;
4052     }
4053     return nelem;
4054   }
4055 }
4056 
4057 void os::pause() {
4058   char filename[MAX_PATH];
4059   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4060     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4061   } else {
4062     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4063   }
4064 
4065   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4066   if (fd != -1) {
4067     struct stat buf;
4068     ::close(fd);
4069     while (::stat(filename, &buf) == 0) {
4070       (void)::poll(NULL, 0, 100);
4071     }
4072   } else {
4073     trcVerbose("Could not open pause file '%s', continuing immediately.", filename);
4074   }
4075 }
4076 
4077 bool os::Aix::is_primordial_thread() {
4078   if (pthread_self() == (pthread_t)1) {
4079     return true;
4080   } else {
4081     return false;
4082   }
4083 }
4084 
4085 // OS recognitions (PASE/AIX, OS level) call this before calling any
4086 // one of Aix::on_pase(), Aix::os_version() static
4087 void os::Aix::initialize_os_info() {
4088 
4089   assert(_on_pase == -1 && _os_version == 0, "already called.");
4090 
4091   struct utsname uts;
4092   memset(&uts, 0, sizeof(uts));
4093   strcpy(uts.sysname, "?");
4094   if (::uname(&uts) == -1) {
4095     trcVerbose("uname failed (%d)", errno);
4096     guarantee(0, "Could not determine whether we run on AIX or PASE");
4097   } else {
4098     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4099                "node \"%s\" machine \"%s\"\n",
4100                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4101     const int major = atoi(uts.version);
4102     assert(major > 0, "invalid OS version");
4103     const int minor = atoi(uts.release);
4104     assert(minor > 0, "invalid OS release");
4105     _os_version = (major << 24) | (minor << 16);
4106     char ver_str[20] = {0};
4107     char *name_str = "unknown OS";
4108     if (strcmp(uts.sysname, "OS400") == 0) {
4109       // We run on AS/400 PASE. We do not support versions older than V5R4M0.
4110       _on_pase = 1;
4111       if (os_version_short() < 0x0504) {
4112         trcVerbose("OS/400 releases older than V5R4M0 not supported.");
4113         assert(false, "OS/400 release too old.");
4114       }
4115       name_str = "OS/400 (pase)";
4116       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
4117     } else if (strcmp(uts.sysname, "AIX") == 0) {
4118       // We run on AIX. We do not support versions older than AIX 5.3.
4119       _on_pase = 0;
4120       // Determine detailed AIX version: Version, Release, Modification, Fix Level.
4121       odmWrapper::determine_os_kernel_version(&_os_version);
4122       if (os_version_short() < 0x0503) {
4123         trcVerbose("AIX release older than AIX 5.3 not supported.");
4124         assert(false, "AIX release too old.");
4125       }
4126       name_str = "AIX";
4127       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
4128                    major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
4129     } else {
4130       assert(false, name_str);
4131     }
4132     trcVerbose("We run on %s %s", name_str, ver_str);
4133   }
4134 
4135   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4136 } // end: os::Aix::initialize_os_info()
4137 
4138 // Scan environment for important settings which might effect the VM.
4139 // Trace out settings. Warn about invalid settings and/or correct them.
4140 //
4141 // Must run after os::Aix::initialue_os_info().
4142 void os::Aix::scan_environment() {
4143 
4144   char* p;
4145   int rc;
4146 
4147   // Warn explicity if EXTSHM=ON is used. That switch changes how
4148   // System V shared memory behaves. One effect is that page size of
4149   // shared memory cannot be change dynamically, effectivly preventing
4150   // large pages from working.
4151   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4152   // recommendation is (in OSS notes) to switch it off.
4153   p = ::getenv("EXTSHM");
4154   trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
4155   if (p && strcasecmp(p, "ON") == 0) {
4156     _extshm = 1;
4157     trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
4158     if (!AllowExtshm) {
4159       // We allow under certain conditions the user to continue. However, we want this
4160       // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
4161       // that the VM is not able to allocate 64k pages for the heap.
4162       // We do not want to run with reduced performance.
4163       vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
4164     }
4165   } else {
4166     _extshm = 0;
4167   }
4168 
4169   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4170   // Not tested, not supported.
4171   //
4172   // Note that it might be worth the trouble to test and to require it, if only to
4173   // get useful return codes for mprotect.
4174   //
4175   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4176   // exec() ? before loading the libjvm ? ....)
4177   p = ::getenv("XPG_SUS_ENV");
4178   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4179   if (p && strcmp(p, "ON") == 0) {
4180     _xpg_sus_mode = 1;
4181     trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
4182     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4183     // clobber address ranges. If we ever want to support that, we have to do some
4184     // testing first.
4185     guarantee(false, "XPG_SUS_ENV=ON not supported");
4186   } else {
4187     _xpg_sus_mode = 0;
4188   }
4189 
4190   if (os::Aix::on_pase()) {
4191     p = ::getenv("QIBM_MULTI_THREADED");
4192     trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
4193   }
4194 
4195   p = ::getenv("LDR_CNTRL");
4196   trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
4197   if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
4198     if (p && ::strstr(p, "TEXTPSIZE")) {
4199       trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
4200         "you may experience hangs or crashes on OS/400 V7R1.");
4201     }
4202   }
4203 
4204   p = ::getenv("AIXTHREAD_GUARDPAGES");
4205   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4206 
4207 } // end: os::Aix::scan_environment()
4208 
4209 // PASE: initialize the libo4 library (PASE porting library).
4210 void os::Aix::initialize_libo4() {
4211   guarantee(os::Aix::on_pase(), "OS/400 only.");
4212   if (!libo4::init()) {
4213     trcVerbose("libo4 initialization failed.");
4214     assert(false, "libo4 initialization failed");
4215   } else {
4216     trcVerbose("libo4 initialized.");
4217   }
4218 }
4219 
4220 // AIX: initialize the libperfstat library.
4221 void os::Aix::initialize_libperfstat() {
4222   assert(os::Aix::on_aix(), "AIX only");
4223   if (!libperfstat::init()) {
4224     trcVerbose("libperfstat initialization failed.");
4225     assert(false, "libperfstat initialization failed");
4226   } else {
4227     trcVerbose("libperfstat initialized.");
4228   }
4229 }
4230 
4231 /////////////////////////////////////////////////////////////////////////////
4232 // thread stack
4233 
4234 // Get the current stack base from the OS (actually, the pthread library).
4235 // Note: usually not page aligned.
4236 address os::current_stack_base() {
4237   AixMisc::stackbounds_t bounds;
4238   bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4239   guarantee(rc, "Unable to retrieve stack bounds.");
4240   return bounds.base;
4241 }
4242 
4243 // Get the current stack size from the OS (actually, the pthread library).
4244 // Returned size is such that (base - size) is always aligned to page size.
4245 size_t os::current_stack_size() {
4246   AixMisc::stackbounds_t bounds;
4247   bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4248   guarantee(rc, "Unable to retrieve stack bounds.");
4249   // Align the returned stack size such that the stack low address
4250   // is aligned to page size (Note: base is usually not and we do not care).
4251   // We need to do this because caller code will assume stack low address is
4252   // page aligned and will place guard pages without checking.
4253   address low = bounds.base - bounds.size;
4254   address low_aligned = (address)align_up(low, os::vm_page_size());
4255   size_t s = bounds.base - low_aligned;
4256   return s;
4257 }
4258 
4259 extern char** environ;
4260 
// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  // Argument vector for "sh -c <cmd>", terminated by NULL as execve requires.
  char * argv[4] = {"sh", "-c", cmd, NULL};

  pid_t pid = fork();

  if (pid < 0) {
    // fork failed
    return -1;

  } else if (pid == 0) {
    // child process

    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
    execve("/usr/bin/sh", argv, environ);

    // execve failed. Use _exit (not exit) so the child does not run
    // atexit handlers or flush stdio buffers shared with the parent.
    _exit(-1);

  } else {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit. This returns immediately if
    // the child has already exited.
    // EINTR: interrupted by a signal - just retry the wait.
    // ECHILD: the child was already reaped (e.g. by a SIGCHLD handler) -
    // treat as a normal exit.
    while (waitpid(pid, &status, 0) < 0) {
      switch (errno) {
        case ECHILD: return 0;
        case EINTR: break;
        default: return -1;
      }
    }

    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal.
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through.
      return status;
    }
  }
  // Not reachable - all branches above return - kept as a defensive default.
  return -1;
}
4316 
4317 // is_headless_jre()
4318 //
4319 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
4320 // in order to report if we are running in a headless jre.
4321 //
4322 // Since JDK8 xawt/libmawt.so is moved into the same directory
4323 // as libawt.so, and renamed libawt_xawt.so
4324 bool os::is_headless_jre() {
4325   struct stat statbuf;
4326   char buf[MAXPATHLEN];
4327   char libmawtpath[MAXPATHLEN];
4328   const char *xawtstr = "/xawt/libmawt.so";
4329   const char *new_xawtstr = "/libawt_xawt.so";
4330 
4331   char *p;
4332 
4333   // Get path to libjvm.so
4334   os::jvm_path(buf, sizeof(buf));
4335 
4336   // Get rid of libjvm.so
4337   p = strrchr(buf, '/');
4338   if (p == NULL) return false;
4339   else *p = '\0';
4340 
4341   // Get rid of client or server
4342   p = strrchr(buf, '/');
4343   if (p == NULL) return false;
4344   else *p = '\0';
4345 
4346   // check xawt/libmawt.so
4347   strcpy(libmawtpath, buf);
4348   strcat(libmawtpath, xawtstr);
4349   if (::stat(libmawtpath, &statbuf) == 0) return false;
4350 
4351   // check libawt_xawt.so
4352   strcpy(libmawtpath, buf);
4353   strcat(libmawtpath, new_xawtstr);
4354   if (::stat(libmawtpath, &statbuf) == 0) return false;
4355 
4356   return true;
4357 }
4358 
4359 // Get the default path to the core file
4360 // Returns the length of the string
4361 int os::get_core_path(char* buffer, size_t bufferSize) {
4362   const char* p = get_current_directory(buffer, bufferSize);
4363 
4364   if (p == NULL) {
4365     assert(p != NULL, "failed to get current directory");
4366     return 0;
4367   }
4368 
4369   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4370                                                p, current_process_id());
4371 
4372   return strlen(buffer);
4373 }
4374 
#ifndef PRODUCT
// Hook for the shared ReserveMemorySpecial (large page reservation) test;
// intentionally empty because no such tests exist for AIX.
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
#endif
4380 
4381 bool os::start_debugging(char *buf, int buflen) {
4382   int len = (int)strlen(buf);
4383   char *p = &buf[len];
4384 
4385   jio_snprintf(p, buflen -len,
4386                  "\n\n"
4387                  "Do you want to debug the problem?\n\n"
4388                  "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
4389                  "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
4390                  "Otherwise, press RETURN to abort...",
4391                  os::current_process_id(),
4392                  os::current_thread_id(), thread_self());
4393 
4394   bool yes = os::message_box("Unexpected Error", buf);
4395 
4396   if (yes) {
4397     // yes, user asked VM to launch debugger
4398     jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
4399 
4400     os::fork_and_exec(buf);
4401     yes = false;
4402   }
4403   return yes;
4404 }
4405 
4406 static inline time_t get_mtime(const char* filename) {
4407   struct stat st;
4408   int ret = os::stat(filename, &st);
4409   assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
4410   return st.st_mtime;
4411 }
4412 
4413 int os::compare_file_modified_times(const char* file1, const char* file2) {
4414   time_t t1 = get_mtime(file1);
4415   time_t t2 = get_mtime(file2);
4416   return t1 - t2;
4417 }