1 /*
   2  * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 // According to the AIX OS doc #pragma alloca must be used
  27 // with C++ compiler before referencing the function alloca()
  28 #pragma alloca
  29 
  30 // no precompiled headers
  31 #include "classfile/classLoader.hpp"
  32 #include "classfile/systemDictionary.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "jvm_aix.h"
  39 #include "logging/log.hpp"
  40 #include "libo4.hpp"
  41 #include "libperfstat_aix.hpp"
  42 #include "libodm_aix.hpp"
  43 #include "loadlib_aix.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "memory/filemap.hpp"
  46 #include "misc_aix.hpp"
  47 #include "oops/oop.inline.hpp"
  48 #include "os_aix.inline.hpp"
  49 #include "os_share_aix.hpp"
  50 #include "porting_aix.hpp"
  51 #include "prims/jniFastGetField.hpp"
  52 #include "prims/jvm.h"
  53 #include "prims/jvm_misc.hpp"
  54 #include "runtime/arguments.hpp"
  55 #include "runtime/atomic.hpp"
  56 #include "runtime/extendedPC.hpp"
  57 #include "runtime/globals.hpp"
  58 #include "runtime/interfaceSupport.hpp"
  59 #include "runtime/java.hpp"
  60 #include "runtime/javaCalls.hpp"
  61 #include "runtime/mutexLocker.hpp"
  62 #include "runtime/objectMonitor.hpp"
  63 #include "runtime/orderAccess.inline.hpp"
  64 #include "runtime/os.hpp"
  65 #include "runtime/osThread.hpp"
  66 #include "runtime/perfMemory.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/statSampler.hpp"
  69 #include "runtime/stubRoutines.hpp"
  70 #include "runtime/thread.inline.hpp"
  71 #include "runtime/threadCritical.hpp"
  72 #include "runtime/timer.hpp"
  73 #include "runtime/vm_version.hpp"
  74 #include "services/attachListener.hpp"
  75 #include "services/runtimeService.hpp"
  76 #include "utilities/decoder.hpp"
  77 #include "utilities/defaultStream.hpp"
  78 #include "utilities/events.hpp"
  79 #include "utilities/growableArray.hpp"
  80 #include "utilities/vmError.hpp"
  81 
  82 // put OS-includes here (sorted alphabetically)
  83 #include <errno.h>
  84 #include <fcntl.h>
  85 #include <inttypes.h>
  86 #include <poll.h>
  87 #include <procinfo.h>
  88 #include <pthread.h>
  89 #include <pwd.h>
  90 #include <semaphore.h>
  91 #include <signal.h>
  92 #include <stdint.h>
  93 #include <stdio.h>
  94 #include <string.h>
  95 #include <unistd.h>
  96 #include <sys/ioctl.h>
  97 #include <sys/ipc.h>
  98 #include <sys/mman.h>
  99 #include <sys/resource.h>
 100 #include <sys/select.h>
 101 #include <sys/shm.h>
 102 #include <sys/socket.h>
 103 #include <sys/stat.h>
 104 #include <sys/sysinfo.h>
 105 #include <sys/systemcfg.h>
 106 #include <sys/time.h>
 107 #include <sys/times.h>
 108 #include <sys/types.h>
 109 #include <sys/utsname.h>
 110 #include <sys/vminfo.h>
 111 #include <sys/wait.h>
 112 
 113 // Missing prototypes for various system APIs.
 114 extern "C"
 115 int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
 116 
 117 #if !defined(_AIXVERSION_610)
 118 extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
 119 extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
 120 extern "C" int getargs   (procsinfo*, int, char*, int);
 121 #endif
 122 
 123 #define MAX_PATH (2 * K)
 124 
 125 // for timer info max values which include all bits
 126 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (stored in 'g_multipage_support.error')
 128 #define ERROR_MP_OS_TOO_OLD                          100
 129 #define ERROR_MP_EXTSHM_ACTIVE                       101
 130 #define ERROR_MP_VMGETINFO_FAILED                    102
 131 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 132 
 133 static address resolve_function_descriptor_to_code_pointer(address p);
 134 
 135 static void vmembk_print_on(outputStream* os);
 136 
 137 ////////////////////////////////////////////////////////////////////////////////
 138 // global variables (for a description see os_aix.hpp)
 139 
// Total physical memory in bytes; set in initialize_system_info() from
// get_meminfo()'s real_total.
julong    os::Aix::_physical_memory = 0;

pthread_t os::Aix::_main_thread = ((pthread_t)0);
// Base page size in bytes; -1 = uninitialized.
int       os::Aix::_page_size = -1;

// -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
int       os::Aix::_on_pase = -1;

// 0 = uninitialized, otherwise 32 bit number:
//  0xVVRRTTSS
//  VV - major version
//  RR - minor version
//  TT - tech level, if known, 0 otherwise
//  SS - service pack, if known, 0 otherwise
uint32_t  os::Aix::_os_version = 0;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_xpg_sus_mode = -1;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_extshm = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

// Tick count at VM start — NOTE(review): presumably set during os::init,
// which is outside this chunk; confirm.
static jlong    initial_time_count = 0;
// Clock tics per second; 100 is a default — NOTE(review): likely refreshed
// from the OS during initialization (not visible here).
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;

// Process break recorded at startup.
static address g_brk_at_startup = NULL;
 174 
 175 // This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
 177 // therefore should not be defined in AIX class.
 178 //
 179 // AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
 180 // latter two (16M "large" resp. 16G "huge" pages) require special
 181 // setup and are normally not available.
 182 //
 183 // AIX supports multiple page sizes per process, for:
 184 //  - Stack (of the primordial thread, so not relevant for us)
 185 //  - Data - data, bss, heap, for us also pthread stacks
 186 //  - Text - text code
 187 //  - shared memory
 188 //
 189 // Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
 190 // and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
 191 //
 192 // For shared memory, page size can be set dynamically via
 193 // shmctl(). Different shared memory regions can have different page
 194 // sizes.
 195 //
// More information can be found at the IBM info center:
 197 //   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
 198 //
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,                // all page sizes start out "unqueried" (see
  (size_t) -1,                // the guarantee in query_multipage_support())
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,               // no large page support until proven
  0                           // no error (see ERROR_MP_* above)
};
 217 
 218 // We must not accidentally allocate memory close to the BRK - even if
 219 // that would work - because then we prevent the BRK segment from
 220 // growing which may result in a malloc OOM even though there is
 221 // enough memory. The problem only arises if we shmat() or mmap() at
 222 // a specific wish address, e.g. to place the heap in a
 223 // compressed-oops-friendly way.
 224 static bool is_close_to_brk(address a) {
 225   assert0(g_brk_at_startup != NULL);
 226   if (a >= g_brk_at_startup &&
 227       a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
 228     return true;
 229   }
 230   return false;
 231 }
 232 
 233 julong os::available_memory() {
 234   return Aix::available_memory();
 235 }
 236 
 237 julong os::Aix::available_memory() {
 238   // Avoid expensive API call here, as returned value will always be null.
 239   if (os::Aix::on_pase()) {
 240     return 0x0LL;
 241   }
 242   os::Aix::meminfo_t mi;
 243   if (os::Aix::get_meminfo(&mi)) {
 244     return mi.real_free;
 245   } else {
 246     return ULONG_MAX;
 247   }
 248 }
 249 
 250 julong os::physical_memory() {
 251   return Aix::physical_memory();
 252 }
 253 
 254 // Return true if user is running as root.
 255 
 256 bool os::have_special_privileges() {
 257   static bool init = false;
 258   static bool privileges = false;
 259   if (!init) {
 260     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 261     init = true;
 262   }
 263   return privileges;
 264 }
 265 
 266 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 267 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 268 static bool my_disclaim64(char* addr, size_t size) {
 269 
 270   if (size == 0) {
 271     return true;
 272   }
 273 
 274   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 275   const unsigned int maxDisclaimSize = 0x40000000;
 276 
 277   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 278   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 279 
 280   char* p = addr;
 281 
 282   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
 283     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 284       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 285       return false;
 286     }
 287     p += maxDisclaimSize;
 288   }
 289 
 290   if (lastDisclaimSize > 0) {
 291     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 292       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 293       return false;
 294     }
 295   }
 296 
 297   return true;
 298 }
 299 
 300 // Cpu architecture string
 301 #if defined(PPC32)
 302 static char cpu_arch[] = "ppc";
 303 #elif defined(PPC64)
 304 static char cpu_arch[] = "ppc64";
 305 #else
 306 #error Add appropriate cpu_arch setting
 307 #endif
 308 
 309 // Wrap the function "vmgetinfo" which is not available on older OS releases.
 310 static int checked_vmgetinfo(void *out, int command, int arg) {
 311   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 312     guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
 313   }
 314   return ::vmgetinfo(out, command, arg);
 315 }
 316 
 317 // Given an address, returns the size of the page backing that address.
 318 size_t os::Aix::query_pagesize(void* addr) {
 319 
 320   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 321     // AS/400 older than V6R1: no vmgetinfo here, default to 4K
 322     return 4*K;
 323   }
 324 
 325   vm_page_info pi;
 326   pi.addr = (uint64_t)addr;
 327   if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
 328     return pi.pagesize;
 329   } else {
 330     assert(false, "vmgetinfo failed to retrieve page size");
 331     return 4*K;
 332   }
 333 }
 334 
// One-time initialization of processor count and physical memory size.
void os::Aix::initialize_system_info() {

  // Get the number of online(logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  // Note: get_meminfo() zeroes *mi first, so real_total is 0 (not garbage)
  // if the call fails in a product build where the assert is compiled away.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}
 348 
 349 // Helper function for tracing page sizes.
 350 static const char* describe_pagesize(size_t pagesize) {
 351   switch (pagesize) {
 352     case 4*K : return "4K";
 353     case 64*K: return "64K";
 354     case 16*M: return "16M";
 355     case 16*G: return "16G";
 356     default:
 357       assert(false, "surprise");
 358       return "??";
 359   }
 360 }
 361 
 362 // Probe OS for multipage support.
 363 // Will fill the global g_multipage_support structure.
 364 // Must be called before calling os::large_page_init().
 365 static void query_multipage_support() {
 366 
 367   guarantee(g_multipage_support.pagesize == -1,
 368             "do not call twice");
 369 
 370   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
 371 
 372   // This really would surprise me.
 373   assert(g_multipage_support.pagesize == 4*K, "surprise!");
 374 
 375   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 376   // Default data page size is defined either by linker options (-bdatapsize)
 377   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 378   // default should be 4K.
 379   {
 380     void* p = ::malloc(16*M);
 381     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
 382     ::free(p);
 383   }
 384 
 385   // Query default shm page size (LDR_CNTRL SHMPSIZE).
 386   // Note that this is pure curiosity. We do not rely on default page size but set
 387   // our own page size after allocated.
 388   {
 389     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 390     guarantee(shmid != -1, "shmget failed");
 391     void* p = ::shmat(shmid, NULL, 0);
 392     ::shmctl(shmid, IPC_RMID, NULL);
 393     guarantee(p != (void*) -1, "shmat failed");
 394     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
 395     ::shmdt(p);
 396   }
 397 
 398   // Before querying the stack page size, make sure we are not running as primordial
 399   // thread (because primordial thread's stack may have different page size than
 400   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 401   // number of reasons so we may just as well guarantee it here.
 402   guarantee0(!os::Aix::is_primordial_thread());
 403 
 404   // Query pthread stack page size. Should be the same as data page size because
 405   // pthread stacks are allocated from C-Heap.
 406   {
 407     int dummy = 0;
 408     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
 409   }
 410 
 411   // Query default text page size (LDR_CNTRL TEXTPSIZE).
 412   {
 413     address any_function =
 414       resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
 415     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
 416   }
 417 
 418   // Now probe for support of 64K pages and 16M pages.
 419 
 420   // Before OS/400 V6R1, there is no support for pages other than 4K.
 421   if (os::Aix::on_pase_V5R4_or_older()) {
 422     trcVerbose("OS/400 < V6R1 - no large page support.");
 423     g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
 424     goto query_multipage_support_end;
 425   }
 426 
 427   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
 428   {
 429     const int MAX_PAGE_SIZES = 4;
 430     psize_t sizes[MAX_PAGE_SIZES];
 431     const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 432     if (num_psizes == -1) {
 433       trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
 434       trcVerbose("disabling multipage support.");
 435       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
 436       goto query_multipage_support_end;
 437     }
 438     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 439     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 440     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
 441     for (int i = 0; i < num_psizes; i ++) {
 442       trcVerbose(" %s ", describe_pagesize(sizes[i]));
 443     }
 444 
 445     // Can we use 64K, 16M pages?
 446     for (int i = 0; i < num_psizes; i ++) {
 447       const size_t pagesize = sizes[i];
 448       if (pagesize != 64*K && pagesize != 16*M) {
 449         continue;
 450       }
 451       bool can_use = false;
 452       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
 453       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
 454         IPC_CREAT | S_IRUSR | S_IWUSR);
 455       guarantee0(shmid != -1); // Should always work.
 456       // Try to set pagesize.
 457       struct shmid_ds shm_buf = { 0 };
 458       shm_buf.shm_pagesize = pagesize;
 459       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
 460         const int en = errno;
 461         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 462         trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%n",
 463           errno);
 464       } else {
 465         // Attach and double check pageisze.
 466         void* p = ::shmat(shmid, NULL, 0);
 467         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 468         guarantee0(p != (void*) -1); // Should always work.
 469         const size_t real_pagesize = os::Aix::query_pagesize(p);
 470         if (real_pagesize != pagesize) {
 471           trcVerbose("real page size (0x%llX) differs.", real_pagesize);
 472         } else {
 473           can_use = true;
 474         }
 475         ::shmdt(p);
 476       }
 477       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
 478       if (pagesize == 64*K) {
 479         g_multipage_support.can_use_64K_pages = can_use;
 480       } else if (pagesize == 16*M) {
 481         g_multipage_support.can_use_16M_pages = can_use;
 482       }
 483     }
 484 
 485   } // end: check which pages can be used for shared memory
 486 
 487 query_multipage_support_end:
 488 
 489   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
 490       describe_pagesize(g_multipage_support.pagesize));
 491   trcVerbose("Data page size (C-Heap, bss, etc): %s",
 492       describe_pagesize(g_multipage_support.datapsize));
 493   trcVerbose("Text page size: %s",
 494       describe_pagesize(g_multipage_support.textpsize));
 495   trcVerbose("Thread stack page size (pthread): %s",
 496       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
 497   trcVerbose("Default shared memory page size: %s",
 498       describe_pagesize(g_multipage_support.shmpsize));
 499   trcVerbose("Can use 64K pages dynamically with shared meory: %s",
 500       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
 501   trcVerbose("Can use 16M pages dynamically with shared memory: %s",
 502       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
 503   trcVerbose("Multipage error details: %d",
 504       g_multipage_support.error);
 505 
 506   // sanity checks
 507   assert0(g_multipage_support.pagesize == 4*K);
 508   assert0(g_multipage_support.datapsize == 4*K || g_multipage_support.datapsize == 64*K);
 509   assert0(g_multipage_support.textpsize == 4*K || g_multipage_support.textpsize == 64*K);
 510   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
 511   assert0(g_multipage_support.shmpsize == 4*K || g_multipage_support.shmpsize == 64*K);
 512 
 513 }
 514 
// Initialize the system properties that depend on the JVM's location on
// disk: dll dir, java home, boot class path, native library search path
// (from LIBPATH plus an invariant default) and the extensions directory.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/lib:/usr/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    // Strip one more component (/lib) to arrive at java_home, but only if
    // the previous strip succeeded.
    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';        // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  // Note: buf still holds java_home here (set_java_home copied it), so it
  // can be reused for the extensions path.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}
 580 
 581 ////////////////////////////////////////////////////////////////////////////////
 582 // breakpoint support
 583 
// Debugging aid: triggers a programmatic breakpoint via the platform
// BREAKPOINT macro.
void os::breakpoint() {
  BREAKPOINT;
}
 587 
 588 extern "C" void breakpoint() {
 589   // use debugger to set breakpoint here
 590 }
 591 
 592 ////////////////////////////////////////////////////////////////////////////////
 593 // signal support
 594 
// Set once signal_sets_init() has completed (debug builds only).
debug_only(static bool signal_sets_initialized = false);
// Signals unblocked in every thread / signals only the VM thread handles.
static sigset_t unblocked_sigs, vm_sigs;
 597 
 598 bool os::Aix::is_sig_ignored(int sig) {
 599   struct sigaction oact;
 600   sigaction(sig, (struct sigaction*)NULL, &oact);
 601   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
 602     : CAST_FROM_FN_PTR(void*, oact.sa_handler);
 603   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
 604     return true;
 605   } else {
 606     return false;
 607   }
 608 }
 609 
// Build the process-wide signal sets (unblocked_sigs, vm_sigs) consumed by
// hotspot_sigmask(). Must run before any thread applies its mask.
void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  // SR_signum (SIGUSR2 by default) is used for thread suspend/resume.
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
   // Only take over shutdown signals that the embedder has not ignored.
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}
 651 
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Returns a pointer to the process-wide set built by signal_sets_init().
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
 658 
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
// Returns a pointer to the process-wide set built by signal_sets_init().
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
 665 
// Apply the VM's signal mask to the calling thread (whose Thread* is
// 'thread'): unblock the always-unblocked set, and route BREAK_SIGNAL to
// the VM thread only. The caller's previous mask is preserved in the
// OSThread for later restoration.
void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask.
  // (SIG_BLOCK with a NULL set only queries the current mask.)
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}
 687 
// retrieve memory information.
// Returns false if something went wrong;
// content of pmi undefined in this case.
// On PASE the numbers come from the libo4 porting library; on AIX they
// come from libperfstat. All values are reported in bytes.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  // Zero the output first so callers see deterministic values even on the
  // (asserted) failure paths.
  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {
    // On PASE, use the libo4 porting library.

    unsigned long long virt_total = 0;
    unsigned long long real_total = 0;
    unsigned long long real_free = 0;
    unsigned long long pgsp_total = 0;
    unsigned long long pgsp_free = 0;
    if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
      pmi->virt_total = virt_total;
      pmi->real_total = real_total;
      pmi->real_free = real_free;
      pmi->pgsp_total = pgsp_total;
      pmi->pgsp_free = pgsp_free;
      return true;
    }
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    // We requested exactly one record; perfstat returns the record count.
    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    // perfstat reports in 4K pages - convert to bytes.
    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo
 755 
 756 //////////////////////////////////////////////////////////////////////////////
 757 // create new thread
 758 
// Thread start routine for all newly created threads.
// Runs on the new pthread: records stack geometry, establishes
// thread-local state, signal mask and FPU state, runs the thread's
// payload via thread->run(), then clears thread-local storage.
static void *thread_native_entry(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    thread->set_stack_base(os::current_stack_base());
    thread->set_stack_size(os::current_stack_size());
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    log_warning(os, thread)("Thread stack not in data segment.");
  }

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  // Shift this thread's stack down by 0..7 * 128 bytes.
  alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // Thread_id is pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  os::Aix::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->run();

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  return 0;
}
 829 
// Create a new native (pthread) thread for 'thread'. The thread is created
// suspended; os::pd_start_thread() later releases it into thread_native_entry().
// Returns false (with no OSThread attached) if creation fails.
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t req_stack_size) {

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object.
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Set the correct thread state.
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // Init thread attributes.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  }

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // Calculate stack size if it's not specified by caller.
  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
  int status = pthread_attr_setstacksize(&attr, stack_size);
  assert_status(status == 0, status, "pthread_attr_setstacksize");

  // Configure libc guard page.
  // NOTE(review): unlike the calls above, the return value is not checked
  // here - presumably a failed guard-size setting is tolerable; confirm
  // before tightening this to a guarantee().
  pthread_attr_setguardsize(&attr, os::Aix::default_guard_size(thr_type));

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);

  char buf[64];
  if (ret == 0) {
    log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
      (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  } else {
    log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.",
      ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  }

  pthread_attr_destroy(&attr);

  if (ret != 0) {
    // Need to clean up stuff we've allocated so far.
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(tid);

  return true;
}
 897 
 898 /////////////////////////////////////////////////////////////////////////////
 899 // attach existing thread
 900 
// Bootstrap the primordial ("main") thread: wrap it in the given JavaThread.
// May only be called on the thread that started the VM.
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}
 906 
// Attach an already-running native thread to the VM: allocate and initialize
// its OSThread, FPU state, NUMA group and signal mask. Returns false only if
// the OSThread allocation fails.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Cache both ids for this (already running) thread.
  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // Initial thread state is RUNNABLE (the thread is already executing).
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  return true;
}
 952 
 953 void os::pd_start_thread(Thread* thread) {
 954   int status = pthread_continue_np(thread->osthread()->pthread_id());
 955   assert(status == 0, "thr_continue failed");
 956 }
 957 
// Free OS resources related to the OSThread. Must be called on the thread
// being freed: it restores this thread's pre-attach signal mask before
// deleting the OSThread.
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

  // Restore caller's signal mask (saved by os::Aix::hotspot_sigmask()
  // when the thread was attached/created).
  sigset_t sigmask = osthread->caller_sigmask();
  pthread_sigmask(SIG_SETMASK, &sigmask, NULL);

  delete osthread;
}
 973 
 974 ////////////////////////////////////////////////////////////////////////////////
 975 // time support
 976 
 977 // Time since start-up in seconds to a fine granularity.
 978 // Used by VMSelfDestructTimer and the MemProfiler.
 979 double os::elapsedTime() {
 980   return (double)(os::elapsed_counter()) * 0.000001;
 981 }
 982 
 983 jlong os::elapsed_counter() {
 984   timeval time;
 985   int status = gettimeofday(&time, NULL);
 986   return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
 987 }
 988 
 989 jlong os::elapsed_frequency() {
 990   return (1000 * 1000);
 991 }
 992 
 993 bool os::supports_vtime() { return true; }
 994 bool os::enable_vtime()   { return false; }
 995 bool os::vtime_enabled()  { return false; }
 996 
 997 double os::elapsedVTime() {
 998   struct rusage usage;
 999   int retval = getrusage(RUSAGE_THREAD, &usage);
1000   if (retval == 0) {
1001     return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
1002   } else {
1003     // better than nothing, but not much
1004     return elapsedTime();
1005   }
1006 }
1007 
1008 jlong os::javaTimeMillis() {
1009   timeval time;
1010   int status = gettimeofday(&time, NULL);
1011   assert(status != -1, "aix error at gettimeofday()");
1012   return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1013 }
1014 
1015 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1016   timeval time;
1017   int status = gettimeofday(&time, NULL);
1018   assert(status != -1, "aix error at gettimeofday()");
1019   seconds = jlong(time.tv_sec);
1020   nanos = jlong(time.tv_usec) * 1000;
1021 }
1022 
// Nanosecond timer for System.nanoTime(). On PASE we fall back to
// gettimeofday (microsecond resolution); on AIX we use the time base
// registers via mread_real_time for a monotonic high-resolution clock.
jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {

    timeval time;
    int status = gettimeofday(&time, NULL);
    assert(status != -1, "PASE error at gettimeofday()");
    jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
    return 1000 * usecs;

  } else {
    // On AIX use the precision of processors real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After convertion we have following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonic increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}
1051 
1052 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1053   info_ptr->max_value = ALL_64_BITS;
1054   // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
1055   info_ptr->may_skip_backward = false;
1056   info_ptr->may_skip_forward = false;
1057   info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
1058 }
1059 
1060 // Return the real, user, and system times in seconds from an
1061 // arbitrary fixed point in the past.
1062 bool os::getTimesSecs(double* process_real_time,
1063                       double* process_user_time,
1064                       double* process_system_time) {
1065   struct tms ticks;
1066   clock_t real_ticks = times(&ticks);
1067 
1068   if (real_ticks == (clock_t) (-1)) {
1069     return false;
1070   } else {
1071     double ticks_per_second = (double) clock_tics_per_sec;
1072     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1073     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1074     *process_real_time = ((double) real_ticks) / ticks_per_second;
1075 
1076     return true;
1077   }
1078 }
1079 
1080 char * os::local_time_string(char *buf, size_t buflen) {
1081   struct tm t;
1082   time_t long_time;
1083   time(&long_time);
1084   localtime_r(&long_time, &t);
1085   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1086                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1087                t.tm_hour, t.tm_min, t.tm_sec);
1088   return buf;
1089 }
1090 
1091 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
1092   return localtime_r(clock, res);
1093 }
1094 
1095 ////////////////////////////////////////////////////////////////////////////////
1096 // runtime exit support
1097 
1098 // Note: os::shutdown() might be called very early during initialization, or
1099 // called from signal handler. Before adding something to os::shutdown(), make
1100 // sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook (registered via -Xrs/JNI); run it last so all
  // VM-side cleanup above has already happened.
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
1118 
1119 // Note: os::abort() might be called very early during initialization, or
1120 // called from signal handler. Before adding something to os::abort(), make
1121 // sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    // In debug builds, announce which thread triggers the core dump.
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  // No core requested: plain process exit with a failure status.
  ::exit(1);
}
1138 
// Die immediately, no exit hook, no abort hook, no cleanup.
// Used as a last resort, e.g. when error reporting itself hangs.
void os::die() {
  ::abort();
}
1143 
1144 // This method is a copy of JDK's sysGetLastErrorString
1145 // from src/solaris/hpi/src/system_md.c
1146 
1147 size_t os::lasterror(char *buf, size_t len) {
1148   if (errno == 0) return 0;
1149 
1150   const char *s = os::strerror(errno);
1151   size_t n = ::strlen(s);
1152   if (n >= len) {
1153     n = len - 1;
1154   }
1155   ::strncpy(buf, s, n);
1156   buf[n] = '\0';
1157   return n;
1158 }
1159 
1160 intx os::current_thread_id() {
1161   return (intx)pthread_self();
1162 }
1163 
1164 int os::current_process_id() {
1165   return getpid();
1166 }
1167 
1168 // DLL functions
1169 
// Suffix used when composing shared library file names (see dll_build_name).
const char* os::dll_file_extension() { return ".so"; }
1171 
// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }
1175 
1176 static bool file_exists(const char* filename) {
1177   struct stat statbuf;
1178   if (filename == NULL || strlen(filename) == 0) {
1179     return false;
1180   }
1181   return os::stat(filename, &statbuf) == 0;
1182 }
1183 
// Build the platform file name of library 'fname' into 'buffer'.
// pname may be empty (bare "lib<fname>.so"), a single directory, or a
// path-separator-separated search path (first existing hit wins).
// Returns true if a name was produced, false on overflow or no hit.
bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // Search path case: probe each element until a file actually exists.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    // Single directory: compose the name without checking existence.
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
1231 
1232 // Check if addr is inside libjvm.so.
1233 bool os::address_is_in_vm(address addr) {
1234 
1235   // Input could be a real pc or a function pointer literal. The latter
1236   // would be a function descriptor residing in the data segment of a module.
1237   loaded_module_t lm;
1238   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
1239     return lm.is_in_vm;
1240   } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
1241     return lm.is_in_vm;
1242   } else {
1243     return false;
1244   }
1245 
1246 }
1247 
1248 // Resolve an AIX function descriptor literal to a code pointer.
1249 // If the input is a valid code pointer to a text segment of a loaded module,
1250 //   it is returned unchanged.
1251 // If the input is a valid AIX function descriptor, it is resolved to the
1252 //   code entry point.
1253 // If the input is neither a valid function descriptor nor a valid code pointer,
1254 //   NULL is returned.
1255 static address resolve_function_descriptor_to_code_pointer(address p) {
1256 
1257   if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
1258     // It is a real code pointer.
1259     return p;
1260   } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
1261     // Pointer to data segment, potential function descriptor.
1262     address code_entry = (address)(((FunctionDescriptor*)p)->entry());
1263     if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
1264       // It is a function descriptor.
1265       return code_entry;
1266     }
1267   }
1268 
1269   return NULL;
1270 }
1271 
1272 bool os::dll_address_to_function_name(address addr, char *buf,
1273                                       int buflen, int *offset,
1274                                       bool demangle) {
1275   if (offset) {
1276     *offset = -1;
1277   }
1278   // Buf is not optional, but offset is optional.
1279   assert(buf != NULL, "sanity check");
1280   buf[0] = '\0';
1281 
1282   // Resolve function ptr literals first.
1283   addr = resolve_function_descriptor_to_code_pointer(addr);
1284   if (!addr) {
1285     return false;
1286   }
1287 
1288   return AixSymbols::get_function_name(addr, buf, buflen, offset, NULL, demangle);
1289 }
1290 
1291 bool os::dll_address_to_library_name(address addr, char* buf,
1292                                      int buflen, int* offset) {
1293   if (offset) {
1294     *offset = -1;
1295   }
1296   // Buf is not optional, but offset is optional.
1297   assert(buf != NULL, "sanity check");
1298   buf[0] = '\0';
1299 
1300   // Resolve function ptr literals first.
1301   addr = resolve_function_descriptor_to_code_pointer(addr);
1302   if (!addr) {
1303     return false;
1304   }
1305 
1306   return AixSymbols::get_module_name(addr, buf, buflen);
1307 }
1308 
1309 // Loads .dll/.so and in case of error it checks if .dll/.so was built
1310 // for the same architecture as Hotspot is running on.
1311 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1312 
1313   if (ebuf && ebuflen > 0) {
1314     ebuf[0] = '\0';
1315     ebuf[ebuflen - 1] = '\0';
1316   }
1317 
1318   if (!filename || strlen(filename) == 0) {
1319     ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1320     return NULL;
1321   }
1322 
1323   // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1324   void * result= ::dlopen(filename, RTLD_LAZY);
1325   if (result != NULL) {
1326     // Reload dll cache. Don't do this in signal handling.
1327     LoadedLibraries::reload();
1328     return result;
1329   } else {
1330     // error analysis when dlopen fails
1331     const char* const error_report = ::dlerror();
1332     if (error_report && ebuf && ebuflen > 0) {
1333       snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1334                filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1335     }
1336   }
1337   return NULL;
1338 }
1339 
1340 void* os::dll_lookup(void* handle, const char* name) {
1341   void* res = dlsym(handle, name);
1342   return res;
1343 }
1344 
1345 void* os::get_default_process_handle() {
1346   return (void*)::dlopen(NULL, RTLD_LAZY);
1347 }
1348 
1349 void os::print_dll_info(outputStream *st) {
1350   st->print_cr("Dynamic libraries:");
1351   LoadedLibraries::print(st);
1352 }
1353 
1354 void os::get_summary_os_info(char* buf, size_t buflen) {
1355   // There might be something more readable than uname results for AIX.
1356   struct utsname name;
1357   uname(&name);
1358   snprintf(buf, buflen, "%s %s", name.release, name.version);
1359 }
1360 
1361 void os::print_os_info(outputStream* st) {
1362   st->print("OS:");
1363 
1364   st->print("uname:");
1365   struct utsname name;
1366   uname(&name);
1367   st->print(name.sysname); st->print(" ");
1368   st->print(name.nodename); st->print(" ");
1369   st->print(name.release); st->print(" ");
1370   st->print(name.version); st->print(" ");
1371   st->print(name.machine);
1372   st->cr();
1373 
1374   uint32_t ver = os::Aix::os_version();
1375   st->print_cr("AIX kernel version %u.%u.%u.%u",
1376                (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1377 
1378   os::Posix::print_rlimit_info(st);
1379 
1380   // load average
1381   st->print("load average:");
1382   double loadavg[3] = {-1.L, -1.L, -1.L};
1383   os::loadavg(loadavg, 3);
1384   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1385   st->cr();
1386 
1387   // print wpar info
1388   libperfstat::wparinfo_t wi;
1389   if (libperfstat::get_wparinfo(&wi)) {
1390     st->print_cr("wpar info");
1391     st->print_cr("name: %s", wi.name);
1392     st->print_cr("id:   %d", wi.wpar_id);
1393     st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1394   }
1395 
1396   // print partition info
1397   libperfstat::partitioninfo_t pi;
1398   if (libperfstat::get_partitioninfo(&pi)) {
1399     st->print_cr("partition info");
1400     st->print_cr(" name: %s", pi.name);
1401   }
1402 
1403 }
1404 
1405 void os::print_memory_info(outputStream* st) {
1406 
1407   st->print_cr("Memory:");
1408 
1409   st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1410     describe_pagesize(g_multipage_support.pagesize));
1411   st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1412     describe_pagesize(g_multipage_support.datapsize));
1413   st->print_cr("  Text page size:                         %s",
1414     describe_pagesize(g_multipage_support.textpsize));
1415   st->print_cr("  Thread stack page size (pthread):       %s",
1416     describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1417   st->print_cr("  Default shared memory page size:        %s",
1418     describe_pagesize(g_multipage_support.shmpsize));
1419   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1420     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1421   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1422     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1423   st->print_cr("  Multipage error: %d",
1424     g_multipage_support.error);
1425   st->cr();
1426   st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1427 
1428   // print out LDR_CNTRL because it affects the default page sizes
1429   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1430   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1431 
1432   // Print out EXTSHM because it is an unsupported setting.
1433   const char* const extshm = ::getenv("EXTSHM");
1434   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1435   if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1436     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1437   }
1438 
1439   // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1440   const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1441   st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1442       aixthread_guardpages ? aixthread_guardpages : "<unset>");
1443 
1444   os::Aix::meminfo_t mi;
1445   if (os::Aix::get_meminfo(&mi)) {
1446     char buffer[256];
1447     if (os::Aix::on_aix()) {
1448       st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1449       st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1450       st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1451       st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1452     } else {
1453       // PASE - Numbers are result of QWCRSSTS; they mean:
1454       // real_total: Sum of all system pools
1455       // real_free: always 0
1456       // pgsp_total: we take the size of the system ASP
1457       // pgsp_free: size of system ASP times percentage of system ASP unused
1458       st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1459       st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1460       st->print_cr("%% system asp used : " SIZE_FORMAT,
1461         mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1462     }
1463     st->print_raw(buffer);
1464   }
1465   st->cr();
1466 
1467   // Print segments allocated with os::reserve_memory.
1468   st->print_cr("internal virtual memory regions used by vm:");
1469   vmembk_print_on(st);
1470 }
1471 
1472 // Get a string for the cpuinfo that is a summary of the cpu type
1473 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1474   // This looks good
1475   libperfstat::cpuinfo_t ci;
1476   if (libperfstat::get_cpuinfo(&ci)) {
1477     strncpy(buf, ci.version, buflen);
1478   } else {
1479     strncpy(buf, "AIX", buflen);
1480   }
1481 }
1482 
1483 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1484   st->print("CPU:");
1485   st->print("total %d", os::processor_count());
1486   // It's not safe to query number of active processors after crash.
1487   // st->print("(active %d)", os::active_processor_count());
1488   st->print(" %s", VM_Version::features());
1489   st->cr();
1490 }
1491 
1492 static void print_signal_handler(outputStream* st, int sig,
1493                                  char* buf, size_t buflen);
1494 
1495 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1496   st->print_cr("Signal Handlers:");
1497   print_signal_handler(st, SIGSEGV, buf, buflen);
1498   print_signal_handler(st, SIGBUS , buf, buflen);
1499   print_signal_handler(st, SIGFPE , buf, buflen);
1500   print_signal_handler(st, SIGPIPE, buf, buflen);
1501   print_signal_handler(st, SIGXFSZ, buf, buflen);
1502   print_signal_handler(st, SIGILL , buf, buflen);
1503   print_signal_handler(st, SR_signum, buf, buflen);
1504   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1505   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1506   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1507   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1508   print_signal_handler(st, SIGTRAP, buf, buflen);
1509   // We also want to know if someone else adds a SIGDANGER handler because
1510   // that will interfere with OOM killling.
1511   print_signal_handler(st, SIGDANGER, buf, buflen);
1512 }
1513 
// Lazily filled cache for the resolved libjvm path (see os::jvm_path()).
static char saved_jvm_path[MAXPATHLEN] = {0};
1515 
1516 // Find the full path to the current module, libjvm.so.
// Resolve the full path to the running libjvm.so into buf (cached in
// saved_jvm_path after the first call). For -XXaltjvm launches the path is
// rewritten relative to JAVA_HOME where possible.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Locate our own module via the address of this function.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");

  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
    // value for buf is "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.so".
    // If "/jre/lib/" appears at the right place in the string, then
    // assume we are installed in a JDK and we're done. Otherwise, check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like libjvm.so is installed there (append a fake suffix
    // hotspot/libjvm.so).
    // Walk back over the last four '/'-separated components of the path.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 4; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char* jrelib_p;
        int len;

        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        if (p == NULL) {
          return;
        }
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        rp = os::Posix::realpath(java_home_var, buf, buflen);
        if (rp == NULL) {
          return;
        }

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer room");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib");
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib");
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
          if (rp == NULL) {
            return;
          }
        }
      }
    }
  }

  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}
1597 
// AIX native symbols carry no JNI name prefix.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}
1601 
// AIX native symbols carry no JNI name suffix (e.g. no "@<args_size>").
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
1605 
1606 ////////////////////////////////////////////////////////////////////////////////
1607 // sun.misc.Signal support
1608 
// Count of SIGINTs seen; used by UserHandler to deliver only the first one.
static volatile jint sigint_count = 0;
1610 
// Handler installed for signals forwarded to sun.misc.Signal; runs in
// signal context, so it only counts/forwards and does no real work here.
static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let VM die immediately.
  if (sig == SIGINT && is_error_reported()) {
    os::die();
  }

  // Hand the signal to the Java-side dispatcher thread.
  os::signal_notify(sig);
}
1627 
1628 void* os::user_handler() {
1629   return CAST_FROM_FN_PTR(void*, UserHandler);
1630 }
1631 
extern "C" {
  // Function pointer types for plain and siginfo-style signal handlers,
  // used for the casts in os::signal() below.
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}
1636 
// Install 'handler' for 'signal_number' via sigaction. Returns the previous
// handler, or (void*)-1 if registration failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  // Start from "block everything while handling", then punch holes below.
  sigfillset(&(sigAct.sa_mask));

  // Do not block out synchronous signals in the signal handler.
  // Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies. Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
  // On AIX, PASE there was a case where a SIGSEGV happened, followed
  // by a SIGILL, which was blocked due to the signal mask. The process
  // just hung forever. Better to crash from a secondary signal than to hang.
  sigdelset(&(sigAct.sa_mask), SIGSEGV);
  sigdelset(&(sigAct.sa_mask), SIGBUS);
  sigdelset(&(sigAct.sa_mask), SIGILL);
  sigdelset(&(sigAct.sa_mask), SIGFPE);
  sigdelset(&(sigAct.sa_mask), SIGTRAP);

  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;

  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
1668 
// Deliver 'signal_number' to the current process.
void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}

//
// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.
//

// Will be modified when max signal is changed to be dynamic
// Returns the signal number used as the internal "exit" marker
// (one past the largest valid signal; see pending_signals sizing below).
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value
static volatile jint pending_signals[NSIG+1] = { 0 };
1685 
// Wrapper functions for: sem_init(), sem_post(), sem_wait()
// On AIX, we use sem_init(), sem_post(), sem_wait()
// On Pase, we need to use msem_lock() and msem_unlock(), because Posix Semaphores
// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
// Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
// on AIX, msem_..() calls are suspected of causing problems.
static sem_t sig_sem;               // used on AIX (sem_* API)
static msemaphore* p_sig_msem = 0;  // used on PASE (msem_* API)

// One-time creation of the signal semaphore; called from os::signal_init_pd().
static void local_sem_init() {
  if (os::Aix::on_aix()) {
    int rc = ::sem_init(&sig_sem, 0, 0);
    guarantee(rc != -1, "sem_init failed");
  } else {
    // Memory semaphores must live in shared mem.
    guarantee0(p_sig_msem == NULL);
    p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
    guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
    guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
  }
}
1707 
// Post (release) the signal semaphore. A failing post is logged once
// per process only, to avoid log spam.
static void local_sem_post() {
  static bool warn_only_once = false;
  if (os::Aix::on_aix()) {
    int rc = ::sem_post(&sig_sem);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  } else {
    guarantee0(p_sig_msem != NULL);
    int rc = ::msem_unlock(p_sig_msem, 0);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  }
}
1725 
// Wait on (acquire) the signal semaphore. A failing wait is logged once
// per process only, to avoid log spam.
static void local_sem_wait() {
  static bool warn_only_once = false;
  if (os::Aix::on_aix()) {
    int rc = ::sem_wait(&sig_sem);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  } else {
    guarantee0(p_sig_msem != NULL); // must init before use
    int rc = ::msem_lock(p_sig_msem, 0);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  }
}
1743 
// Platform-dependent part of signal subsystem initialization:
// zero the pending-signal counters and create the signal semaphore.
void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  local_sem_init();
}

// Record 'sig' as pending and wake the signal dispatcher thread.
// Called from signal handler context (see UserHandler above), so it is
// limited to an atomic increment plus a semaphore post.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  local_sem_post();
}
1756 
// Core of os::signal_lookup()/os::signal_wait(): scan the pending-signal
// counters and return the number of a pending signal, decrementing its
// counter. If wait == false, returns -1 when nothing is pending; if
// wait == true, blocks on the signal semaphore until a signal arrives.
static int check_pending_signals(bool wait) {
  // Reset the SIGINT throttle used by UserHandler().
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // Claim one occurrence of signal i via CAS; on contention, the scan
      // simply continues and may retry on the next outer iteration.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      local_sem_wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //

        // Re-post so the wakeup is not lost while we self-suspend.
        local_sem_post();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
1796 
// Non-blocking poll for a pending signal; returns the signal number or -1.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Block until a signal is pending; returns the signal number.
int os::signal_wait() {
  return check_pending_signals(true);
}
1804 
1805 ////////////////////////////////////////////////////////////////////////////////
1806 // Virtual Memory
1807 
1808 // We need to keep small simple bookkeeping for os::reserve_memory and friends.
1809 
1810 #define VMEM_MAPPED  1
1811 #define VMEM_SHMATED 2
1812 
// Bookkeeping entry for one reserved memory area; kept in the global list
// headed by vmem.first (see vmembk_add/vmembk_find/vmembk_remove below).
struct vmembk_t {
  int type;         // 1 - mmap, 2 - shmat
  char* addr;
  size_t size;      // Real size, may be larger than usersize.
  size_t pagesize;  // page size of area
  vmembk_t* next;   // next entry in the global list

  // True if p lies within [addr, addr + size).
  bool contains_addr(char* p) const {
    return p >= addr && p < (addr + size);
  }

  // True if [p, p + s) lies completely within this block.
  bool contains_range(char* p, size_t s) const {
    return contains_addr(p) && contains_addr(p + s - 1);
  }

  void print_on(outputStream* os) const {
    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
      " bytes, %d %s pages), %s",
      addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
      (type == VMEM_SHMATED ? "shmat" : "mmap")
    );
  }

  // Check that range is a sub range of memory block (or equal to memory block);
  // also check that range is fully page aligned to the page size of the block.
  void assert_is_valid_subrange(char* p, size_t s) const {
    if (!contains_range(p, s)) {
      trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
              "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
              p, p + s, addr, addr + size);
      guarantee0(false);
    }
    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
      trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
              " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
      guarantee0(false);
    }
  }
};
1852 
// Global bookkeeping list of reserved areas, guarded by a critical section.
static struct {
  vmembk_t* first;
  MiscUtils::CritSect cs;
} vmem;
1857 
1858 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1859   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1860   assert0(p);
1861   if (p) {
1862     MiscUtils::AutoCritSect lck(&vmem.cs);
1863     p->addr = addr; p->size = size;
1864     p->pagesize = pagesize;
1865     p->type = type;
1866     p->next = vmem.first;
1867     vmem.first = p;
1868   }
1869 }
1870 
1871 static vmembk_t* vmembk_find(char* addr) {
1872   MiscUtils::AutoCritSect lck(&vmem.cs);
1873   for (vmembk_t* p = vmem.first; p; p = p->next) {
1874     if (p->addr <= addr && (p->addr + p->size) > addr) {
1875       return p;
1876     }
1877   }
1878   return NULL;
1879 }
1880 
1881 static void vmembk_remove(vmembk_t* p0) {
1882   MiscUtils::AutoCritSect lck(&vmem.cs);
1883   assert0(p0);
1884   assert0(vmem.first); // List should not be empty.
1885   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1886     if (*pp == p0) {
1887       *pp = p0->next;
1888       ::free(p0);
1889       return;
1890     }
1891   }
1892   assert0(false); // Not found?
1893 }
1894 
1895 static void vmembk_print_on(outputStream* os) {
1896   MiscUtils::AutoCritSect lck(&vmem.cs);
1897   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1898     vmi->print_on(os);
1899     os->cr();
1900   }
1901 }
1902 
1903 // Reserve and attach a section of System V memory.
1904 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1905 // address. Failing that, it will attach the memory anywhere.
1906 // If <requested_addr> is NULL, function will attach the memory anywhere.
1907 //
1908 // <alignment_hint> is being ignored by this function. It is very probable however that the
1909 // alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
1910 // Should this be not enogh, we can put more work into it.
1911 static char* reserve_shmated_memory (
1912   size_t bytes,
1913   char* requested_addr,
1914   size_t alignment_hint) {
1915 
1916   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1917     PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1918     bytes, requested_addr, alignment_hint);
1919 
1920   // Either give me wish address or wish alignment but not both.
1921   assert0(!(requested_addr != NULL && alignment_hint != 0));
1922 
1923   // We must prevent anyone from attaching too close to the
1924   // BRK because that may cause malloc OOM.
1925   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1926     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1927       "Will attach anywhere.", requested_addr);
1928     // Act like the OS refused to attach there.
1929     requested_addr = NULL;
1930   }
1931 
1932   // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1933   // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1934   if (os::Aix::on_pase_V5R4_or_older()) {
1935     ShouldNotReachHere();
1936   }
1937 
1938   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1939   const size_t size = align_up(bytes, 64*K);
1940 
1941   // Reserve the shared segment.
1942   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1943   if (shmid == -1) {
1944     trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1945     return NULL;
1946   }
1947 
1948   // Important note:
1949   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
1950   // We must right after attaching it remove it from the system. System V shm segments are global and
1951   // survive the process.
1952   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
1953 
1954   struct shmid_ds shmbuf;
1955   memset(&shmbuf, 0, sizeof(shmbuf));
1956   shmbuf.shm_pagesize = 64*K;
1957   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
1958     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
1959                size / (64*K), errno);
1960     // I want to know if this ever happens.
1961     assert(false, "failed to set page size for shmat");
1962   }
1963 
1964   // Now attach the shared segment.
1965   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
1966   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
1967   // were not a segment boundary.
1968   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
1969   const int errno_shmat = errno;
1970 
1971   // (A) Right after shmat and before handing shmat errors delete the shm segment.
1972   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
1973     trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
1974     assert(false, "failed to remove shared memory segment!");
1975   }
1976 
1977   // Handle shmat error. If we failed to attach, just return.
1978   if (addr == (char*)-1) {
1979     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
1980     return NULL;
1981   }
1982 
1983   // Just for info: query the real page size. In case setting the page size did not
1984   // work (see above), the system may have given us something other then 4K (LDR_CNTRL).
1985   const size_t real_pagesize = os::Aix::query_pagesize(addr);
1986   if (real_pagesize != shmbuf.shm_pagesize) {
1987     trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
1988   }
1989 
1990   if (addr) {
1991     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
1992       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
1993   } else {
1994     if (requested_addr != NULL) {
1995       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
1996     } else {
1997       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
1998     }
1999   }
2000 
2001   // book-keeping
2002   vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
2003   assert0(is_aligned_to(addr, os::vm_page_size()));
2004 
2005   return addr;
2006 }
2007 
2008 static bool release_shmated_memory(char* addr, size_t size) {
2009 
2010   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2011     addr, addr + size - 1);
2012 
2013   bool rc = false;
2014 
2015   // TODO: is there a way to verify shm size without doing bookkeeping?
2016   if (::shmdt(addr) != 0) {
2017     trcVerbose("error (%d).", errno);
2018   } else {
2019     trcVerbose("ok.");
2020     rc = true;
2021   }
2022   return rc;
2023 }
2024 
2025 static bool uncommit_shmated_memory(char* addr, size_t size) {
2026   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2027     addr, addr + size - 1);
2028 
2029   const bool rc = my_disclaim64(addr, size);
2030 
2031   if (!rc) {
2032     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2033     return false;
2034   }
2035   return true;
2036 }
2037 
2038 ////////////////////////////////  mmap-based routines /////////////////////////////////
2039 
2040 // Reserve memory via mmap.
2041 // If <requested_addr> is given, an attempt is made to attach at the given address.
2042 // Failing that, memory is allocated at any address.
2043 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2044 // allocate at an address aligned with the given alignment. Failing that, memory
2045 // is aligned anywhere.
2046 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2047   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2048     "alignment_hint " UINTX_FORMAT "...",
2049     bytes, requested_addr, alignment_hint);
2050 
2051   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2052   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
2053     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2054     return NULL;
2055   }
2056 
2057   // We must prevent anyone from attaching too close to the
2058   // BRK because that may cause malloc OOM.
2059   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2060     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2061       "Will attach anywhere.", requested_addr);
2062     // Act like the OS refused to attach there.
2063     requested_addr = NULL;
2064   }
2065 
2066   // Specify one or the other but not both.
2067   assert0(!(requested_addr != NULL && alignment_hint > 0));
2068 
2069   // In 64K mode, we claim the global page size (os::vm_page_size())
2070   // is 64K. This is one of the few points where that illusion may
2071   // break, because mmap() will always return memory aligned to 4K. So
2072   // we must ensure we only ever return memory aligned to 64k.
2073   if (alignment_hint) {
2074     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2075   } else {
2076     alignment_hint = os::vm_page_size();
2077   }
2078 
2079   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2080   const size_t size = align_up(bytes, os::vm_page_size());
2081 
2082   // alignment: Allocate memory large enough to include an aligned range of the right size and
2083   // cut off the leading and trailing waste pages.
2084   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2085   const size_t extra_size = size + alignment_hint;
2086 
2087   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2088   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2089   int flags = MAP_ANONYMOUS | MAP_SHARED;
2090 
2091   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2092   // it means if wishaddress is given but MAP_FIXED is not set.
2093   //
2094   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2095   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2096   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2097   // get clobbered.
2098   if (requested_addr != NULL) {
2099     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2100       flags |= MAP_FIXED;
2101     }
2102   }
2103 
2104   char* addr = (char*)::mmap(requested_addr, extra_size,
2105       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2106 
2107   if (addr == MAP_FAILED) {
2108     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2109     return NULL;
2110   }
2111 
2112   // Handle alignment.
2113   char* const addr_aligned = align_up(addr, alignment_hint);
2114   const size_t waste_pre = addr_aligned - addr;
2115   char* const addr_aligned_end = addr_aligned + size;
2116   const size_t waste_post = extra_size - waste_pre - size;
2117   if (waste_pre > 0) {
2118     ::munmap(addr, waste_pre);
2119   }
2120   if (waste_post > 0) {
2121     ::munmap(addr_aligned_end, waste_post);
2122   }
2123   addr = addr_aligned;
2124 
2125   if (addr) {
2126     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2127       addr, addr + bytes, bytes);
2128   } else {
2129     if (requested_addr != NULL) {
2130       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2131     } else {
2132       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2133     }
2134   }
2135 
2136   // bookkeeping
2137   vmembk_add(addr, size, 4*K, VMEM_MAPPED);
2138 
2139   // Test alignment, see above.
2140   assert0(is_aligned_to(addr, os::vm_page_size()));
2141 
2142   return addr;
2143 }
2144 
2145 static bool release_mmaped_memory(char* addr, size_t size) {
2146   assert0(is_aligned_to(addr, os::vm_page_size()));
2147   assert0(is_aligned_to(size, os::vm_page_size()));
2148 
2149   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2150     addr, addr + size - 1);
2151   bool rc = false;
2152 
2153   if (::munmap(addr, size) != 0) {
2154     trcVerbose("failed (%d)\n", errno);
2155     rc = false;
2156   } else {
2157     trcVerbose("ok.");
2158     rc = true;
2159   }
2160 
2161   return rc;
2162 }
2163 
2164 static bool uncommit_mmaped_memory(char* addr, size_t size) {
2165 
2166   assert0(is_aligned_to(addr, os::vm_page_size()));
2167   assert0(is_aligned_to(size, os::vm_page_size()));
2168 
2169   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2170     addr, addr + size - 1);
2171   bool rc = false;
2172 
2173   // Uncommit mmap memory with msync MS_INVALIDATE.
2174   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2175     trcVerbose("failed (%d)\n", errno);
2176     rc = false;
2177   } else {
2178     trcVerbose("ok.");
2179     rc = true;
2180   }
2181 
2182   return rc;
2183 }
2184 
// Global VM page size, established during os::init (4K or 64K mode,
// see reserve_mmaped_memory above).
int os::vm_page_size() {
  // Seems redundant as all get out.
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}

// Aix allocates memory by pages.
int os::vm_allocation_granularity() {
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}
2196 
// Print extra diagnostics when a commit fails. Compiled only into PRODUCT
// builds; the caller wraps the call in PRODUCT_ONLY(...) to match.
#ifdef PRODUCT
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
          os::errno_name(err), err);
}
#endif
2205 
// Commit memory, or terminate the VM with an out-of-memory error ('mesg')
// if the commit fails.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    // Add extra info in product mode for vm_exit_out_of_memory():
    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}
2215 
// Commit [addr, addr + size). The range must be page aligned and lie inside
// a previously reserved area (checked against the vmembk bookkeeping).
// Always returns true.
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {

  assert(is_aligned_to(addr, os::vm_page_size()),
    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    p2i(addr), os::vm_page_size());
  assert(is_aligned_to(size, os::vm_page_size()),
    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    size, os::vm_page_size());

  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);
  vmi->assert_is_valid_subrange(addr, size);

  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);

  if (UseExplicitCommit) {
    // AIX commits memory on touch. So, touch all pages to be committed.
    // Write a zero byte into each 4K page of the range.
    for (char* p = addr; p < (addr + size); p += 4*K) {
      *p = '\0';
    }
  }

  return true;
}

// Variant with alignment hint; the hint is ignored on AIX.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
  return pd_commit_memory(addr, size, exec);
}
2244 
// Variant with alignment hint; delegates to the three-argument overload.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // Alignment_hint is ignored on this OS.
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
2251 
// Uncommit [addr, addr + size). The range must be page aligned and lie inside
// a bookkept area; dispatches to the shmat or mmap flavor as appropriate.
bool os::pd_uncommit_memory(char* addr, size_t size) {
  assert(is_aligned_to(addr, os::vm_page_size()),
    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    p2i(addr), os::vm_page_size());
  assert(is_aligned_to(size, os::vm_page_size()),
    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    size, os::vm_page_size());

  // Dynamically do different things for mmap/shmat.
  const vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);
  vmi->assert_is_valid_subrange(addr, size);

  if (vmi->type == VMEM_SHMATED) {
    return uncommit_shmated_memory(addr, size);
  } else {
    return uncommit_mmaped_memory(addr, size);
  }
}
2271 
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}

// No-op on AIX.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

// No-op on AIX.
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

// NUMA hooks are no-ops: the NUMA queries below report a trivial
// single-node topology.
void os::numa_make_global(char *addr, size_t bytes) {
}

void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}

bool os::numa_topology_changed() {
  return false;
}
2299 
// Trivial NUMA topology: exactly one group with id 0.
size_t os::numa_get_groups_num() {
  return 1;
}

int os::numa_get_group_id() {
  return 0;
}

// Report the single leaf group (id 0) if the caller provided room for it.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (size > 0) {
    ids[0] = 0;
    return 1;
  }
  return 0;
}

// Page info queries are not supported on AIX.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}

// Scanning is not supported; report the whole range as done.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
2323 
2324 // Reserves and attaches a shared memory segment.
2325 // Will assert if a wish address is given and could not be obtained.
2326 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2327 
2328   // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2329   // thereby clobbering old mappings at that place. That is probably
2330   // not intended, never used and almost certainly an error were it
2331   // ever be used this way (to try attaching at a specified address
2332   // without clobbering old mappings an alternate API exists,
2333   // os::attempt_reserve_memory_at()).
2334   // Instead of mimicking the dangerous coding of the other platforms, here I
2335   // just ignore the request address (release) or assert(debug).
2336   assert0(requested_addr == NULL);
2337 
2338   // Always round to os::vm_page_size(), which may be larger than 4K.
2339   bytes = align_up(bytes, os::vm_page_size());
2340   const size_t alignment_hint0 =
2341     alignment_hint ? align_up(alignment_hint, os::vm_page_size()) : 0;
2342 
2343   // In 4K mode always use mmap.
2344   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2345   if (os::vm_page_size() == 4*K) {
2346     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2347   } else {
2348     if (bytes >= Use64KPagesThreshold) {
2349       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2350     } else {
2351       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2352     }
2353   }
2354 }
2355 
// Release a previously reserved range. For shmat areas only full release
// detaches the segment; partial release just uncommits. For mmap areas any
// fully-contained subrange can be unmapped.
bool os::pd_release_memory(char* addr, size_t size) {

  // Dynamically do different things for mmap/shmat.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  size = align_up(size, os::vm_page_size());
  // NOTE(review): addr is rounded *up* here; for an unaligned addr this moves
  // the start of the released range forward - presumably callers always pass
  // page-aligned addresses; confirm.
  addr = align_up(addr, os::vm_page_size());

  bool rc = false;
  bool remove_bookkeeping = false;
  if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use memory anymore (bust still page
    //   table space).
    vmi->assert_is_valid_subrange(addr, size);
    if (addr == vmi->addr && size == vmi->size) {
      rc = release_shmated_memory(addr, size);
      remove_bookkeeping = true;
    } else {
      rc = uncommit_shmated_memory(addr, size);
    }
  } else {
    // User may unmap partial regions but region has to be fully contained.
#ifdef ASSERT
    vmi->assert_is_valid_subrange(addr, size);
#endif
    rc = release_mmaped_memory(addr, size);
    remove_bookkeeping = true;
  }

  // update bookkeeping
  if (rc && remove_bookkeeping) {
    vmembk_remove(vmi);
  }

  return rc;
}
2397 
// mprotect() wrapper that verifies the protection actually took effect,
// because AIX may report success without changing anything (see below).
// Returns true if the protection was (verifiably) applied.
static bool checked_mprotect(char* addr, size_t size, int prot) {

  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
  // not tell me if protection failed when trying to protect an un-protectable range.
  //
  // This means if the memory was allocated using shmget/shmat, protection wont work
  // but mprotect will still return 0:
  //
  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm

  bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;

  if (!rc) {
    const char* const s_errno = os::errno_name(errno);
    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
    return false;
  }

  // mprotect success check
  //
  // Mprotect said it changed the protection but can I believe it?
  //
  // To be sure I need to check the protection afterwards. Try to
  // read from protected memory and check whether that causes a segfault.
  //
  if (!os::Aix::xpg_sus_mode()) {

    if (CanUseSafeFetch32()) {

      // If both SafeFetch32 probes return their recovery value, the read
      // faulted, i.e. the page is read-protected.
      const bool read_protected =
        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;

      if (prot & PROT_READ) {
        rc = !read_protected;
      } else {
        rc = read_protected;
      }

      if (!rc) {
        if (os::Aix::on_pase()) {
          // There is an issue on older PASE systems where mprotect() will return success but the
          // memory will not be protected.
          // This has nothing to do with the problem of using mproect() on SPEC1170 incompatible
          // machines; we only see it rarely, when using mprotect() to protect the guard page of
          // a stack. It is an OS error.
          //
          // A valid strategy is just to try again. This usually works. :-/

          ::usleep(1000);
          if (::mprotect(addr, size, prot) == 0) {
            // NOTE(review): read_protected_2 is computed but never examined;
            // presumably rc should be re-derived from it as above instead of
            // being set to true unconditionally - confirm upstream intent.
            const bool read_protected_2 =
              (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
              SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
            rc = true;
          }
        }
      }
    }
  }

  assert(rc == true, "mprotect failed.");

  return rc;
}
2463 
// Set protections specified
// Translate the platform-independent ProtType to mprotect() flags and apply.
bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return checked_mprotect(addr, size, p);
}

// Make [addr, addr + size) inaccessible (guard it).
bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}

// Restore full access to a previously guarded range.
bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}
2486 
2487 // Large page support
2488 
2489 static size_t _large_page_size = 0;
2490 
2491 // Enable large page support if OS allows that.
2492 void os::large_page_init() {
2493   return; // Nothing to do. See query_multipage_support and friends.
2494 }
2495 
2496 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2497   // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
2498   // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
2499   // so this is not needed.
2500   assert(false, "should not be called on AIX");
2501   return NULL;
2502 }
2503 
bool os::release_memory_special(char* base, size_t bytes) {
  // Unreachable on AIX: reserve_memory_special() asserts and never hands out
  // memory (see above), so there is never anything special to release here.
  Unimplemented();
  return false;
}
2509 
2510 size_t os::large_page_size() {
2511   return _large_page_size;
2512 }
2513 
2514 bool os::can_commit_large_page_memory() {
2515   // Does not matter, we do not support huge pages.
2516   return false;
2517 }
2518 
2519 bool os::can_execute_large_page_memory() {
2520   // Does not matter, we do not support huge pages.
2521   return false;
2522 }
2523 
2524 // Reserve memory at an arbitrary address, only if that area is
2525 // available (and not reserved for something else).
2526 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2527   char* addr = NULL;
2528 
2529   // Always round to os::vm_page_size(), which may be larger than 4K.
2530   bytes = align_up(bytes, os::vm_page_size());
2531 
2532   // In 4K mode always use mmap.
2533   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2534   if (os::vm_page_size() == 4*K) {
2535     return reserve_mmaped_memory(bytes, requested_addr, 0);
2536   } else {
2537     if (bytes >= Use64KPagesThreshold) {
2538       return reserve_shmated_memory(bytes, requested_addr, 0);
2539     } else {
2540       return reserve_mmaped_memory(bytes, requested_addr, 0);
2541     }
2542   }
2543 
2544   return addr;
2545 }
2546 
2547 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2548   return ::read(fd, buf, nBytes);
2549 }
2550 
2551 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2552   return ::pread(fd, buf, nBytes, offset);
2553 }
2554 
2555 void os::naked_short_sleep(jlong ms) {
2556   struct timespec req;
2557 
2558   assert(ms < 1000, "Un-interruptable sleep, short time use only");
2559   req.tv_sec = 0;
2560   if (ms > 0) {
2561     req.tv_nsec = (ms % 1000) * 1000000;
2562   }
2563   else {
2564     req.tv_nsec = 1;
2565   }
2566 
2567   nanosleep(&req, NULL);
2568 
2569   return;
2570 }
2571 
2572 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2573 void os::infinite_sleep() {
2574   while (true) {    // sleep forever ...
2575     ::sleep(100);   // ... 100 seconds at a time
2576   }
2577 }
2578 
// Used to convert frequent JVM_Yield() to nops
bool os::dont_yield() {
  // Controlled by the DontYieldALot VM flag.
  return DontYieldALot;
}
2583 
2584 void os::naked_yield() {
2585   sched_yield();
2586 }
2587 
2588 ////////////////////////////////////////////////////////////////////////////////
2589 // thread priority support
2590 
2591 // From AIX manpage to pthread_setschedparam
2592 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2593 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2594 //
2595 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2596 // range from 40 to 80, where 40 is the least favored priority and 80
2597 // is the most favored."
2598 //
2599 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2600 // scheduling there; however, this still leaves iSeries.)
2601 //
2602 // We use the same values for AIX and PASE.
// Maps Java thread priority (the array index) to an AIX SCHED_OTHER
// priority in the documented 40..80 range (higher = more favored); see
// the comment block above for the source of that range.
int os::java_to_os_priority[CriticalPriority + 1] = {
  54,             // 0 Entry should never be used

  55,             // 1 MinPriority
  55,             // 2
  56,             // 3

  56,             // 4
  57,             // 5 NormPriority
  57,             // 6

  58,             // 7
  58,             // 8
  59,             // 9 NearMaxPriority

  60,             // 10 MaxPriority

  60              // 11 CriticalPriority
};
2622 
2623 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2624   if (!UseThreadPriorities) return OS_OK;
2625   pthread_t thr = thread->osthread()->pthread_id();
2626   int policy = SCHED_OTHER;
2627   struct sched_param param;
2628   param.sched_priority = newpri;
2629   int ret = pthread_setschedparam(thr, policy, &param);
2630 
2631   if (ret != 0) {
2632     trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2633         (int)thr, newpri, ret, os::errno_name(ret));
2634   }
2635   return (ret == 0) ? OS_OK : OS_ERR;
2636 }
2637 
2638 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2639   if (!UseThreadPriorities) {
2640     *priority_ptr = java_to_os_priority[NormPriority];
2641     return OS_OK;
2642   }
2643   pthread_t thr = thread->osthread()->pthread_id();
2644   int policy = SCHED_OTHER;
2645   struct sched_param param;
2646   int ret = pthread_getschedparam(thr, &policy, &param);
2647   *priority_ptr = param.sched_priority;
2648 
2649   return (ret == 0) ? OS_OK : OS_ERR;
2650 }
2651 
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}  // Intentionally a no-op on AIX.
2655 
2656 ////////////////////////////////////////////////////////////////////////////////
2657 // suspend/resume support
2658 
2659 //  the low-level signal-based suspend/resume support is a remnant from the
2660 //  old VM-suspension that used to be for java-suspension, safepoints etc,
2661 //  within hotspot. Now there is a single use-case for this:
2662 //    - calling get_thread_pc() on the VMThread by the flat-profiler task
2663 //      that runs in the watcher thread.
2664 //  The remaining code is greatly simplified from the more general suspension
2665 //  code that used to be used.
2666 //
2667 //  The protocol is quite simple:
2668 //  - suspend:
2669 //      - sends a signal to the target thread
2670 //      - polls the suspend state of the osthread using a yield loop
2671 //      - target thread signal handler (SR_handler) sets suspend state
2672 //        and blocks in sigsuspend until continued
2673 //  - resume:
2674 //      - sets target osthread state to continue
2675 //      - sends signal to end the sigsuspend loop in the SR_handler
2676 //
2677 //  Note that the SR_lock plays no role in this suspend/resume protocol,
2678 //  but is checked for NULL in SR_handler as a thread termination indicator.
2679 //
2680 
2681 static void resume_clear_context(OSThread *osthread) {
2682   osthread->set_ucontext(NULL);
2683   osthread->set_siginfo(NULL);
2684 }
2685 
2686 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2687   osthread->set_ucontext(context);
2688   osthread->set_siginfo(siginfo);
2689 }
2690 
2691 //
2692 // Handler function invoked when a thread's execution is suspended or
2693 // resumed. We have to be careful that only async-safe functions are
2694 // called here (Note: most pthread functions are not async safe and
2695 // should be avoided.)
2696 //
2697 // Note: sigwait() is a more natural fit than sigsuspend() from an
2698 // interface point of view, but sigwait() prevents the signal hander
2699 // from being run. libpthread would get very confused by not having
2700 // its signal handlers run and prevents sigwait()'s use with the
2701 // mutex granting granting signal.
2702 //
2703 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
2704 //
// Signal handler for SR_signum; drives the target thread's side of the
// suspend/resume protocol described above. Runs in signal context, so only
// async-safe calls are made here.
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current_or_null_safe();
  assert(thread != NULL, "Missing current thread in SR_handler");

  // On some systems we have seen signal delivery get "stuck" until the signal
  // mask is changed as part of thread termination. Check that the current thread
  // has not already terminated (via SR_lock()) - else the following assertion
  // will fail because the thread is no longer a JavaThread as the ~JavaThread
  // destructor has completed.

  if (thread->SR_lock() == NULL) {
    return;
  }

  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  OSThread* osthread = thread->osthread();

  // Dispatch on the suspend/resume state machine. Only SUSPEND_REQUEST does
  // real work; the other states mean the request raced with a cancel/wakeup.
  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    // Publish our context so the requester can sample it (e.g. for PC).
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      // wait here until we are resumed
      while (1) {
        // sigsuspend atomically unblocks SR_signum and sleeps; a resume
        // notification re-enters this handler and may flip the state.
        sigsuspend(&suspend_set);

        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    ShouldNotReachHere();
  }

  errno = old_errno;
}
2767 
// One-time setup for the suspend/resume mechanism: picks SR_signum
// (overridable via _JAVA_SR_SIGNUM) and installs SR_handler for it.
// Returns 0 on success, -1 if sigaction() failed.
static int SR_initialize() {
  struct sigaction act;
  char *s;
  // Get signal number to use for suspend/resume
  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
    int sig = ::strtol(s, 0, 10);
    if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
        sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
      SR_signum = sig;
    } else {
      warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
              sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
    }
  }

  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");

  sigemptyset(&SR_sigset);
  sigaddset(&SR_sigset, SR_signum);

  // Set up signal handler for suspend/resume.
  act.sa_flags = SA_RESTART|SA_SIGINFO;
  // NOTE(review): SA_SIGINFO is set but the three-argument SR_handler is
  // stored through sa_handler with a cast - presumably sa_handler and
  // sa_sigaction share storage in AIX's struct sigaction; confirm on AIX.
  act.sa_handler = (void (*)(int)) SR_handler;

  // SR_signum is blocked by default.
  // (SIG_BLOCK with a NULL set only queries the current mask into sa_mask.)
  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);

  if (sigaction(SR_signum, &act, 0) == -1) {
    return -1;
  }

  // Save signal flag
  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
  return 0;
}
2804 
// Counterpart to SR_initialize(); there is nothing to tear down.
static int SR_finalize() {
  return 0;
}
2808 
2809 static int sr_notify(OSThread* osthread) {
2810   int status = pthread_kill(osthread->pthread_id(), SR_signum);
2811   assert_status(status == 0, status, "pthread_kill");
2812   return status;
2813 }
2814 
// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
// Outer iteration bound used by do_suspend()/do_resume() before giving up.
static const int RANDOMLY_LARGE_INTEGER = 1000000;
// Inner yield-loop bound between state re-checks.
static const int RANDOMLY_LARGE_INTEGER2 = 100;
2820 
// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
// Requester side of the suspend protocol: flips the state to
// SUSPEND_REQUEST, signals the target, then spins until the target's
// SR_handler acknowledges with SUSPENDED (or the request is cancelled).
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    // Yield in short bursts, re-checking the state between yields.
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::naked_yield();
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        // Cancelled before the target saw the request.
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        // Target suspended concurrently with the cancel attempt.
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
2873 
// Requester side of the resume protocol: flips the state to WAKEUP_REQUEST
// and signals the target repeatedly until its SR_handler reports RUNNING.
static void do_resume(OSThread* osthread) {
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  // Keep re-sending the signal: the first one may be consumed while the
  // target is still inside its sigsuspend loop without observing the state.
  while (!osthread->sr.is_running()) {
    if (sr_notify(osthread) == 0) {
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::naked_yield();
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
2897 
2898 ///////////////////////////////////////////////////////////////////////////////////
2899 // signal handling (except suspend/resume)
2900 
2901 // This routine may be used by user applications as a "hook" to catch signals.
2902 // The user-defined signal handler must pass unrecognized signals to this
2903 // routine, and if it returns true (non-zero), then the signal handler must
2904 // return immediately. If the flag "abort_if_unrecognized" is true, then this
2905 // routine will never retun false (zero), but instead will execute a VM panic
2906 // routine kill the process.
2907 //
2908 // If this routine returns false, it is OK to call it again. This allows
2909 // the user-defined signal handler to perform checks either before or after
2910 // the VM performs its own checks. Naturally, the user code would be making
2911 // a serious error if it tried to handle an exception (such as a null check
2912 // or breakpoint) that the VM was generating for its own correct operation.
2913 //
2914 // This routine may recognize any of the following kinds of signals:
2915 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2916 // It should be consulted by handlers for any of those signals.
2917 //
2918 // The caller of this routine must pass in the three arguments supplied
2919 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
2920 // field of the structure passed to sigaction(). This routine assumes that
2921 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2922 //
2923 // Note that the VM will print warnings if it detects conflicting signal
2924 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2925 //
2926 extern "C" JNIEXPORT int
2927 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2928 
// Set thread signal mask (for some reason on AIX sigthreadmask() seems
// to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same).
// Returns true on success (rc 0), false otherwise.
bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  const int rc = ::pthread_sigmask(how, set, oset);
  // Return value semantics differ slightly for the error case:
  // pthread_sigmask returns the error number, sigthreadmask returns -1 and
  // sets the global errno (so pthread_sigmask is more threadsafe for error
  // handling). But success is always 0.
  return rc == 0;
}
2940 
2941 // Function to unblock all signals which are, according
2942 // to POSIX, typical program error signals. If they happen while being blocked,
2943 // they typically will bring down the process immediately.
2944 bool unblock_program_error_signals() {
2945   sigset_t set;
2946   ::sigemptyset(&set);
2947   ::sigaddset(&set, SIGILL);
2948   ::sigaddset(&set, SIGBUS);
2949   ::sigaddset(&set, SIGFPE);
2950   ::sigaddset(&set, SIGSEGV);
2951   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2952 }
2953 
2954 // Renamed from 'signalHandler' to avoid collision with other shared libs.
2955 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2956   assert(info != NULL && uc != NULL, "it must be old kernel");
2957 
2958   // Never leave program error signals blocked;
2959   // on all our platforms they would bring down the process immediately when
2960   // getting raised while being blocked.
2961   unblock_program_error_signals();
2962 
2963   int orig_errno = errno;  // Preserve errno value over signal handler.
2964   JVM_handle_aix_signal(sig, info, uc, true);
2965   errno = orig_errno;
2966 }
2967 
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining
// Handlers that were installed before the VM's own, saved by
// save_preinstalled_handler(); 'sigs' flags which sigact[] slots are valid.
struct sigaction sigact[NSIG];
sigset_t sigs;
bool os::Aix::libjsig_is_loaded = false;
// Signature of libjsig's JVM_get_signal_action lookup routine.
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Aix::get_signal_action = NULL;
2978 
2979 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2980   struct sigaction *actp = NULL;
2981 
2982   if (libjsig_is_loaded) {
2983     // Retrieve the old signal handler from libjsig
2984     actp = (*get_signal_action)(sig);
2985   }
2986   if (actp == NULL) {
2987     // Retrieve the preinstalled signal handler from jvm
2988     actp = get_preinstalled_handler(sig);
2989   }
2990 
2991   return actp;
2992 }
2993 
// Invoke the previously-installed handler described by 'actp' for 'sig',
// honoring its SA_NODEFER / SA_SIGINFO / SA_RESETHAND flags and its signal
// mask. Returns true if the signal was handled (or deliberately ignored),
// false if the VM should treat it as an unexpected exception.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automaticlly block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    // Honor SA_RESETHAND: the old handler wanted to be one-shot.
    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}
3038 
3039 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3040   bool chained = false;
3041   // signal-chaining
3042   if (UseSignalChaining) {
3043     struct sigaction *actp = get_chained_signal_action(sig);
3044     if (actp != NULL) {
3045       chained = call_chained_handler(actp, sig, siginfo, context);
3046     }
3047   }
3048   return chained;
3049 }
3050 
3051 size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
3052   // Creating guard page is very expensive. Java thread has HotSpot
3053   // guard pages, only enable glibc guard page for non-Java threads.
3054   // (Remember: compiler thread is a Java thread, too!)
3055   //
3056   // Aix can have different page sizes for stack (4K) and heap (64K).
3057   // As Hotspot knows only one page size, we assume the stack has
3058   // the same page size as the heap. Returning page_size() here can
3059   // cause 16 guard pages which we want to avoid.  Thus we return 4K
3060   // which will be rounded to the real page size by the OS.
3061   return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : 4 * K);
3062 }
3063 
3064 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3065   if (sigismember(&sigs, sig)) {
3066     return &sigact[sig];
3067   }
3068   return NULL;
3069 }
3070 
3071 void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3072   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3073   sigact[sig] = oldAct;
3074   sigaddset(&sigs, sig);
3075 }
3076 
// for diagnostic
// Per-signal sa_flags the VM used when installing its handler; compared
// against the currently-installed flags by the periodic signal checks.
int sigflags[NSIG];
3079 
3080 int os::Aix::get_our_sigflags(int sig) {
3081   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3082   return sigflags[sig];
3083 }
3084 
3085 void os::Aix::set_our_sigflags(int sig, int flags) {
3086   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3087   if (sig > 0 && sig < NSIG) {
3088     sigflags[sig] = flags;
3089   }
3090 }
3091 
// Install (set_installed == true) or reset to SIG_DFL (false) the VM's
// handler for 'sig'. Detects pre-existing foreign handlers and either
// leaves them alone (AllowUserSignalHandlers), saves them for chaining
// (UseSignalChaining), or aborts.
void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  // Normalize the old handler: sa_sigaction and sa_handler are
  // interchangeable here, we only need an address to compare.
  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    // A foreign handler is installed for this signal.
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal("Encountered unexpected pre-existing sigaction handler "
            "%#lx for signal %d.", (long)oldhand, sig);
    }
  }

  struct sigaction sigAct;
  // Block all signals while our handler runs.
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Re-read the handler that sigaction() replaced and verify nobody raced
  // us installing a different one between the query above and now.
  void* oldhand2 = oldAct.sa_sigaction
                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
3138 
// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
// Idempotent: only the first call does any work. Brackets the handler
// installation with libjsig's begin/end callbacks when libjsig is present.
void os::Aix::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    // Probe for libjsig by looking up its marker symbol.
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      // NOTE(review): end_signal_setting / get_signal_action are not
      // NULL-checked; presumably libjsig always exports all three symbols
      // together - confirm, otherwise the calls below could fault.
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers.
      (*begin_signal_setting)();
    }

    // Install the VM's handler for every signal HotSpot relies on.
    ::sigemptyset(&sigs);
    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers.
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // Need to initialize check_signal_done.
      ::sigemptyset(&check_signal_done);
    }
  }
}
3195 
// Render a human-readable name for 'handler' into 'buf': the basename of
// the containing library if known, otherwise the raw address. Returns buf.
static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    // The way os::dll_address_to_library_name is implemented on Aix
    // right now, it always returns -1 for the offset which is not
    // terribly informative.
    // Will fix that. For now, omit the offset.
    // NOTE(review): p1 points into buf, so this snprintf copies within its
    // own destination buffer - presumably safe with jio_snprintf for a
    // forward "%s" copy, but worth confirming.
    jio_snprintf(buf, buflen, "%s", p1);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}
3216 
// Print one line describing the currently-installed handler for 'sig':
// handler name, sa_mask, sa_flags, and a warning if the flags no longer
// match what the VM installed. 'buf' is scratch space of size 'buflen'.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;
  // Query only; NULL for the new action leaves the handler untouched.
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  // Pick whichever union member is active per SA_SIGINFO.
  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // The handler may have been reset by VMError; if so, report the
  // handler/flags as they were before the reset.
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  // Print textual representation of sa_flags.
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Check for flags, reset system-used one!
    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
    }
  }
  st->cr();
}
3263 
// Probe the installed handler for 'sig' unless a mismatch was already
// reported (check_signal_handler records such signals in check_signal_done).
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Aix::check_signal_handler(sig)
3267 
3268 // This method is a periodic task to check for misbehaving JNI applications
3269 // under CheckJNI, we can add any periodic checks here
3270 
3271 void os::run_periodic_checks() {
3272 
3273   if (check_signals == false) return;
3274 
3275   // SEGV and BUS if overridden could potentially prevent
3276   // generation of hs*.log in the event of a crash, debugging
3277   // such a case can be very challenging, so we absolutely
3278   // check the following for a good measure:
3279   DO_SIGNAL_CHECK(SIGSEGV);
3280   DO_SIGNAL_CHECK(SIGILL);
3281   DO_SIGNAL_CHECK(SIGFPE);
3282   DO_SIGNAL_CHECK(SIGBUS);
3283   DO_SIGNAL_CHECK(SIGPIPE);
3284   DO_SIGNAL_CHECK(SIGXFSZ);
3285   if (UseSIGTRAP) {
3286     DO_SIGNAL_CHECK(SIGTRAP);
3287   }
3288 
3289   // ReduceSignalUsage allows the user to override these handlers
3290   // see comments at the very top and jvm_solaris.h
3291   if (!ReduceSignalUsage) {
3292     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3293     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3294     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3295     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3296   }
3297 
3298   DO_SIGNAL_CHECK(SR_signum);
3299 }
3300 
// Signature of the libc sigaction(2) entry point.
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// Resolved lazily via dlsym(RTLD_DEFAULT, "sigaction") in
// check_signal_handler() so the check bypasses any interposed sigaction.
static os_sigaction_t os_sigaction = NULL;
3304 
3305 void os::Aix::check_signal_handler(int sig) {
3306   char buf[O_BUFLEN];
3307   address jvmHandler = NULL;
3308 
3309   struct sigaction act;
3310   if (os_sigaction == NULL) {
3311     // only trust the default sigaction, in case it has been interposed
3312     os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
3313     if (os_sigaction == NULL) return;
3314   }
3315 
3316   os_sigaction(sig, (struct sigaction*)NULL, &act);
3317 
3318   address thisHandler = (act.sa_flags & SA_SIGINFO)
3319     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3320     : CAST_FROM_FN_PTR(address, act.sa_handler);
3321 
3322   switch(sig) {
3323   case SIGSEGV:
3324   case SIGBUS:
3325   case SIGFPE:
3326   case SIGPIPE:
3327   case SIGILL:
3328   case SIGXFSZ:
3329     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3330     break;
3331 
3332   case SHUTDOWN1_SIGNAL:
3333   case SHUTDOWN2_SIGNAL:
3334   case SHUTDOWN3_SIGNAL:
3335   case BREAK_SIGNAL:
3336     jvmHandler = (address)user_handler();
3337     break;
3338 
3339   default:
3340     if (sig == SR_signum) {
3341       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3342     } else {
3343       return;
3344     }
3345     break;
3346   }
3347 
3348   if (thisHandler != jvmHandler) {
3349     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3350     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3351     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3352     // No need to check this sig any longer
3353     sigaddset(&check_signal_done, sig);
3354     // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
3355     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3356       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3357                     exception_name(sig, buf, O_BUFLEN));
3358     }
3359   } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3360     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3361     tty->print("expected:");
3362     os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
3363     tty->cr();
3364     tty->print("  found:");
3365     os::Posix::print_sa_flags(tty, act.sa_flags);
3366     tty->cr();
3367     // No need to check this sig any longer
3368     sigaddset(&check_signal_done, sig);
3369   }
3370 
3371   // Dump all the signal
3372   if (sigismember(&check_signal_done, sig)) {
3373     print_signal_handlers(tty, buf, O_BUFLEN);
3374   }
3375 }
3376 
// To install functions for atexit system call
extern "C" {
  // Plain C trampoline so perfMemory_exit() can be registered with atexit().
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3383 
// This is called _before_ the most of global arguments have been parsed.
// Performs early platform initialization: OS recognition (AIX vs. PASE),
// page size setup, library initialization, and basic system properties.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (Shared memory boundary is supposed to be a 256M aligned.)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // Record process break at startup.
  g_brk_at_startup = (address) ::sbrk(0);
  assert(g_brk_at_startup != (address) -1, "sbrk failed");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc).
  os::Aix::scan_environment();

  // Probe multipage support.
  query_multipage_support();

  // Act like we only have one page size by eliminating corner cases which
  // we did not support very well anyway.
  // We have two input conditions:
  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
  //    setting.
  //    Data segment page size is important for us because it defines the thread stack page
  //    size, which is needed for guard page handling, stack banging etc.
  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
  //    and should be allocated with 64k pages.
  //
  // So, we do the following:
  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
  // 64k          no              --- AIX 5.2 ? ---
  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)

  // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if stack page size is 64k you cannot pretend its 4k).

  if (g_multipage_support.datapsize == 4*K) {
    // datapsize = 4K. Data segment, thread stacks are 4K paged.
    if (g_multipage_support.can_use_64K_pages) {
      // .. but we are able to use 64K pages dynamically.
      // This would be typical for java launchers which are not linked
      // with datapsize=64K (like, any other launcher but our own).
      //
      // In this case it would be smart to allocate the java heap with 64K
      // to get the performance benefit, and to fake 64k pages for the
      // data segment (when dealing with thread stacks).
      //
      // However, leave a possibility to downgrade to 4K, using
      // -XX:-Use64KPages.
      if (Use64KPages) {
        trcVerbose("64K page mode (faked for data segment)");
        Aix::_page_size = 64*K;
      } else {
        trcVerbose("4K page mode (Use64KPages=off)");
        Aix::_page_size = 4*K;
      }
    } else {
      // .. and not able to allocate 64k pages dynamically. Here, just
      // fall back to 4K paged mode and use mmap for everything.
      trcVerbose("4K page mode");
      Aix::_page_size = 4*K;
      FLAG_SET_ERGO(bool, Use64KPages, false);
    }
  } else {
    // datapsize = 64k. Data segment, thread stacks are 64k paged.
    // This normally means that we can allocate 64k pages dynamically.
    // (There is one special case where this may be false: EXTSHM=on.
    // but we decided to not support that mode).
    assert0(g_multipage_support.can_use_64K_pages);
    Aix::_page_size = 64*K;
    trcVerbose("64K page mode");
    FLAG_SET_ERGO(bool, Use64KPages, true);
  }

  // For now UseLargePages is just ignored.
  FLAG_SET_ERGO(bool, UseLargePages, false);
  _page_sizes[0] = 0;

  // debug trace
  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));

  // Next, we need to initialize libo4 and libperfstat libraries.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  init_random(1234567);

  ThreadCritical::initialize();

  // Main_thread points to the aboriginal thread.
  Aix::_main_thread = pthread_self();

  initial_time_count = os::elapsed_counter();

  os::Posix::init();
}
3499 
3500 // This is called _after_ the global arguments have been parsed.
3501 jint os::init_2(void) {
3502 
3503   os::Posix::init_2();
3504 
3505   if (os::Aix::on_pase()) {
3506     trcVerbose("Running on PASE.");
3507   } else {
3508     trcVerbose("Running on AIX (not PASE).");
3509   }
3510 
3511   trcVerbose("processor count: %d", os::_processor_count);
3512   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3513 
3514   // Initially build up the loaded dll map.
3515   LoadedLibraries::reload();
3516   if (Verbose) {
3517     trcVerbose("Loaded Libraries: ");
3518     LoadedLibraries::print(tty);
3519   }
3520 
3521   const int page_size = Aix::page_size();
3522   const int map_size = page_size;
3523 
3524   address map_address = (address) MAP_FAILED;
3525   const int prot  = PROT_READ;
3526   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3527 
3528   // Use optimized addresses for the polling page,
3529   // e.g. map it to a special 32-bit address.
3530   if (OptimizePollingPageLocation) {
3531     // architecture-specific list of address wishes:
3532     address address_wishes[] = {
3533       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3534       // PPC64: all address wishes are non-negative 32 bit values where
3535       // the lower 16 bits are all zero. we can load these addresses
3536       // with a single ppc_lis instruction.
3537       (address) 0x30000000, (address) 0x31000000,
3538       (address) 0x32000000, (address) 0x33000000,
3539       (address) 0x40000000, (address) 0x41000000,
3540       (address) 0x42000000, (address) 0x43000000,
3541       (address) 0x50000000, (address) 0x51000000,
3542       (address) 0x52000000, (address) 0x53000000,
3543       (address) 0x60000000, (address) 0x61000000,
3544       (address) 0x62000000, (address) 0x63000000
3545     };
3546     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3547 
3548     // iterate over the list of address wishes:
3549     for (int i=0; i<address_wishes_length; i++) {
3550       // Try to map with current address wish.
3551       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3552       // fail if the address is already mapped.
3553       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3554                                      map_size, prot,
3555                                      flags | MAP_FIXED,
3556                                      -1, 0);
3557       trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",
3558                    address_wishes[i], map_address + (ssize_t)page_size);
3559 
3560       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3561         // Map succeeded and map_address is at wished address, exit loop.
3562         break;
3563       }
3564 
3565       if (map_address != (address) MAP_FAILED) {
3566         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3567         ::munmap(map_address, map_size);
3568         map_address = (address) MAP_FAILED;
3569       }
3570       // Map failed, continue loop.
3571     }
3572   } // end OptimizePollingPageLocation
3573 
3574   if (map_address == (address) MAP_FAILED) {
3575     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3576   }
3577   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3578   os::set_polling_page(map_address);
3579 
3580   if (!UseMembar) {
3581     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3582     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3583     os::set_memory_serialize_page(mem_serialize_page);
3584 
3585     trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
3586         mem_serialize_page, mem_serialize_page + Aix::page_size(),
3587         Aix::page_size(), Aix::page_size());
3588   }
3589 
3590   // initialize suspend/resume support - must do this before signal_sets_init()
3591   if (SR_initialize() != 0) {
3592     perror("SR_initialize failed");
3593     return JNI_ERR;
3594   }
3595 
3596   Aix::signal_sets_init();
3597   Aix::install_signal_handlers();
3598 
3599   // Check and sets minimum stack sizes against command line options
3600   if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
3601     return JNI_ERR;
3602   }
3603 
3604   if (UseNUMA) {
3605     UseNUMA = false;
3606     warning("NUMA optimizations are not available on this OS.");
3607   }
3608 
3609   if (MaxFDLimit) {
3610     // Set the number of file descriptors to max. print out error
3611     // if getrlimit/setrlimit fails but continue regardless.
3612     struct rlimit nbr_files;
3613     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3614     if (status != 0) {
3615       log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
3616     } else {
3617       nbr_files.rlim_cur = nbr_files.rlim_max;
3618       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3619       if (status != 0) {
3620         log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
3621       }
3622     }
3623   }
3624 
3625   if (PerfAllowAtExitRegistration) {
3626     // Only register atexit functions if PerfAllowAtExitRegistration is set.
3627     // At exit functions can be delayed until process exit time, which
3628     // can be problematic for embedded VM situations. Embedded VMs should
3629     // call DestroyJavaVM() to assure that VM resources are released.
3630 
3631     // Note: perfMemory_exit_helper atexit function may be removed in
3632     // the future if the appropriate cleanup code can be added to the
3633     // VM_Exit VMOperation's doit method.
3634     if (atexit(perfMemory_exit_helper) != 0) {
3635       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3636     }
3637   }
3638 
3639   return JNI_OK;
3640 }
3641 
// Mark the polling page as unreadable, so that a safepoint poll traps.
void os::make_polling_page_unreadable(void) {
  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
    fatal("Could not disable polling page");
  }
};
3648 
// Mark the polling page as readable again (safepoint over).
void os::make_polling_page_readable(void) {
  // Changed according to os_linux.cpp.
  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
    fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
  }
};
3656 
3657 int os::active_processor_count() {
3658   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3659   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3660   return online_cpus;
3661 }
3662 
3663 void os::set_native_thread_name(const char *name) {
3664   // Not yet implemented.
3665   return;
3666 }
3667 
3668 bool os::distribute_processes(uint length, uint* distribution) {
3669   // Not yet implemented.
3670   return false;
3671 }
3672 
3673 bool os::bind_to_processor(uint processor_id) {
3674   // Not yet implemented.
3675   return false;
3676 }
3677 
// Suspend the target thread, run the task body on its context, then resume.
// If the suspend fails, the task body is not run.
void os::SuspendedThreadTask::internal_do_task() {
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    do_resume(_thread->osthread());
  }
}
3685 
// A SuspendedThreadTask that samples the program counter of a thread while
// it is suspended (used by os::get_thread_pc below).
class PcFetcher : public os::SuspendedThreadTask {
public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  // Returns the sampled pc; only valid once the task has completed.
  ExtendedPC result();
protected:
  void do_task(const os::SuspendedThreadTaskContext& context);
private:
  ExtendedPC _epc;  // pc captured while the target thread was suspended
};

ExtendedPC PcFetcher::result() {
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}

// Runs while the target thread is suspended: extract the pc from the
// thread's saved ucontext.
void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  Thread* thread = context.thread();
  OSThread* osthread = thread->osthread();
  if (osthread->ucontext() != NULL) {
    _epc = os::Aix::ucontext_get_pc((const ucontext_t *) context.ucontext());
  } else {
    // NULL context is unexpected, double-check this is the VMThread.
    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  }
}
3711 
// Suspends the target using the signal mechanism and then grabs the PC before
// resuming the target. Used by the flat-profiler only
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher for the VMThread.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
  assert(thread->is_VM_thread(), "Can only be called for VMThread");

  PcFetcher fetcher(thread);
  fetcher.run();
  return fetcher.result();
}
3723 
3724 ////////////////////////////////////////////////////////////////////////////////
3725 // debug support
3726 
// Print the path of the loaded library (if any) containing 'addr' (as text
// or data address) to 'st'. Returns true if a library was found.
bool os::find(address addr, outputStream* st) {

  st->print(PTR_FORMAT ": ", addr);

  loaded_module_t lm;
  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
      LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
    st->print_cr("%s", lm.path);
    return true;
  }

  return false;
}
3740 
3741 ////////////////////////////////////////////////////////////////////////////////
3742 // misc
3743 
3744 // This does not do anything on Aix. This is basically a hook for being
3745 // able to use structured exception handling (thread-local exception filters)
3746 // on, e.g., Win32.
// Pass-through invocation of the java call; no structured exception handling
// is needed on AIX (see the comment above).
void
os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
                         JavaCallArguments* args, Thread* thread) {
  f(value, method, args, thread);
}
3752 
// No platform statistics to print on AIX.
void os::print_statistics() {
}
3755 
3756 bool os::message_box(const char* title, const char* message) {
3757   int i;
3758   fdStream err(defaultStream::error_fd());
3759   for (i = 0; i < 78; i++) err.print_raw("=");
3760   err.cr();
3761   err.print_raw_cr(title);
3762   for (i = 0; i < 78; i++) err.print_raw("-");
3763   err.cr();
3764   err.print_raw_cr(message);
3765   for (i = 0; i < 78; i++) err.print_raw("=");
3766   err.cr();
3767 
3768   char buf[16];
3769   // Prevent process from exiting upon "read error" without consuming all CPU
3770   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3771 
3772   return buf[0] == 'y' || buf[0] == 'Y';
3773 }
3774 
3775 int os::stat(const char *path, struct stat *sbuf) {
3776   char pathbuf[MAX_PATH];
3777   if (strlen(path) > MAX_PATH - 1) {
3778     errno = ENAMETOOLONG;
3779     return -1;
3780   }
3781   os::native_path(strcpy(pathbuf, path));
3782   return ::stat(pathbuf, sbuf);
3783 }
3784 
3785 // Is a (classpath) directory empty?
3786 bool os::dir_is_empty(const char* path) {
3787   DIR *dir = NULL;
3788   struct dirent *ptr;
3789 
3790   dir = opendir(path);
3791   if (dir == NULL) return true;
3792 
3793   /* Scan the directory */
3794   bool result = true;
3795   char buf[sizeof(struct dirent) + MAX_PATH];
3796   while (result && (ptr = ::readdir(dir)) != NULL) {
3797     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3798       result = false;
3799     }
3800   }
3801   closedir(dir);
3802   return result;
3803 }
3804 
3805 // This code originates from JDK's sysOpen and open64_w
3806 // from src/solaris/hpi/src/system_md.c
3807 
// Open 'path' with the given flags and mode. Rejects over-long paths
// (ENAMETOOLONG) and directories (EISDIR), and marks the descriptor
// close-on-exec. Returns the fd, or -1 with errno set.
int os::open(const char *path, int oflag, int mode) {

  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;

  // Use the 64-bit open so files > 2G work in 32-bit processes as well.
  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  // If the open succeeded, the file might still be a directory.
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        // Directories may not be opened through os::open.
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      // fstat failed - do not hand out a descriptor we cannot vouch for.
      ::close(fd);
      return -1;
    }
  }

  // All file descriptors that are opened in the JVM and not
  // specifically destined for a subprocess should have the
  // close-on-exec flag set. If we don't set it, then careless 3rd
  // party native code might fork and exec without closing all
  // appropriate file descriptors (e.g. as we do in closeDescriptors in
  // UNIXProcess.c), and this in turn might:
  //
  // - cause end-of-file to fail to be detected on some file
  //   descriptors, resulting in mysterious hangs, or
  //
  // - might cause an fopen in the subprocess to fail on a system
  //   suffering from bug 1085341.
  //
  // (Yes, the default setting of the close-on-exec flag is a Unix
  // design flaw.)
  //
  // See:
  // 1085341: 32-bit stdio routines should support file descriptors >255
  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
#ifdef FD_CLOEXEC
  {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1)
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif

  return fd;
}
3867 
3868 // create binary file, rewriting existing file if required
3869 int os::create_binary_file(const char* path, bool rewrite_existing) {
3870   int oflags = O_WRONLY | O_CREAT;
3871   if (!rewrite_existing) {
3872     oflags |= O_EXCL;
3873   }
3874   return ::open64(path, oflags, S_IREAD | S_IWRITE);
3875 }
3876 
3877 // return current position of file pointer
3878 jlong os::current_file_offset(int fd) {
3879   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3880 }
3881 
3882 // move file pointer to the specified offset
3883 jlong os::seek_to_file_offset(int fd, jlong offset) {
3884   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3885 }
3886 
3887 // This code originates from JDK's sysAvailable
3888 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3889 
// Store in *bytes how many bytes can be read from 'fd' without blocking
// (FIONREAD for character/pipe/socket fds, file-size minus position
// otherwise). Returns 1 on success, 0 on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      // Non-seekable fd: ask the driver how much is buffered.
      int n;
      if (::ioctl(fd, FIONREAD, &n) >= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  // Regular file (or FIONREAD unsupported): compute end - current position.
  // Note the SEEK_END probe must be undone by the final SEEK_SET.
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}
3915 
3916 // Map a block of memory.
3917 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3918                         char *addr, size_t bytes, bool read_only,
3919                         bool allow_exec) {
3920   int prot;
3921   int flags = MAP_PRIVATE;
3922 
3923   if (read_only) {
3924     prot = PROT_READ;
3925     flags = MAP_SHARED;
3926   } else {
3927     prot = PROT_READ | PROT_WRITE;
3928     flags = MAP_PRIVATE;
3929   }
3930 
3931   if (allow_exec) {
3932     prot |= PROT_EXEC;
3933   }
3934 
3935   if (addr != NULL) {
3936     flags |= MAP_FIXED;
3937   }
3938 
3939   // Allow anonymous mappings if 'fd' is -1.
3940   if (fd == -1) {
3941     flags |= MAP_ANONYMOUS;
3942   }
3943 
3944   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
3945                                      fd, file_offset);
3946   if (mapped_address == MAP_FAILED) {
3947     return NULL;
3948   }
3949   return mapped_address;
3950 }
3951 
3952 // Remap a block of memory.
3953 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
3954                           char *addr, size_t bytes, bool read_only,
3955                           bool allow_exec) {
3956   // same as map_memory() on this OS
3957   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
3958                         allow_exec);
3959 }
3960 
3961 // Unmap a block of memory.
3962 bool os::pd_unmap_memory(char* addr, size_t bytes) {
3963   return munmap(addr, bytes) == 0;
3964 }
3965 
3966 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
3967 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
3968 // of a thread.
3969 //
3970 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
3971 // the fast estimate available on the platform.
3972 
// CPU time (user + sys) of the current thread, in nanoseconds.
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
  assert(n >= 0, "negative CPU time");
  return n;
}
3979 
// CPU time (user + sys) of the given thread, in nanoseconds.
jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns
  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
  assert(n >= 0, "negative CPU time");
  return n;
}
3986 
// CPU time of the current thread; user+sys or user-only per the flag.
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  assert(n >= 0, "negative CPU time");
  return n;
}
3992 
3993 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
3994   bool error = false;
3995 
3996   jlong sys_time = 0;
3997   jlong user_time = 0;
3998 
3999   // Reimplemented using getthrds64().
4000   //
4001   // Works like this:
4002   // For the thread in question, get the kernel thread id. Then get the
4003   // kernel thread statistics using that id.
4004   //
4005   // This only works of course when no pthread scheduling is used,
4006   // i.e. there is a 1:1 relationship to kernel threads.
4007   // On AIX, see AIXTHREAD_SCOPE variable.
4008 
4009   pthread_t pthtid = thread->osthread()->pthread_id();
4010 
4011   // retrieve kernel thread id for the pthread:
4012   tid64_t tid = 0;
4013   struct __pthrdsinfo pinfo;
4014   // I just love those otherworldly IBM APIs which force me to hand down
4015   // dummy buffers for stuff I dont care for...
4016   char dummy[1];
4017   int dummy_size = sizeof(dummy);
4018   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4019                           dummy, &dummy_size) == 0) {
4020     tid = pinfo.__pi_tid;
4021   } else {
4022     tty->print_cr("pthread_getthrds_np failed.");
4023     error = true;
4024   }
4025 
4026   // retrieve kernel timing info for that kernel thread
4027   if (!error) {
4028     struct thrdentry64 thrdentry;
4029     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4030       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4031       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4032     } else {
4033       tty->print_cr("pthread_getthrds_np failed.");
4034       error = true;
4035     }
4036   }
4037 
4038   if (p_sys_time) {
4039     *p_sys_time = sys_time;
4040   }
4041 
4042   if (p_user_time) {
4043     *p_user_time = user_time;
4044   }
4045 
4046   if (error) {
4047     return false;
4048   }
4049 
4050   return true;
4051 }
4052 
4053 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4054   jlong sys_time;
4055   jlong user_time;
4056 
4057   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4058     return -1;
4059   }
4060 
4061   return user_sys_cpu_time ? sys_time + user_time : user_time;
4062 }
4063 
4064 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4065   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4066   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4067   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4068   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4069 }
4070 
4071 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4072   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4073   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4074   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4075   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4076 }
4077 
// Per-thread CPU time is always available on AIX (via getthrds64, see above).
bool os::is_thread_cpu_time_supported() {
  return true;
}
4081 
4082 // System loadavg support. Returns -1 if load average cannot be obtained.
4083 // For now just return the system wide load average (no processor sets).
4084 int os::loadavg(double values[], int nelem) {
4085 
4086   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4087   guarantee(values, "argument error");
4088 
4089   if (os::Aix::on_pase()) {
4090 
4091     // AS/400 PASE: use libo4 porting library
4092     double v[3] = { 0.0, 0.0, 0.0 };
4093 
4094     if (libo4::get_load_avg(v, v + 1, v + 2)) {
4095       for (int i = 0; i < nelem; i ++) {
4096         values[i] = v[i];
4097       }
4098       return nelem;
4099     } else {
4100       return -1;
4101     }
4102 
4103   } else {
4104 
4105     // AIX: use libperfstat
4106     libperfstat::cpuinfo_t ci;
4107     if (libperfstat::get_cpuinfo(&ci)) {
4108       for (int i = 0; i < nelem; i++) {
4109         values[i] = ci.loadavg[i];
4110       }
4111     } else {
4112       return -1;
4113     }
4114     return nelem;
4115   }
4116 }
4117 
4118 void os::pause() {
4119   char filename[MAX_PATH];
4120   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4121     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4122   } else {
4123     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4124   }
4125 
4126   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4127   if (fd != -1) {
4128     struct stat buf;
4129     ::close(fd);
4130     while (::stat(filename, &buf) == 0) {
4131       (void)::poll(NULL, 0, 100);
4132     }
4133   } else {
4134     trcVerbose("Could not open pause file '%s', continuing immediately.", filename);
4135   }
4136 }
4137 
4138 bool os::Aix::is_primordial_thread() {
4139   if (pthread_self() == (pthread_t)1) {
4140     return true;
4141   } else {
4142     return false;
4143   }
4144 }
4145 
4146 // OS recognitions (PASE/AIX, OS level) call this before calling any
4147 // one of Aix::on_pase(), Aix::os_version() static
4148 void os::Aix::initialize_os_info() {
4149 
4150   assert(_on_pase == -1 && _os_version == 0, "already called.");
4151 
4152   struct utsname uts;
4153   memset(&uts, 0, sizeof(uts));
4154   strcpy(uts.sysname, "?");
4155   if (::uname(&uts) == -1) {
4156     trcVerbose("uname failed (%d)", errno);
4157     guarantee(0, "Could not determine whether we run on AIX or PASE");
4158   } else {
4159     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4160                "node \"%s\" machine \"%s\"\n",
4161                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4162     const int major = atoi(uts.version);
4163     assert(major > 0, "invalid OS version");
4164     const int minor = atoi(uts.release);
4165     assert(minor > 0, "invalid OS release");
4166     _os_version = (major << 24) | (minor << 16);
4167     char ver_str[20] = {0};
4168     char *name_str = "unknown OS";
4169     if (strcmp(uts.sysname, "OS400") == 0) {
4170       // We run on AS/400 PASE. We do not support versions older than V5R4M0.
4171       _on_pase = 1;
4172       if (os_version_short() < 0x0504) {
4173         trcVerbose("OS/400 releases older than V5R4M0 not supported.");
4174         assert(false, "OS/400 release too old.");
4175       }
4176       name_str = "OS/400 (pase)";
4177       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
4178     } else if (strcmp(uts.sysname, "AIX") == 0) {
4179       // We run on AIX. We do not support versions older than AIX 5.3.
4180       _on_pase = 0;
4181       // Determine detailed AIX version: Version, Release, Modification, Fix Level.
4182       odmWrapper::determine_os_kernel_version(&_os_version);
4183       if (os_version_short() < 0x0503) {
4184         trcVerbose("AIX release older than AIX 5.3 not supported.");
4185         assert(false, "AIX release too old.");
4186       }
4187       name_str = "AIX";
4188       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
4189                    major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
4190     } else {
4191       assert(false, name_str);
4192     }
4193     trcVerbose("We run on %s %s", name_str, ver_str);
4194   }
4195 
4196   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4197 } // end: os::Aix::initialize_os_info()
4198 
// Scan the environment for important settings which might affect the VM.
// Trace out settings. Warn about invalid settings and/or correct them.
//
// Must run after os::Aix::initialize_os_info().
4203 void os::Aix::scan_environment() {
4204 
4205   char* p;
4206   int rc;
4207 
4208   // Warn explicity if EXTSHM=ON is used. That switch changes how
4209   // System V shared memory behaves. One effect is that page size of
4210   // shared memory cannot be change dynamically, effectivly preventing
4211   // large pages from working.
4212   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4213   // recommendation is (in OSS notes) to switch it off.
4214   p = ::getenv("EXTSHM");
4215   trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
4216   if (p && strcasecmp(p, "ON") == 0) {
4217     _extshm = 1;
4218     trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
4219     if (!AllowExtshm) {
4220       // We allow under certain conditions the user to continue. However, we want this
4221       // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
4222       // that the VM is not able to allocate 64k pages for the heap.
4223       // We do not want to run with reduced performance.
4224       vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
4225     }
4226   } else {
4227     _extshm = 0;
4228   }
4229 
4230   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4231   // Not tested, not supported.
4232   //
4233   // Note that it might be worth the trouble to test and to require it, if only to
4234   // get useful return codes for mprotect.
4235   //
4236   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4237   // exec() ? before loading the libjvm ? ....)
4238   p = ::getenv("XPG_SUS_ENV");
4239   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4240   if (p && strcmp(p, "ON") == 0) {
4241     _xpg_sus_mode = 1;
4242     trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
4243     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4244     // clobber address ranges. If we ever want to support that, we have to do some
4245     // testing first.
4246     guarantee(false, "XPG_SUS_ENV=ON not supported");
4247   } else {
4248     _xpg_sus_mode = 0;
4249   }
4250 
4251   if (os::Aix::on_pase()) {
4252     p = ::getenv("QIBM_MULTI_THREADED");
4253     trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
4254   }
4255 
4256   p = ::getenv("LDR_CNTRL");
4257   trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
4258   if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
4259     if (p && ::strstr(p, "TEXTPSIZE")) {
4260       trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
4261         "you may experience hangs or crashes on OS/400 V7R1.");
4262     }
4263   }
4264 
4265   p = ::getenv("AIXTHREAD_GUARDPAGES");
4266   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4267 
4268 } // end: os::Aix::scan_environment()
4269 
4270 // PASE: initialize the libo4 library (PASE porting library).
4271 void os::Aix::initialize_libo4() {
4272   guarantee(os::Aix::on_pase(), "OS/400 only.");
4273   if (!libo4::init()) {
4274     trcVerbose("libo4 initialization failed.");
4275     assert(false, "libo4 initialization failed");
4276   } else {
4277     trcVerbose("libo4 initialized.");
4278   }
4279 }
4280 
4281 // AIX: initialize the libperfstat library.
4282 void os::Aix::initialize_libperfstat() {
4283   assert(os::Aix::on_aix(), "AIX only");
4284   if (!libperfstat::init()) {
4285     trcVerbose("libperfstat initialization failed.");
4286     assert(false, "libperfstat initialization failed");
4287   } else {
4288     trcVerbose("libperfstat initialized.");
4289   }
4290 }
4291 
4292 /////////////////////////////////////////////////////////////////////////////
4293 // thread stack
4294 
4295 // Get the current stack base from the OS (actually, the pthread library).
4296 // Note: usually not page aligned.
4297 address os::current_stack_base() {
4298   AixMisc::stackbounds_t bounds;
4299   bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4300   guarantee(rc, "Unable to retrieve stack bounds.");
4301   return bounds.base;
4302 }
4303 
4304 // Get the current stack size from the OS (actually, the pthread library).
4305 // Returned size is such that (base - size) is always aligned to page size.
4306 size_t os::current_stack_size() {
4307   AixMisc::stackbounds_t bounds;
4308   bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4309   guarantee(rc, "Unable to retrieve stack bounds.");
4310   // Align the returned stack size such that the stack low address
4311   // is aligned to page size (Note: base is usually not and we do not care).
4312   // We need to do this because caller code will assume stack low address is
4313   // page aligned and will place guard pages without checking.
4314   address low = bounds.base - bounds.size;
4315   address low_aligned = (address)align_up(low, os::vm_page_size());
4316   size_t s = bounds.base - low_aligned;
4317   return s;
4318 }
4319 
4320 extern char** environ;
4321 
4322 // Run the specified command in a separate process. Return its exit value,
4323 // or -1 on failure (e.g. can't fork a new process).
4324 // Unlike system(), this function can be called from signal handler. It
4325 // doesn't block SIGINT et al.
4326 int os::fork_and_exec(char* cmd) {
4327   char * argv[4] = {"sh", "-c", cmd, NULL};
4328 
4329   pid_t pid = fork();
4330 
4331   if (pid < 0) {
4332     // fork failed
4333     return -1;
4334 
4335   } else if (pid == 0) {
4336     // child process
4337 
4338     // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4339     execve("/usr/bin/sh", argv, environ);
4340 
4341     // execve failed
4342     _exit(-1);
4343 
4344   } else {
4345     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4346     // care about the actual exit code, for now.
4347 
4348     int status;
4349 
4350     // Wait for the child process to exit. This returns immediately if
4351     // the child has already exited. */
4352     while (waitpid(pid, &status, 0) < 0) {
4353       switch (errno) {
4354         case ECHILD: return 0;
4355         case EINTR: break;
4356         default: return -1;
4357       }
4358     }
4359 
4360     if (WIFEXITED(status)) {
4361       // The child exited normally; get its exit code.
4362       return WEXITSTATUS(status);
4363     } else if (WIFSIGNALED(status)) {
4364       // The child exited because of a signal.
4365       // The best value to return is 0x80 + signal number,
4366       // because that is what all Unix shells do, and because
4367       // it allows callers to distinguish between process exit and
4368       // process death by signal.
4369       return 0x80 + WTERMSIG(status);
4370     } else {
4371       // Unknown exit code; pass it through.
4372       return status;
4373     }
4374   }
4375   return -1;
4376 }
4377 
4378 // is_headless_jre()
4379 //
4380 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
4381 // in order to report if we are running in a headless jre.
4382 //
4383 // Since JDK8 xawt/libmawt.so is moved into the same directory
4384 // as libawt.so, and renamed libawt_xawt.so
4385 bool os::is_headless_jre() {
4386   struct stat statbuf;
4387   char buf[MAXPATHLEN];
4388   char libmawtpath[MAXPATHLEN];
4389   const char *xawtstr = "/xawt/libmawt.so";
4390   const char *new_xawtstr = "/libawt_xawt.so";
4391 
4392   char *p;
4393 
4394   // Get path to libjvm.so
4395   os::jvm_path(buf, sizeof(buf));
4396 
4397   // Get rid of libjvm.so
4398   p = strrchr(buf, '/');
4399   if (p == NULL) return false;
4400   else *p = '\0';
4401 
4402   // Get rid of client or server
4403   p = strrchr(buf, '/');
4404   if (p == NULL) return false;
4405   else *p = '\0';
4406 
4407   // check xawt/libmawt.so
4408   strcpy(libmawtpath, buf);
4409   strcat(libmawtpath, xawtstr);
4410   if (::stat(libmawtpath, &statbuf) == 0) return false;
4411 
4412   // check libawt_xawt.so
4413   strcpy(libmawtpath, buf);
4414   strcat(libmawtpath, new_xawtstr);
4415   if (::stat(libmawtpath, &statbuf) == 0) return false;
4416 
4417   return true;
4418 }
4419 
4420 // Get the default path to the core file
4421 // Returns the length of the string
4422 int os::get_core_path(char* buffer, size_t bufferSize) {
4423   const char* p = get_current_directory(buffer, bufferSize);
4424 
4425   if (p == NULL) {
4426     assert(p != NULL, "failed to get current directory");
4427     return 0;
4428   }
4429 
4430   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4431                                                p, current_process_id());
4432 
4433   return strlen(buffer);
4434 }
4435 
#ifndef PRODUCT
// Placeholder: reserve-memory-special has no platform-specific tests on AIX.
void TestReserveMemorySpecial_test() {}
#endif
4441 
4442 bool os::start_debugging(char *buf, int buflen) {
4443   int len = (int)strlen(buf);
4444   char *p = &buf[len];
4445 
4446   jio_snprintf(p, buflen -len,
4447                  "\n\n"
4448                  "Do you want to debug the problem?\n\n"
4449                  "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
4450                  "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
4451                  "Otherwise, press RETURN to abort...",
4452                  os::current_process_id(),
4453                  os::current_thread_id(), thread_self());
4454 
4455   bool yes = os::message_box("Unexpected Error", buf);
4456 
4457   if (yes) {
4458     // yes, user asked VM to launch debugger
4459     jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
4460 
4461     os::fork_and_exec(buf);
4462     yes = false;
4463   }
4464   return yes;
4465 }
4466 
4467 static inline time_t get_mtime(const char* filename) {
4468   struct stat st;
4469   int ret = os::stat(filename, &st);
4470   assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
4471   return st.st_mtime;
4472 }
4473 
4474 int os::compare_file_modified_times(const char* file1, const char* file2) {
4475   time_t t1 = get_mtime(file1);
4476   time_t t2 = get_mtime(file2);
4477   return t1 - t2;
4478 }