/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// According to the AIX OS doc, #pragma alloca must be used
// with the C++ compiler before referencing the function alloca().
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "logging/log.hpp"
#include "libo4.hpp"
#include "libperfstat_aix.hpp"
#include "libodm_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "misc_aix.hpp"
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// Missing prototypes for various system APIs.
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

#if !defined(_AIXVERSION_610)
extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
extern "C" int getargs   (procsinfo*, int, char*, int);
#endif

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_support.error')
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// Query dimensions of the stack of the calling thread.
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
static address resolve_function_descriptor_to_code_pointer(address p);

static void vmembk_print_on(outputStream* os);

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;

pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;

// -1 = uninitialized, 0 if AIX, 1 if OS/400 PASE
int       os::Aix::_on_pase = -1;

// 0 = uninitialized, otherwise 32 bit number:
//  0xVVRRTTSS
//  VV - major version
//  RR - minor version
//  TT - tech level, if known, 0 otherwise
//  SS - service pack, if known, 0 otherwise
uint32_t  os::Aix::_os_version = 0;
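// (For example, following the 0xVVRRTTSS scheme above, AIX 7.1 TL 4 SP 2
// would be encoded as 0x07010402.)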

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_xpg_sus_mode = -1;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_extshm = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;

// Process break recorded at startup.
static address g_brk_at_startup = NULL;

// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
// therefore should not be defined in the AIX class.
//
// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
// latter two (16M "large" and 16G "huge" pages) require special
// setup and are normally not available.
//
// AIX supports multiple page sizes per process, for:
//  - Stack (of the primordial thread, so not relevant for us)
//  - Data - data, bss, heap, for us also pthread stacks
//  - Text - program text
//  - Shared memory
//
// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
// and via the environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
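// For example, a rough sketch (LDR_CNTRL suboptions are separated by '@'):
//   LDR_CNTRL=DATAPSIZE=64K@TEXTPSIZE=64K@SHMPSIZE=64K <java command>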
//
// For shared memory, page size can be set dynamically via
// shmctl(). Different shared memory regions can have different page
// sizes.
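//
// A rough sketch of the shmctl() approach (error handling omitted; shmid is
// assumed to come from a preceding shmget() call - see
// query_multipage_support() below for the real thing):
//   struct shmid_ds ds = { 0 };
//   ds.shm_pagesize = 64*K;
//   ::shmctl(shmid, SHM_PAGESIZE, &ds); // must happen before the first shmat()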
//
// More information can be found at the IBM info center:
//   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
//
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};

// We must not accidentally allocate memory close to the BRK - even if
// that would work - because then we prevent the BRK segment from
// growing which may result in a malloc OOM even though there is
// enough memory. The problem only arises if we shmat() or mmap() at
// a specific wish address, e.g. to place the heap in a
// compressed-oops-friendly way.
static bool is_close_to_brk(address a) {
  assert0(g_brk_at_startup != NULL);
  if (a >= g_brk_at_startup &&
      a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
    return true;
  }
  return false;
}

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  // Avoid the expensive API call here, as on PASE the returned value would always be zero anyway.
  if (os::Aix::on_pase()) {
    return 0x0LL;
  }
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return ULONG_MAX;
  }
}

julong os::physical_memory() {
  return Aix::physical_memory();
}

// Return true if the process runs with special (setuid/setgid) privileges.

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function, emulates disclaim64 using multiple 32-bit disclaims
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size a 32-bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
  const unsigned int maxDisclaimSize = 0x40000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);

  char* p = addr;

  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}

// CPU architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif

// Wrap the function "vmgetinfo" which is not available on older OS releases.
static int checked_vmgetinfo(void *out, int command, int arg) {
  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
    guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
  }
  return ::vmgetinfo(out, command, arg);
}

// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
    // AS/400 older than V6R1: no vmgetinfo here, default to 4K
    return 4*K;
  }

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    assert(false, "vmgetinfo failed to retrieve page size");
    return 4*K;
  }
}

void os::Aix::initialize_system_info() {

  // Get the number of online (logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case 4*K : return "4K";
    case 64*K: return "64K";
    case 16*M: return "16M";
    case 16*G: return "16G";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Probe OS for multipage support.
// Will fill the global g_multipage_support structure.
// Must be called before calling os::large_page_init().
static void query_multipage_support() {

  guarantee(g_multipage_support.pagesize == -1,
            "do not call twice");

  g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(g_multipage_support.pagesize == 4*K, "surprise!");

  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // The default data page size is defined either by linker options (-bdatapsize)
  // or by the environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // the default should be 4K.
  {
    void* p = ::malloc(16*M);
    g_multipage_support.datapsize = os::Aix::query_pagesize(p);
    ::free(p);
  }

  // Query default shm page size (LDR_CNTRL SHMPSIZE).
  // Note that this is pure curiosity. We do not rely on the default page size but set
  // our own page size after allocation.
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // Before querying the stack page size, make sure we are not running as primordial
  // thread (because the primordial thread's stack may have a different page size than
  // pthread thread stacks). Running a VM on the primordial thread won't work for a
  // number of reasons so we may just as well guarantee it here.
  guarantee0(!os::Aix::is_primordial_thread());

  // Query pthread stack page size. Should be the same as the data page size because
  // pthread stacks are allocated from the C-Heap.
  {
    int dummy = 0;
    g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
  }

  // Query default text page size (LDR_CNTRL TEXTPSIZE).
  {
    address any_function =
      resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
    g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
  }

  // Now probe for support of 64K pages and 16M pages.

  // Before OS/400 V6R1, there is no support for pages other than 4K.
  if (os::Aix::on_pase_V5R4_or_older()) {
    trcVerbose("OS/400 < V6R1 - no large page support.");
    g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
    goto query_multipage_support_end;
  }

  // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
      trcVerbose("disabling multipage support.");
      g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
    for (int i = 0; i < num_psizes; i++) {
      trcVerbose(" %s ", describe_pagesize(sizes[i]));
    }

    // Can we use 64K, 16M pages?
    for (int i = 0; i < num_psizes; i++) {
      const size_t pagesize = sizes[i];
      if (pagesize != 64*K && pagesize != 16*M) {
        continue;
      }
      bool can_use = false;
      trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
      const int shmid = ::shmget(IPC_PRIVATE, pagesize,
        IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee0(shmid != -1); // Should always work.
      // Try to set the page size.
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = pagesize;
      if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
        const int en = errno;
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%d", en);
      } else {
        // Attach and double-check the page size.
        void* p = ::shmat(shmid, NULL, 0);
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        guarantee0(p != (void*) -1); // Should always work.
        const size_t real_pagesize = os::Aix::query_pagesize(p);
        if (real_pagesize != pagesize) {
          trcVerbose("real page size (0x%llX) differs.", real_pagesize);
        } else {
          can_use = true;
        }
        ::shmdt(p);
      }
      trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
      if (pagesize == 64*K) {
        g_multipage_support.can_use_64K_pages = can_use;
      } else if (pagesize == 16*M) {
        g_multipage_support.can_use_16M_pages = can_use;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
      describe_pagesize(g_multipage_support.pagesize));
  trcVerbose("Data page size (C-Heap, bss, etc): %s",
      describe_pagesize(g_multipage_support.datapsize));
  trcVerbose("Text page size: %s",
      describe_pagesize(g_multipage_support.textpsize));
  trcVerbose("Thread stack page size (pthread): %s",
      describe_pagesize(g_multipage_support.pthr_stack_pagesize));
  trcVerbose("Default shared memory page size: %s",
      describe_pagesize(g_multipage_support.shmpsize));
  trcVerbose("Can use 64K pages dynamically with shared memory: %s",
      (g_multipage_support.can_use_64K_pages ? "yes" : "no"));
  trcVerbose("Can use 16M pages dynamically with shared memory: %s",
      (g_multipage_support.can_use_16M_pages ? "yes" : "no"));
  trcVerbose("Multipage error details: %d",
      g_multipage_support.error);

  // sanity checks
  assert0(g_multipage_support.pagesize == 4*K);
  assert0(g_multipage_support.datapsize == 4*K || g_multipage_support.datapsize == 64*K);
  assert0(g_multipage_support.textpsize == 4*K || g_multipage_support.textpsize == 64*K);
  assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
  assert0(g_multipage_support.shmpsize == 4*K || g_multipage_support.shmpsize == 64*K);

}

void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/lib:/usr/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';        // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On AIX we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
    return true;
  } else {
    return false;
  }
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow the debugger in.
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save the caller's signal mask before setting the VM signal mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Retrieve memory information.
// Returns false if something went wrong;
// the content of pmi is undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {
    // On PASE, use the libo4 porting library.

    unsigned long long virt_total = 0;
    unsigned long long real_total = 0;
    unsigned long long real_free = 0;
    unsigned long long pgsp_total = 0;
    unsigned long long pgsp_free = 0;
    if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
      pmi->virt_total = virt_total;
      pmi->real_total = real_total;
      pmi->real_free = real_free;
      pmi->pgsp_total = pgsp_total;
      pmi->pgsp_free = pgsp_free;
      return true;
    }
    return false;

  } else {

    // On AIX, we use the (dynamically loaded) perfstat library to retrieve memory statistics.
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset(&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads.
static void *thread_native_entry(Thread *thread) {

  // Find out my own stack dimensions.
  {
    // Actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    log_warning(os, thread)("Thread stack not in data segment.");
  }

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

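  // (The alloca() below moves this thread's hot frames by 0..7 cache lines,
  // i.e. by 0..896 bytes in 128-byte steps, based on the pid and a counter.)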
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep the kernel thread id too for diagnostics.
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  os::Aix::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->run();

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t req_stack_size) {

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object.
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Set the correct thread state.
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // Init thread attributes.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  }

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // Calculate stack size if it's not specified by caller.
  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
  pthread_attr_setstacksize(&attr, stack_size);

  // Configure libc guard page.
  pthread_attr_setguardsize(&attr, os::Aix::default_guard_size(thr_type));

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);

  char buf[64];
  if (ret == 0) {
    log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
      (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  } else {
    log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.",
      ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  }

  pthread_attr_destroy(&attr);

  if (ret != 0) {
    // Need to clean up stuff we've allocated so far.
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep the kernel thread id too for diagnostics.
  osthread->set_kernel_thread_id(kernel_thread_id);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "pthread_continue_np failed");
}

// Free OS resources related to the OSThread.
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

  // Restore caller's signal mask
  sigset_t sigmask = osthread->caller_sigmask();
  pthread_sigmask(SIG_SETMASK, &sigmask, NULL);

  delete osthread;
}

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }

double os::elapsedVTime() {
  struct rusage usage;
  int retval = getrusage(RUSAGE_THREAD, &usage);
  if (retval == 0) {
    return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
  } else {
    // better than nothing, but not much
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  seconds = jlong(time.tv_sec);
  nanos = jlong(time.tv_usec) * 1000;
}

jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {

    timeval time;
    int status = gettimeofday(&time, NULL);
    assert(status != -1, "PASE error at gettimeofday()");
    jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
    return 1000 * usecs;

  } else {
    // On AIX use the precision of the processor's real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonically increasing time.
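    // (mread_real_time() returns RTC_POWER if the result is already in real
    // time - seconds and nanoseconds - so no conversion is needed then.)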
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}

char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = os::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() {
  return (intx)pthread_self();
}

int os::current_process_id() {
  return getpid();
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.
  loaded_module_t lm;
  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
    return lm.is_in_vm;
  } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
    return lm.is_in_vm;
  } else {
    return false;
  }

}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
//   it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
//   code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
//   NULL is returned.
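// (Background: an AIX function descriptor is a triple of pointers kept in the
// data segment - the code entry point, the TOC anchor and an environment
// pointer; only the entry point slot matters here.)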
1269 static address resolve_function_descriptor_to_code_pointer(address p) {
1270 
1271   if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
1272     // It is a real code pointer.
1273     return p;
1274   } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
1275     // Pointer to data segment, potential function descriptor.
1276     address code_entry = (address)(((FunctionDescriptor*)p)->entry());
1277     if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
1278       // It is a function descriptor.
1279       return code_entry;
1280     }
1281   }
1282 
1283   return NULL;
1284 }
1285 
1286 bool os::dll_address_to_function_name(address addr, char *buf,
1287                                       int buflen, int *offset,
1288                                       bool demangle) {
1289   if (offset) {
1290     *offset = -1;
1291   }
1292   // Buf is not optional, but offset is optional.
1293   assert(buf != NULL, "sanity check");
1294   buf[0] = '\0';
1295 
1296   // Resolve function ptr literals first.
1297   addr = resolve_function_descriptor_to_code_pointer(addr);
1298   if (!addr) {
1299     return false;
1300   }
1301 
1302   return AixSymbols::get_function_name(addr, buf, buflen, offset, NULL, demangle);
1303 }
1304 
1305 bool os::dll_address_to_library_name(address addr, char* buf,
1306                                      int buflen, int* offset) {
1307   if (offset) {
1308     *offset = -1;
1309   }
1310   // Buf is not optional, but offset is optional.
1311   assert(buf != NULL, "sanity check");
1312   buf[0] = '\0';
1313 
1314   // Resolve function ptr literals first.
1315   addr = resolve_function_descriptor_to_code_pointer(addr);
1316   if (!addr) {
1317     return false;
1318   }
1319 
1320   return AixSymbols::get_module_name(addr, buf, buflen);
1321 }
1322 
1323 // Loads .dll/.so and in case of error it checks if .dll/.so was built
1324 // for the same architecture as Hotspot is running on.
1325 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1326 
1327   if (ebuf && ebuflen > 0) {
1328     ebuf[0] = '\0';
1329     ebuf[ebuflen - 1] = '\0';
1330   }
1331 
1332   if (!filename || strlen(filename) == 0) {
1333     ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1334     return NULL;
1335   }
1336 
1337   // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1338   void * result= ::dlopen(filename, RTLD_LAZY);
1339   if (result != NULL) {
1340     // Reload dll cache. Don't do this in signal handling.
1341     LoadedLibraries::reload();
1342     return result;
1343   } else {
1344     // error analysis when dlopen fails
1345     const char* const error_report = ::dlerror();
1346     if (error_report && ebuf && ebuflen > 0) {
1347       snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1348                filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1349     }
1350   }
1351   return NULL;
1352 }
1353 
1354 void* os::dll_lookup(void* handle, const char* name) {
1355   void* res = dlsym(handle, name);
1356   return res;
1357 }
1358 
1359 void* os::get_default_process_handle() {
1360   return (void*)::dlopen(NULL, RTLD_LAZY);
1361 }
1362 
1363 void os::print_dll_info(outputStream *st) {
1364   st->print_cr("Dynamic libraries:");
1365   LoadedLibraries::print(st);
1366 }
1367 
1368 void os::get_summary_os_info(char* buf, size_t buflen) {
1369   // There might be something more readable than uname results for AIX.
1370   struct utsname name;
1371   uname(&name);
1372   snprintf(buf, buflen, "%s %s", name.release, name.version);
1373 }
1374 
1375 void os::print_os_info(outputStream* st) {
1376   st->print("OS:");
1377 
1378   st->print("uname:");
1379   struct utsname name;
1380   uname(&name);
1381   st->print(name.sysname); st->print(" ");
1382   st->print(name.nodename); st->print(" ");
1383   st->print(name.release); st->print(" ");
1384   st->print(name.version); st->print(" ");
1385   st->print(name.machine);
1386   st->cr();
1387 
1388   uint32_t ver = os::Aix::os_version();
1389   st->print_cr("AIX kernel version %u.%u.%u.%u",
1390                (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1391 
1392   // rlimit
1393   st->print("rlimit:");
1394   struct rlimit rlim;
1395 
1396   st->print(" STACK ");
1397   getrlimit(RLIMIT_STACK, &rlim);
1398   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1399   else st->print("%uk", rlim.rlim_cur >> 10);
1400 
1401   st->print(", CORE ");
1402   getrlimit(RLIMIT_CORE, &rlim);
1403   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1404   else st->print("%uk", rlim.rlim_cur >> 10);
1405 
1406   st->print(", NPROC ");
1407   st->print("%d", sysconf(_SC_CHILD_MAX));
1408 
1409   st->print(", NOFILE ");
1410   getrlimit(RLIMIT_NOFILE, &rlim);
1411   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1412   else st->print("%d", rlim.rlim_cur);
1413 
1414   st->print(", AS ");
1415   getrlimit(RLIMIT_AS, &rlim);
1416   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1417   else st->print("%uk", rlim.rlim_cur >> 10);
1418 
1419   // Print limits on DATA, because it limits the C-heap.
1420   st->print(", DATA ");
1421   getrlimit(RLIMIT_DATA, &rlim);
1422   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1423   else st->print("%uk", rlim.rlim_cur >> 10);
1424   st->cr();
1425 
1426   // load average
1427   st->print("load average:");
1428   double loadavg[3] = {-1.L, -1.L, -1.L};
1429   os::loadavg(loadavg, 3);
1430   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1431   st->cr();
1432 
1433   // print wpar info
1434   libperfstat::wparinfo_t wi;
1435   if (libperfstat::get_wparinfo(&wi)) {
1436     st->print_cr("wpar info");
1437     st->print_cr("name: %s", wi.name);
1438     st->print_cr("id:   %d", wi.wpar_id);
1439     st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1440   }
1441 
1442   // print partition info
1443   libperfstat::partitioninfo_t pi;
1444   if (libperfstat::get_partitioninfo(&pi)) {
1445     st->print_cr("partition info");
1446     st->print_cr(" name: %s", pi.name);
1447   }
1448 
1449 }
1450 
1451 void os::print_memory_info(outputStream* st) {
1452 
1453   st->print_cr("Memory:");
1454 
1455   st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1456     describe_pagesize(g_multipage_support.pagesize));
1457   st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1458     describe_pagesize(g_multipage_support.datapsize));
1459   st->print_cr("  Text page size:                         %s",
1460     describe_pagesize(g_multipage_support.textpsize));
1461   st->print_cr("  Thread stack page size (pthread):       %s",
1462     describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1463   st->print_cr("  Default shared memory page size:        %s",
1464     describe_pagesize(g_multipage_support.shmpsize));
1465   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1466     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1467   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1468     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1469   st->print_cr("  Multipage error: %d",
1470     g_multipage_support.error);
1471   st->cr();
1472   st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1473 
1474   // print out LDR_CNTRL because it affects the default page sizes
1475   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1476   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1477 
1478   // Print out EXTSHM because it is an unsupported setting.
1479   const char* const extshm = ::getenv("EXTSHM");
1480   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
  if (extshm != NULL && ((strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0))) {
1482     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1483   }
1484 
1485   // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1486   const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1487   st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1488       aixthread_guardpages ? aixthread_guardpages : "<unset>");
1489 
1490   os::Aix::meminfo_t mi;
1491   if (os::Aix::get_meminfo(&mi)) {
1493     if (os::Aix::on_aix()) {
1494       st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1495       st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1496       st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1497       st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1498     } else {
1499       // PASE - Numbers are result of QWCRSSTS; they mean:
1500       // real_total: Sum of all system pools
1501       // real_free: always 0
1502       // pgsp_total: we take the size of the system ASP
1503       // pgsp_free: size of system ASP times percentage of system ASP unused
1504       st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1505       st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1506       st->print_cr("%% system asp used : " SIZE_FORMAT,
1507         mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1508     }
1510   }
1511   st->cr();
1512 
1513   // Print segments allocated with os::reserve_memory.
1514   st->print_cr("internal virtual memory regions used by vm:");
1515   vmembk_print_on(st);
1516 }
1517 
1518 // Get a string for the cpuinfo that is a summary of the cpu type
1519 void os::get_summary_cpu_info(char* buf, size_t buflen) {
  // Query perfstat for the cpu description; fall back to a generic name.
1521   libperfstat::cpuinfo_t ci;
  if (libperfstat::get_cpuinfo(&ci)) {
    strncpy(buf, ci.version, buflen);
  } else {
    strncpy(buf, "AIX", buflen);
  }
  // strncpy() does not null-terminate if the source does not fit.
  if (buflen > 0) {
    buf[buflen - 1] = '\0';
  }
1527 }
1528 
1529 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1530   st->print("CPU:");
1531   st->print("total %d", os::processor_count());
1532   // It's not safe to query number of active processors after crash.
1533   // st->print("(active %d)", os::active_processor_count());
1534   st->print(" %s", VM_Version::features());
1535   st->cr();
1536 }
1537 
1538 static void print_signal_handler(outputStream* st, int sig,
1539                                  char* buf, size_t buflen);
1540 
1541 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1542   st->print_cr("Signal Handlers:");
1543   print_signal_handler(st, SIGSEGV, buf, buflen);
1544   print_signal_handler(st, SIGBUS , buf, buflen);
1545   print_signal_handler(st, SIGFPE , buf, buflen);
1546   print_signal_handler(st, SIGPIPE, buf, buflen);
1547   print_signal_handler(st, SIGXFSZ, buf, buflen);
1548   print_signal_handler(st, SIGILL , buf, buflen);
1549   print_signal_handler(st, SR_signum, buf, buflen);
1550   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1551   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1552   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1553   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1554   print_signal_handler(st, SIGTRAP, buf, buflen);
  // We also want to know if someone else adds a SIGDANGER handler because
  // that will interfere with OOM killing.
1557   print_signal_handler(st, SIGDANGER, buf, buflen);
1558 }
1559 
1560 static char saved_jvm_path[MAXPATHLEN] = {0};
1561 
1562 // Find the full path to the current module, libjvm.so.
1563 void os::jvm_path(char *buf, jint buflen) {
1564   // Error checking.
1565   if (buflen < MAXPATHLEN) {
1566     assert(false, "must use a large-enough buffer");
1567     buf[0] = '\0';
1568     return;
1569   }
1570   // Lazy resolve the path to current module.
1571   if (saved_jvm_path[0] != 0) {
1572     strcpy(buf, saved_jvm_path);
1573     return;
1574   }
1575 
1576   Dl_info dlinfo;
1577   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1578   assert(ret != 0, "cannot locate libjvm");
1579   char* rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
1580   assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1581 
1582   strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
1583   saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
1584 }
1585 
1586 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1587   // no prefix required, not even "_"
1588 }
1589 
1590 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1591   // no suffix required
1592 }
1593 
1594 ////////////////////////////////////////////////////////////////////////////////
1595 // sun.misc.Signal support
1596 
1597 static volatile jint sigint_count = 0;
1598 
1599 static void
1600 UserHandler(int sig, void *siginfo, void *context) {
1601   // 4511530 - sem_post is serialized and handled by the manager thread. When
1602   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1603   // don't want to flood the manager thread with sem_post requests.
1604   if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1605     return;
1606 
  // If Ctrl-C is pressed during error reporting, it is likely because the
  // error handler failed to abort. Let the VM die immediately.
1609   if (sig == SIGINT && is_error_reported()) {
1610     os::die();
1611   }
1612 
1613   os::signal_notify(sig);
1614 }
1615 
1616 void* os::user_handler() {
1617   return CAST_FROM_FN_PTR(void*, UserHandler);
1618 }
1619 
1620 extern "C" {
1621   typedef void (*sa_handler_t)(int);
1622   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1623 }
1624 
1625 void* os::signal(int signal_number, void* handler) {
1626   struct sigaction sigAct, oldSigAct;
1627 
1628   sigfillset(&(sigAct.sa_mask));
1629 
1630   // Do not block out synchronous signals in the signal handler.
1631   // Blocking synchronous signals only makes sense if you can really
1632   // be sure that those signals won't happen during signal handling,
1633   // when the blocking applies. Normal signal handlers are lean and
  // do not cause signals themselves. But our signal handlers tend to be
  // "risky" - secondary SIGSEGVs, SIGILLs or SIGBUSes may and do happen.
  // On AIX (PASE) there was a case where a SIGSEGV happened, followed
1637   // by a SIGILL, which was blocked due to the signal mask. The process
1638   // just hung forever. Better to crash from a secondary signal than to hang.
1639   sigdelset(&(sigAct.sa_mask), SIGSEGV);
1640   sigdelset(&(sigAct.sa_mask), SIGBUS);
1641   sigdelset(&(sigAct.sa_mask), SIGILL);
1642   sigdelset(&(sigAct.sa_mask), SIGFPE);
1643   sigdelset(&(sigAct.sa_mask), SIGTRAP);
1644 
1645   sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1646 
1647   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1648 
1649   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1650     // -1 means registration failed
1651     return (void *)-1;
1652   }
1653 
1654   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1655 }
1656 
1657 void os::signal_raise(int signal_number) {
1658   ::raise(signal_number);
1659 }
1660 
1661 //
// The following code was moved from os.cpp to make it
// platform specific, which it is by its very nature.
1664 //
1665 
1666 // Will be modified when max signal is changed to be dynamic
1667 int os::sigexitnum_pd() {
1668   return NSIG;
1669 }
1670 
1671 // a counter for each possible signal value
1672 static volatile jint pending_signals[NSIG+1] = { 0 };
1673 
1674 // Wrapper functions for: sem_init(), sem_post(), sem_wait()
// On AIX, we use sem_init(), sem_post(), sem_wait().
// On PASE, we need to use msem_lock() and msem_unlock(), because POSIX semaphores
// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
// Note that just using the msem_..() APIs for both PASE and AIX is not an option
// either, as on AIX, msem_..() calls are suspected of causing problems.
1680 static sem_t sig_sem;
1681 static msemaphore* p_sig_msem = 0;
1682 
1683 static void local_sem_init() {
1684   if (os::Aix::on_aix()) {
1685     int rc = ::sem_init(&sig_sem, 0, 0);
1686     guarantee(rc != -1, "sem_init failed");
1687   } else {
1688     // Memory semaphores must live in shared mem.
1689     guarantee0(p_sig_msem == NULL);
1690     p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
1691     guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
1692     guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
1693   }
1694 }
1695 
1696 static void local_sem_post() {
1697   static bool warn_only_once = false;
1698   if (os::Aix::on_aix()) {
1699     int rc = ::sem_post(&sig_sem);
1700     if (rc == -1 && !warn_only_once) {
1701       trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno));
1702       warn_only_once = true;
1703     }
1704   } else {
1705     guarantee0(p_sig_msem != NULL);
1706     int rc = ::msem_unlock(p_sig_msem, 0);
1707     if (rc == -1 && !warn_only_once) {
1708       trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno));
1709       warn_only_once = true;
1710     }
1711   }
1712 }
1713 
1714 static void local_sem_wait() {
1715   static bool warn_only_once = false;
1716   if (os::Aix::on_aix()) {
1717     int rc = ::sem_wait(&sig_sem);
1718     if (rc == -1 && !warn_only_once) {
1719       trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno));
1720       warn_only_once = true;
1721     }
1722   } else {
1723     guarantee0(p_sig_msem != NULL); // must init before use
1724     int rc = ::msem_lock(p_sig_msem, 0);
1725     if (rc == -1 && !warn_only_once) {
1726       trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno));
1727       warn_only_once = true;
1728     }
1729   }
1730 }
1731 
1732 void os::signal_init_pd() {
1733   // Initialize signal structures
1734   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1735 
1736   // Initialize signal semaphore
1737   local_sem_init();
1738 }
1739 
1740 void os::signal_notify(int sig) {
1741   Atomic::inc(&pending_signals[sig]);
1742   local_sem_post();
1743 }
1744 
1745 static int check_pending_signals(bool wait) {
1746   Atomic::store(0, &sigint_count);
1747   for (;;) {
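    // Atomically claim one pending occurrence of signal i: the cmpxchg only
    // succeeds in decrementing pending_signals[i] if no other thread modified
    // it since we read n (indicated by it returning the value we read).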
1748     for (int i = 0; i < NSIG + 1; i++) {
1749       jint n = pending_signals[i];
1750       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1751         return i;
1752       }
1753     }
1754     if (!wait) {
1755       return -1;
1756     }
1757     JavaThread *thread = JavaThread::current();
1758     ThreadBlockInVM tbivm(thread);
1759 
1760     bool threadIsSuspended;
1761     do {
1762       thread->set_suspend_equivalent();
1763       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1764 
1765       local_sem_wait();
1766 
1767       // were we externally suspended while we were waiting?
1768       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1769       if (threadIsSuspended) {
1770         //
1771         // The semaphore has been incremented, but while we were waiting
1772         // another thread suspended us. We don't want to continue running
1773         // while suspended because that would surprise the thread that
1774         // suspended us.
1775         //
1776 
1777         local_sem_post();
1778 
1779         thread->java_suspend_self();
1780       }
1781     } while (threadIsSuspended);
1782   }
1783 }
1784 
1785 int os::signal_lookup() {
1786   return check_pending_signals(false);
1787 }
1788 
1789 int os::signal_wait() {
1790   return check_pending_signals(true);
1791 }
1792 
1793 ////////////////////////////////////////////////////////////////////////////////
1794 // Virtual Memory
1795 
1796 // We need to keep small simple bookkeeping for os::reserve_memory and friends.
1797 
1798 #define VMEM_MAPPED  1
1799 #define VMEM_SHMATED 2
1800 
1801 struct vmembk_t {
1802   int type;         // 1 - mmap, 2 - shmat
1803   char* addr;
1804   size_t size;      // Real size, may be larger than usersize.
1805   size_t pagesize;  // page size of area
1806   vmembk_t* next;
1807 
1808   bool contains_addr(char* p) const {
1809     return p >= addr && p < (addr + size);
1810   }
1811 
1812   bool contains_range(char* p, size_t s) const {
1813     return contains_addr(p) && contains_addr(p + s - 1);
1814   }
1815 
1816   void print_on(outputStream* os) const {
1817     os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1818       " bytes, %d %s pages), %s",
1819       addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1820       (type == VMEM_SHMATED ? "shmat" : "mmap")
1821     );
1822   }
1823 
1824   // Check that range is a sub range of memory block (or equal to memory block);
  // also check that range is fully page aligned to the page size of the block.
1826   void assert_is_valid_subrange(char* p, size_t s) const {
1827     if (!contains_range(p, s)) {
1828       trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1829               "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
1830               p, p + s, addr, addr + size);
1831       guarantee0(false);
1832     }
1833     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1834       trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1835               " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
1836       guarantee0(false);
1837     }
1838   }
1839 };
1840 
1841 static struct {
1842   vmembk_t* first;
1843   MiscUtils::CritSect cs;
1844 } vmem;
1845 
1846 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1847   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1848   assert0(p);
1849   if (p) {
1850     MiscUtils::AutoCritSect lck(&vmem.cs);
1851     p->addr = addr; p->size = size;
1852     p->pagesize = pagesize;
1853     p->type = type;
1854     p->next = vmem.first;
1855     vmem.first = p;
1856   }
1857 }
1858 
1859 static vmembk_t* vmembk_find(char* addr) {
1860   MiscUtils::AutoCritSect lck(&vmem.cs);
1861   for (vmembk_t* p = vmem.first; p; p = p->next) {
1862     if (p->addr <= addr && (p->addr + p->size) > addr) {
1863       return p;
1864     }
1865   }
1866   return NULL;
1867 }
1868 
1869 static void vmembk_remove(vmembk_t* p0) {
1870   MiscUtils::AutoCritSect lck(&vmem.cs);
1871   assert0(p0);
1872   assert0(vmem.first); // List should not be empty.
1873   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1874     if (*pp == p0) {
1875       *pp = p0->next;
1876       ::free(p0);
1877       return;
1878     }
1879   }
1880   assert0(false); // Not found?
1881 }
1882 
1883 static void vmembk_print_on(outputStream* os) {
1884   MiscUtils::AutoCritSect lck(&vmem.cs);
1885   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1886     vmi->print_on(os);
1887     os->cr();
1888   }
1889 }
1890 
1891 // Reserve and attach a section of System V memory.
1892 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1893 // address. Failing that, it will attach the memory anywhere.
1894 // If <requested_addr> is NULL, function will attach the memory anywhere.
1895 //
1896 // <alignment_hint> is being ignored by this function. It is very probable however that the
1897 // alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
1898 // Should this be not enogh, we can put more work into it.
1899 static char* reserve_shmated_memory (
1900   size_t bytes,
1901   char* requested_addr,
1902   size_t alignment_hint) {
1903 
1904   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1905     PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1906     bytes, requested_addr, alignment_hint);
1907 
1908   // Either give me wish address or wish alignment but not both.
1909   assert0(!(requested_addr != NULL && alignment_hint != 0));
1910 
1911   // We must prevent anyone from attaching too close to the
1912   // BRK because that may cause malloc OOM.
1913   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1914     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1915       "Will attach anywhere.", requested_addr);
1916     // Act like the OS refused to attach there.
1917     requested_addr = NULL;
1918   }
1919 
1920   // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1921   // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1922   if (os::Aix::on_pase_V5R4_or_older()) {
1923     ShouldNotReachHere();
1924   }
1925 
1926   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1927   const size_t size = align_size_up(bytes, 64*K);
1928 
1929   // Reserve the shared segment.
1930   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1931   if (shmid == -1) {
1932     trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1933     return NULL;
1934   }
1935 
1936   // Important note:
  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
  // We must remove it from the system right after attaching it. System V shm segments are global
  // and survive the process.
1940   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
1941 
1942   struct shmid_ds shmbuf;
1943   memset(&shmbuf, 0, sizeof(shmbuf));
1944   shmbuf.shm_pagesize = 64*K;
1945   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
1946     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
1947                size / (64*K), errno);
1948     // I want to know if this ever happens.
1949     assert(false, "failed to set page size for shmat");
1950   }
1951 
1952   // Now attach the shared segment.
1953   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
1954   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
1955   // were not a segment boundary.
1956   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
1957   const int errno_shmat = errno;
1958 
1959   // (A) Right after shmat and before handing shmat errors delete the shm segment.
1960   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
1961     trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
1962     assert(false, "failed to remove shared memory segment!");
1963   }
1964 
1965   // Handle shmat error. If we failed to attach, just return.
1966   if (addr == (char*)-1) {
1967     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
1968     return NULL;
1969   }
1970 
1971   // Just for info: query the real page size. In case setting the page size did not
  // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
1973   const size_t real_pagesize = os::Aix::query_pagesize(addr);
1974   if (real_pagesize != shmbuf.shm_pagesize) {
1975     trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
1976   }
1977 
1978   if (addr) {
1979     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
1980       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
1981   } else {
1982     if (requested_addr != NULL) {
1983       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
1984     } else {
1985       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
1986     }
1987   }
1988 
1989   // book-keeping
1990   vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
1991   assert0(is_aligned_to(addr, os::vm_page_size()));
1992 
1993   return addr;
1994 }
1995 
1996 static bool release_shmated_memory(char* addr, size_t size) {
1997 
1998   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1999     addr, addr + size - 1);
2000 
2001   bool rc = false;
2002 
2003   // TODO: is there a way to verify shm size without doing bookkeeping?
2004   if (::shmdt(addr) != 0) {
2005     trcVerbose("error (%d).", errno);
2006   } else {
2007     trcVerbose("ok.");
2008     rc = true;
2009   }
2010   return rc;
2011 }
2012 
2013 static bool uncommit_shmated_memory(char* addr, size_t size) {
2014   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2015     addr, addr + size - 1);
2016 
2017   const bool rc = my_disclaim64(addr, size);
2018 
2019   if (!rc) {
2020     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2021     return false;
2022   }
2023   return true;
2024 }
2025 
2026 ////////////////////////////////  mmap-based routines /////////////////////////////////
2027 
2028 // Reserve memory via mmap.
2029 // If <requested_addr> is given, an attempt is made to attach at the given address.
2030 // Failing that, memory is allocated at any address.
2031 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2032 // allocate at an address aligned with the given alignment. Failing that, memory
// is allocated anywhere.
2034 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2035   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2036     "alignment_hint " UINTX_FORMAT "...",
2037     bytes, requested_addr, alignment_hint);
2038 
2039   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
  if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size())) {
2041     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2042     return NULL;
2043   }
2044 
2045   // We must prevent anyone from attaching too close to the
2046   // BRK because that may cause malloc OOM.
2047   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2048     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2049       "Will attach anywhere.", requested_addr);
2050     // Act like the OS refused to attach there.
2051     requested_addr = NULL;
2052   }
2053 
2054   // Specify one or the other but not both.
2055   assert0(!(requested_addr != NULL && alignment_hint > 0));
2056 
2057   // In 64K mode, we claim the global page size (os::vm_page_size())
2058   // is 64K. This is one of the few points where that illusion may
2059   // break, because mmap() will always return memory aligned to 4K. So
2060   // we must ensure we only ever return memory aligned to 64k.
2061   if (alignment_hint) {
2062     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2063   } else {
2064     alignment_hint = os::vm_page_size();
2065   }
2066 
2067   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2068   const size_t size = align_size_up(bytes, os::vm_page_size());
2069 
2070   // alignment: Allocate memory large enough to include an aligned range of the right size and
2071   // cut off the leading and trailing waste pages.
2072   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2073   const size_t extra_size = size + alignment_hint;
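
  // Worked example (illustrative numbers): for size = 64K and alignment_hint = 256K
  // we map extra_size = 320K. Wherever mmap places us, align_ptr_up finds the next
  // 256K boundary inside that range; the leading gap (waste_pre) and the trailing
  // remainder (waste_post = extra_size - waste_pre - size) are munmap'ed below.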
2074 
2075   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2076   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2077   int flags = MAP_ANONYMOUS | MAP_SHARED;
2078 
2079   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2080   // it means if wishaddress is given but MAP_FIXED is not set.
2081   //
2082   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2083   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2084   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2085   // get clobbered.
2086   if (requested_addr != NULL) {
2087     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2088       flags |= MAP_FIXED;
2089     }
2090   }
2091 
2092   char* addr = (char*)::mmap(requested_addr, extra_size,
2093       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2094 
2095   if (addr == MAP_FAILED) {
2096     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2097     return NULL;
2098   }
2099 
2100   // Handle alignment.
2101   char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2102   const size_t waste_pre = addr_aligned - addr;
2103   char* const addr_aligned_end = addr_aligned + size;
2104   const size_t waste_post = extra_size - waste_pre - size;
2105   if (waste_pre > 0) {
2106     ::munmap(addr, waste_pre);
2107   }
2108   if (waste_post > 0) {
2109     ::munmap(addr_aligned_end, waste_post);
2110   }
2111   addr = addr_aligned;
2112 
2113   if (addr) {
2114     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2115       addr, addr + bytes, bytes);
2116   } else {
2117     if (requested_addr != NULL) {
2118       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2119     } else {
2120       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2121     }
2122   }
2123 
2124   // bookkeeping
2125   vmembk_add(addr, size, 4*K, VMEM_MAPPED);
2126 
2127   // Test alignment, see above.
2128   assert0(is_aligned_to(addr, os::vm_page_size()));
2129 
2130   return addr;
2131 }
2132 
2133 static bool release_mmaped_memory(char* addr, size_t size) {
2134   assert0(is_aligned_to(addr, os::vm_page_size()));
2135   assert0(is_aligned_to(size, os::vm_page_size()));
2136 
2137   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2138     addr, addr + size - 1);
2139   bool rc = false;
2140 
2141   if (::munmap(addr, size) != 0) {
2142     trcVerbose("failed (%d)\n", errno);
2143     rc = false;
2144   } else {
2145     trcVerbose("ok.");
2146     rc = true;
2147   }
2148 
2149   return rc;
2150 }
2151 
2152 static bool uncommit_mmaped_memory(char* addr, size_t size) {
2153 
2154   assert0(is_aligned_to(addr, os::vm_page_size()));
2155   assert0(is_aligned_to(size, os::vm_page_size()));
2156 
2157   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2158     addr, addr + size - 1);
2159   bool rc = false;
2160 
2161   // Uncommit mmap memory with msync MS_INVALIDATE.
2162   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2163     trcVerbose("failed (%d)\n", errno);
2164     rc = false;
2165   } else {
2166     trcVerbose("ok.");
2167     rc = true;
2168   }
2169 
2170   return rc;
2171 }
2172 
2173 int os::vm_page_size() {
2174   // Seems redundant as all get out.
2175   assert(os::Aix::page_size() != -1, "must call os::init");
2176   return os::Aix::page_size();
2177 }
2178 
2179 // Aix allocates memory by pages.
2180 int os::vm_allocation_granularity() {
2181   assert(os::Aix::page_size() != -1, "must call os::init");
2182   return os::Aix::page_size();
2183 }
2184 
2185 #ifdef PRODUCT
2186 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2187                                     int err) {
2188   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2189           ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2190           os::errno_name(err), err);
2191 }
2192 #endif
2193 
2194 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2195                                   const char* mesg) {
2196   assert(mesg != NULL, "mesg must be specified");
2197   if (!pd_commit_memory(addr, size, exec)) {
2198     // Add extra info in product mode for vm_exit_out_of_memory():
2199     PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2200     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2201   }
2202 }
2203 
2204 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2205 
2206   assert(is_aligned_to(addr, os::vm_page_size()),
2207     "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2208     p2i(addr), os::vm_page_size());
2209   assert(is_aligned_to(size, os::vm_page_size()),
2210     "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2211     size, os::vm_page_size());
2212 
2213   vmembk_t* const vmi = vmembk_find(addr);
2214   guarantee0(vmi);
2215   vmi->assert_is_valid_subrange(addr, size);
2216 
2217   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2218 
2219   if (UseExplicitCommit) {
2220     // AIX commits memory on touch. So, touch all pages to be committed.
2221     for (char* p = addr; p < (addr + size); p += 4*K) {
2222       *p = '\0';
2223     }
2224   }
2225 
2226   return true;
2227 }
2228 
2229 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2230   return pd_commit_memory(addr, size, exec);
2231 }
2232 
2233 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2234                                   size_t alignment_hint, bool exec,
2235                                   const char* mesg) {
2236   // Alignment_hint is ignored on this OS.
2237   pd_commit_memory_or_exit(addr, size, exec, mesg);
2238 }
2239 
2240 bool os::pd_uncommit_memory(char* addr, size_t size) {
2241   assert(is_aligned_to(addr, os::vm_page_size()),
2242     "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2243     p2i(addr), os::vm_page_size());
2244   assert(is_aligned_to(size, os::vm_page_size()),
2245     "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2246     size, os::vm_page_size());
2247 
2248   // Dynamically do different things for mmap/shmat.
2249   const vmembk_t* const vmi = vmembk_find(addr);
2250   guarantee0(vmi);
2251   vmi->assert_is_valid_subrange(addr, size);
2252 
2253   if (vmi->type == VMEM_SHMATED) {
2254     return uncommit_shmated_memory(addr, size);
2255   } else {
2256     return uncommit_mmaped_memory(addr, size);
2257   }
2258 }
2259 
2260 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2261   // Do not call this; no need to commit stack pages on AIX.
2262   ShouldNotReachHere();
2263   return true;
2264 }
2265 
2266 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2267   // Do not call this; no need to commit stack pages on AIX.
2268   ShouldNotReachHere();
2269   return true;
2270 }
2271 
2272 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2273 }
2274 
2275 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2276 }
2277 
2278 void os::numa_make_global(char *addr, size_t bytes) {
2279 }
2280 
2281 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2282 }
2283 
2284 bool os::numa_topology_changed() {
2285   return false;
2286 }
2287 
2288 size_t os::numa_get_groups_num() {
2289   return 1;
2290 }
2291 
2292 int os::numa_get_group_id() {
2293   return 0;
2294 }
2295 
2296 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2297   if (size > 0) {
2298     ids[0] = 0;
2299     return 1;
2300   }
2301   return 0;
2302 }
2303 
2304 bool os::get_page_info(char *start, page_info* info) {
2305   return false;
2306 }
2307 
2308 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2309   return end;
2310 }
2311 
// Reserves memory: mmap'ed, or (in 64K mode, for large enough allocations) shmat'ed.
// Note that a wish address is not supported here; see the assert0 below and
// os::attempt_reserve_memory_at() instead.
2314 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2315 
2316   // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2317   // thereby clobbering old mappings at that place. That is probably
2318   // not intended, never used and almost certainly an error were it
2319   // ever be used this way (to try attaching at a specified address
2320   // without clobbering old mappings an alternate API exists,
2321   // os::attempt_reserve_memory_at()).
2322   // Instead of mimicking the dangerous coding of the other platforms, here I
2323   // just ignore the request address (release) or assert(debug).
2324   assert0(requested_addr == NULL);
2325 
2326   // Always round to os::vm_page_size(), which may be larger than 4K.
2327   bytes = align_size_up(bytes, os::vm_page_size());
2328   const size_t alignment_hint0 =
2329     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2330 
2331   // In 4K mode always use mmap.
2332   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2333   if (os::vm_page_size() == 4*K) {
2334     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2335   } else {
2336     if (bytes >= Use64KPagesThreshold) {
2337       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2338     } else {
2339       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2340     }
2341   }
2342 }
2343 
2344 bool os::pd_release_memory(char* addr, size_t size) {
2345 
2346   // Dynamically do different things for mmap/shmat.
2347   vmembk_t* const vmi = vmembk_find(addr);
2348   guarantee0(vmi);
2349 
2350   // Always round to os::vm_page_size(), which may be larger than 4K.
2351   size = align_size_up(size, os::vm_page_size());
2352   addr = (char *)align_ptr_up(addr, os::vm_page_size());
2353 
2354   bool rc = false;
2355   bool remove_bookkeeping = false;
2356   if (vmi->type == VMEM_SHMATED) {
2357     // For shmatted memory, we do:
2358     // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use the memory anymore (but we still
    //   pay for its page table space).
2362     vmi->assert_is_valid_subrange(addr, size);
2363     if (addr == vmi->addr && size == vmi->size) {
2364       rc = release_shmated_memory(addr, size);
2365       remove_bookkeeping = true;
2366     } else {
2367       rc = uncommit_shmated_memory(addr, size);
2368     }
2369   } else {
2370     // User may unmap partial regions but region has to be fully contained.
2371 #ifdef ASSERT
2372     vmi->assert_is_valid_subrange(addr, size);
2373 #endif
2374     rc = release_mmaped_memory(addr, size);
2375     remove_bookkeeping = true;
2376   }
2377 
2378   // update bookkeeping
2379   if (rc && remove_bookkeeping) {
2380     vmembk_remove(vmi);
2381   }
2382 
2383   return rc;
2384 }
2385 
2386 static bool checked_mprotect(char* addr, size_t size, int prot) {
2387 
2388   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2389   // not tell me if protection failed when trying to protect an un-protectable range.
2390   //
  // This means if the memory was allocated using shmget/shmat, protection won't work
2392   // but mprotect will still return 0:
2393   //
2394   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2395 
  bool rc = (::mprotect(addr, size, prot) == 0);
2397 
2398   if (!rc) {
2399     const char* const s_errno = os::errno_name(errno);
2400     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2401     return false;
2402   }
2403 
2404   // mprotect success check
2405   //
2406   // Mprotect said it changed the protection but can I believe it?
2407   //
2408   // To be sure I need to check the protection afterwards. Try to
2409   // read from protected memory and check whether that causes a segfault.
2410   //
2411   if (!os::Aix::xpg_sus_mode()) {
2412 
2413     if (CanUseSafeFetch32()) {
2414 
      const bool read_protected =
        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
         SafeFetch32((int*)addr, 0x76543210) == 0x76543210);
2418 
2419       if (prot & PROT_READ) {
2420         rc = !read_protected;
2421       } else {
2422         rc = read_protected;
2423       }
2424 
2425       if (!rc) {
2426         if (os::Aix::on_pase()) {
2427           // There is an issue on older PASE systems where mprotect() will return success but the
2428           // memory will not be protected.
          // This has nothing to do with the problem of using mprotect() on SPEC1170 incompatible
2430           // machines; we only see it rarely, when using mprotect() to protect the guard page of
2431           // a stack. It is an OS error.
2432           //
2433           // A valid strategy is just to try again. This usually works. :-/
2434 
          ::usleep(1000);
          if (::mprotect(addr, size, prot) == 0) {
            const bool read_protected_2 =
              (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
               SafeFetch32((int*)addr, 0x76543210) == 0x76543210);
            // Re-check with the same logic as above instead of blindly assuming success.
            rc = (prot & PROT_READ) ? !read_protected_2 : read_protected_2;
          }
2442         }
2443       }
2444     }
2445   }
2446 
2447   assert(rc == true, "mprotect failed.");
2448 
2449   return rc;
2450 }
2451 
2452 // Set protections specified
2453 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2454   unsigned int p = 0;
2455   switch (prot) {
2456   case MEM_PROT_NONE: p = PROT_NONE; break;
2457   case MEM_PROT_READ: p = PROT_READ; break;
2458   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2459   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2460   default:
2461     ShouldNotReachHere();
2462   }
2463   // is_committed is unused.
2464   return checked_mprotect(addr, size, p);
2465 }
2466 
2467 bool os::guard_memory(char* addr, size_t size) {
2468   return checked_mprotect(addr, size, PROT_NONE);
2469 }
2470 
2471 bool os::unguard_memory(char* addr, size_t size) {
2472   return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2473 }
2474 
2475 // Large page support
2476 
2477 static size_t _large_page_size = 0;
2478 
2479 // Enable large page support if OS allows that.
2480 void os::large_page_init() {
2481   return; // Nothing to do. See query_multipage_support and friends.
2482 }
2483 
2484 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2485   // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
2486   // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
2487   // so this is not needed.
2488   assert(false, "should not be called on AIX");
2489   return NULL;
2490 }
2491 
2492 bool os::release_memory_special(char* base, size_t bytes) {
2493   // Detaching the SHM segment will also delete it, see reserve_memory_special().
2494   Unimplemented();
2495   return false;
2496 }
2497 
2498 size_t os::large_page_size() {
2499   return _large_page_size;
2500 }
2501 
2502 bool os::can_commit_large_page_memory() {
2503   // Does not matter, we do not support huge pages.
2504   return false;
2505 }
2506 
2507 bool os::can_execute_large_page_memory() {
2508   // Does not matter, we do not support huge pages.
2509   return false;
2510 }
2511 
2512 // Reserve memory at an arbitrary address, only if that area is
2513 // available (and not reserved for something else).
2514 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2516 
2517   // Always round to os::vm_page_size(), which may be larger than 4K.
2518   bytes = align_size_up(bytes, os::vm_page_size());
2519 
2520   // In 4K mode always use mmap.
2521   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2522   if (os::vm_page_size() == 4*K) {
2523     return reserve_mmaped_memory(bytes, requested_addr, 0);
2524   } else {
2525     if (bytes >= Use64KPagesThreshold) {
2526       return reserve_shmated_memory(bytes, requested_addr, 0);
2527     } else {
2528       return reserve_mmaped_memory(bytes, requested_addr, 0);
2529     }
2530   }
2531 
2532   return addr;
2533 }
2534 
2535 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2536   return ::read(fd, buf, nBytes);
2537 }
2538 
2539 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2540   return ::pread(fd, buf, nBytes, offset);
2541 }
2542 
2543 void os::naked_short_sleep(jlong ms) {
2544   struct timespec req;
2545 
  assert(ms < 1000, "Uninterruptible sleep, short time use only");
2547   req.tv_sec = 0;
2548   if (ms > 0) {
2549     req.tv_nsec = (ms % 1000) * 1000000;
2550   }
2551   else {
2552     req.tv_nsec = 1;
2553   }
2554 
2555   nanosleep(&req, NULL);
2556 
2557   return;
2558 }
2559 
2560 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2561 void os::infinite_sleep() {
2562   while (true) {    // sleep forever ...
2563     ::sleep(100);   // ... 100 seconds at a time
2564   }
2565 }
2566 
2567 // Used to convert frequent JVM_Yield() to nops
2568 bool os::dont_yield() {
2569   return DontYieldALot;
2570 }
2571 
2572 void os::naked_yield() {
2573   sched_yield();
2574 }
2575 
2576 ////////////////////////////////////////////////////////////////////////////////
2577 // thread priority support
2578 
2579 // From AIX manpage to pthread_setschedparam
2580 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2581 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2582 //
2583 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2584 // range from 40 to 80, where 40 is the least favored priority and 80
2585 // is the most favored."
2586 //
2587 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2588 // scheduling there; however, this still leaves iSeries.)
2589 //
2590 // We use the same values for AIX and PASE.
2591 int os::java_to_os_priority[CriticalPriority + 1] = {
2592   54,             // 0 Entry should never be used
2593 
2594   55,             // 1 MinPriority
2595   55,             // 2
2596   56,             // 3
2597 
2598   56,             // 4
2599   57,             // 5 NormPriority
2600   57,             // 6
2601 
2602   58,             // 7
2603   58,             // 8
2604   59,             // 9 NearMaxPriority
2605 
2606   60,             // 10 MaxPriority
2607 
2608   60              // 11 CriticalPriority
2609 };
2610 
2611 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2612   if (!UseThreadPriorities) return OS_OK;
2613   pthread_t thr = thread->osthread()->pthread_id();
2614   int policy = SCHED_OTHER;
2615   struct sched_param param;
2616   param.sched_priority = newpri;
2617   int ret = pthread_setschedparam(thr, policy, &param);
2618 
2619   if (ret != 0) {
2620     trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2621         (int)thr, newpri, ret, os::errno_name(ret));
2622   }
2623   return (ret == 0) ? OS_OK : OS_ERR;
2624 }
2625 
2626 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2627   if (!UseThreadPriorities) {
2628     *priority_ptr = java_to_os_priority[NormPriority];
2629     return OS_OK;
2630   }
2631   pthread_t thr = thread->osthread()->pthread_id();
2632   int policy = SCHED_OTHER;
2633   struct sched_param param;
2634   int ret = pthread_getschedparam(thr, &policy, &param);
2635   *priority_ptr = param.sched_priority;
2636 
2637   return (ret == 0) ? OS_OK : OS_ERR;
2638 }
2639 
2640 // Hint to the underlying OS that a task switch would not be good.
2641 // Void return because it's a hint and can fail.
2642 void os::hint_no_preempt() {}
2643 
2644 ////////////////////////////////////////////////////////////////////////////////
2645 // suspend/resume support
2646 
2647 //  the low-level signal-based suspend/resume support is a remnant from the
2648 //  old VM-suspension that used to be for java-suspension, safepoints etc,
2649 //  within hotspot. Now there is a single use-case for this:
2650 //    - calling get_thread_pc() on the VMThread by the flat-profiler task
2651 //      that runs in the watcher thread.
2652 //  The remaining code is greatly simplified from the more general suspension
2653 //  code that used to be used.
2654 //
2655 //  The protocol is quite simple:
2656 //  - suspend:
2657 //      - sends a signal to the target thread
2658 //      - polls the suspend state of the osthread using a yield loop
2659 //      - target thread signal handler (SR_handler) sets suspend state
2660 //        and blocks in sigsuspend until continued
2661 //  - resume:
2662 //      - sets target osthread state to continue
2663 //      - sends signal to end the sigsuspend loop in the SR_handler
2664 //
2665 //  Note that the SR_lock plays no role in this suspend/resume protocol,
2666 //  but is checked for NULL in SR_handler as a thread termination indicator.
2667 //
2668 
2669 static void resume_clear_context(OSThread *osthread) {
2670   osthread->set_ucontext(NULL);
2671   osthread->set_siginfo(NULL);
2672 }
2673 
2674 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2675   osthread->set_ucontext(context);
2676   osthread->set_siginfo(siginfo);
2677 }
2678 
2679 //
2680 // Handler function invoked when a thread's execution is suspended or
2681 // resumed. We have to be careful that only async-safe functions are
2682 // called here (Note: most pthread functions are not async safe and
2683 // should be avoided.)
2684 //
2685 // Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal handler
// from being run. libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal.
2690 //
2691 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
2692 //
2693 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2694   // Save and restore errno to avoid confusing native code with EINTR
2695   // after sigsuspend.
2696   int old_errno = errno;
2697 
2698   Thread* thread = Thread::current_or_null_safe();
2699   assert(thread != NULL, "Missing current thread in SR_handler");
2700 
2701   // On some systems we have seen signal delivery get "stuck" until the signal
2702   // mask is changed as part of thread termination. Check that the current thread
2703   // has not already terminated (via SR_lock()) - else the following assertion
2704   // will fail because the thread is no longer a JavaThread as the ~JavaThread
2705   // destructor has completed.
2706 
2707   if (thread->SR_lock() == NULL) {
2708     return;
2709   }
2710 
2711   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2712 
2713   OSThread* osthread = thread->osthread();
2714 
2715   os::SuspendResume::State current = osthread->sr.state();
2716   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2717     suspend_save_context(osthread, siginfo, context);
2718 
2719     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2720     os::SuspendResume::State state = osthread->sr.suspended();
2721     if (state == os::SuspendResume::SR_SUSPENDED) {
2722       sigset_t suspend_set;  // signals for sigsuspend()
2723 
2724       // get current set of blocked signals and unblock resume signal
2725       pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2726       sigdelset(&suspend_set, SR_signum);
2727 
2728       // wait here until we are resumed
2729       while (1) {
2730         sigsuspend(&suspend_set);
2731 
2732         os::SuspendResume::State result = osthread->sr.running();
2733         if (result == os::SuspendResume::SR_RUNNING) {
2734           break;
2735         }
2736       }
2737 
2738     } else if (state == os::SuspendResume::SR_RUNNING) {
2739       // request was cancelled, continue
2740     } else {
2741       ShouldNotReachHere();
2742     }
2743 
2744     resume_clear_context(osthread);
2745   } else if (current == os::SuspendResume::SR_RUNNING) {
2746     // request was cancelled, continue
2747   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2748     // ignore
2749   } else {
2750     ShouldNotReachHere();
2751   }
2752 
2753   errno = old_errno;
2754 }
2755 
2756 static int SR_initialize() {
2757   struct sigaction act;
2758   char *s;
2759   // Get signal number to use for suspend/resume
2760   if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
2761     int sig = ::strtol(s, 0, 10);
2762     if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
2763         sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
2764       SR_signum = sig;
2765     } else {
2766       warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
2767               sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
2768     }
2769   }
2770 
2771   assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
2772         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
2773 
2774   sigemptyset(&SR_sigset);
2775   sigaddset(&SR_sigset, SR_signum);
2776 
2777   // Set up signal handler for suspend/resume.
2778   act.sa_flags = SA_RESTART|SA_SIGINFO;
2779   act.sa_handler = (void (*)(int)) SR_handler;
2780 
2781   // SR_signum is blocked by default.
2782   pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
2783 
2784   if (sigaction(SR_signum, &act, 0) == -1) {
2785     return -1;
2786   }
2787 
2788   // Save signal flag
2789   os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
2790   return 0;
2791 }
2792 
2793 static int SR_finalize() {
2794   return 0;
2795 }
2796 
2797 static int sr_notify(OSThread* osthread) {
2798   int status = pthread_kill(osthread->pthread_id(), SR_signum);
2799   assert_status(status == 0, status, "pthread_kill");
2800   return status;
2801 }
2802 
2803 // "Randomly" selected value for how long we want to spin
2804 // before bailing out on suspending a thread, also how often
2805 // we send a signal to a thread we want to resume
2806 static const int RANDOMLY_LARGE_INTEGER = 1000000;
2807 static const int RANDOMLY_LARGE_INTEGER2 = 100;
2808 
2809 // returns true on success and false on error - really an error is fatal
2810 // but this seems the normal response to library errors
2811 static bool do_suspend(OSThread* osthread) {
2812   assert(osthread->sr.is_running(), "thread should be running");
2813   // mark as suspended and send signal
2814 
2815   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
2816     // failed to switch, state wasn't running?
2817     ShouldNotReachHere();
2818     return false;
2819   }
2820 
2821   if (sr_notify(osthread) != 0) {
2822     // try to cancel, switch to running
2823 
2824     os::SuspendResume::State result = osthread->sr.cancel_suspend();
2825     if (result == os::SuspendResume::SR_RUNNING) {
2826       // cancelled
2827       return false;
2828     } else if (result == os::SuspendResume::SR_SUSPENDED) {
2829       // somehow managed to suspend
2830       return true;
2831     } else {
2832       ShouldNotReachHere();
2833       return false;
2834     }
2835   }
2836 
2837   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
2838 
2839   for (int n = 0; !osthread->sr.is_suspended(); n++) {
2840     for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
2841       os::naked_yield();
2842     }
2843 
2844     // timeout, try to cancel the request
2845     if (n >= RANDOMLY_LARGE_INTEGER) {
2846       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
2847       if (cancelled == os::SuspendResume::SR_RUNNING) {
2848         return false;
2849       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
2850         return true;
2851       } else {
2852         ShouldNotReachHere();
2853         return false;
2854       }
2855     }
2856   }
2857 
2858   guarantee(osthread->sr.is_suspended(), "Must be suspended");
2859   return true;
2860 }
2861 
2862 static void do_resume(OSThread* osthread) {
2863   //assert(osthread->sr.is_suspended(), "thread should be suspended");
2864 
2865   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
2866     // failed to switch to WAKEUP_REQUEST
2867     ShouldNotReachHere();
2868     return;
2869   }
2870 
2871   while (!osthread->sr.is_running()) {
2872     if (sr_notify(osthread) == 0) {
2873       for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
2874         for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
2875           os::naked_yield();
2876         }
2877       }
2878     } else {
2879       ShouldNotReachHere();
2880     }
2881   }
2882 
2883   guarantee(osthread->sr.is_running(), "Must be running!");
2884 }
2885 
2886 ///////////////////////////////////////////////////////////////////////////////////
2887 // signal handling (except suspend/resume)
2888 
2889 // This routine may be used by user applications as a "hook" to catch signals.
2890 // The user-defined signal handler must pass unrecognized signals to this
2891 // routine, and if it returns true (non-zero), then the signal handler must
2892 // return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine that kills the process.
2895 //
2896 // If this routine returns false, it is OK to call it again. This allows
2897 // the user-defined signal handler to perform checks either before or after
2898 // the VM performs its own checks. Naturally, the user code would be making
2899 // a serious error if it tried to handle an exception (such as a null check
2900 // or breakpoint) that the VM was generating for its own correct operation.
2901 //
2902 // This routine may recognize any of the following kinds of signals:
2903 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2904 // It should be consulted by handlers for any of those signals.
2905 //
2906 // The caller of this routine must pass in the three arguments supplied
2907 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
2908 // field of the structure passed to sigaction(). This routine assumes that
2909 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2910 //
2911 // Note that the VM will print warnings if it detects conflicting signal
2912 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2913 //
2914 extern "C" JNIEXPORT int
2915 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2916 
2917 // Set thread signal mask (for some reason on AIX sigthreadmask() seems
2918 // to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same).
2920 bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
2921   const int rc = ::pthread_sigmask(how, set, oset);
  // Return value semantics differ slightly for the error case:
  // pthread_sigmask returns the error number, while sigthreadmask returns -1 and
  // sets the global errno (so pthread_sigmask is more thread-safe for error handling).
  // But success is always 0.
  return rc == 0;
2927 }
2928 
2929 // Function to unblock all signals which are, according
2930 // to POSIX, typical program error signals. If they happen while being blocked,
2931 // they typically will bring down the process immediately.
2932 bool unblock_program_error_signals() {
2933   sigset_t set;
2934   ::sigemptyset(&set);
2935   ::sigaddset(&set, SIGILL);
2936   ::sigaddset(&set, SIGBUS);
2937   ::sigaddset(&set, SIGFPE);
2938   ::sigaddset(&set, SIGSEGV);
2939   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2940 }
2941 
2942 // Renamed from 'signalHandler' to avoid collision with other shared libs.
2943 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2944   assert(info != NULL && uc != NULL, "it must be old kernel");
2945 
  // Never leave program error signals blocked;
  // on all our platforms they would bring down the process immediately if
  // raised while blocked.
2949   unblock_program_error_signals();
2950 
2951   int orig_errno = errno;  // Preserve errno value over signal handler.
2952   JVM_handle_aix_signal(sig, info, uc, true);
2953   errno = orig_errno;
2954 }
2955 
2956 // This boolean allows users to forward their own non-matching signals
2957 // to JVM_handle_aix_signal, harmlessly.
2958 bool os::Aix::signal_handlers_are_installed = false;
2959 
2960 // For signal-chaining
2961 struct sigaction sigact[NSIG];
2962 sigset_t sigs;
2963 bool os::Aix::libjsig_is_loaded = false;
2964 typedef struct sigaction *(*get_signal_t)(int);
2965 get_signal_t os::Aix::get_signal_action = NULL;
2966 
2967 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2968   struct sigaction *actp = NULL;
2969 
2970   if (libjsig_is_loaded) {
2971     // Retrieve the old signal handler from libjsig
2972     actp = (*get_signal_action)(sig);
2973   }
2974   if (actp == NULL) {
2975     // Retrieve the preinstalled signal handler from jvm
2976     actp = get_preinstalled_handler(sig);
2977   }
2978 
2979   return actp;
2980 }
2981 
2982 static bool call_chained_handler(struct sigaction *actp, int sig,
2983                                  siginfo_t *siginfo, void *context) {
2984   // Call the old signal handler
2985   if (actp->sa_handler == SIG_DFL) {
2986     // It's more reasonable to let jvm treat it as an unexpected exception
2987     // instead of taking the default action.
2988     return false;
2989   } else if (actp->sa_handler != SIG_IGN) {
2990     if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
2992       sigaddset(&(actp->sa_mask), sig);
2993     }
2994 
2995     sa_handler_t hand = NULL;
2996     sa_sigaction_t sa = NULL;
2997     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
2998     // retrieve the chained handler
2999     if (siginfo_flag_set) {
3000       sa = actp->sa_sigaction;
3001     } else {
3002       hand = actp->sa_handler;
3003     }
3004 
3005     if ((actp->sa_flags & SA_RESETHAND) != 0) {
3006       actp->sa_handler = SIG_DFL;
3007     }
3008 
3009     // try to honor the signal mask
3010     sigset_t oset;
3011     pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3012 
3013     // call into the chained handler
3014     if (siginfo_flag_set) {
3015       (*sa)(sig, siginfo, context);
3016     } else {
3017       (*hand)(sig);
3018     }
3019 
3020     // restore the signal mask
3021     pthread_sigmask(SIG_SETMASK, &oset, 0);
3022   }
3023   // Tell jvm's signal handler the signal is taken care of.
3024   return true;
3025 }
3026 
3027 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3028   bool chained = false;
3029   // signal-chaining
3030   if (UseSignalChaining) {
3031     struct sigaction *actp = get_chained_signal_action(sig);
3032     if (actp != NULL) {
3033       chained = call_chained_handler(actp, sig, siginfo, context);
3034     }
3035   }
3036   return chained;
3037 }
3038 
3039 size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
  // Creating a guard page is very expensive. Java threads have HotSpot
  // guard pages; only enable the libc guard page for non-Java threads.
  // (Remember: the compiler thread is a Java thread, too!)
  //
  // AIX can have different page sizes for stack (4K) and heap (64K).
  // As HotSpot knows only one page size, we assume the stack has
  // the same page size as the heap. Returning page_size() here could
  // cause 16 guard pages, which we want to avoid. Thus we return 4K,
  // which will be rounded up to the real page size by the OS.
3049   return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : 4 * K);
3050 }
3051 
3052 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3053   if (sigismember(&sigs, sig)) {
3054     return &sigact[sig];
3055   }
3056   return NULL;
3057 }
3058 
3059 void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3060   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3061   sigact[sig] = oldAct;
3062   sigaddset(&sigs, sig);
3063 }
3064 
3065 // for diagnostic
3066 int sigflags[NSIG];
3067 
3068 int os::Aix::get_our_sigflags(int sig) {
3069   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3070   return sigflags[sig];
3071 }
3072 
3073 void os::Aix::set_our_sigflags(int sig, int flags) {
3074   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3075   if (sig > 0 && sig < NSIG) {
3076     sigflags[sig] = flags;
3077   }
3078 }
3079 
3080 void os::Aix::set_signal_handler(int sig, bool set_installed) {
3081   // Check for overwrite.
3082   struct sigaction oldAct;
3083   sigaction(sig, (struct sigaction*)NULL, &oldAct);
3084 
3085   void* oldhand = oldAct.sa_sigaction
3086     ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3087     : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3088   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3089       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3090       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3091     if (AllowUserSignalHandlers || !set_installed) {
3092       // Do not overwrite; user takes responsibility to forward to us.
3093       return;
3094     } else if (UseSignalChaining) {
3095       // save the old handler in jvm
3096       save_preinstalled_handler(sig, oldAct);
3097       // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
3099     } else {
3100       fatal("Encountered unexpected pre-existing sigaction handler "
3101             "%#lx for signal %d.", (long)oldhand, sig);
3102     }
3103   }
3104 
3105   struct sigaction sigAct;
3106   sigfillset(&(sigAct.sa_mask));
3107   if (!set_installed) {
3108     sigAct.sa_handler = SIG_DFL;
3109     sigAct.sa_flags = SA_RESTART;
3110   } else {
3111     sigAct.sa_sigaction = javaSignalHandler;
3112     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3113   }
  // Save the flags we are setting.
3115   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3116   sigflags[sig] = sigAct.sa_flags;
3117 
3118   int ret = sigaction(sig, &sigAct, &oldAct);
3119   assert(ret == 0, "check");
3120 
3121   void* oldhand2 = oldAct.sa_sigaction
3122                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3123                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3124   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3125 }
3126 
3127 // install signal handlers for signals that HotSpot needs to
3128 // handle in order to support Java-level exception handling.
3129 void os::Aix::install_signal_handlers() {
3130   if (!signal_handlers_are_installed) {
3131     signal_handlers_are_installed = true;
3132 
3133     // signal-chaining
3134     typedef void (*signal_setting_t)();
3135     signal_setting_t begin_signal_setting = NULL;
3136     signal_setting_t end_signal_setting = NULL;
3137     begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3138                              dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3139     if (begin_signal_setting != NULL) {
3140       end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3141                              dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3142       get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3143                             dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3144       libjsig_is_loaded = true;
3145       assert(UseSignalChaining, "should enable signal-chaining");
3146     }
3147     if (libjsig_is_loaded) {
3148       // Tell libjsig jvm is setting signal handlers.
3149       (*begin_signal_setting)();
3150     }
3151 
3152     ::sigemptyset(&sigs);
3153     set_signal_handler(SIGSEGV, true);
3154     set_signal_handler(SIGPIPE, true);
3155     set_signal_handler(SIGBUS, true);
3156     set_signal_handler(SIGILL, true);
3157     set_signal_handler(SIGFPE, true);
3158     set_signal_handler(SIGTRAP, true);
3159     set_signal_handler(SIGXFSZ, true);
3160 
3161     if (libjsig_is_loaded) {
3162       // Tell libjsig jvm finishes setting signal handlers.
3163       (*end_signal_setting)();
3164     }
3165 
    // We don't activate the signal checker if libjsig is in place; we trust
    // ourselves, and if a UserSignalHandler is installed all bets are off.
3168     // Log that signal checking is off only if -verbose:jni is specified.
3169     if (CheckJNICalls) {
3170       if (libjsig_is_loaded) {
3171         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3172         check_signals = false;
3173       }
3174       if (AllowUserSignalHandlers) {
3175         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3176         check_signals = false;
3177       }
3178       // Need to initialize check_signal_done.
3179       ::sigemptyset(&check_signal_done);
3180     }
3181   }
3182 }
3183 
3184 static const char* get_signal_handler_name(address handler,
3185                                            char* buf, int buflen) {
3186   int offset;
3187   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3188   if (found) {
3189     // skip directory names
3190     const char *p1, *p2;
3191     p1 = buf;
3192     size_t len = strlen(os::file_separator());
3193     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    // The way os::dll_address_to_library_name is implemented on AIX
3195     // right now, it always returns -1 for the offset which is not
3196     // terribly informative.
3197     // Will fix that. For now, omit the offset.
3198     jio_snprintf(buf, buflen, "%s", p1);
3199   } else {
3200     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3201   }
3202   return buf;
3203 }
3204 
3205 static void print_signal_handler(outputStream* st, int sig,
3206                                  char* buf, size_t buflen) {
3207   struct sigaction sa;
3208   sigaction(sig, NULL, &sa);
3209 
3210   st->print("%s: ", os::exception_name(sig, buf, buflen));
3211 
3212   address handler = (sa.sa_flags & SA_SIGINFO)
3213     ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3214     : CAST_FROM_FN_PTR(address, sa.sa_handler);
3215 
3216   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3217     st->print("SIG_DFL");
3218   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3219     st->print("SIG_IGN");
3220   } else {
3221     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3222   }
3223 
3224   // Print readable mask.
3225   st->print(", sa_mask[0]=");
3226   os::Posix::print_signal_set_short(st, &sa.sa_mask);
3227 
3228   address rh = VMError::get_resetted_sighandler(sig);
  // Maybe the handler was reset by VMError?
3230   if (rh != NULL) {
3231     handler = rh;
3232     sa.sa_flags = VMError::get_resetted_sigflags(sig);
3233   }
3234 
3235   // Print textual representation of sa_flags.
3236   st->print(", sa_flags=");
3237   os::Posix::print_sa_flags(st, sa.sa_flags);
3238 
3239   // Check: is it our handler?
3240   if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3241       handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
3242     // It is our signal handler.
    // Check the flags; warn if the system changed the ones we set.
3244     if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
3245       st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
3246                 os::Aix::get_our_sigflags(sig));
3247     }
3248   }
3249   st->cr();
3250 }
3251 
3252 #define DO_SIGNAL_CHECK(sig) \
3253   if (!sigismember(&check_signal_done, sig)) \
3254     os::Aix::check_signal_handler(sig)
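
// For clarity, DO_SIGNAL_CHECK(SIGSEGV); expands to:
//   if (!sigismember(&check_signal_done, SIGSEGV))
//     os::Aix::check_signal_handler(SIGSEGV);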
3255 
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI. We can add any other periodic checks here.
3258 
3259 void os::run_periodic_checks() {
3260 
3261   if (check_signals == false) return;
3262 
  // SEGV and BUS, if overridden, could potentially prevent the
  // generation of hs*.log in the event of a crash. Debugging
  // such a case can be very challenging, so we absolutely
  // check the following for good measure:
3267   DO_SIGNAL_CHECK(SIGSEGV);
3268   DO_SIGNAL_CHECK(SIGILL);
3269   DO_SIGNAL_CHECK(SIGFPE);
3270   DO_SIGNAL_CHECK(SIGBUS);
3271   DO_SIGNAL_CHECK(SIGPIPE);
3272   DO_SIGNAL_CHECK(SIGXFSZ);
3273   if (UseSIGTRAP) {
3274     DO_SIGNAL_CHECK(SIGTRAP);
3275   }
3276 
  // ReduceSignalUsage allows the user to override these handlers;
  // see comments at the very top and in jvm_aix.h.
3279   if (!ReduceSignalUsage) {
3280     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3281     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3282     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3283     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3284   }
3285 
3286   DO_SIGNAL_CHECK(SR_signum);
3287 }
3288 
3289 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3290 
3291 static os_sigaction_t os_sigaction = NULL;
3292 
3293 void os::Aix::check_signal_handler(int sig) {
3294   char buf[O_BUFLEN];
3295   address jvmHandler = NULL;
3296 
3297   struct sigaction act;
3298   if (os_sigaction == NULL) {
3299     // only trust the default sigaction, in case it has been interposed
3300     os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
3301     if (os_sigaction == NULL) return;
3302   }
3303 
3304   os_sigaction(sig, (struct sigaction*)NULL, &act);
3305 
3306   address thisHandler = (act.sa_flags & SA_SIGINFO)
3307     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3308     : CAST_FROM_FN_PTR(address, act.sa_handler);
3309 
3310   switch(sig) {
3311   case SIGSEGV:
3312   case SIGBUS:
3313   case SIGFPE:
3314   case SIGPIPE:
3315   case SIGILL:
3316   case SIGXFSZ:
3317     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3318     break;
3319 
3320   case SHUTDOWN1_SIGNAL:
3321   case SHUTDOWN2_SIGNAL:
3322   case SHUTDOWN3_SIGNAL:
3323   case BREAK_SIGNAL:
3324     jvmHandler = (address)user_handler();
3325     break;
3326 
3327   default:
3328     if (sig == SR_signum) {
3329       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3330     } else {
3331       return;
3332     }
3333     break;
3334   }
3335 
3336   if (thisHandler != jvmHandler) {
3337     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3338     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3339     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3340     // No need to check this sig any longer
3341     sigaddset(&check_signal_done, sig);
    // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN.
3343     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3344       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3345                     exception_name(sig, buf, O_BUFLEN));
3346     }
3347   } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3348     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3349     tty->print("expected:");
3350     os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
3351     tty->cr();
3352     tty->print("  found:");
3353     os::Posix::print_sa_flags(tty, act.sa_flags);
3354     tty->cr();
3355     // No need to check this sig any longer
3356     sigaddset(&check_signal_done, sig);
3357   }
3358 
  // Dump all the signal handlers.
3360   if (sigismember(&check_signal_done, sig)) {
3361     print_signal_handlers(tty, buf, O_BUFLEN);
3362   }
3363 }
3364 
// To install functions for the atexit() call.
3366 extern "C" {
3367   static void perfMemory_exit_helper() {
3368     perfMemory_exit();
3369   }
3370 }
3371 
// This is called _before_ most of the global arguments have been parsed.
3373 void os::init(void) {
  // This is basic; we want to know if it ever changes.
  // (The shared memory boundary is supposed to be 256M aligned.)
3376   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3377 
3378   // Record process break at startup.
3379   g_brk_at_startup = (address) ::sbrk(0);
3380   assert(g_brk_at_startup != (address) -1, "sbrk failed");
3381 
3382   // First off, we need to know whether we run on AIX or PASE, and
3383   // the OS level we run on.
3384   os::Aix::initialize_os_info();
3385 
3386   // Scan environment (SPEC1170 behaviour, etc).
3387   os::Aix::scan_environment();
3388 
3389   // Probe multipage support.
3390   query_multipage_support();
3391 
3392   // Act like we only have one page size by eliminating corner cases which
3393   // we did not support very well anyway.
3394   // We have two input conditions:
3395   // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3396   //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3397   //    setting.
3398   //    Data segment page size is important for us because it defines the thread stack page
3399   //    size, which is needed for guard page handling, stack banging etc.
3400   // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3401   //    and should be allocated with 64k pages.
3402   //
3403   // So, we do the following:
3404   // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
3405   // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
3406   // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
3407   // 64k          no              --- AIX 5.2 ? ---
3408   // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)
3409 
3410   // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if the stack page size is 64k you cannot pretend it is 4k).
3412 
3413   if (g_multipage_support.datapsize == 4*K) {
3414     // datapsize = 4K. Data segment, thread stacks are 4K paged.
3415     if (g_multipage_support.can_use_64K_pages) {
3416       // .. but we are able to use 64K pages dynamically.
3417       // This would be typical for java launchers which are not linked
3418       // with datapsize=64K (like, any other launcher but our own).
3419       //
3420       // In this case it would be smart to allocate the java heap with 64K
3421       // to get the performance benefit, and to fake 64k pages for the
3422       // data segment (when dealing with thread stacks).
3423       //
3424       // However, leave a possibility to downgrade to 4K, using
3425       // -XX:-Use64KPages.
3426       if (Use64KPages) {
3427         trcVerbose("64K page mode (faked for data segment)");
3428         Aix::_page_size = 64*K;
3429       } else {
3430         trcVerbose("4K page mode (Use64KPages=off)");
3431         Aix::_page_size = 4*K;
3432       }
3433     } else {
3434       // .. and not able to allocate 64k pages dynamically. Here, just
3435       // fall back to 4K paged mode and use mmap for everything.
3436       trcVerbose("4K page mode");
3437       Aix::_page_size = 4*K;
3438       FLAG_SET_ERGO(bool, Use64KPages, false);
3439     }
3440   } else {
3441     // datapsize = 64k. Data segment, thread stacks are 64k paged.
3442     // This normally means that we can allocate 64k pages dynamically.
    // (There is one special case where this may be false: EXTSHM=on,
    // but we decided not to support that mode.)
3445     assert0(g_multipage_support.can_use_64K_pages);
3446     Aix::_page_size = 64*K;
3447     trcVerbose("64K page mode");
3448     FLAG_SET_ERGO(bool, Use64KPages, true);
3449   }
3450 
3451   // For now UseLargePages is just ignored.
3452   FLAG_SET_ERGO(bool, UseLargePages, false);
3453   _page_sizes[0] = 0;
3454 
3455   // debug trace
3456   trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));
3457 
3458   // Next, we need to initialize libo4 and libperfstat libraries.
3459   if (os::Aix::on_pase()) {
3460     os::Aix::initialize_libo4();
3461   } else {
3462     os::Aix::initialize_libperfstat();
3463   }
3464 
3465   // Reset the perfstat information provided by ODM.
3466   if (os::Aix::on_aix()) {
3467     libperfstat::perfstat_reset();
3468   }
3469 
  // Now initialize basic system properties. Note that for some of the values we
3471   // need libperfstat etc.
3472   os::Aix::initialize_system_info();
3473 
3474   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3475 
3476   init_random(1234567);
3477 
3478   ThreadCritical::initialize();
3479 
  // _main_thread points to the primordial thread.
3481   Aix::_main_thread = pthread_self();
3482 
3483   initial_time_count = os::elapsed_counter();
3484 }
3485 
3486 // This is called _after_ the global arguments have been parsed.
3487 jint os::init_2(void) {
3488 
3489   if (os::Aix::on_pase()) {
3490     trcVerbose("Running on PASE.");
3491   } else {
3492     trcVerbose("Running on AIX (not PASE).");
3493   }
3494 
3495   trcVerbose("processor count: %d", os::_processor_count);
3496   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3497 
3498   // Initially build up the loaded dll map.
3499   LoadedLibraries::reload();
3500   if (Verbose) {
3501     trcVerbose("Loaded Libraries: ");
3502     LoadedLibraries::print(tty);
3503   }
3504 
3505   const int page_size = Aix::page_size();
3506   const int map_size = page_size;
3507 
3508   address map_address = (address) MAP_FAILED;
3509   const int prot  = PROT_READ;
3510   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3511 
3512   // Use optimized addresses for the polling page,
3513   // e.g. map it to a special 32-bit address.
3514   if (OptimizePollingPageLocation) {
3515     // architecture-specific list of address wishes:
3516     address address_wishes[] = {
3517       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3518       // PPC64: all address wishes are non-negative 32 bit values where
      // the lower 16 bits are all zero. We can load these addresses
3520       // with a single ppc_lis instruction.
3521       (address) 0x30000000, (address) 0x31000000,
3522       (address) 0x32000000, (address) 0x33000000,
3523       (address) 0x40000000, (address) 0x41000000,
3524       (address) 0x42000000, (address) 0x43000000,
3525       (address) 0x50000000, (address) 0x51000000,
3526       (address) 0x52000000, (address) 0x53000000,
3527       (address) 0x60000000, (address) 0x61000000,
3528       (address) 0x62000000, (address) 0x63000000
3529     };
3530     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3531 
3532     // iterate over the list of address wishes:
3533     for (int i=0; i<address_wishes_length; i++) {
3534       // Try to map with current address wish.
3535       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3536       // fail if the address is already mapped.
3537       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3538                                      map_size, prot,
3539                                      flags | MAP_FIXED,
3540                                      -1, 0);
3541       trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",
3542                    address_wishes[i], map_address + (ssize_t)page_size);
3543 
3544       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3545         // Map succeeded and map_address is at wished address, exit loop.
3546         break;
3547       }
3548 
3549       if (map_address != (address) MAP_FAILED) {
3550         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3551         ::munmap(map_address, map_size);
3552         map_address = (address) MAP_FAILED;
3553       }
3554       // Map failed, continue loop.
3555     }
3556   } // end OptimizePollingPageLocation
3557 
3558   if (map_address == (address) MAP_FAILED) {
3559     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3560   }
3561   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3562   os::set_polling_page(map_address);
3563 
3564   if (!UseMembar) {
3565     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    guarantee(mem_serialize_page != (address) MAP_FAILED, "mmap failed for memory serialize page");
3567     os::set_memory_serialize_page(mem_serialize_page);
3568 
3569     trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
3570         mem_serialize_page, mem_serialize_page + Aix::page_size(),
3571         Aix::page_size(), Aix::page_size());
3572   }
3573 
3574   // initialize suspend/resume support - must do this before signal_sets_init()
3575   if (SR_initialize() != 0) {
3576     perror("SR_initialize failed");
3577     return JNI_ERR;
3578   }
3579 
3580   Aix::signal_sets_init();
3581   Aix::install_signal_handlers();
3582 
  // Check and set minimum stack sizes against command line options.
3584   if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
3585     return JNI_ERR;
3586   }
3587 
3588   if (UseNUMA) {
3589     UseNUMA = false;
3590     warning("NUMA optimizations are not available on this OS.");
3591   }
3592 
3593   if (MaxFDLimit) {
    // Set the number of file descriptors to the maximum. Print an error
    // if getrlimit/setrlimit fails, but continue regardless.
3596     struct rlimit nbr_files;
3597     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3598     if (status != 0) {
3599       log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
3600     } else {
3601       nbr_files.rlim_cur = nbr_files.rlim_max;
3602       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3603       if (status != 0) {
3604         log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
3605       }
3606     }
3607   }
3608 
3609   if (PerfAllowAtExitRegistration) {
3610     // Only register atexit functions if PerfAllowAtExitRegistration is set.
    // Atexit functions can be delayed until process exit time, which
3612     // can be problematic for embedded VM situations. Embedded VMs should
3613     // call DestroyJavaVM() to assure that VM resources are released.
3614 
3615     // Note: perfMemory_exit_helper atexit function may be removed in
3616     // the future if the appropriate cleanup code can be added to the
3617     // VM_Exit VMOperation's doit method.
3618     if (atexit(perfMemory_exit_helper) != 0) {
3619       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3620     }
3621   }
3622 
3623   return JNI_OK;
3624 }
3625 
3626 // Mark the polling page as unreadable
3627 void os::make_polling_page_unreadable(void) {
3628   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3629     fatal("Could not disable polling page");
3630   }
3631 };
3632 
3633 // Mark the polling page as readable
3634 void os::make_polling_page_readable(void) {
3635   // Changed according to os_linux.cpp.
3636   if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3637     fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
3638   }
3639 };
3640 
3641 int os::active_processor_count() {
3642   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3643   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3644   return online_cpus;
3645 }
3646 
3647 void os::set_native_thread_name(const char *name) {
3648   // Not yet implemented.
3649   return;
3650 }
3651 
3652 bool os::distribute_processes(uint length, uint* distribution) {
3653   // Not yet implemented.
3654   return false;
3655 }
3656 
3657 bool os::bind_to_processor(uint processor_id) {
3658   // Not yet implemented.
3659   return false;
3660 }
3661 
3662 void os::SuspendedThreadTask::internal_do_task() {
3663   if (do_suspend(_thread->osthread())) {
3664     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3665     do_task(context);
3666     do_resume(_thread->osthread());
3667   }
3668 }
3669 
3670 class PcFetcher : public os::SuspendedThreadTask {
3671 public:
3672   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3673   ExtendedPC result();
3674 protected:
3675   void do_task(const os::SuspendedThreadTaskContext& context);
3676 private:
3677   ExtendedPC _epc;
3678 };
3679 
3680 ExtendedPC PcFetcher::result() {
3681   guarantee(is_done(), "task is not done yet.");
3682   return _epc;
3683 }
3684 
3685 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3686   Thread* thread = context.thread();
3687   OSThread* osthread = thread->osthread();
3688   if (osthread->ucontext() != NULL) {
3689     _epc = os::Aix::ucontext_get_pc((const ucontext_t *) context.ucontext());
3690   } else {
3691     // NULL context is unexpected, double-check this is the VMThread.
3692     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3693   }
3694 }
3695 
3696 // Suspends the target using the signal mechanism and then grabs the PC before
3697 // resuming the target. Used by the flat-profiler only
3698 ExtendedPC os::get_thread_pc(Thread* thread) {
3699   // Make sure that it is called by the watcher for the VMThread.
3700   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3701   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3702 
3703   PcFetcher fetcher(thread);
3704   fetcher.run();
3705   return fetcher.result();
3706 }
3707 
3708 ////////////////////////////////////////////////////////////////////////////////
3709 // debug support
3710 
3711 bool os::find(address addr, outputStream* st) {
3712 
3713   st->print(PTR_FORMAT ": ", addr);
3714 
3715   loaded_module_t lm;
3716   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3717       LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3718     st->print_cr("%s", lm.path);
3719     return true;
3720   }
3721 
3722   return false;
3723 }
3724 
3725 ////////////////////////////////////////////////////////////////////////////////
3726 // misc
3727 
// This does not do anything on AIX. This is basically a hook for being
3729 // able to use structured exception handling (thread-local exception filters)
3730 // on, e.g., Win32.
3731 void
3732 os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
3733                          JavaCallArguments* args, Thread* thread) {
3734   f(value, method, args, thread);
3735 }
3736 
3737 void os::print_statistics() {
3738 }
3739 
3740 bool os::message_box(const char* title, const char* message) {
3741   int i;
3742   fdStream err(defaultStream::error_fd());
3743   for (i = 0; i < 78; i++) err.print_raw("=");
3744   err.cr();
3745   err.print_raw_cr(title);
3746   for (i = 0; i < 78; i++) err.print_raw("-");
3747   err.cr();
3748   err.print_raw_cr(message);
3749   for (i = 0; i < 78; i++) err.print_raw("=");
3750   err.cr();
3751 
3752   char buf[16];
3753   // Prevent process from exiting upon "read error" without consuming all CPU
3754   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3755 
3756   return buf[0] == 'y' || buf[0] == 'Y';
3757 }
3758 
3759 int os::stat(const char *path, struct stat *sbuf) {
3760   char pathbuf[MAX_PATH];
3761   if (strlen(path) > MAX_PATH - 1) {
3762     errno = ENAMETOOLONG;
3763     return -1;
3764   }
3765   os::native_path(strcpy(pathbuf, path));
3766   return ::stat(pathbuf, sbuf);
3767 }
3768 
3769 // Is a (classpath) directory empty?
3770 bool os::dir_is_empty(const char* path) {
3771   DIR *dir = NULL;
3772   struct dirent *ptr;
3773 
3774   dir = opendir(path);
3775   if (dir == NULL) return true;
3776 
  // Scan the directory.
3778   bool result = true;
3779   char buf[sizeof(struct dirent) + MAX_PATH];
3780   while (result && (ptr = ::readdir(dir)) != NULL) {
3781     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3782       result = false;
3783     }
3784   }
3785   closedir(dir);
3786   return result;
3787 }
3788 
3789 // This code originates from JDK's sysOpen and open64_w
3790 // from src/solaris/hpi/src/system_md.c
3791 
3792 int os::open(const char *path, int oflag, int mode) {
3793 
3794   if (strlen(path) > MAX_PATH - 1) {
3795     errno = ENAMETOOLONG;
3796     return -1;
3797   }
3798   int fd;
3799 
3800   fd = ::open64(path, oflag, mode);
3801   if (fd == -1) return -1;
3802 
3803   // If the open succeeded, the file might still be a directory.
3804   {
3805     struct stat64 buf64;
3806     int ret = ::fstat64(fd, &buf64);
3807     int st_mode = buf64.st_mode;
3808 
3809     if (ret != -1) {
3810       if ((st_mode & S_IFMT) == S_IFDIR) {
3811         errno = EISDIR;
3812         ::close(fd);
3813         return -1;
3814       }
3815     } else {
3816       ::close(fd);
3817       return -1;
3818     }
3819   }
3820 
3821   // All file descriptors that are opened in the JVM and not
3822   // specifically destined for a subprocess should have the
3823   // close-on-exec flag set. If we don't set it, then careless 3rd
3824   // party native code might fork and exec without closing all
3825   // appropriate file descriptors (e.g. as we do in closeDescriptors in
3826   // UNIXProcess.c), and this in turn might:
3827   //
3828   // - cause end-of-file to fail to be detected on some file
3829   //   descriptors, resulting in mysterious hangs, or
3830   //
3831   // - might cause an fopen in the subprocess to fail on a system
3832   //   suffering from bug 1085341.
3833   //
3834   // (Yes, the default setting of the close-on-exec flag is a Unix
3835   // design flaw.)
3836   //
3837   // See:
3838   // 1085341: 32-bit stdio routines should support file descriptors >255
3839   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
3840   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
3841 #ifdef FD_CLOEXEC
3842   {
3843     int flags = ::fcntl(fd, F_GETFD);
3844     if (flags != -1)
3845       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
3846   }
3847 #endif
3848 
3849   return fd;
3850 }
3851 
3852 // create binary file, rewriting existing file if required
3853 int os::create_binary_file(const char* path, bool rewrite_existing) {
3854   int oflags = O_WRONLY | O_CREAT;
3855   if (!rewrite_existing) {
3856     oflags |= O_EXCL;
3857   }
3858   return ::open64(path, oflags, S_IREAD | S_IWRITE);
3859 }
3860 
3861 // return current position of file pointer
3862 jlong os::current_file_offset(int fd) {
3863   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3864 }
3865 
3866 // move file pointer to the specified offset
3867 jlong os::seek_to_file_offset(int fd, jlong offset) {
3868   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3869 }
3870 
3871 // This code originates from JDK's sysAvailable
3872 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3873 
3874 int os::available(int fd, jlong *bytes) {
3875   jlong cur, end;
3876   int mode;
3877   struct stat64 buf64;
3878 
3879   if (::fstat64(fd, &buf64) >= 0) {
3880     mode = buf64.st_mode;
3881     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
3882       int n;
3883       if (::ioctl(fd, FIONREAD, &n) >= 0) {
3884         *bytes = n;
3885         return 1;
3886       }
3887     }
3888   }
3889   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
3890     return 0;
3891   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
3892     return 0;
3893   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
3894     return 0;
3895   }
3896   *bytes = end - cur;
3897   return 1;
3898 }
3899 
3900 // Map a block of memory.
3901 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3902                         char *addr, size_t bytes, bool read_only,
3903                         bool allow_exec) {
3904   int prot;
3905   int flags = MAP_PRIVATE;
3906 
3907   if (read_only) {
3908     prot = PROT_READ;
3909     flags = MAP_SHARED;
3910   } else {
3911     prot = PROT_READ | PROT_WRITE;
3912     flags = MAP_PRIVATE;
3913   }
3914 
3915   if (allow_exec) {
3916     prot |= PROT_EXEC;
3917   }
3918 
3919   if (addr != NULL) {
3920     flags |= MAP_FIXED;
3921   }
3922 
3923   // Allow anonymous mappings if 'fd' is -1.
3924   if (fd == -1) {
3925     flags |= MAP_ANONYMOUS;
3926   }
3927 
3928   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
3929                                      fd, file_offset);
3930   if (mapped_address == MAP_FAILED) {
3931     return NULL;
3932   }
3933   return mapped_address;
3934 }
3935 
3936 // Remap a block of memory.
3937 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
3938                           char *addr, size_t bytes, bool read_only,
3939                           bool allow_exec) {
3940   // same as map_memory() on this OS
3941   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
3942                         allow_exec);
3943 }
3944 
3945 // Unmap a block of memory.
3946 bool os::pd_unmap_memory(char* addr, size_t bytes) {
3947   return munmap(addr, bytes) == 0;
3948 }
3949 
3950 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
3951 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
3952 // of a thread.
3953 //
// current_thread_cpu_time() and thread_cpu_time(Thread*) return
// the fast estimate available on the platform.
3956 
3957 jlong os::current_thread_cpu_time() {
3958   // return user + sys since the cost is the same
3959   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
3960   assert(n >= 0, "negative CPU time");
3961   return n;
3962 }
3963 
3964 jlong os::thread_cpu_time(Thread* thread) {
3965   // consistent with what current_thread_cpu_time() returns
3966   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
3967   assert(n >= 0, "negative CPU time");
3968   return n;
3969 }
3970 
3971 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
3972   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
3973   assert(n >= 0, "negative CPU time");
3974   return n;
3975 }
3976 
3977 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
3978   bool error = false;
3979 
3980   jlong sys_time = 0;
3981   jlong user_time = 0;
3982 
3983   // Reimplemented using getthrds64().
3984   //
3985   // Works like this:
3986   // For the thread in question, get the kernel thread id. Then get the
3987   // kernel thread statistics using that id.
3988   //
  // Of course, this only works when no pthread scheduling is used,
3990   // i.e. there is a 1:1 relationship to kernel threads.
3991   // On AIX, see AIXTHREAD_SCOPE variable.
3992 
3993   pthread_t pthtid = thread->osthread()->pthread_id();
3994 
3995   // retrieve kernel thread id for the pthread:
3996   tid64_t tid = 0;
3997   struct __pthrdsinfo pinfo;
3998   // I just love those otherworldly IBM APIs which force me to hand down
  // dummy buffers for stuff I don't care about ...
4000   char dummy[1];
4001   int dummy_size = sizeof(dummy);
4002   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4003                           dummy, &dummy_size) == 0) {
4004     tid = pinfo.__pi_tid;
4005   } else {
4006     tty->print_cr("pthread_getthrds_np failed.");
4007     error = true;
4008   }
4009 
4010   // retrieve kernel timing info for that kernel thread
4011   if (!error) {
4012     struct thrdentry64 thrdentry;
4013     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4014       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4015       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4016     } else {
4017       tty->print_cr("pthread_getthrds_np failed.");
4018       error = true;
4019     }
4020   }
4021 
4022   if (p_sys_time) {
4023     *p_sys_time = sys_time;
4024   }
4025 
4026   if (p_user_time) {
4027     *p_user_time = user_time;
4028   }
4029 
4030   if (error) {
4031     return false;
4032   }
4033 
4034   return true;
4035 }
4036 
4037 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4038   jlong sys_time;
4039   jlong user_time;
4040 
4041   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4042     return -1;
4043   }
4044 
4045   return user_sys_cpu_time ? sys_time + user_time : user_time;
4046 }
4047 
4048 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4049   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4050   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4051   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4052   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4053 }
4054 
4055 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4056   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4057   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4058   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4059   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4060 }
4061 
4062 bool os::is_thread_cpu_time_supported() {
4063   return true;
4064 }
4065 
4066 // System loadavg support. Returns -1 if load average cannot be obtained.
4067 // For now just return the system wide load average (no processor sets).
4068 int os::loadavg(double values[], int nelem) {
4069 
4070   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4071   guarantee(values, "argument error");
4072 
4073   if (os::Aix::on_pase()) {
4074 
4075     // AS/400 PASE: use libo4 porting library
4076     double v[3] = { 0.0, 0.0, 0.0 };
4077 
4078     if (libo4::get_load_avg(v, v + 1, v + 2)) {
4079       for (int i = 0; i < nelem; i ++) {
4080         values[i] = v[i];
4081       }
4082       return nelem;
4083     } else {
4084       return -1;
4085     }
4086 
4087   } else {
4088 
4089     // AIX: use libperfstat
4090     libperfstat::cpuinfo_t ci;
4091     if (libperfstat::get_cpuinfo(&ci)) {
4092       for (int i = 0; i < nelem; i++) {
4093         values[i] = ci.loadavg[i];
4094       }
4095     } else {
4096       return -1;
4097     }
4098     return nelem;
4099   }
4100 }
4101 
4102 void os::pause() {
4103   char filename[MAX_PATH];
4104   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4106   } else {
4107     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4108   }
4109 
4110   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4111   if (fd != -1) {
4112     struct stat buf;
4113     ::close(fd);
4114     while (::stat(filename, &buf) == 0) {
4115       (void)::poll(NULL, 0, 100);
4116     }
4117   } else {
4118     trcVerbose("Could not open pause file '%s', continuing immediately.", filename);
4119   }
4120 }
4121 
4122 bool os::Aix::is_primordial_thread() {
  return pthread_self() == (pthread_t)1;
4128 }
4129 
// OS recognition (PASE/AIX, OS level). Call this before calling any
// of the static Aix::on_pase(), Aix::os_version() functions.
4132 void os::Aix::initialize_os_info() {
4133 
4134   assert(_on_pase == -1 && _os_version == 0, "already called.");
4135 
4136   struct utsname uts;
4137   memset(&uts, 0, sizeof(uts));
4138   strcpy(uts.sysname, "?");
4139   if (::uname(&uts) == -1) {
4140     trcVerbose("uname failed (%d)", errno);
4141     guarantee(0, "Could not determine whether we run on AIX or PASE");
4142   } else {
4143     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4144                "node \"%s\" machine \"%s\"\n",
4145                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4146     const int major = atoi(uts.version);
4147     assert(major > 0, "invalid OS version");
4148     const int minor = atoi(uts.release);
4149     assert(minor > 0, "invalid OS release");
4150     _os_version = (major << 24) | (minor << 16);
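    // At this point, e.g. AIX 7.1 (uts.version "7", uts.release "1") yields
    // _os_version == 0x07010000 and os_version_short() == 0x0701; for AIX,
    // the low two bytes are refined with the modification and fix level below.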
4151     char ver_str[20] = {0};
    const char* name_str = "unknown OS";
4153     if (strcmp(uts.sysname, "OS400") == 0) {
4154       // We run on AS/400 PASE. We do not support versions older than V5R4M0.
4155       _on_pase = 1;
4156       if (os_version_short() < 0x0504) {
4157         trcVerbose("OS/400 releases older than V5R4M0 not supported.");
4158         assert(false, "OS/400 release too old.");
4159       }
4160       name_str = "OS/400 (pase)";
4161       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
4162     } else if (strcmp(uts.sysname, "AIX") == 0) {
4163       // We run on AIX. We do not support versions older than AIX 5.3.
4164       _on_pase = 0;
4165       // Determine detailed AIX version: Version, Release, Modification, Fix Level.
4166       odmWrapper::determine_os_kernel_version(&_os_version);
4167       if (os_version_short() < 0x0503) {
4168         trcVerbose("AIX release older than AIX 5.3 not supported.");
4169         assert(false, "AIX release too old.");
4170       }
4171       name_str = "AIX";
4172       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
4173                    major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
4174     } else {
4175       assert(false, name_str);
4176     }
4177     trcVerbose("We run on %s %s", name_str, ver_str);
4178   }
4179 
4180   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4181 } // end: os::Aix::initialize_os_info()
4182 
// Scan the environment for important settings which might affect the VM.
// Trace out settings. Warn about invalid settings and/or correct them.
//
// Must run after os::Aix::initialize_os_info().
4187 void os::Aix::scan_environment() {
4188 
4189   char* p;
4190   int rc;
4191 
  // Warn explicitly if EXTSHM=ON is used. That switch changes how
  // System V shared memory behaves. One effect is that the page size of
  // shared memory cannot be changed dynamically, effectively preventing
  // large pages from working.
4196   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4197   // recommendation is (in OSS notes) to switch it off.
4198   p = ::getenv("EXTSHM");
4199   trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
4200   if (p && strcasecmp(p, "ON") == 0) {
4201     _extshm = 1;
4202     trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
4203     if (!AllowExtshm) {
4204       // We allow under certain conditions the user to continue. However, we want this
4205       // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
4206       // that the VM is not able to allocate 64k pages for the heap.
4207       // We do not want to run with reduced performance.
4208       vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
4209     }
4210   } else {
4211     _extshm = 0;
4212   }
4213 
4214   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4215   // Not tested, not supported.
4216   //
4217   // Note that it might be worth the trouble to test and to require it, if only to
4218   // get useful return codes for mprotect.
4219   //
4220   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4221   // exec() ? before loading the libjvm ? ....)
4222   p = ::getenv("XPG_SUS_ENV");
4223   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4224   if (p && strcmp(p, "ON") == 0) {
4225     _xpg_sus_mode = 1;
4226     trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
4227     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4228     // clobber address ranges. If we ever want to support that, we have to do some
4229     // testing first.
4230     guarantee(false, "XPG_SUS_ENV=ON not supported");
4231   } else {
4232     _xpg_sus_mode = 0;
4233   }
4234 
4235   if (os::Aix::on_pase()) {
4236     p = ::getenv("QIBM_MULTI_THREADED");
4237     trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
4238   }
4239 
4240   p = ::getenv("LDR_CNTRL");
4241   trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
4242   if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
4243     if (p && ::strstr(p, "TEXTPSIZE")) {
4244       trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
4245         "you may experience hangs or crashes on OS/400 V7R1.");
4246     }
4247   }
4248 
4249   p = ::getenv("AIXTHREAD_GUARDPAGES");
4250   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4251 
4252 } // end: os::Aix::scan_environment()
4253 
4254 // PASE: initialize the libo4 library (PASE porting library).
4255 void os::Aix::initialize_libo4() {
4256   guarantee(os::Aix::on_pase(), "OS/400 only.");
4257   if (!libo4::init()) {
4258     trcVerbose("libo4 initialization failed.");
4259     assert(false, "libo4 initialization failed");
4260   } else {
4261     trcVerbose("libo4 initialized.");
4262   }
4263 }
4264 
4265 // AIX: initialize the libperfstat library.
4266 void os::Aix::initialize_libperfstat() {
4267   assert(os::Aix::on_aix(), "AIX only");
4268   if (!libperfstat::init()) {
4269     trcVerbose("libperfstat initialization failed.");
4270     assert(false, "libperfstat initialization failed");
4271   } else {
4272     trcVerbose("libperfstat initialized.");
4273   }
4274 }
4275 
4276 /////////////////////////////////////////////////////////////////////////////
4277 // thread stack
4278 
4279 // Function to query the current stack size using pthread_getthrds_np.
4280 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4281   // This only works when invoked on a pthread. As we agreed not to use
4282   // primordial threads anyway, I assert here.
4283   guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4284 
4285   // Information about this api can be found (a) in the pthread.h header and
4286   // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4287   //
4288   // The use of this API to find out the current stack is kind of undefined.
4289   // But after a lot of tries and asking IBM about it, I concluded that it is safe
4290   // enough for cases where I let the pthread library create its stacks. For cases
  // where I create my own stack and pass it to pthread_create, it seems not to
4292   // work (the returned stack size in that case is 0).
4293 
4294   pthread_t tid = pthread_self();
4295   struct __pthrdsinfo pinfo;
4296   char dummy[1]; // Just needed to satisfy pthread_getthrds_np.
4297   int dummy_size = sizeof(dummy);
4298 
4299   memset(&pinfo, 0, sizeof(pinfo));
4300 
4301   const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4302                                      sizeof(pinfo), dummy, &dummy_size);
4303 
4304   if (rc != 0) {
4305     assert0(false);
4306     trcVerbose("pthread_getthrds_np failed (%d)", rc);
4307     return false;
4308   }
4309   guarantee0(pinfo.__pi_stackend);
4310 
4311   // The following may happen when invoking pthread_getthrds_np on a pthread
  // running on a user-provided stack (when handing down a stack to
  // pthread_create, see pthread_attr_setstackaddr).
4314   // Not sure what to do then.
4315 
4316   guarantee0(pinfo.__pi_stacksize);
4317 
4318   // Note: we get three values from pthread_getthrds_np:
4319   //       __pi_stackaddr, __pi_stacksize, __pi_stackend
4320   //
4321   // high addr    ---------------------
4322   //
4323   //    |         pthread internal data, like ~2K
4324   //    |
4325   //    |         ---------------------   __pi_stackend   (usually not page aligned, (xxxxF890))
4326   //    |
4327   //    |
4328   //    |
4329   //    |
4330   //    |
4331   //    |
4332   //    |          ---------------------   (__pi_stackend - __pi_stacksize)
4333   //    |
4334   //    |          padding to align the following AIX guard pages, if enabled.
4335   //    |
4336   //    V          ---------------------   __pi_stackaddr
4337   //
4338   // low addr      AIX guard pages, if enabled (AIXTHREAD_GUARDPAGES > 0)
4339   //
4340 
4341   address stack_base = (address)(pinfo.__pi_stackend);
4342   address stack_low_addr = (address)align_ptr_up(pinfo.__pi_stackaddr,
4343     os::vm_page_size());
4344   size_t stack_size = stack_base - stack_low_addr;
4345 
4346   if (p_stack_base) {
4347     *p_stack_base = stack_base;
4348   }
4349 
4350   if (p_stack_size) {
4351     *p_stack_size = stack_size;
4352   }
4353 
4354   return true;
4355 }
4356 
4357 // Get the current stack base from the OS (actually, the pthread library).
4358 address os::current_stack_base() {
4359   address p;
4360   query_stack_dimensions(&p, 0);
4361   return p;
4362 }
4363 
4364 // Get the current stack size from the OS (actually, the pthread library).
4365 size_t os::current_stack_size() {
4366   size_t s;
4367   query_stack_dimensions(0, &s);
4368   return s;
4369 }
4370 
4371 // Refer to the comments in os_solaris.cpp park-unpark.
4372 
4373 // utility to compute the abstime argument to timedwait:
4374 // millis is the relative timeout time
4375 // abstime will be the absolute timeout time
4376 // TODO: replace compute_abstime() with unpackTime()
4377 
4378 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4379   if (millis < 0) millis = 0;
4380   struct timeval now;
4381   int status = gettimeofday(&now, NULL);
4382   assert(status == 0, "gettimeofday");
4383   jlong seconds = millis / 1000;
4384   millis %= 1000;
4385   if (seconds > 50000000) { // see man cond_timedwait(3T)
4386     seconds = 50000000;
4387   }
4388   abstime->tv_sec = now.tv_sec  + seconds;
4389   long       usec = now.tv_usec + millis * 1000;
4390   if (usec >= 1000000) {
4391     abstime->tv_sec += 1;
4392     usec -= 1000000;
4393   }
4394   abstime->tv_nsec = usec * 1000;
4395   return abstime;
4396 }
4397 
4398 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4399 // Conceptually TryPark() should be equivalent to park(0).
4400 
4401 int os::PlatformEvent::TryPark() {
4402   for (;;) {
4403     const int v = _Event;
4404     guarantee ((v == 0) || (v == 1), "invariant");
4405     if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4406   }
4407 }
4408 
4409 void os::PlatformEvent::park() {       // AKA "down()"
4410   // Invariant: Only the thread associated with the Event/PlatformEvent
4411   // may call park().
4412   // TODO: assert that _Assoc != NULL or _Assoc == Self
4413   int v;
4414   for (;;) {
4415     v = _Event;
4416     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4417   }
4418   guarantee (v >= 0, "invariant");
4419   if (v == 0) {
4420     // Do this the hard way by blocking ...
4421     int status = pthread_mutex_lock(_mutex);
4422     assert_status(status == 0, status, "mutex_lock");
4423     guarantee (_nParked == 0, "invariant");
4424     ++ _nParked;
4425     while (_Event < 0) {
4426       status = pthread_cond_wait(_cond, _mutex);
      assert_status(status == 0 || status == ETIMEDOUT, status, "cond_wait");
4428     }
4429     -- _nParked;
4430 
4431     // In theory we could move the ST of 0 into _Event past the unlock(),
4432     // but then we'd need a MEMBAR after the ST.
4433     _Event = 0;
4434     status = pthread_mutex_unlock(_mutex);
4435     assert_status(status == 0, status, "mutex_unlock");
4436   }
4437   guarantee (_Event >= 0, "invariant");
4438 }
4439 
4440 int os::PlatformEvent::park(jlong millis) {
4441   guarantee (_nParked == 0, "invariant");
4442 
4443   int v;
4444   for (;;) {
4445     v = _Event;
4446     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4447   }
4448   guarantee (v >= 0, "invariant");
4449   if (v != 0) return OS_OK;
4450 
4451   // We do this the hard way, by blocking the thread.
4452   // Consider enforcing a minimum timeout value.
4453   struct timespec abst;
4454   compute_abstime(&abst, millis);
4455 
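  // Assume we will time out; this is overwritten with OS_OK below if we
  // observe a set event on wakeup.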
4456   int ret = OS_TIMEOUT;
4457   int status = pthread_mutex_lock(_mutex);
4458   assert_status(status == 0, status, "mutex_lock");
4459   guarantee (_nParked == 0, "invariant");
4460   ++_nParked;
4461 
4462   // Object.wait(timo) will return because of
4463   // (a) notification
4464   // (b) timeout
4465   // (c) thread.interrupt
4466   //
4467   // Thread.interrupt and object.notify{All} both call Event::set.
4468   // That is, we treat thread.interrupt as a special case of notification.
4469   // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4470   // We assume all ETIME returns are valid.
4471   //
4472   // TODO: properly differentiate simultaneous notify+interrupt.
4473   // In that case, we should propagate the notify to another waiter.
4474 
4475   while (_Event < 0) {
4476     status = pthread_cond_timedwait(_cond, _mutex, &abst);
4477     assert_status(status == 0 || status == ETIMEDOUT,
4478                   status, "cond_timedwait");
4479     if (!FilterSpuriousWakeups) break;         // previous semantics
4480     if (status == ETIMEDOUT) break;
4481     // We consume and ignore EINTR and spurious wakeups.
4482   }
4483   --_nParked;
4484   if (_Event >= 0) {
4485      ret = OS_OK;
4486   }
4487   _Event = 0;
4488   status = pthread_mutex_unlock(_mutex);
4489   assert_status(status == 0, status, "mutex_unlock");
4490   assert (_nParked == 0, "invariant");
4491   return ret;
4492 }
4493 
4494 void os::PlatformEvent::unpark() {
4495   int v, AnyWaiters;
4496   for (;;) {
4497     v = _Event;
4498     if (v > 0) {
      // The LD of _Event could have been reordered or satisfied
      // by a read-aside from this processor's write buffer.
      // To avoid problems, execute a barrier and then
      // ratify the value.
4503       OrderAccess::fence();
4504       if (_Event == v) return;
4505       continue;
4506     }
4507     if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
4508   }
4509   if (v < 0) {
4510     // Wait for the thread associated with the event to vacate
4511     int status = pthread_mutex_lock(_mutex);
4512     assert_status(status == 0, status, "mutex_lock");
4513     AnyWaiters = _nParked;
4514 
    if (AnyWaiters != 0) {
      // Note that unlike other ports, which signal *after* dropping the
      // lock to avoid a common class of futile wakeups, we signal while
      // still holding the mutex: it should be locked for
      // pthread_cond_signal(_cond) on this platform.
      status = pthread_cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
    status = pthread_mutex_unlock(_mutex);
4523     assert_status(status == 0, status, "mutex_unlock");
4524   }
4525 
  // For "immortal" Events it would also be safe to signal() *after* dropping
  // the lock, which avoids a common class of futile wakeups. In rare
  // circumstances that can cause a thread to return prematurely from
  // cond_{timed}wait(), but the spurious wakeup is benign and the victim will
  // simply re-test the condition and re-park itself.
4531 }
4532 
4533 
4534 // JSR166
4535 // -------------------------------------------------------
4536 
4537 //
4538 // The solaris and linux implementations of park/unpark are fairly
4539 // conservative for now, but can be improved. They currently use a
// mutex/condvar pair, plus a count.
4541 // Park decrements count if > 0, else does a condvar wait. Unpark
4542 // sets count to 1 and signals condvar. Only one thread ever waits
4543 // on the condvar. Contention seen when trying to park implies that someone
4544 // is unparking you, so don't wait. And spurious returns are fine, so there
4545 // is no need to track notifications.
4546 //
4547 
4548 #define MAX_SECS 100000000
4549 //
4550 // This code is common to linux and solaris and will be moved to a
4551 // common place in dolphin.
4552 //
4553 // The passed in time value is either a relative time in nanoseconds
4554 // or an absolute time in milliseconds. Either way it has to be unpacked
4555 // into suitable seconds and nanoseconds components and stored in the
4556 // given timespec structure.
// Given that time is a 64-bit value and the time_t used in the timespec is
// only a signed 32-bit value (except on 64-bit Linux), we have to watch for
// overflow if times far in the future are given. Further, on Solaris versions
4560 // prior to 10 there is a restriction (see cond_timedwait) that the specified
4561 // number of seconds, in abstime, is less than current_time + 100,000,000.
4562 // As it will be 28 years before "now + 100000000" will overflow we can
4563 // ignore overflow and just impose a hard-limit on seconds using the value
4564 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
4565 // years from "now".
4566 //
4567 
4568 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert(time > 0, "unpackTime");
4570 
4571   struct timeval now;
4572   int status = gettimeofday(&now, NULL);
4573   assert(status == 0, "gettimeofday");
4574 
4575   time_t max_secs = now.tv_sec + MAX_SECS;
4576 
4577   if (isAbsolute) {
4578     jlong secs = time / 1000;
4579     if (secs > max_secs) {
4580       absTime->tv_sec = max_secs;
4581     }
4582     else {
4583       absTime->tv_sec = secs;
4584     }
4585     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4586   }
4587   else {
4588     jlong secs = time / NANOSECS_PER_SEC;
4589     if (secs >= MAX_SECS) {
4590       absTime->tv_sec = max_secs;
4591       absTime->tv_nsec = 0;
4592     }
4593     else {
4594       absTime->tv_sec = now.tv_sec + secs;
4595       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4596       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4597         absTime->tv_nsec -= NANOSECS_PER_SEC;
4598         ++absTime->tv_sec; // note: this must be <= max_secs
4599       }
4600     }
4601   }
4602   assert(absTime->tv_sec >= 0, "tv_sec < 0");
4603   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4604   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4605   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4606 }
4607 
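// Parker semantics: _counter holds at most one permit. unpark() sets it to 1
// and wakes at most one waiter; park() consumes the permit if present,
// otherwise it blocks until unpark() or, for a timed park, until the
// deadline passes.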
4608 void Parker::park(bool isAbsolute, jlong time) {
4609   // Optional fast-path check:
4610   // Return immediately if a permit is available.
4611   if (_counter > 0) {
4612     _counter = 0;
4613     OrderAccess::fence();
4614     return;
4615   }
4616 
4617   Thread* thread = Thread::current();
4618   assert(thread->is_Java_thread(), "Must be JavaThread");
4619   JavaThread *jt = (JavaThread *)thread;
4620 
4621   // Optional optimization -- avoid state transitions if there's an interrupt pending.
4622   // Check interrupt before trying to wait
4623   if (Thread::is_interrupted(thread, false)) {
4624     return;
4625   }
4626 
4627   // Next, demultiplex/decode time arguments
4628   timespec absTime;
4629   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
4630     return;
4631   }
4632   if (time > 0) {
4633     unpackTime(&absTime, isAbsolute, time);
4634   }
4635 
4636   // Enter safepoint region
4637   // Beware of deadlocks such as 6317397.
4638   // The per-thread Parker:: mutex is a classic leaf-lock.
4639   // In particular a thread must never block on the Threads_lock while
4640   // holding the Parker:: mutex. If safepoints are pending both the
  // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
4642   ThreadBlockInVM tbivm(jt);
4643 
  // Don't wait if we cannot get the lock, since interference arises from
  // unblocking. Also, check for a pending interrupt before trying to wait.
4646   if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4647     return;
4648   }
4649 
4650   int status;
4651   if (_counter > 0) { // no wait needed
4652     _counter = 0;
4653     status = pthread_mutex_unlock(_mutex);
4654     assert (status == 0, "invariant");
4655     OrderAccess::fence();
4656     return;
4657   }
4658 
4659 #ifdef ASSERT
4660   // Don't catch signals while blocked; let the running threads have the signals.
4661   // (This allows a debugger to break into the running thread.)
4662   sigset_t oldsigs;
4663   sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
4664   pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4665 #endif
4666 
4667   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4668   jt->set_suspend_equivalent();
4669   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4670 
4671   if (time == 0) {
4672     status = pthread_cond_wait (_cond, _mutex);
4673   } else {
4674     status = pthread_cond_timedwait (_cond, _mutex, &absTime);
4675   }
4676   assert_status(status == 0 || status == EINTR ||
4677                 status == ETIME || status == ETIMEDOUT,
4678                 status, "cond_timedwait");
4679 
4680 #ifdef ASSERT
4681   pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4682 #endif
4683 
4684   _counter = 0;
4685   status = pthread_mutex_unlock(_mutex);
4686   assert_status(status == 0, status, "invariant");
4687   // If externally suspended while waiting, re-suspend
4688   if (jt->handle_special_suspend_equivalent_condition()) {
4689     jt->java_suspend_self();
4690   }
4691 
4692   OrderAccess::fence();
4693 }
4694 
4695 void Parker::unpark() {
4696   int s, status;
4697   status = pthread_mutex_lock(_mutex);
4698   assert (status == 0, "invariant");
4699   s = _counter;
4700   _counter = 1;
4701   if (s < 1) {
4702     status = pthread_mutex_unlock(_mutex);
4703     assert (status == 0, "invariant");
4704     status = pthread_cond_signal (_cond);
4705     assert (status == 0, "invariant");
4706   } else {
4707     pthread_mutex_unlock(_mutex);
4708     assert (status == 0, "invariant");
4709   }
4710 }
4711 
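// The environment of the current process, passed to execve() below.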
4712 extern char** environ;
4713 
4714 // Run the specified command in a separate process. Return its exit value,
4715 // or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from a signal handler. It
// doesn't block SIGINT et al.
4718 int os::fork_and_exec(char* cmd) {
  char* argv[4] = { (char*)"sh", (char*)"-c", cmd, NULL };
4720 
4721   pid_t pid = fork();
4722 
4723   if (pid < 0) {
4724     // fork failed
4725     return -1;
4726 
4727   } else if (pid == 0) {
4728     // child process
4729 
4730     // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4731     execve("/usr/bin/sh", argv, environ);
4732 
4733     // execve failed
4734     _exit(-1);
4735 
4736   } else {
4737     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4738     // care about the actual exit code, for now.
4739 
4740     int status;
4741 
    // Wait for the child process to exit. This returns immediately if
    // the child has already exited.
4744     while (waitpid(pid, &status, 0) < 0) {
4745       switch (errno) {
4746         case ECHILD: return 0;
4747         case EINTR: break;
4748         default: return -1;
4749       }
4750     }
4751 
4752     if (WIFEXITED(status)) {
4753       // The child exited normally; get its exit code.
4754       return WEXITSTATUS(status);
4755     } else if (WIFSIGNALED(status)) {
4756       // The child exited because of a signal.
4757       // The best value to return is 0x80 + signal number,
4758       // because that is what all Unix shells do, and because
4759       // it allows callers to distinguish between process exit and
4760       // process death by signal.
4761       return 0x80 + WTERMSIG(status);
4762     } else {
4763       // Unknown exit code; pass it through.
4764       return status;
4765     }
4766   }
4767   return -1;
4768 }
4769 
4770 // is_headless_jre()
4771 //
4772 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
4773 // in order to report if we are running in a headless jre.
4774 //
// Since JDK 8, xawt/libmawt.so has moved into the same directory
// as libawt.so and been renamed libawt_xawt.so.
4777 bool os::is_headless_jre() {
4778   struct stat statbuf;
4779   char buf[MAXPATHLEN];
4780   char libmawtpath[MAXPATHLEN];
4781   const char *xawtstr = "/xawt/libmawt.so";
4782   const char *new_xawtstr = "/libawt_xawt.so";
4783 
4784   char *p;
4785 
4786   // Get path to libjvm.so
4787   os::jvm_path(buf, sizeof(buf));
4788 
4789   // Get rid of libjvm.so
4790   p = strrchr(buf, '/');
4791   if (p == NULL) return false;
4792   else *p = '\0';
4793 
4794   // Get rid of client or server
4795   p = strrchr(buf, '/');
4796   if (p == NULL) return false;
4797   else *p = '\0';
4798 
4799   // check xawt/libmawt.so
4800   strcpy(libmawtpath, buf);
4801   strcat(libmawtpath, xawtstr);
4802   if (::stat(libmawtpath, &statbuf) == 0) return false;
4803 
4804   // check libawt_xawt.so
4805   strcpy(libmawtpath, buf);
4806   strcat(libmawtpath, new_xawtstr);
4807   if (::stat(libmawtpath, &statbuf) == 0) return false;
4808 
4809   return true;
4810 }
4811 
4812 // Get the default path to the core file
4813 // Returns the length of the string
4814 int os::get_core_path(char* buffer, size_t bufferSize) {
4815   const char* p = get_current_directory(buffer, bufferSize);
4816 
4817   if (p == NULL) {
4818     assert(p != NULL, "failed to get current directory");
4819     return 0;
4820   }
4821 
  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
               p, current_process_id());
4824 
4825   return strlen(buffer);
4826 }
4827 
4828 #ifndef PRODUCT
4829 void TestReserveMemorySpecial_test() {
4830   // No tests available for this platform
4831 }
4832 #endif
4833 
4834 bool os::start_debugging(char *buf, int buflen) {
4835   int len = (int)strlen(buf);
4836   char *p = &buf[len];
4837 
  jio_snprintf(p, buflen - len,
4839                  "\n\n"
4840                  "Do you want to debug the problem?\n\n"
4841                  "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
4842                  "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
4843                  "Otherwise, press RETURN to abort...",
4844                  os::current_process_id(),
4845                  os::current_thread_id(), thread_self());
4846 
4847   bool yes = os::message_box("Unexpected Error", buf);
4848 
4849   if (yes) {
4850     // yes, user asked VM to launch debugger
4851     jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
4852 
4853     os::fork_and_exec(buf);
4854     yes = false;
4855   }
4856   return yes;
4857 }
4858 
4859 static inline time_t get_mtime(const char* filename) {
4860   struct stat st;
4861   int ret = os::stat(filename, &st);
4862   assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
4863   return st.st_mtime;
4864 }
4865 
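// Returns a value < 0, == 0 or > 0 depending on whether file1 is older than,
// the same age as, or newer than file2. Note that the time_t difference is
// truncated to int.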
4866 int os::compare_file_modified_times(const char* file1, const char* file2) {
4867   time_t t1 = get_mtime(file1);
4868   time_t t2 = get_mtime(file2);
4869   return t1 - t2;
4870 }