1 /*
   2  * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 // According to the AIX OS doc #pragma alloca must be used
  27 // with C++ compiler before referencing the function alloca()
  28 #pragma alloca
  29 
  30 // no precompiled headers
  31 #include "classfile/classLoader.hpp"
  32 #include "classfile/systemDictionary.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "jvm_aix.h"
  39 #include "logging/log.hpp"
  40 #include "libo4.hpp"
  41 #include "libperfstat_aix.hpp"
  42 #include "libodm_aix.hpp"
  43 #include "loadlib_aix.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "memory/filemap.hpp"
  46 #include "misc_aix.hpp"
  47 #include "oops/oop.inline.hpp"
  48 #include "os_aix.inline.hpp"
  49 #include "os_share_aix.hpp"
  50 #include "porting_aix.hpp"
  51 #include "prims/jniFastGetField.hpp"
  52 #include "prims/jvm.h"
  53 #include "prims/jvm_misc.hpp"
  54 #include "runtime/arguments.hpp"
  55 #include "runtime/atomic.hpp"
  56 #include "runtime/extendedPC.hpp"
  57 #include "runtime/globals.hpp"
  58 #include "runtime/interfaceSupport.hpp"
  59 #include "runtime/java.hpp"
  60 #include "runtime/javaCalls.hpp"
  61 #include "runtime/mutexLocker.hpp"
  62 #include "runtime/objectMonitor.hpp"
  63 #include "runtime/orderAccess.inline.hpp"
  64 #include "runtime/os.hpp"
  65 #include "runtime/osThread.hpp"
  66 #include "runtime/perfMemory.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/statSampler.hpp"
  69 #include "runtime/stubRoutines.hpp"
  70 #include "runtime/thread.inline.hpp"
  71 #include "runtime/threadCritical.hpp"
  72 #include "runtime/timer.hpp"
  73 #include "runtime/vm_version.hpp"
  74 #include "services/attachListener.hpp"
  75 #include "services/runtimeService.hpp"
  76 #include "utilities/decoder.hpp"
  77 #include "utilities/defaultStream.hpp"
  78 #include "utilities/events.hpp"
  79 #include "utilities/growableArray.hpp"
  80 #include "utilities/vmError.hpp"
  81 
  82 // put OS-includes here (sorted alphabetically)
  83 #include <errno.h>
  84 #include <fcntl.h>
  85 #include <inttypes.h>
  86 #include <poll.h>
  87 #include <procinfo.h>
  88 #include <pthread.h>
  89 #include <pwd.h>
  90 #include <semaphore.h>
  91 #include <signal.h>
  92 #include <stdint.h>
  93 #include <stdio.h>
  94 #include <string.h>
  95 #include <unistd.h>
  96 #include <sys/ioctl.h>
  97 #include <sys/ipc.h>
  98 #include <sys/mman.h>
  99 #include <sys/resource.h>
 100 #include <sys/select.h>
 101 #include <sys/shm.h>
 102 #include <sys/socket.h>
 103 #include <sys/stat.h>
 104 #include <sys/sysinfo.h>
 105 #include <sys/systemcfg.h>
 106 #include <sys/time.h>
 107 #include <sys/times.h>
 108 #include <sys/types.h>
 109 #include <sys/utsname.h>
 110 #include <sys/vminfo.h>
 111 #include <sys/wait.h>
 112 
 113 // Missing prototypes for various system APIs.
 114 extern "C"
 115 int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
 116 
 117 #if !defined(_AIXVERSION_610)
 118 extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
 119 extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
 120 extern "C" int getargs   (procsinfo*, int, char*, int);
 121 #endif
 122 
 123 #define MAX_PATH (2 * K)
 124 
 125 // for timer info max values which include all bits
 126 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 127 // for multipage initialization error analysis (in 'g_multipage_error')
 128 #define ERROR_MP_OS_TOO_OLD                          100
 129 #define ERROR_MP_EXTSHM_ACTIVE                       101
 130 #define ERROR_MP_VMGETINFO_FAILED                    102
 131 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 132 
 133 // Query dimensions of the stack of the calling thread.
 134 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
 135 static address resolve_function_descriptor_to_code_pointer(address p);
 136 
 137 static void vmembk_print_on(outputStream* os);
 138 
 139 ////////////////////////////////////////////////////////////////////////////////
 140 // global variables (for a description see os_aix.hpp)
 141 
// Total physical memory in bytes (set by initialize_system_info() from
// os::Aix::get_meminfo()).
julong    os::Aix::_physical_memory = 0;

// Pthread id of the main thread; 0 = not yet recorded.
// NOTE(review): the initialization site is outside this file section.
pthread_t os::Aix::_main_thread = ((pthread_t)0);
// Base page size in bytes; -1 = uninitialized.
int       os::Aix::_page_size = -1;

// -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
int       os::Aix::_on_pase = -1;

// 0 = uninitialized, otherwise 32 bit number:
//  0xVVRRTTSS
//  VV - major version
//  RR - minor version
//  TT - tech level, if known, 0 otherwise
//  SS - service pack, if known, 0 otherwise
uint32_t  os::Aix::_os_version = 0;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_xpg_sus_mode = -1;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_extshm = -1;
 163 
 164 ////////////////////////////////////////////////////////////////////////////////
 165 // local variables
 166 
 167 static jlong    initial_time_count = 0;
 168 static int      clock_tics_per_sec = 100;
 169 static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
 170 static bool     check_signals      = true;
 171 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 172 static sigset_t SR_sigset;
 173 
 174 // Process break recorded at startup.
 175 static address g_brk_at_startup = NULL;
 176 
 177 // This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
 179 // therefore should not be defined in AIX class.
 180 //
 181 // AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
 182 // latter two (16M "large" resp. 16G "huge" pages) require special
 183 // setup and are normally not available.
 184 //
 185 // AIX supports multiple page sizes per process, for:
 186 //  - Stack (of the primordial thread, so not relevant for us)
 187 //  - Data - data, bss, heap, for us also pthread stacks
 188 //  - Text - text code
 189 //  - shared memory
 190 //
 191 // Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
 192 // and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
 193 //
 194 // For shared memory, page size can be set dynamically via
 195 // shmctl(). Different shared memory regions can have different page
 196 // sizes.
 197 //
// More information can be found at the IBM info center:
 199 //   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
 200 //
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,                // All page sizes start out as -1 ("uninitialized");
  (size_t) -1,                // they are filled in by query_multipage_support().
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};
 219 
 220 // We must not accidentally allocate memory close to the BRK - even if
 221 // that would work - because then we prevent the BRK segment from
 222 // growing which may result in a malloc OOM even though there is
 223 // enough memory. The problem only arises if we shmat() or mmap() at
 224 // a specific wish address, e.g. to place the heap in a
 225 // compressed-oops-friendly way.
 226 static bool is_close_to_brk(address a) {
 227   assert0(g_brk_at_startup != NULL);
 228   if (a >= g_brk_at_startup &&
 229       a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
 230     return true;
 231   }
 232   return false;
 233 }
 234 
 235 julong os::available_memory() {
 236   return Aix::available_memory();
 237 }
 238 
 239 julong os::Aix::available_memory() {
 240   // Avoid expensive API call here, as returned value will always be null.
 241   if (os::Aix::on_pase()) {
 242     return 0x0LL;
 243   }
 244   os::Aix::meminfo_t mi;
 245   if (os::Aix::get_meminfo(&mi)) {
 246     return mi.real_free;
 247   } else {
 248     return ULONG_MAX;
 249   }
 250 }
 251 
 252 julong os::physical_memory() {
 253   return Aix::physical_memory();
 254 }
 255 
 256 // Return true if user is running as root.
 257 
 258 bool os::have_special_privileges() {
 259   static bool init = false;
 260   static bool privileges = false;
 261   if (!init) {
 262     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 263     init = true;
 264   }
 265   return privileges;
 266 }
 267 
 268 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 269 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 270 static bool my_disclaim64(char* addr, size_t size) {
 271 
 272   if (size == 0) {
 273     return true;
 274   }
 275 
 276   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 277   const unsigned int maxDisclaimSize = 0x40000000;
 278 
 279   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 280   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 281 
 282   char* p = addr;
 283 
 284   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
 285     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 286       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 287       return false;
 288     }
 289     p += maxDisclaimSize;
 290   }
 291 
 292   if (lastDisclaimSize > 0) {
 293     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 294       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 295       return false;
 296     }
 297   }
 298 
 299   return true;
 300 }
 301 
// Cpu architecture string (selected at compile time).
// NOTE(review): usage sites are outside this file section.
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif
 310 
 311 // Wrap the function "vmgetinfo" which is not available on older OS releases.
 312 static int checked_vmgetinfo(void *out, int command, int arg) {
 313   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 314     guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
 315   }
 316   return ::vmgetinfo(out, command, arg);
 317 }
 318 
 319 // Given an address, returns the size of the page backing that address.
 320 size_t os::Aix::query_pagesize(void* addr) {
 321 
 322   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 323     // AS/400 older than V6R1: no vmgetinfo here, default to 4K
 324     return 4*K;
 325   }
 326 
 327   vm_page_info pi;
 328   pi.addr = (uint64_t)addr;
 329   if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
 330     return pi.pagesize;
 331   } else {
 332     assert(false, "vmgetinfo failed to retrieve page size");
 333     return 4*K;
 334   }
 335 }
 336 
 337 void os::Aix::initialize_system_info() {
 338 
 339   // Get the number of online(logical) cpus instead of configured.
 340   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
 341   assert(_processor_count > 0, "_processor_count must be > 0");
 342 
 343   // Retrieve total physical storage.
 344   os::Aix::meminfo_t mi;
 345   if (!os::Aix::get_meminfo(&mi)) {
 346     assert(false, "os::Aix::get_meminfo failed.");
 347   }
 348   _physical_memory = (julong) mi.real_total;
 349 }
 350 
 351 // Helper function for tracing page sizes.
 352 static const char* describe_pagesize(size_t pagesize) {
 353   switch (pagesize) {
 354     case 4*K : return "4K";
 355     case 64*K: return "64K";
 356     case 16*M: return "16M";
 357     case 16*G: return "16G";
 358     default:
 359       assert(false, "surprise");
 360       return "??";
 361   }
 362 }
 363 
 364 // Probe OS for multipage support.
 365 // Will fill the global g_multipage_support structure.
 366 // Must be called before calling os::large_page_init().
 367 static void query_multipage_support() {
 368 
 369   guarantee(g_multipage_support.pagesize == -1,
 370             "do not call twice");
 371 
 372   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
 373 
 374   // This really would surprise me.
 375   assert(g_multipage_support.pagesize == 4*K, "surprise!");
 376 
 377   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 378   // Default data page size is defined either by linker options (-bdatapsize)
 379   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 380   // default should be 4K.
 381   {
 382     void* p = ::malloc(16*M);
 383     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
 384     ::free(p);
 385   }
 386 
 387   // Query default shm page size (LDR_CNTRL SHMPSIZE).
 388   // Note that this is pure curiosity. We do not rely on default page size but set
 389   // our own page size after allocated.
 390   {
 391     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 392     guarantee(shmid != -1, "shmget failed");
 393     void* p = ::shmat(shmid, NULL, 0);
 394     ::shmctl(shmid, IPC_RMID, NULL);
 395     guarantee(p != (void*) -1, "shmat failed");
 396     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
 397     ::shmdt(p);
 398   }
 399 
 400   // Before querying the stack page size, make sure we are not running as primordial
 401   // thread (because primordial thread's stack may have different page size than
 402   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 403   // number of reasons so we may just as well guarantee it here.
 404   guarantee0(!os::Aix::is_primordial_thread());
 405 
 406   // Query pthread stack page size. Should be the same as data page size because
 407   // pthread stacks are allocated from C-Heap.
 408   {
 409     int dummy = 0;
 410     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
 411   }
 412 
 413   // Query default text page size (LDR_CNTRL TEXTPSIZE).
 414   {
 415     address any_function =
 416       resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
 417     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
 418   }
 419 
 420   // Now probe for support of 64K pages and 16M pages.
 421 
 422   // Before OS/400 V6R1, there is no support for pages other than 4K.
 423   if (os::Aix::on_pase_V5R4_or_older()) {
 424     trcVerbose("OS/400 < V6R1 - no large page support.");
 425     g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
 426     goto query_multipage_support_end;
 427   }
 428 
 429   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
 430   {
 431     const int MAX_PAGE_SIZES = 4;
 432     psize_t sizes[MAX_PAGE_SIZES];
 433     const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 434     if (num_psizes == -1) {
 435       trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
 436       trcVerbose("disabling multipage support.");
 437       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
 438       goto query_multipage_support_end;
 439     }
 440     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 441     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 442     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
 443     for (int i = 0; i < num_psizes; i ++) {
 444       trcVerbose(" %s ", describe_pagesize(sizes[i]));
 445     }
 446 
 447     // Can we use 64K, 16M pages?
 448     for (int i = 0; i < num_psizes; i ++) {
 449       const size_t pagesize = sizes[i];
 450       if (pagesize != 64*K && pagesize != 16*M) {
 451         continue;
 452       }
 453       bool can_use = false;
 454       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
 455       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
 456         IPC_CREAT | S_IRUSR | S_IWUSR);
 457       guarantee0(shmid != -1); // Should always work.
 458       // Try to set pagesize.
 459       struct shmid_ds shm_buf = { 0 };
 460       shm_buf.shm_pagesize = pagesize;
 461       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
 462         const int en = errno;
 463         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 464         trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%n",
 465           errno);
 466       } else {
 467         // Attach and double check pageisze.
 468         void* p = ::shmat(shmid, NULL, 0);
 469         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 470         guarantee0(p != (void*) -1); // Should always work.
 471         const size_t real_pagesize = os::Aix::query_pagesize(p);
 472         if (real_pagesize != pagesize) {
 473           trcVerbose("real page size (0x%llX) differs.", real_pagesize);
 474         } else {
 475           can_use = true;
 476         }
 477         ::shmdt(p);
 478       }
 479       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
 480       if (pagesize == 64*K) {
 481         g_multipage_support.can_use_64K_pages = can_use;
 482       } else if (pagesize == 16*M) {
 483         g_multipage_support.can_use_16M_pages = can_use;
 484       }
 485     }
 486 
 487   } // end: check which pages can be used for shared memory
 488 
 489 query_multipage_support_end:
 490 
 491   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
 492       describe_pagesize(g_multipage_support.pagesize));
 493   trcVerbose("Data page size (C-Heap, bss, etc): %s",
 494       describe_pagesize(g_multipage_support.datapsize));
 495   trcVerbose("Text page size: %s",
 496       describe_pagesize(g_multipage_support.textpsize));
 497   trcVerbose("Thread stack page size (pthread): %s",
 498       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
 499   trcVerbose("Default shared memory page size: %s",
 500       describe_pagesize(g_multipage_support.shmpsize));
 501   trcVerbose("Can use 64K pages dynamically with shared meory: %s",
 502       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
 503   trcVerbose("Can use 16M pages dynamically with shared memory: %s",
 504       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
 505   trcVerbose("Multipage error details: %d",
 506       g_multipage_support.error);
 507 
 508   // sanity checks
 509   assert0(g_multipage_support.pagesize == 4*K);
 510   assert0(g_multipage_support.datapsize == 4*K || g_multipage_support.datapsize == 64*K);
 511   assert0(g_multipage_support.textpsize == 4*K || g_multipage_support.textpsize == 64*K);
 512   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
 513   assert0(g_multipage_support.shmpsize == 4*K || g_multipage_support.shmpsize == 64*K);
 514 
 515 }
 516 
// Derives and publishes the system properties java.home, the dll dir, the
// library search path and the extensions dir, starting from the on-disk
// location of libjvm.so.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/lib:/usr/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    // Strip two more path components to arrive at java_home. Note this
    // reuses pslash from the previous strrchr, so the stripping only
    // happens when the dll dir itself had a parent component.
    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories: <java_home>/lib/ext.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}
 586 
 587 ////////////////////////////////////////////////////////////////////////////////
 588 // breakpoint support
 589 
// Triggers a debugger breakpoint via the platform BREAKPOINT macro.
void os::breakpoint() {
  BREAKPOINT;
}
 593 
// Empty C-linkage function; exists only so a debugger can set a breakpoint
// on a stable, unmangled symbol name.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
 597 
 598 ////////////////////////////////////////////////////////////////////////////////
 599 // signal support
 600 
// Set to true by signal_sets_init() (debug builds only; guards the accessors).
debug_only(static bool signal_sets_initialized = false);
// Signal sets filled in by signal_sets_init() and handed out by the
// accessors unblocked_signals(), vm_signals(), allowdebug_blocked_signals().
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
 603 
 604 bool os::Aix::is_sig_ignored(int sig) {
 605   struct sigaction oact;
 606   sigaction(sig, (struct sigaction*)NULL, &oact);
 607   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
 608     : CAST_FROM_FN_PTR(void*, oact.sa_handler);
 609   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
 610     return true;
 611   } else {
 612     return false;
 613   }
 614 }
 615 
 616 void os::Aix::signal_sets_init() {
 617   // Should also have an assertion stating we are still single-threaded.
 618   assert(!signal_sets_initialized, "Already initialized");
 619   // Fill in signals that are necessarily unblocked for all threads in
 620   // the VM. Currently, we unblock the following signals:
 621   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
 622   //                         by -Xrs (=ReduceSignalUsage));
 623   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
 624   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
 625   // the dispositions or masks wrt these signals.
 626   // Programs embedding the VM that want to use the above signals for their
 627   // own purposes must, at this time, use the "-Xrs" option to prevent
 628   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
 629   // (See bug 4345157, and other related bugs).
 630   // In reality, though, unblocking these signals is really a nop, since
 631   // these signals are not blocked by default.
 632   sigemptyset(&unblocked_sigs);
 633   sigemptyset(&allowdebug_blocked_sigs);
 634   sigaddset(&unblocked_sigs, SIGILL);
 635   sigaddset(&unblocked_sigs, SIGSEGV);
 636   sigaddset(&unblocked_sigs, SIGBUS);
 637   sigaddset(&unblocked_sigs, SIGFPE);
 638   sigaddset(&unblocked_sigs, SIGTRAP);
 639   sigaddset(&unblocked_sigs, SIGDANGER);
 640   sigaddset(&unblocked_sigs, SR_signum);
 641 
 642   if (!ReduceSignalUsage) {
 643    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
 644      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
 645      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
 646    }
 647    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
 648      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
 649      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
 650    }
 651    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
 652      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
 653      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
 654    }
 655   }
 656   // Fill in signals that are blocked by all but the VM thread.
 657   sigemptyset(&vm_sigs);
 658   if (!ReduceSignalUsage)
 659     sigaddset(&vm_sigs, BREAK_SIGNAL);
 660   debug_only(signal_sets_initialized = true);
 661 }
 662 
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Returns a pointer to the file-static set built by signal_sets_init().
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
 669 
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
// Returns a pointer to the file-static set built by signal_sets_init().
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
 676 
// These are signals that are blocked during cond_wait to allow debugger in
// Returns a pointer to the file-static set built by signal_sets_init().
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}
 682 
 683 void os::Aix::hotspot_sigmask(Thread* thread) {
 684 
 685   //Save caller's signal mask before setting VM signal mask
 686   sigset_t caller_sigmask;
 687   pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);
 688 
 689   OSThread* osthread = thread->osthread();
 690   osthread->set_caller_sigmask(caller_sigmask);
 691 
 692   pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);
 693 
 694   if (!ReduceSignalUsage) {
 695     if (thread->is_VM_thread()) {
 696       // Only the VM thread handles BREAK_SIGNAL ...
 697       pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
 698     } else {
 699       // ... all other threads block BREAK_SIGNAL
 700       pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
 701     }
 702   }
 703 }
 704 
 705 // retrieve memory information.
 706 // Returns false if something went wrong;
 707 // content of pmi undefined in this case.
 708 bool os::Aix::get_meminfo(meminfo_t* pmi) {
 709 
 710   assert(pmi, "get_meminfo: invalid parameter");
 711 
 712   memset(pmi, 0, sizeof(meminfo_t));
 713 
 714   if (os::Aix::on_pase()) {
 715     // On PASE, use the libo4 porting library.
 716 
 717     unsigned long long virt_total = 0;
 718     unsigned long long real_total = 0;
 719     unsigned long long real_free = 0;
 720     unsigned long long pgsp_total = 0;
 721     unsigned long long pgsp_free = 0;
 722     if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
 723       pmi->virt_total = virt_total;
 724       pmi->real_total = real_total;
 725       pmi->real_free = real_free;
 726       pmi->pgsp_total = pgsp_total;
 727       pmi->pgsp_free = pgsp_free;
 728       return true;
 729     }
 730     return false;
 731 
 732   } else {
 733 
 734     // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
 735     // See:
 736     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 737     //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
 738     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 739     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 740 
 741     perfstat_memory_total_t psmt;
 742     memset (&psmt, '\0', sizeof(psmt));
 743     const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
 744     if (rc == -1) {
 745       trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
 746       assert(0, "perfstat_memory_total() failed");
 747       return false;
 748     }
 749 
 750     assert(rc == 1, "perfstat_memory_total() - weird return code");
 751 
 752     // excerpt from
 753     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 754     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 755     // The fields of perfstat_memory_total_t:
 756     // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
 757     // u_longlong_t real_total         Total real memory (in 4 KB pages).
 758     // u_longlong_t real_free          Free real memory (in 4 KB pages).
 759     // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
 760     // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
 761 
 762     pmi->virt_total = psmt.virt_total * 4096;
 763     pmi->real_total = psmt.real_total * 4096;
 764     pmi->real_free = psmt.real_free * 4096;
 765     pmi->pgsp_total = psmt.pgsp_total * 4096;
 766     pmi->pgsp_free = psmt.pgsp_free * 4096;
 767 
 768     return true;
 769 
 770   }
 771 } // end os::Aix::get_meminfo
 772 
 773 //////////////////////////////////////////////////////////////////////////////
 774 // create new thread
 775 
// Thread start routine for all newly created threads.
// Runs on the new pthread: records stack geometry, publishes the Thread*
// in TLS, initializes signal mask and FPU state, calls Thread::run(), and
// clears TLS again before the pthread exits.
static void *thread_native_entry(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Retrieve both ids: the pthread id (becomes OSThread::thread_id) and the
  // AIX kernel thread id (kept purely for diagnostics).
  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    log_warning(os, thread)("Thread stack not in data segment.");
  }

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  // (The alloca() result is deliberately unused - it only shifts this
  // frame's stack pointer by a pseudo-random multiple of 128 bytes.)

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  // Publish this Thread* so Thread::current() works on this thread.
  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // Thread_id is pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  os::Aix::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  // NOTE(review): the launcher is expected to have moved the state from
  // ALLOCATED to RUNNABLE before resuming us - confirm against Thread::start.
  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->run();

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  return 0;
}
 849 
 850 bool os::create_thread(Thread* thread, ThreadType thr_type,
 851                        size_t req_stack_size) {
 852 
 853   assert(thread->osthread() == NULL, "caller responsible");
 854 
 855   // Allocate the OSThread object
 856   OSThread* osthread = new OSThread(NULL, NULL);
 857   if (osthread == NULL) {
 858     return false;
 859   }
 860 
 861   // set the correct thread state
 862   osthread->set_thread_type(thr_type);
 863 
 864   // Initial state is ALLOCATED but not INITIALIZED
 865   osthread->set_state(ALLOCATED);
 866 
 867   thread->set_osthread(osthread);
 868 
 869   // init thread attributes
 870   pthread_attr_t attr;
 871   pthread_attr_init(&attr);
 872   guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");
 873 
 874   // Make sure we run in 1:1 kernel-user-thread mode.
 875   if (os::Aix::on_aix()) {
 876     guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
 877     guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
 878   } // end: aix
 879 
 880   // Start in suspended state, and in os::thread_start, wake the thread up.
 881   guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");
 882 
 883   // calculate stack size if it's not specified by caller
 884   size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
 885   pthread_attr_setstacksize(&attr, stack_size);
 886 
 887   pthread_t tid;
 888   int ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);
 889 
 890   char buf[64];
 891   if (ret == 0) {
 892     log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
 893       (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
 894   } else {
 895     log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.",
 896       ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
 897   }
 898 
 899   pthread_attr_destroy(&attr);
 900 
 901   if (ret != 0) {
 902     // Need to clean up stuff we've allocated so far
 903     thread->set_osthread(NULL);
 904     delete osthread;
 905     return false;
 906   }
 907 
 908   // OSThread::thread_id is the pthread id.
 909   osthread->set_thread_id(tid);
 910 
 911   return true;
 912 }
 913 
 914 /////////////////////////////////////////////////////////////////////////////
 915 // attach existing thread
 916 
// bootstrap the main thread
// The primordial thread already exists, so this merely attaches it to the VM.
bool os::create_main_thread(JavaThread* thread) {
  // May only ever be invoked on the thread that started the VM.
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}
 922 
// Attach an externally created thread (JNI AttachCurrentThread, or the
// primordial thread) to the VM: allocate its OSThread, record its ids,
// set up FPU state and signal mask. Returns false only on allocation failure.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Record both ids of the calling thread (see thread_native_entry).
  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // Initial thread state is RUNNABLE (the thread is already running).
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  // Record the NUMA locality group, if available.
  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask (restored again in os::free_thread)
  os::Aix::hotspot_sigmask(thread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  return true;
}
 968 
 969 void os::pd_start_thread(Thread* thread) {
 970   int status = pthread_continue_np(thread->osthread()->pthread_id());
 971   assert(status == 0, "thr_continue failed");
 972 }
 973 
 974 // Free OS resources related to the OSThread
 975 void os::free_thread(OSThread* osthread) {
 976   assert(osthread != NULL, "osthread not set");
 977 
 978   // We are told to free resources of the argument thread,
 979   // but we can only really operate on the current thread.
 980   assert(Thread::current()->osthread() == osthread,
 981          "os::free_thread but not current thread");
 982 
 983   // Restore caller's signal mask
 984   sigset_t sigmask = osthread->caller_sigmask();
 985   pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
 986 
 987   delete osthread;
 988 }
 989 
 990 ////////////////////////////////////////////////////////////////////////////////
 991 // time support
 992 
 993 // Time since start-up in seconds to a fine granularity.
 994 // Used by VMSelfDestructTimer and the MemProfiler.
 995 double os::elapsedTime() {
 996   return (double)(os::elapsed_counter()) * 0.000001;
 997 }
 998 
 999 jlong os::elapsed_counter() {
1000   timeval time;
1001   int status = gettimeofday(&time, NULL);
1002   return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
1003 }
1004 
1005 jlong os::elapsed_frequency() {
1006   return (1000 * 1000);
1007 }
1008 
1009 bool os::supports_vtime() { return true; }
1010 bool os::enable_vtime()   { return false; }
1011 bool os::vtime_enabled()  { return false; }
1012 
1013 double os::elapsedVTime() {
1014   struct rusage usage;
1015   int retval = getrusage(RUSAGE_THREAD, &usage);
1016   if (retval == 0) {
1017     return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
1018   } else {
1019     // better than nothing, but not much
1020     return elapsedTime();
1021   }
1022 }
1023 
1024 jlong os::javaTimeMillis() {
1025   timeval time;
1026   int status = gettimeofday(&time, NULL);
1027   assert(status != -1, "aix error at gettimeofday()");
1028   return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1029 }
1030 
1031 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1032   timeval time;
1033   int status = gettimeofday(&time, NULL);
1034   assert(status != -1, "aix error at gettimeofday()");
1035   seconds = jlong(time.tv_sec);
1036   nanos = jlong(time.tv_usec) * 1000;
1037 }
1038 
// Nano-second timer used for System.nanoTime() and friends.
// On AIX this is monotonic (timebase register); on PASE it falls back to
// gettimeofday, which is only micro-second granular and - NOTE(review) -
// presumably not monotonic if the wall clock is adjusted; confirm.
jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {

    timeval time;
    int status = gettimeofday(&time, NULL);
    assert(status != -1, "PASE error at gettimeofday()");
    jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
    return 1000 * usecs;

  } else {
    // On AIX use the precision of processors real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After convertion we have following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonic increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      // Raw timebase ticks were returned; convert them to sec/nsec in place.
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}
1067 
1068 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1069   info_ptr->max_value = ALL_64_BITS;
1070   // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
1071   info_ptr->may_skip_backward = false;
1072   info_ptr->may_skip_forward = false;
1073   info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
1074 }
1075 
1076 // Return the real, user, and system times in seconds from an
1077 // arbitrary fixed point in the past.
1078 bool os::getTimesSecs(double* process_real_time,
1079                       double* process_user_time,
1080                       double* process_system_time) {
1081   struct tms ticks;
1082   clock_t real_ticks = times(&ticks);
1083 
1084   if (real_ticks == (clock_t) (-1)) {
1085     return false;
1086   } else {
1087     double ticks_per_second = (double) clock_tics_per_sec;
1088     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1089     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1090     *process_real_time = ((double) real_ticks) / ticks_per_second;
1091 
1092     return true;
1093   }
1094 }
1095 
1096 char * os::local_time_string(char *buf, size_t buflen) {
1097   struct tm t;
1098   time_t long_time;
1099   time(&long_time);
1100   localtime_r(&long_time, &t);
1101   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1102                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1103                t.tm_hour, t.tm_min, t.tm_sec);
1104   return buf;
1105 }
1106 
// Thread-safe localtime: fills *res with the broken-down local time for
// *clock and returns res (NULL on failure, per localtime_r).
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}
1110 
1111 ////////////////////////////////////////////////////////////////////////////////
1112 // runtime exit support
1113 
// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    // Give the embedder a last chance to run its own cleanup.
    abort_hook();
  }
}
1134 
// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
// Terminates the process; dumps core first if requested (non-product builds
// also announce the dumping thread on stderr).
void os::abort(bool dump_core, void* siginfo, const void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  // No core requested: plain process exit with a failure status.
  ::exit(1);
}
1154 
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}
1159 
1160 // This method is a copy of JDK's sysGetLastErrorString
1161 // from src/solaris/hpi/src/system_md.c
1162 
1163 size_t os::lasterror(char *buf, size_t len) {
1164   if (errno == 0) return 0;
1165 
1166   const char *s = os::strerror(errno);
1167   size_t n = ::strlen(s);
1168   if (n >= len) {
1169     n = len - 1;
1170   }
1171   ::strncpy(buf, s, n);
1172   buf[n] = '\0';
1173   return n;
1174 }
1175 
// On AIX the thread id reported by the VM is the pthread id
// (see thread_native_entry / OSThread::set_thread_id).
intx os::current_thread_id() {
  return (intx)pthread_self();
}

int os::current_process_id() {
  return getpid();
}
1183 
// DLL functions

// File-name extension of dynamically loadable modules on AIX.
const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }
1191 
1192 static bool file_exists(const char* filename) {
1193   struct stat statbuf;
1194   if (filename == NULL || strlen(filename) == 0) {
1195     return false;
1196   }
1197   return os::stat(filename, &statbuf) == 0;
1198 }
1199 
// Compose the library file name "lib<fname>.so" into 'buffer', optionally
// prefixed by a directory. If 'pname' is a search path (contains the path
// separator), the first directory containing the library wins. Returns true
// iff 'buffer' holds a usable name.
bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  // (+10 covers "lib", ".so", the '/' separator and the terminating '\0'.)
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    // No directory prefix requested.
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // 'pname' is a search path: try each element in order.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage (split_path allocates each element and the array
    // with C-heap allocation; both must be freed with FREE_C_HEAP_ARRAY)
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    // 'pname' is a single directory.
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
1247 
1248 // Check if addr is inside libjvm.so.
1249 bool os::address_is_in_vm(address addr) {
1250 
1251   // Input could be a real pc or a function pointer literal. The latter
1252   // would be a function descriptor residing in the data segment of a module.
1253   loaded_module_t lm;
1254   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
1255     return lm.is_in_vm;
1256   } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
1257     return lm.is_in_vm;
1258   } else {
1259     return false;
1260   }
1261 
1262 }
1263 
1264 // Resolve an AIX function descriptor literal to a code pointer.
1265 // If the input is a valid code pointer to a text segment of a loaded module,
1266 //   it is returned unchanged.
1267 // If the input is a valid AIX function descriptor, it is resolved to the
1268 //   code entry point.
1269 // If the input is neither a valid function descriptor nor a valid code pointer,
1270 //   NULL is returned.
1271 static address resolve_function_descriptor_to_code_pointer(address p) {
1272 
1273   if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
1274     // It is a real code pointer.
1275     return p;
1276   } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
1277     // Pointer to data segment, potential function descriptor.
1278     address code_entry = (address)(((FunctionDescriptor*)p)->entry());
1279     if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
1280       // It is a function descriptor.
1281       return code_entry;
1282     }
1283   }
1284 
1285   return NULL;
1286 }
1287 
1288 bool os::dll_address_to_function_name(address addr, char *buf,
1289                                       int buflen, int *offset,
1290                                       bool demangle) {
1291   if (offset) {
1292     *offset = -1;
1293   }
1294   // Buf is not optional, but offset is optional.
1295   assert(buf != NULL, "sanity check");
1296   buf[0] = '\0';
1297 
1298   // Resolve function ptr literals first.
1299   addr = resolve_function_descriptor_to_code_pointer(addr);
1300   if (!addr) {
1301     return false;
1302   }
1303 
1304   return AixSymbols::get_function_name(addr, buf, buflen, offset, NULL, demangle);
1305 }
1306 
1307 bool os::dll_address_to_library_name(address addr, char* buf,
1308                                      int buflen, int* offset) {
1309   if (offset) {
1310     *offset = -1;
1311   }
1312   // Buf is not optional, but offset is optional.
1313   assert(buf != NULL, "sanity check");
1314   buf[0] = '\0';
1315 
1316   // Resolve function ptr literals first.
1317   addr = resolve_function_descriptor_to_code_pointer(addr);
1318   if (!addr) {
1319     return false;
1320   }
1321 
1322   return AixSymbols::get_module_name(addr, buf, buflen);
1323 }
1324 
1325 // Loads .dll/.so and in case of error it checks if .dll/.so was built
1326 // for the same architecture as Hotspot is running on.
1327 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1328 
1329   if (ebuf && ebuflen > 0) {
1330     ebuf[0] = '\0';
1331     ebuf[ebuflen - 1] = '\0';
1332   }
1333 
1334   if (!filename || strlen(filename) == 0) {
1335     ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1336     return NULL;
1337   }
1338 
1339   // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1340   void * result= ::dlopen(filename, RTLD_LAZY);
1341   if (result != NULL) {
1342     // Reload dll cache. Don't do this in signal handling.
1343     LoadedLibraries::reload();
1344     return result;
1345   } else {
1346     // error analysis when dlopen fails
1347     const char* const error_report = ::dlerror();
1348     if (error_report && ebuf && ebuflen > 0) {
1349       snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1350                filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1351     }
1352   }
1353   return NULL;
1354 }
1355 
1356 void* os::dll_lookup(void* handle, const char* name) {
1357   void* res = dlsym(handle, name);
1358   return res;
1359 }
1360 
// Handle for symbol lookup in the main program and its loaded modules
// (dlopen(NULL) returns the global handle).
void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}
1364 
// List all currently loaded modules (used in error reports).
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}
1369 
1370 void os::get_summary_os_info(char* buf, size_t buflen) {
1371   // There might be something more readable than uname results for AIX.
1372   struct utsname name;
1373   uname(&name);
1374   snprintf(buf, buflen, "%s %s", name.release, name.version);
1375 }
1376 
1377 void os::print_os_info(outputStream* st) {
1378   st->print("OS:");
1379 
1380   st->print("uname:");
1381   struct utsname name;
1382   uname(&name);
1383   st->print(name.sysname); st->print(" ");
1384   st->print(name.nodename); st->print(" ");
1385   st->print(name.release); st->print(" ");
1386   st->print(name.version); st->print(" ");
1387   st->print(name.machine);
1388   st->cr();
1389 
1390   uint32_t ver = os::Aix::os_version();
1391   st->print_cr("AIX kernel version %u.%u.%u.%u",
1392                (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1393 
1394   // rlimit
1395   st->print("rlimit:");
1396   struct rlimit rlim;
1397 
1398   st->print(" STACK ");
1399   getrlimit(RLIMIT_STACK, &rlim);
1400   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1401   else st->print("%uk", rlim.rlim_cur >> 10);
1402 
1403   st->print(", CORE ");
1404   getrlimit(RLIMIT_CORE, &rlim);
1405   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1406   else st->print("%uk", rlim.rlim_cur >> 10);
1407 
1408   st->print(", NPROC ");
1409   st->print("%d", sysconf(_SC_CHILD_MAX));
1410 
1411   st->print(", NOFILE ");
1412   getrlimit(RLIMIT_NOFILE, &rlim);
1413   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1414   else st->print("%d", rlim.rlim_cur);
1415 
1416   st->print(", AS ");
1417   getrlimit(RLIMIT_AS, &rlim);
1418   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1419   else st->print("%uk", rlim.rlim_cur >> 10);
1420 
1421   // Print limits on DATA, because it limits the C-heap.
1422   st->print(", DATA ");
1423   getrlimit(RLIMIT_DATA, &rlim);
1424   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1425   else st->print("%uk", rlim.rlim_cur >> 10);
1426   st->cr();
1427 
1428   // load average
1429   st->print("load average:");
1430   double loadavg[3] = {-1.L, -1.L, -1.L};
1431   os::loadavg(loadavg, 3);
1432   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1433   st->cr();
1434 
1435   // print wpar info
1436   libperfstat::wparinfo_t wi;
1437   if (libperfstat::get_wparinfo(&wi)) {
1438     st->print_cr("wpar info");
1439     st->print_cr("name: %s", wi.name);
1440     st->print_cr("id:   %d", wi.wpar_id);
1441     st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1442   }
1443 
1444   // print partition info
1445   libperfstat::partitioninfo_t pi;
1446   if (libperfstat::get_partitioninfo(&pi)) {
1447     st->print_cr("partition info");
1448     st->print_cr(" name: %s", pi.name);
1449   }
1450 
1451 }
1452 
1453 void os::print_memory_info(outputStream* st) {
1454 
1455   st->print_cr("Memory:");
1456 
1457   st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1458     describe_pagesize(g_multipage_support.pagesize));
1459   st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1460     describe_pagesize(g_multipage_support.datapsize));
1461   st->print_cr("  Text page size:                         %s",
1462     describe_pagesize(g_multipage_support.textpsize));
1463   st->print_cr("  Thread stack page size (pthread):       %s",
1464     describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1465   st->print_cr("  Default shared memory page size:        %s",
1466     describe_pagesize(g_multipage_support.shmpsize));
1467   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1468     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1469   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1470     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1471   st->print_cr("  Multipage error: %d",
1472     g_multipage_support.error);
1473   st->cr();
1474   st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1475 
1476   // print out LDR_CNTRL because it affects the default page sizes
1477   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1478   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1479 
1480   // Print out EXTSHM because it is an unsupported setting.
1481   const char* const extshm = ::getenv("EXTSHM");
1482   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1483   if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1484     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1485   }
1486 
1487   // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1488   const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1489   st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1490       aixthread_guardpages ? aixthread_guardpages : "<unset>");
1491 
1492   os::Aix::meminfo_t mi;
1493   if (os::Aix::get_meminfo(&mi)) {
1494     char buffer[256];
1495     if (os::Aix::on_aix()) {
1496       st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1497       st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1498       st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1499       st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1500     } else {
1501       // PASE - Numbers are result of QWCRSSTS; they mean:
1502       // real_total: Sum of all system pools
1503       // real_free: always 0
1504       // pgsp_total: we take the size of the system ASP
1505       // pgsp_free: size of system ASP times percentage of system ASP unused
1506       st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1507       st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1508       st->print_cr("%% system asp used : " SIZE_FORMAT,
1509         mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1510     }
1511     st->print_raw(buffer);
1512   }
1513   st->cr();
1514 
1515   // Print segments allocated with os::reserve_memory.
1516   st->print_cr("internal virtual memory regions used by vm:");
1517   vmembk_print_on(st);
1518 }
1519 
1520 // Get a string for the cpuinfo that is a summary of the cpu type
1521 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1522   // This looks good
1523   libperfstat::cpuinfo_t ci;
1524   if (libperfstat::get_cpuinfo(&ci)) {
1525     strncpy(buf, ci.version, buflen);
1526   } else {
1527     strncpy(buf, "AIX", buflen);
1528   }
1529 }
1530 
// Print a short CPU summary; must be safe to call from the error handler.
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  st->print("CPU:");
  st->print("total %d", os::processor_count());
  // It's not safe to query number of active processors after crash.
  // st->print("(active %d)", os::active_processor_count());
  st->print(" %s", VM_Version::features());
  st->cr();
}
1539 
1540 static void print_signal_handler(outputStream* st, int sig,
1541                                  char* buf, size_t buflen);
1542 
1543 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1544   st->print_cr("Signal Handlers:");
1545   print_signal_handler(st, SIGSEGV, buf, buflen);
1546   print_signal_handler(st, SIGBUS , buf, buflen);
1547   print_signal_handler(st, SIGFPE , buf, buflen);
1548   print_signal_handler(st, SIGPIPE, buf, buflen);
1549   print_signal_handler(st, SIGXFSZ, buf, buflen);
1550   print_signal_handler(st, SIGILL , buf, buflen);
1551   print_signal_handler(st, SR_signum, buf, buflen);
1552   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1553   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1554   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1555   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1556   print_signal_handler(st, SIGTRAP, buf, buflen);
1557   print_signal_handler(st, SIGDANGER, buf, buflen);
1558 }
1559 
// Cache for the (lazily resolved) libjvm.so path below.
static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Locate the shared object containing this very function, then
  // canonicalize its file name into buf.
  // NOTE(review): in product builds a failing dladdr()/realpath() is not
  // handled (the asserts compile out) and buf may be left undefined - confirm.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = realpath((char *)dlinfo.dli_fname, buf);
  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");

  // Cache the result; the copy is explicitly terminated in case of truncation.
  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}
1585 
// JNI symbol decoration: AIX uses undecorated C symbol names.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
1593 
1594 ////////////////////////////////////////////////////////////////////////////////
1595 // sun.misc.Signal support
1596 
// Counts SIGINT deliveries so only the first one is acted upon (see below).
static volatile jint sigint_count = 0;

// Handler installed via os::signal() for signals forwarded to Java-level
// sun.misc.Signal handlers.
static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let VM die immediately.
  if (sig == SIGINT && is_error_reported()) {
    os::die();
  }

  // Hand the signal over to the Java signal dispatcher thread.
  os::signal_notify(sig);
}
1615 
// Returns the address of UserHandler, for registration via os::signal().
void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}

// Function pointer types matching the two sigaction handler flavors.
extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}
1624 
// Install 'handler' for 'signal_number' via sigaction. Returns the previous
// handler, or (void*)-1 if registration failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  // Start from "block everything while handling" ...
  sigfillset(&(sigAct.sa_mask));

  // Do not block out synchronous signals in the signal handler.
  // Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies. Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
  // On AIX, PASE there was a case where a SIGSEGV happened, followed
  // by a SIGILL, which was blocked due to the signal mask. The process
  // just hung forever. Better to crash from a secondary signal than to hang.
  sigdelset(&(sigAct.sa_mask), SIGSEGV);
  sigdelset(&(sigAct.sa_mask), SIGBUS);
  sigdelset(&(sigAct.sa_mask), SIGILL);
  sigdelset(&(sigAct.sa_mask), SIGFPE);
  sigdelset(&(sigAct.sa_mask), SIGTRAP);

  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;

  // NOTE(review): SA_SIGINFO is set but the handler is stored via sa_handler;
  // presumably sa_handler/sa_sigaction share storage on AIX - confirm.
  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
1656 
1657 void os::signal_raise(int signal_number) {
1658   ::raise(signal_number);
1659 }
1660 
1661 //
1662 // The following code is moved from os.cpp for making this
1663 // code platform specific, which it is by its very nature.
1664 //
1665 
1666 // Will be modified when max signal is changed to be dynamic
1667 int os::sigexitnum_pd() {
1668   return NSIG;
1669 }
1670 
// A counter for each possible signal value; incremented by os::signal_notify()
// and drained (CAS-decremented) by check_pending_signals(). The extra slot
// (index NSIG) is the exit-request number returned by os::sigexitnum_pd().
static volatile jint pending_signals[NSIG+1] = { 0 };
1673 
// Wrapper functions for: sem_init(), sem_post(), sem_wait()
// On AIX, we use sem_init(), sem_post(), sem_wait()
// On Pase, we need to use msem_lock() and msem_unlock(), because Posix Semaphores
// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
// Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
// on AIX, msem_..() calls are suspected of causing problems.
static sem_t sig_sem;             // used on AIX (see local_sem_*)
static msemaphore* p_sig_msem = 0; // used on PASE; allocated lazily in local_sem_init()
1682 
1683 static void local_sem_init() {
1684   if (os::Aix::on_aix()) {
1685     int rc = ::sem_init(&sig_sem, 0, 0);
1686     guarantee(rc != -1, "sem_init failed");
1687   } else {
1688     // Memory semaphores must live in shared mem.
1689     guarantee0(p_sig_msem == NULL);
1690     p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
1691     guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
1692     guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
1693   }
1694 }
1695 
1696 static void local_sem_post() {
1697   static bool warn_only_once = false;
1698   if (os::Aix::on_aix()) {
1699     int rc = ::sem_post(&sig_sem);
1700     if (rc == -1 && !warn_only_once) {
1701       trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno));
1702       warn_only_once = true;
1703     }
1704   } else {
1705     guarantee0(p_sig_msem != NULL);
1706     int rc = ::msem_unlock(p_sig_msem, 0);
1707     if (rc == -1 && !warn_only_once) {
1708       trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno));
1709       warn_only_once = true;
1710     }
1711   }
1712 }
1713 
1714 static void local_sem_wait() {
1715   static bool warn_only_once = false;
1716   if (os::Aix::on_aix()) {
1717     int rc = ::sem_wait(&sig_sem);
1718     if (rc == -1 && !warn_only_once) {
1719       trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno));
1720       warn_only_once = true;
1721     }
1722   } else {
1723     guarantee0(p_sig_msem != NULL); // must init before use
1724     int rc = ::msem_lock(p_sig_msem, 0);
1725     if (rc == -1 && !warn_only_once) {
1726       trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno));
1727       warn_only_once = true;
1728     }
1729   }
1730 }
1731 
// Platform-dependent part of signal subsystem initialization: clears the
// per-signal pending counters and creates the wakeup semaphore.
void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  local_sem_init();
}
1739 
// Records that signal 'sig' is pending and wakes a waiter in os::signal_wait().
// The counter is bumped before the semaphore is posted so a woken waiter
// always observes the updated count.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  local_sem_post();
}
1744 
// Drains the pending_signals table: returns the number of a signal that was
// pending (decrementing its counter), or -1 if none is pending and 'wait' is
// false. With 'wait' true, blocks on the signal semaphore until a signal
// arrives, cooperating with the thread-suspension protocol while blocked.
static int check_pending_signals(bool wait) {
  // Reset the SIGINT coalescing counter (incremented in UserHandler).
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // Claim one pending occurrence of signal i with a CAS; if another
      // thread raced us the CAS fails and we simply keep scanning.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      // Block until os::signal_notify() posts the semaphore.
      local_sem_wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //

        // Re-post the token we consumed so no notification is lost ...
        local_sem_post();

        // ... then self-suspend until resumed, and retry the wait.
        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
1784 
1785 int os::signal_lookup() {
1786   return check_pending_signals(false);
1787 }
1788 
1789 int os::signal_wait() {
1790   return check_pending_signals(true);
1791 }
1792 
1793 ////////////////////////////////////////////////////////////////////////////////
1794 // Virtual Memory
1795 
// We need to keep small simple bookkeeping for os::reserve_memory and friends.

// Tags stored in vmembk_t::type. The reservation method decides how a range
// is later uncommitted (msync vs. disclaim) and released (munmap vs. shmdt).
#define VMEM_MAPPED  1
#define VMEM_SHMATED 2
1800 
// Bookkeeping record for one reserved virtual-memory range (see vmembk_add
// and friends); kept in a singly linked list headed by vmem.first.
struct vmembk_t {
  int type;         // 1 - mmap, 2 - shmat
  char* addr;
  size_t size;      // Real size, may be larger than usersize.
  size_t pagesize;  // page size of area
  vmembk_t* next;

  // True if p lies within [addr, addr + size).
  bool contains_addr(char* p) const {
    return p >= addr && p < (addr + size);
  }

  // True if [p, p + s) lies completely within this block.
  // NOTE(review): assumes s > 0; for s == 0 the p + s - 1 probe would
  // point before the range.
  bool contains_range(char* p, size_t s) const {
    return contains_addr(p) && contains_addr(p + s - 1);
  }

  void print_on(outputStream* os) const {
    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
      " bytes, %d %s pages), %s",
      addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
      (type == VMEM_SHMATED ? "shmat" : "mmap")
    );
  }

  // Check that range is a sub range of memory block (or equal to memory block);
  // also check that range is fully page aligned to the page size of the block.
  void assert_is_valid_subrange(char* p, size_t s) const {
    if (!contains_range(p, s)) {
      trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
              "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
              p, p + s, addr, addr + size);
      guarantee0(false);
    }
    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
      trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
              " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
      guarantee0(false);
    }
  }
};
1840 
// Head of the global bookkeeping list of reserved ranges; all accesses are
// serialized through the embedded critical section.
static struct {
  vmembk_t* first;
  MiscUtils::CritSect cs;
} vmem;
1845 
1846 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1847   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1848   assert0(p);
1849   if (p) {
1850     MiscUtils::AutoCritSect lck(&vmem.cs);
1851     p->addr = addr; p->size = size;
1852     p->pagesize = pagesize;
1853     p->type = type;
1854     p->next = vmem.first;
1855     vmem.first = p;
1856   }
1857 }
1858 
1859 static vmembk_t* vmembk_find(char* addr) {
1860   MiscUtils::AutoCritSect lck(&vmem.cs);
1861   for (vmembk_t* p = vmem.first; p; p = p->next) {
1862     if (p->addr <= addr && (p->addr + p->size) > addr) {
1863       return p;
1864     }
1865   }
1866   return NULL;
1867 }
1868 
1869 static void vmembk_remove(vmembk_t* p0) {
1870   MiscUtils::AutoCritSect lck(&vmem.cs);
1871   assert0(p0);
1872   assert0(vmem.first); // List should not be empty.
1873   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1874     if (*pp == p0) {
1875       *pp = p0->next;
1876       ::free(p0);
1877       return;
1878     }
1879   }
1880   assert0(false); // Not found?
1881 }
1882 
1883 static void vmembk_print_on(outputStream* os) {
1884   MiscUtils::AutoCritSect lck(&vmem.cs);
1885   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1886     vmi->print_on(os);
1887     os->cr();
1888   }
1889 }
1890 
1891 // Reserve and attach a section of System V memory.
1892 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1893 // address. Failing that, it will attach the memory anywhere.
1894 // If <requested_addr> is NULL, function will attach the memory anywhere.
1895 //
1896 // <alignment_hint> is being ignored by this function. It is very probable however that the
1897 // alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
// Should this not be enough, we can put more work into it.
1899 static char* reserve_shmated_memory (
1900   size_t bytes,
1901   char* requested_addr,
1902   size_t alignment_hint) {
1903 
1904   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1905     PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1906     bytes, requested_addr, alignment_hint);
1907 
1908   // Either give me wish address or wish alignment but not both.
1909   assert0(!(requested_addr != NULL && alignment_hint != 0));
1910 
1911   // We must prevent anyone from attaching too close to the
1912   // BRK because that may cause malloc OOM.
1913   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1914     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1915       "Will attach anywhere.", requested_addr);
1916     // Act like the OS refused to attach there.
1917     requested_addr = NULL;
1918   }
1919 
1920   // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1921   // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1922   if (os::Aix::on_pase_V5R4_or_older()) {
1923     ShouldNotReachHere();
1924   }
1925 
1926   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1927   const size_t size = align_size_up(bytes, 64*K);
1928 
1929   // Reserve the shared segment.
1930   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1931   if (shmid == -1) {
1932     trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1933     return NULL;
1934   }
1935 
1936   // Important note:
1937   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
1938   // We must right after attaching it remove it from the system. System V shm segments are global and
1939   // survive the process.
1940   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
1941 
1942   struct shmid_ds shmbuf;
1943   memset(&shmbuf, 0, sizeof(shmbuf));
1944   shmbuf.shm_pagesize = 64*K;
1945   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
1946     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
1947                size / (64*K), errno);
1948     // I want to know if this ever happens.
1949     assert(false, "failed to set page size for shmat");
1950   }
1951 
1952   // Now attach the shared segment.
1953   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
1954   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
1955   // were not a segment boundary.
1956   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
1957   const int errno_shmat = errno;
1958 
1959   // (A) Right after shmat and before handing shmat errors delete the shm segment.
1960   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
1961     trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
1962     assert(false, "failed to remove shared memory segment!");
1963   }
1964 
1965   // Handle shmat error. If we failed to attach, just return.
1966   if (addr == (char*)-1) {
1967     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
1968     return NULL;
1969   }
1970 
1971   // Just for info: query the real page size. In case setting the page size did not
1972   // work (see above), the system may have given us something other then 4K (LDR_CNTRL).
1973   const size_t real_pagesize = os::Aix::query_pagesize(addr);
1974   if (real_pagesize != shmbuf.shm_pagesize) {
1975     trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
1976   }
1977 
1978   if (addr) {
1979     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
1980       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
1981   } else {
1982     if (requested_addr != NULL) {
1983       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
1984     } else {
1985       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
1986     }
1987   }
1988 
1989   // book-keeping
1990   vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
1991   assert0(is_aligned_to(addr, os::vm_page_size()));
1992 
1993   return addr;
1994 }
1995 
1996 static bool release_shmated_memory(char* addr, size_t size) {
1997 
1998   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1999     addr, addr + size - 1);
2000 
2001   bool rc = false;
2002 
2003   // TODO: is there a way to verify shm size without doing bookkeeping?
2004   if (::shmdt(addr) != 0) {
2005     trcVerbose("error (%d).", errno);
2006   } else {
2007     trcVerbose("ok.");
2008     rc = true;
2009   }
2010   return rc;
2011 }
2012 
2013 static bool uncommit_shmated_memory(char* addr, size_t size) {
2014   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2015     addr, addr + size - 1);
2016 
2017   const bool rc = my_disclaim64(addr, size);
2018 
2019   if (!rc) {
2020     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2021     return false;
2022   }
2023   return true;
2024 }
2025 
2026 ////////////////////////////////  mmap-based routines /////////////////////////////////
2027 
2028 // Reserve memory via mmap.
2029 // If <requested_addr> is given, an attempt is made to attach at the given address.
2030 // Failing that, memory is allocated at any address.
2031 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2032 // allocate at an address aligned with the given alignment. Failing that, memory
2033 // is aligned anywhere.
2034 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2035   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2036     "alignment_hint " UINTX_FORMAT "...",
2037     bytes, requested_addr, alignment_hint);
2038 
2039   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2040   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
2041     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2042     return NULL;
2043   }
2044 
2045   // We must prevent anyone from attaching too close to the
2046   // BRK because that may cause malloc OOM.
2047   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2048     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2049       "Will attach anywhere.", requested_addr);
2050     // Act like the OS refused to attach there.
2051     requested_addr = NULL;
2052   }
2053 
2054   // Specify one or the other but not both.
2055   assert0(!(requested_addr != NULL && alignment_hint > 0));
2056 
2057   // In 64K mode, we claim the global page size (os::vm_page_size())
2058   // is 64K. This is one of the few points where that illusion may
2059   // break, because mmap() will always return memory aligned to 4K. So
2060   // we must ensure we only ever return memory aligned to 64k.
2061   if (alignment_hint) {
2062     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2063   } else {
2064     alignment_hint = os::vm_page_size();
2065   }
2066 
2067   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2068   const size_t size = align_size_up(bytes, os::vm_page_size());
2069 
2070   // alignment: Allocate memory large enough to include an aligned range of the right size and
2071   // cut off the leading and trailing waste pages.
2072   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2073   const size_t extra_size = size + alignment_hint;
2074 
2075   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2076   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2077   int flags = MAP_ANONYMOUS | MAP_SHARED;
2078 
2079   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2080   // it means if wishaddress is given but MAP_FIXED is not set.
2081   //
2082   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2083   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2084   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2085   // get clobbered.
2086   if (requested_addr != NULL) {
2087     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2088       flags |= MAP_FIXED;
2089     }
2090   }
2091 
2092   char* addr = (char*)::mmap(requested_addr, extra_size,
2093       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2094 
2095   if (addr == MAP_FAILED) {
2096     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2097     return NULL;
2098   }
2099 
2100   // Handle alignment.
2101   char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2102   const size_t waste_pre = addr_aligned - addr;
2103   char* const addr_aligned_end = addr_aligned + size;
2104   const size_t waste_post = extra_size - waste_pre - size;
2105   if (waste_pre > 0) {
2106     ::munmap(addr, waste_pre);
2107   }
2108   if (waste_post > 0) {
2109     ::munmap(addr_aligned_end, waste_post);
2110   }
2111   addr = addr_aligned;
2112 
2113   if (addr) {
2114     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2115       addr, addr + bytes, bytes);
2116   } else {
2117     if (requested_addr != NULL) {
2118       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2119     } else {
2120       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2121     }
2122   }
2123 
2124   // bookkeeping
2125   vmembk_add(addr, size, 4*K, VMEM_MAPPED);
2126 
2127   // Test alignment, see above.
2128   assert0(is_aligned_to(addr, os::vm_page_size()));
2129 
2130   return addr;
2131 }
2132 
2133 static bool release_mmaped_memory(char* addr, size_t size) {
2134   assert0(is_aligned_to(addr, os::vm_page_size()));
2135   assert0(is_aligned_to(size, os::vm_page_size()));
2136 
2137   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2138     addr, addr + size - 1);
2139   bool rc = false;
2140 
2141   if (::munmap(addr, size) != 0) {
2142     trcVerbose("failed (%d)\n", errno);
2143     rc = false;
2144   } else {
2145     trcVerbose("ok.");
2146     rc = true;
2147   }
2148 
2149   return rc;
2150 }
2151 
2152 static bool uncommit_mmaped_memory(char* addr, size_t size) {
2153 
2154   assert0(is_aligned_to(addr, os::vm_page_size()));
2155   assert0(is_aligned_to(size, os::vm_page_size()));
2156 
2157   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2158     addr, addr + size - 1);
2159   bool rc = false;
2160 
2161   // Uncommit mmap memory with msync MS_INVALIDATE.
2162   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2163     trcVerbose("failed (%d)\n", errno);
2164     rc = false;
2165   } else {
2166     trcVerbose("ok.");
2167     rc = true;
2168   }
2169 
2170   return rc;
2171 }
2172 
2173 int os::vm_page_size() {
2174   // Seems redundant as all get out.
2175   assert(os::Aix::page_size() != -1, "must call os::init");
2176   return os::Aix::page_size();
2177 }
2178 
2179 // Aix allocates memory by pages.
2180 int os::vm_allocation_granularity() {
2181   assert(os::Aix::page_size() != -1, "must call os::init");
2182   return os::Aix::page_size();
2183 }
2184 
#ifdef PRODUCT
// Logs a warning when commit_memory fails. Compiled only into product builds,
// where it supplies extra detail before vm_exit_out_of_memory() (see
// pd_commit_memory_or_exit below).
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
          os::errno_name(err), err);
}
#endif
2193 
2194 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2195                                   const char* mesg) {
2196   assert(mesg != NULL, "mesg must be specified");
2197   if (!pd_commit_memory(addr, size, exec)) {
2198     // Add extra info in product mode for vm_exit_out_of_memory():
2199     PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2200     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2201   }
2202 }
2203 
2204 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2205 
2206   assert(is_aligned_to(addr, os::vm_page_size()),
2207     "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2208     p2i(addr), os::vm_page_size());
2209   assert(is_aligned_to(size, os::vm_page_size()),
2210     "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2211     size, os::vm_page_size());
2212 
2213   vmembk_t* const vmi = vmembk_find(addr);
2214   guarantee0(vmi);
2215   vmi->assert_is_valid_subrange(addr, size);
2216 
2217   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2218 
2219   if (UseExplicitCommit) {
2220     // AIX commits memory on touch. So, touch all pages to be committed.
2221     for (char* p = addr; p < (addr + size); p += 4*K) {
2222       *p = '\0';
2223     }
2224   }
2225 
2226   return true;
2227 }
2228 
// Alignment_hint is ignored on this OS; delegate to the plain variant.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
  return pd_commit_memory(addr, size, exec);
}
2232 
// Commit-or-exit variant taking an alignment hint; the hint is ignored on AIX.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // Alignment_hint is ignored on this OS.
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
2239 
2240 bool os::pd_uncommit_memory(char* addr, size_t size) {
2241   assert(is_aligned_to(addr, os::vm_page_size()),
2242     "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2243     p2i(addr), os::vm_page_size());
2244   assert(is_aligned_to(size, os::vm_page_size()),
2245     "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2246     size, os::vm_page_size());
2247 
2248   // Dynamically do different things for mmap/shmat.
2249   const vmembk_t* const vmi = vmembk_find(addr);
2250   guarantee0(vmi);
2251   vmi->assert_is_valid_subrange(addr, size);
2252 
2253   if (vmi->type == VMEM_SHMATED) {
2254     return uncommit_shmated_memory(addr, size);
2255   } else {
2256     return uncommit_mmaped_memory(addr, size);
2257   }
2258 }
2259 
// Not used on AIX: stack guard pages need no explicit commit here.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
2265 
// Not used on AIX: counterpart of pd_create_stack_guard_pages above.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
2271 
// No-op on AIX.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2274 
// No-op on AIX.
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2277 
// No-op: this port reports a single NUMA group (see numa_get_groups_num).
void os::numa_make_global(char *addr, size_t bytes) {
}
2280 
// No-op: this port reports a single NUMA group (see numa_get_groups_num).
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}
2283 
// The NUMA topology never changes on this port (single static group).
bool os::numa_topology_changed() {
  return false;
}
2287 
// This port exposes exactly one NUMA group.
size_t os::numa_get_groups_num() {
  return 1;
}
2291 
// All threads belong to the single group 0.
int os::numa_get_group_id() {
  return 0;
}
2295 
2296 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2297   if (size > 0) {
2298     ids[0] = 0;
2299     return 1;
2300   }
2301   return 0;
2302 }
2303 
// Page info queries are not supported on this port.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}
2307 
// Page scanning is not supported on this port; report the whole range as scanned.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
2311 
2312 // Reserves and attaches a shared memory segment.
2313 // Will assert if a wish address is given and could not be obtained.
2314 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2315 
2316   // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2317   // thereby clobbering old mappings at that place. That is probably
2318   // not intended, never used and almost certainly an error were it
2319   // ever be used this way (to try attaching at a specified address
2320   // without clobbering old mappings an alternate API exists,
2321   // os::attempt_reserve_memory_at()).
2322   // Instead of mimicking the dangerous coding of the other platforms, here I
2323   // just ignore the request address (release) or assert(debug).
2324   assert0(requested_addr == NULL);
2325 
2326   // Always round to os::vm_page_size(), which may be larger than 4K.
2327   bytes = align_size_up(bytes, os::vm_page_size());
2328   const size_t alignment_hint0 =
2329     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2330 
2331   // In 4K mode always use mmap.
2332   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2333   if (os::vm_page_size() == 4*K) {
2334     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2335   } else {
2336     if (bytes >= Use64KPagesThreshold) {
2337       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2338     } else {
2339       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2340     }
2341   }
2342 }
2343 
// Releases (or partially uncommits) a range previously reserved by
// os::pd_reserve_memory(), dispatching on the reservation type recorded in
// the bookkeeping list. Returns true on success.
bool os::pd_release_memory(char* addr, size_t size) {

  // Dynamically do different things for mmap/shmat.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  size = align_size_up(size, os::vm_page_size());
  addr = (char *)align_ptr_up(addr, os::vm_page_size());

  bool rc = false;
  bool remove_bookkeeping = false;
  if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use memory anymore (but still use
    //   page table space).
    vmi->assert_is_valid_subrange(addr, size);
    if (addr == vmi->addr && size == vmi->size) {
      rc = release_shmated_memory(addr, size);
      remove_bookkeeping = true;
    } else {
      rc = uncommit_shmated_memory(addr, size);
    }
  } else {
    // User may unmap partial regions but region has to be fully contained.
#ifdef ASSERT
    vmi->assert_is_valid_subrange(addr, size);
#endif
    rc = release_mmaped_memory(addr, size);
    remove_bookkeeping = true;
  }

  // update bookkeeping
  if (rc && remove_bookkeeping) {
    vmembk_remove(vmi);
  }

  return rc;
}
2385 
2386 static bool checked_mprotect(char* addr, size_t size, int prot) {
2387 
2388   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2389   // not tell me if protection failed when trying to protect an un-protectable range.
2390   //
2391   // This means if the memory was allocated using shmget/shmat, protection wont work
2392   // but mprotect will still return 0:
2393   //
2394   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2395 
2396   bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2397 
2398   if (!rc) {
2399     const char* const s_errno = os::errno_name(errno);
2400     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2401     return false;
2402   }
2403 
2404   // mprotect success check
2405   //
2406   // Mprotect said it changed the protection but can I believe it?
2407   //
2408   // To be sure I need to check the protection afterwards. Try to
2409   // read from protected memory and check whether that causes a segfault.
2410   //
2411   if (!os::Aix::xpg_sus_mode()) {
2412 
2413     if (CanUseSafeFetch32()) {
2414 
2415       const bool read_protected =
2416         (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2417          SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2418 
2419       if (prot & PROT_READ) {
2420         rc = !read_protected;
2421       } else {
2422         rc = read_protected;
2423       }
2424 
2425       if (!rc) {
2426         if (os::Aix::on_pase()) {
2427           // There is an issue on older PASE systems where mprotect() will return success but the
2428           // memory will not be protected.
2429           // This has nothing to do with the problem of using mproect() on SPEC1170 incompatible
2430           // machines; we only see it rarely, when using mprotect() to protect the guard page of
2431           // a stack. It is an OS error.
2432           //
2433           // A valid strategy is just to try again. This usually works. :-/
2434 
2435           ::usleep(1000);
2436           if (::mprotect(addr, size, prot) == 0) {
2437             const bool read_protected_2 =
2438               (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2439               SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2440             rc = true;
2441           }
2442         }
2443       }
2444     }
2445   }
2446 
2447   assert(rc == true, "mprotect failed.");
2448 
2449   return rc;
2450 }
2451 
2452 // Set protections specified
2453 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2454   unsigned int p = 0;
2455   switch (prot) {
2456   case MEM_PROT_NONE: p = PROT_NONE; break;
2457   case MEM_PROT_READ: p = PROT_READ; break;
2458   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2459   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2460   default:
2461     ShouldNotReachHere();
2462   }
2463   // is_committed is unused.
2464   return checked_mprotect(addr, size, p);
2465 }
2466 
// Makes [addr, addr+size) inaccessible (guard pages).
bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}
2470 
// Restores full access to a previously guarded range.
bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}
2474 
// Large page support

// Stays 0 on AIX: large pages are not used (see os::large_page_init below).
static size_t _large_page_size = 0;
2478 
2479 // Enable large page support if OS allows that.
2480 void os::large_page_init() {
2481   return; // Nothing to do. See query_multipage_support and friends.
2482 }
2483 
// Not used on AIX; asserts in debug builds and returns NULL.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
  // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
  // so this is not needed.
  assert(false, "should not be called on AIX");
  return NULL;
}
2491 
bool os::release_memory_special(char* base, size_t bytes) {
  // Detaching the SHM segment will also delete it, see reserve_memory_special().
  // Not expected to be reached: reserve_memory_special() asserts/returns NULL
  // on AIX (see above), so there is nothing to release here.
  Unimplemented();
  return false;
}
2497 
// Returns the configured large page size. _large_page_size is initialized
// to 0 above and large_page_init() does not change it.
size_t os::large_page_size() {
  return _large_page_size;
}
2501 
bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2506 
bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2511 
2512 // Reserve memory at an arbitrary address, only if that area is
2513 // available (and not reserved for something else).
2514 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2515   char* addr = NULL;
2516 
2517   // Always round to os::vm_page_size(), which may be larger than 4K.
2518   bytes = align_size_up(bytes, os::vm_page_size());
2519 
2520   // In 4K mode always use mmap.
2521   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2522   if (os::vm_page_size() == 4*K) {
2523     return reserve_mmaped_memory(bytes, requested_addr, 0);
2524   } else {
2525     if (bytes >= Use64KPagesThreshold) {
2526       return reserve_shmated_memory(bytes, requested_addr, 0);
2527     } else {
2528       return reserve_mmaped_memory(bytes, requested_addr, 0);
2529     }
2530   }
2531 
2532   return addr;
2533 }
2534 
// Thin wrapper around ::read(2).
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  return ::read(fd, buf, nBytes);
}
2538 
// Positional read via ::pread(2); per POSIX, does not move the file offset.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  return ::pread(fd, buf, nBytes, offset);
}
2542 
2543 void os::naked_short_sleep(jlong ms) {
2544   struct timespec req;
2545 
2546   assert(ms < 1000, "Un-interruptable sleep, short time use only");
2547   req.tv_sec = 0;
2548   if (ms > 0) {
2549     req.tv_nsec = (ms % 1000) * 1000000;
2550   }
2551   else {
2552     req.tv_nsec = 1;
2553   }
2554 
2555   nanosleep(&req, NULL);
2556 
2557   return;
2558 }
2559 
2560 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2561 void os::infinite_sleep() {
2562   while (true) {    // sleep forever ...
2563     ::sleep(100);   // ... 100 seconds at a time
2564   }
2565 }
2566 
// Used to convert frequent JVM_Yield() to nops
// Controlled by the -XX:+DontYieldALot flag.
bool os::dont_yield() {
  return DontYieldALot;
}
2571 
// Thin wrapper around sched_yield(2).
void os::naked_yield() {
  sched_yield();
}
2575 
2576 ////////////////////////////////////////////////////////////////////////////////
2577 // thread priority support
2578 
2579 // From AIX manpage to pthread_setschedparam
2580 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2581 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2582 //
2583 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2584 // range from 40 to 80, where 40 is the least favored priority and 80
2585 // is the most favored."
2586 //
2587 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2588 // scheduling there; however, this still leaves iSeries.)
2589 //
2590 // We use the same values for AIX and PASE.
2591 int os::java_to_os_priority[CriticalPriority + 1] = {
2592   54,             // 0 Entry should never be used
2593 
2594   55,             // 1 MinPriority
2595   55,             // 2
2596   56,             // 3
2597 
2598   56,             // 4
2599   57,             // 5 NormPriority
2600   57,             // 6
2601 
2602   58,             // 7
2603   58,             // 8
2604   59,             // 9 NearMaxPriority
2605 
2606   60,             // 10 MaxPriority
2607 
2608   60              // 11 CriticalPriority
2609 };
2610 
2611 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2612   if (!UseThreadPriorities) return OS_OK;
2613   pthread_t thr = thread->osthread()->pthread_id();
2614   int policy = SCHED_OTHER;
2615   struct sched_param param;
2616   param.sched_priority = newpri;
2617   int ret = pthread_setschedparam(thr, policy, &param);
2618 
2619   if (ret != 0) {
2620     trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2621         (int)thr, newpri, ret, os::errno_name(ret));
2622   }
2623   return (ret == 0) ? OS_OK : OS_ERR;
2624 }
2625 
2626 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2627   if (!UseThreadPriorities) {
2628     *priority_ptr = java_to_os_priority[NormPriority];
2629     return OS_OK;
2630   }
2631   pthread_t thr = thread->osthread()->pthread_id();
2632   int policy = SCHED_OTHER;
2633   struct sched_param param;
2634   int ret = pthread_getschedparam(thr, &policy, &param);
2635   *priority_ptr = param.sched_priority;
2636 
2637   return (ret == 0) ? OS_OK : OS_ERR;
2638 }
2639 
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
// Intentionally a no-op in this port.
void os::hint_no_preempt() {}
2643 
2644 ////////////////////////////////////////////////////////////////////////////////
2645 // suspend/resume support
2646 
2647 //  the low-level signal-based suspend/resume support is a remnant from the
2648 //  old VM-suspension that used to be for java-suspension, safepoints etc,
2649 //  within hotspot. Now there is a single use-case for this:
2650 //    - calling get_thread_pc() on the VMThread by the flat-profiler task
2651 //      that runs in the watcher thread.
2652 //  The remaining code is greatly simplified from the more general suspension
2653 //  code that used to be used.
2654 //
2655 //  The protocol is quite simple:
2656 //  - suspend:
2657 //      - sends a signal to the target thread
2658 //      - polls the suspend state of the osthread using a yield loop
2659 //      - target thread signal handler (SR_handler) sets suspend state
2660 //        and blocks in sigsuspend until continued
2661 //  - resume:
2662 //      - sets target osthread state to continue
2663 //      - sends signal to end the sigsuspend loop in the SR_handler
2664 //
2665 //  Note that the SR_lock plays no role in this suspend/resume protocol,
2666 //  but is checked for NULL in SR_handler as a thread termination indicator.
2667 //
2668 
2669 static void resume_clear_context(OSThread *osthread) {
2670   osthread->set_ucontext(NULL);
2671   osthread->set_siginfo(NULL);
2672 }
2673 
2674 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2675   osthread->set_ucontext(context);
2676   osthread->set_siginfo(siginfo);
2677 }
2678 
2679 //
2680 // Handler function invoked when a thread's execution is suspended or
2681 // resumed. We have to be careful that only async-safe functions are
2682 // called here (Note: most pthread functions are not async safe and
2683 // should be avoided.)
2684 //
2685 // Note: sigwait() is a more natural fit than sigsuspend() from an
2686 // interface point of view, but sigwait() prevents the signal hander
2687 // from being run. libpthread would get very confused by not having
2688 // its signal handlers run and prevents sigwait()'s use with the
2689 // mutex granting granting signal.
2690 //
2691 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
2692 //
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current_or_null_safe();
  assert(thread != NULL, "Missing current thread in SR_handler");

  // On some systems we have seen signal delivery get "stuck" until the signal
  // mask is changed as part of thread termination. Check that the current thread
  // has not already terminated (via SR_lock()) - else the following assertion
  // will fail because the thread is no longer a JavaThread as the ~JavaThread
  // destructor has completed.

  if (thread->SR_lock() == NULL) {
    return;
  }

  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  OSThread* osthread = thread->osthread();

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    // Save the context *before* publishing the SUSPENDED state, so the
    // suspender only ever sees a fully recorded context.
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        // Only leave once do_resume() has moved the state to SR_RUNNING;
        // any other wakeup from sigsuspend() just loops again.
        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    ShouldNotReachHere();
  }

  errno = old_errno;
}
2755 
// One-time setup for the suspend/resume mechanism: choose the signal number
// (overridable via the _JAVA_SR_SIGNUM environment variable) and install
// SR_handler for it. Returns 0 on success, -1 if sigaction() fails.
static int SR_initialize() {
  struct sigaction act;
  char *s;
  // Get signal number to use for suspend/resume
  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
    int sig = ::strtol(s, 0, 10);
    if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
        sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
      SR_signum = sig;
    } else {
      warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
              sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
    }
  }

  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");

  sigemptyset(&SR_sigset);
  sigaddset(&SR_sigset, SR_signum);

  // Set up signal handler for suspend/resume.
  act.sa_flags = SA_RESTART|SA_SIGINFO;
  // NOTE(review): SA_SIGINFO is set but the three-argument handler is stored
  // through sa_handler (with a cast) instead of sa_sigaction — this relies on
  // the two fields sharing storage; confirm that holds on AIX.
  act.sa_handler = (void (*)(int)) SR_handler;

  // SR_signum is blocked by default.
  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);

  if (sigaction(SR_signum, &act, 0) == -1) {
    return -1;
  }

  // Save signal flag
  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
  return 0;
}
2792 
// Counterpart to SR_initialize(); nothing to tear down.
static int SR_finalize() {
  return 0;
}
2796 
2797 static int sr_notify(OSThread* osthread) {
2798   int status = pthread_kill(osthread->pthread_id(), SR_signum);
2799   assert_status(status == 0, status, "pthread_kill");
2800   return status;
2801 }
2802 
// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
// (used as the outer/inner loop bounds in do_suspend()/do_resume() below).
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;
2808 
// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  // Spin/yield until SR_handler marks the thread suspended; after
  // RANDOMLY_LARGE_INTEGER outer iterations give up and try to cancel.
  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::naked_yield();
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
2861 
// Counterpart to do_suspend(): request a wakeup and keep signalling the
// target thread until SR_handler reports it running again.
static void do_resume(OSThread* osthread) {
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (!osthread->sr.is_running()) {
    if (sr_notify(osthread) == 0) {
      // Signal delivered; yield while waiting for the state to flip, and
      // re-send the signal if the thread still has not woken up.
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::naked_yield();
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
2885 
2886 ///////////////////////////////////////////////////////////////////////////////////
2887 // signal handling (except suspend/resume)
2888 
2889 // This routine may be used by user applications as a "hook" to catch signals.
2890 // The user-defined signal handler must pass unrecognized signals to this
2891 // routine, and if it returns true (non-zero), then the signal handler must
2892 // return immediately. If the flag "abort_if_unrecognized" is true, then this
2893 // routine will never retun false (zero), but instead will execute a VM panic
2894 // routine kill the process.
2895 //
2896 // If this routine returns false, it is OK to call it again. This allows
2897 // the user-defined signal handler to perform checks either before or after
2898 // the VM performs its own checks. Naturally, the user code would be making
2899 // a serious error if it tried to handle an exception (such as a null check
2900 // or breakpoint) that the VM was generating for its own correct operation.
2901 //
2902 // This routine may recognize any of the following kinds of signals:
2903 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2904 // It should be consulted by handlers for any of those signals.
2905 //
2906 // The caller of this routine must pass in the three arguments supplied
2907 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
2908 // field of the structure passed to sigaction(). This routine assumes that
2909 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2910 //
2911 // Note that the VM will print warnings if it detects conflicting signal
2912 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2913 //
2914 extern "C" JNIEXPORT int
2915 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2916 
// Set thread signal mask (for some reason on AIX sigthreadmask() seems
// to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same.
// Returns true on success (pthread_sigmask returned 0), false otherwise.
bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  const int rc = ::pthread_sigmask(how, set, oset);
  // return value semantics differ slightly for error case:
  // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno
  // (so, pthread_sigmask is more theadsafe for error handling)
  // But success is always 0.
  return rc == 0;
}
2928 
2929 // Function to unblock all signals which are, according
2930 // to POSIX, typical program error signals. If they happen while being blocked,
2931 // they typically will bring down the process immediately.
2932 bool unblock_program_error_signals() {
2933   sigset_t set;
2934   ::sigemptyset(&set);
2935   ::sigaddset(&set, SIGILL);
2936   ::sigaddset(&set, SIGBUS);
2937   ::sigaddset(&set, SIGFPE);
2938   ::sigaddset(&set, SIGSEGV);
2939   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2940 }
2941 
2942 // Renamed from 'signalHandler' to avoid collision with other shared libs.
2943 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2944   assert(info != NULL && uc != NULL, "it must be old kernel");
2945 
2946   // Never leave program error signals blocked;
2947   // on all our platforms they would bring down the process immediately when
2948   // getting raised while being blocked.
2949   unblock_program_error_signals();
2950 
2951   int orig_errno = errno;  // Preserve errno value over signal handler.
2952   JVM_handle_aix_signal(sig, info, uc, true);
2953   errno = orig_errno;
2954 }
2955 
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining
// sigact[] stores the pre-existing handlers the VM replaced; membership in
// 'sigs' marks which entries are valid (see save_preinstalled_handler()).
struct sigaction sigact[NSIG];
sigset_t sigs;
bool os::Aix::libjsig_is_loaded = false;
// Pointer to libjsig's JVM_get_signal_action, resolved in install_signal_handlers().
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Aix::get_signal_action = NULL;
2966 
2967 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2968   struct sigaction *actp = NULL;
2969 
2970   if (libjsig_is_loaded) {
2971     // Retrieve the old signal handler from libjsig
2972     actp = (*get_signal_action)(sig);
2973   }
2974   if (actp == NULL) {
2975     // Retrieve the preinstalled signal handler from jvm
2976     actp = get_preinstalled_handler(sig);
2977   }
2978 
2979   return actp;
2980 }
2981 
// Invoke the previously-installed (chained) handler described by actp,
// emulating the sa_flags it was registered with (SA_NODEFER, SA_SIGINFO,
// SA_RESETHAND) and its signal mask. Returns false for SIG_DFL so the VM
// handles the signal itself; true otherwise (including SIG_IGN).
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    // Emulate SA_RESETHAND: revert the saved entry to SIG_DFL before calling.
    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}
3026 
3027 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3028   bool chained = false;
3029   // signal-chaining
3030   if (UseSignalChaining) {
3031     struct sigaction *actp = get_chained_signal_action(sig);
3032     if (actp != NULL) {
3033       chained = call_chained_handler(actp, sig, siginfo, context);
3034     }
3035   }
3036   return chained;
3037 }
3038 
3039 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3040   if (sigismember(&sigs, sig)) {
3041     return &sigact[sig];
3042   }
3043   return NULL;
3044 }
3045 
// Record the handler that was installed for 'sig' before the VM replaced it.
// The entry is stored first, then marked valid in 'sigs'.
void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigact[sig] = oldAct;
  sigaddset(&sigs, sig);
}
3051 
// for diagnostic
// Per-signal record of the sa_flags the VM installed; compared against the
// live flags later (see check_signal_handler / print_signal_handler).
int sigflags[NSIG];
3054 
// Return the sa_flags the VM recorded for 'sig' (0 if never set).
int os::Aix::get_our_sigflags(int sig) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  return sigflags[sig];
}
3059 
3060 void os::Aix::set_our_sigflags(int sig, int flags) {
3061   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3062   if (sig > 0 && sig < NSIG) {
3063     sigflags[sig] = flags;
3064   }
3065 }
3066 
// Install the VM's handler for 'sig' (or restore SIG_DFL if !set_installed),
// honoring -XX:+AllowUserSignalHandlers and -XX:+UseSignalChaining when a
// foreign handler is already present.
void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal("Encountered unexpected pre-existing sigaction handler "
            "%#lx for signal %d.", (long)oldhand, sig);
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Paranoia check: the handler read at the top must still have been in
  // place, i.e. no one installed a different one concurrently.
  void* oldhand2 = oldAct.sa_sigaction
                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
3113 
// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
// Idempotent: only the first call does any work.
void os::Aix::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      // JVM_begin_signal_setting resolving means libjsig is preloaded;
      // resolve its companion entry points too.
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers.
      (*begin_signal_setting)();
    }

    ::sigemptyset(&sigs);
    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);
    set_signal_handler(SIGDANGER, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers.
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // Need to initialize check_signal_done.
      ::sigemptyset(&check_signal_done);
    }
  }
}
3171 
3172 static const char* get_signal_handler_name(address handler,
3173                                            char* buf, int buflen) {
3174   int offset;
3175   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3176   if (found) {
3177     // skip directory names
3178     const char *p1, *p2;
3179     p1 = buf;
3180     size_t len = strlen(os::file_separator());
3181     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3182     // The way os::dll_address_to_library_name is implemented on Aix
3183     // right now, it always returns -1 for the offset which is not
3184     // terribly informative.
3185     // Will fix that. For now, omit the offset.
3186     jio_snprintf(buf, buflen, "%s", p1);
3187   } else {
3188     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3189   }
3190   return buf;
3191 }
3192 
// Print one line describing the handler currently installed for 'sig':
// handler name, sa_mask, sa_flags, plus a note if the flags differ from
// what the VM originally installed.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // Maybe the handler was reset by VMError? If so, report its flags instead.
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  // Print textual representation of sa_flags.
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Check for flags, reset system-used one!
    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
    }
  }
  st->cr();
}
3239 
// Check a signal's handler unless it was already flagged: once a mismatch is
// reported, check_signal_handler() adds the signal to check_signal_done and
// this macro skips it on subsequent periodic checks.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Aix::check_signal_handler(sig)
3243 
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here
// (a no-op once check_signals has been cleared, e.g. when libjsig is loaded).

void os::run_periodic_checks() {

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);
  if (UseSIGTRAP) {
    DO_SIGNAL_CHECK(SIGTRAP);
  }
  DO_SIGNAL_CHECK(SIGDANGER);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  DO_SIGNAL_CHECK(SR_signum);
}
3277 
// Signature of the C library's sigaction(); resolved lazily via dlsym in
// check_signal_handler() so any interposed version (e.g. libjsig) is bypassed.
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

static os_sigaction_t os_sigaction = NULL;
3281 
// Compare the handler currently installed for 'sig' (queried through the
// real, un-interposed sigaction) with the one the VM expects, and warn on a
// mismatching handler or changed sa_flags. Driven by run_periodic_checks()
// when CheckJNICalls is on.
void os::Aix::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler);

  // Determine which handler the VM expects to be installed for this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:");
    os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
    tty->cr();
    tty->print("  found:");
    os::Posix::print_sa_flags(tty, act.sa_flags);
    tty->cr();
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}
3353 
// To install functions for atexit system call
extern "C" {
  // atexit() requires a plain C function with no arguments; this helper just
  // forwards to perfMemory_exit() to release perf memory at process exit.
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3360 
// This is called _before_ the most of global arguments have been parsed.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (Shared memory boundary is supposed to be a 256M aligned.)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // Record process break at startup.
  g_brk_at_startup = (address) ::sbrk(0);
  assert(g_brk_at_startup != (address) -1, "sbrk failed");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc).
  os::Aix::scan_environment();

  // Probe multipage support.
  query_multipage_support();

  // Act like we only have one page size by eliminating corner cases which
  // we did not support very well anyway.
  // We have two input conditions:
  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
  //    setting.
  //    Data segment page size is important for us because it defines the thread stack page
  //    size, which is needed for guard page handling, stack banging etc.
  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
  //    and should be allocated with 64k pages.
  //
  // So, we do the following:
  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
  // 64k          no              --- AIX 5.2 ? ---
  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)

  // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if stack page size is 64k you cannot pretend its 4k).

  if (g_multipage_support.datapsize == 4*K) {
    // datapsize = 4K. Data segment, thread stacks are 4K paged.
    if (g_multipage_support.can_use_64K_pages) {
      // .. but we are able to use 64K pages dynamically.
      // This would be typical for java launchers which are not linked
      // with datapsize=64K (like, any other launcher but our own).
      //
      // In this case it would be smart to allocate the java heap with 64K
      // to get the performance benefit, and to fake 64k pages for the
      // data segment (when dealing with thread stacks).
      //
      // However, leave a possibility to downgrade to 4K, using
      // -XX:-Use64KPages.
      if (Use64KPages) {
        trcVerbose("64K page mode (faked for data segment)");
        Aix::_page_size = 64*K;
      } else {
        trcVerbose("4K page mode (Use64KPages=off)");
        Aix::_page_size = 4*K;
      }
    } else {
      // .. and not able to allocate 64k pages dynamically. Here, just
      // fall back to 4K paged mode and use mmap for everything.
      trcVerbose("4K page mode");
      Aix::_page_size = 4*K;
      FLAG_SET_ERGO(bool, Use64KPages, false);
    }
  } else {
    // datapsize = 64k. Data segment, thread stacks are 64k paged.
    // This normally means that we can allocate 64k pages dynamically.
    // (There is one special case where this may be false: EXTSHM=on.
    // but we decided to not support that mode).
    assert0(g_multipage_support.can_use_64K_pages);
    Aix::_page_size = 64*K;
    trcVerbose("64K page mode");
    FLAG_SET_ERGO(bool, Use64KPages, true);
  }

  // For now UseLargePages is just ignored.
  FLAG_SET_ERGO(bool, UseLargePages, false);
  _page_sizes[0] = 0;

  // debug trace
  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));

  // Next, we need to initialize libo4 and libperfstat libraries.
  // libo4 is the PASE porting library, libperfstat the AIX statistics library.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  // Clock ticks per second, needed for CPU-time conversions.
  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  init_random(1234567);

  ThreadCritical::initialize();

  // Main_thread points to the aboriginal thread.
  Aix::_main_thread = pthread_self();

  initial_time_count = os::elapsed_counter();
}
3474 
3475 // This is called _after_ the global arguments have been parsed.
3476 jint os::init_2(void) {
3477 
3478   if (os::Aix::on_pase()) {
3479     trcVerbose("Running on PASE.");
3480   } else {
3481     trcVerbose("Running on AIX (not PASE).");
3482   }
3483 
3484   trcVerbose("processor count: %d", os::_processor_count);
3485   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3486 
3487   // Initially build up the loaded dll map.
3488   LoadedLibraries::reload();
3489   if (Verbose) {
3490     trcVerbose("Loaded Libraries: ");
3491     LoadedLibraries::print(tty);
3492   }
3493 
3494   const int page_size = Aix::page_size();
3495   const int map_size = page_size;
3496 
3497   address map_address = (address) MAP_FAILED;
3498   const int prot  = PROT_READ;
3499   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3500 
3501   // Use optimized addresses for the polling page,
3502   // e.g. map it to a special 32-bit address.
3503   if (OptimizePollingPageLocation) {
3504     // architecture-specific list of address wishes:
3505     address address_wishes[] = {
3506       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3507       // PPC64: all address wishes are non-negative 32 bit values where
3508       // the lower 16 bits are all zero. we can load these addresses
3509       // with a single ppc_lis instruction.
3510       (address) 0x30000000, (address) 0x31000000,
3511       (address) 0x32000000, (address) 0x33000000,
3512       (address) 0x40000000, (address) 0x41000000,
3513       (address) 0x42000000, (address) 0x43000000,
3514       (address) 0x50000000, (address) 0x51000000,
3515       (address) 0x52000000, (address) 0x53000000,
3516       (address) 0x60000000, (address) 0x61000000,
3517       (address) 0x62000000, (address) 0x63000000
3518     };
3519     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3520 
3521     // iterate over the list of address wishes:
3522     for (int i=0; i<address_wishes_length; i++) {
3523       // Try to map with current address wish.
3524       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3525       // fail if the address is already mapped.
3526       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3527                                      map_size, prot,
3528                                      flags | MAP_FIXED,
3529                                      -1, 0);
3530       trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",
3531                    address_wishes[i], map_address + (ssize_t)page_size);
3532 
3533       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3534         // Map succeeded and map_address is at wished address, exit loop.
3535         break;
3536       }
3537 
3538       if (map_address != (address) MAP_FAILED) {
3539         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3540         ::munmap(map_address, map_size);
3541         map_address = (address) MAP_FAILED;
3542       }
3543       // Map failed, continue loop.
3544     }
3545   } // end OptimizePollingPageLocation
3546 
3547   if (map_address == (address) MAP_FAILED) {
3548     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3549   }
3550   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3551   os::set_polling_page(map_address);
3552 
3553   if (!UseMembar) {
3554     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3555     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3556     os::set_memory_serialize_page(mem_serialize_page);
3557 
3558     trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
3559         mem_serialize_page, mem_serialize_page + Aix::page_size(),
3560         Aix::page_size(), Aix::page_size());
3561   }
3562 
3563   // initialize suspend/resume support - must do this before signal_sets_init()
3564   if (SR_initialize() != 0) {
3565     perror("SR_initialize failed");
3566     return JNI_ERR;
3567   }
3568 
3569   Aix::signal_sets_init();
3570   Aix::install_signal_handlers();
3571 
3572   // Check and sets minimum stack sizes against command line options
3573   if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
3574     return JNI_ERR;
3575   }
3576 
3577   if (UseNUMA) {
3578     UseNUMA = false;
3579     warning("NUMA optimizations are not available on this OS.");
3580   }
3581 
3582   if (MaxFDLimit) {
3583     // Set the number of file descriptors to max. print out error
3584     // if getrlimit/setrlimit fails but continue regardless.
3585     struct rlimit nbr_files;
3586     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3587     if (status != 0) {
3588       log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
3589     } else {
3590       nbr_files.rlim_cur = nbr_files.rlim_max;
3591       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3592       if (status != 0) {
3593         log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
3594       }
3595     }
3596   }
3597 
3598   if (PerfAllowAtExitRegistration) {
3599     // Only register atexit functions if PerfAllowAtExitRegistration is set.
3600     // At exit functions can be delayed until process exit time, which
3601     // can be problematic for embedded VM situations. Embedded VMs should
3602     // call DestroyJavaVM() to assure that VM resources are released.
3603 
3604     // Note: perfMemory_exit_helper atexit function may be removed in
3605     // the future if the appropriate cleanup code can be added to the
3606     // VM_Exit VMOperation's doit method.
3607     if (atexit(perfMemory_exit_helper) != 0) {
3608       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3609     }
3610   }
3611 
3612   return JNI_OK;
3613 }
3614 
3615 // Mark the polling page as unreadable
3616 void os::make_polling_page_unreadable(void) {
3617   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3618     fatal("Could not disable polling page");
3619   }
3620 };
3621 
// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  // Changed according to os_linux.cpp.
  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
    fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
  }
};
3629 
3630 int os::active_processor_count() {
3631   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3632   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3633   return online_cpus;
3634 }
3635 
// Setting the native (OS-level) thread name is not supported on AIX;
// the requested name is silently ignored.
void os::set_native_thread_name(const char *name) {
  // Not yet implemented.
  return;
}
3640 
// Processor-set distribution is not supported on AIX; always reports failure.
bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}
3645 
// Binding the current thread to a processor is not supported on AIX;
// always reports failure.
bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}
3650 
// Suspend the target thread; if that succeeds, run the task against the
// suspended thread's saved context, then resume it. If suspension fails,
// the task is silently skipped.
void os::SuspendedThreadTask::internal_do_task() {
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    do_resume(_thread->osthread());
  }
}
3658 
// Helper task that suspends a thread and records its program counter while
// it is stopped. Used by os::get_thread_pc() below.
class PcFetcher : public os::SuspendedThreadTask {
public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  // Returns the captured PC; only valid after the task has completed.
  ExtendedPC result();
protected:
  // Callback invoked while the target thread is suspended.
  void do_task(const os::SuspendedThreadTaskContext& context);
private:
  ExtendedPC _epc;
};
3668 
// Returns the PC captured by do_task(); must not be called before the
// suspend/fetch/resume cycle has finished.
ExtendedPC PcFetcher::result() {
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}
3673 
// Runs while the target thread is suspended: extracts the PC from the
// thread's ucontext. Leaves _epc untouched when no context is available.
// NOTE(review): the null check reads osthread->ucontext() but the PC is taken
// from context.ucontext() — presumably these refer to the same context
// (see internal_do_task above); confirm.
void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  Thread* thread = context.thread();
  OSThread* osthread = thread->osthread();
  if (osthread->ucontext() != NULL) {
    _epc = os::Aix::ucontext_get_pc((const ucontext_t *) context.ucontext());
  } else {
    // NULL context is unexpected, double-check this is the VMThread.
    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  }
}
3684 
// Suspends the target using the signal mechanism and then grabs the PC before
// resuming the target. Used by the flat-profiler only
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher for the VMThread.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
  assert(thread->is_VM_thread(), "Can only be called for VMThread");

  PcFetcher fetcher(thread);
  fetcher.run();
  return fetcher.result();
}
3696 
3697 ////////////////////////////////////////////////////////////////////////////////
3698 // debug support
3699 
3700 bool os::find(address addr, outputStream* st) {
3701 
3702   st->print(PTR_FORMAT ": ", addr);
3703 
3704   loaded_module_t lm;
3705   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3706       LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3707     st->print_cr("%s", lm.path);
3708     return true;
3709   }
3710 
3711   return false;
3712 }
3713 
3714 ////////////////////////////////////////////////////////////////////////////////
3715 // misc
3716 
// This does not do anything on Aix. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters)
// on, e.g., Win32.
void
os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
                         JavaCallArguments* args, Thread* thread) {
  // Simply forward the call; no platform exception wrapping needed on AIX.
  f(value, method, args, thread);
}
3725 
// No OS-specific statistics to print on AIX; intentionally empty.
void os::print_statistics() {
}
3728 
3729 bool os::message_box(const char* title, const char* message) {
3730   int i;
3731   fdStream err(defaultStream::error_fd());
3732   for (i = 0; i < 78; i++) err.print_raw("=");
3733   err.cr();
3734   err.print_raw_cr(title);
3735   for (i = 0; i < 78; i++) err.print_raw("-");
3736   err.cr();
3737   err.print_raw_cr(message);
3738   for (i = 0; i < 78; i++) err.print_raw("=");
3739   err.cr();
3740 
3741   char buf[16];
3742   // Prevent process from exiting upon "read error" without consuming all CPU
3743   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3744 
3745   return buf[0] == 'y' || buf[0] == 'Y';
3746 }
3747 
3748 int os::stat(const char *path, struct stat *sbuf) {
3749   char pathbuf[MAX_PATH];
3750   if (strlen(path) > MAX_PATH - 1) {
3751     errno = ENAMETOOLONG;
3752     return -1;
3753   }
3754   os::native_path(strcpy(pathbuf, path));
3755   return ::stat(pathbuf, sbuf);
3756 }
3757 
3758 // Is a (classpath) directory empty?
3759 bool os::dir_is_empty(const char* path) {
3760   DIR *dir = NULL;
3761   struct dirent *ptr;
3762 
3763   dir = opendir(path);
3764   if (dir == NULL) return true;
3765 
3766   /* Scan the directory */
3767   bool result = true;
3768   char buf[sizeof(struct dirent) + MAX_PATH];
3769   while (result && (ptr = ::readdir(dir)) != NULL) {
3770     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3771       result = false;
3772     }
3773   }
3774   closedir(dir);
3775   return result;
3776 }
3777 
// This code originates from JDK's sysOpen and open64_w
// from src/solaris/hpi/src/system_md.c

// Opens 'path' with the given flags and mode. Returns the new file
// descriptor, or -1 with errno set on failure. Rejects overlong paths
// (ENAMETOOLONG) and directories (EISDIR), and marks the descriptor
// close-on-exec.
int os::open(const char *path, int oflag, int mode) {

  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;

  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  // If the open succeeded, the file might still be a directory.
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      ::close(fd);
      return -1;
    }
  }

  // All file descriptors that are opened in the JVM and not
  // specifically destined for a subprocess should have the
  // close-on-exec flag set. If we don't set it, then careless 3rd
  // party native code might fork and exec without closing all
  // appropriate file descriptors (e.g. as we do in closeDescriptors in
  // UNIXProcess.c), and this in turn might:
  //
  // - cause end-of-file to fail to be detected on some file
  //   descriptors, resulting in mysterious hangs, or
  //
  // - might cause an fopen in the subprocess to fail on a system
  //   suffering from bug 1085341.
  //
  // (Yes, the default setting of the close-on-exec flag is a Unix
  // design flaw.)
  //
  // See:
  // 1085341: 32-bit stdio routines should support file descriptors >255
  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
#ifdef FD_CLOEXEC
  {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1)
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif

  return fd;
}
3840 
3841 // create binary file, rewriting existing file if required
3842 int os::create_binary_file(const char* path, bool rewrite_existing) {
3843   int oflags = O_WRONLY | O_CREAT;
3844   if (!rewrite_existing) {
3845     oflags |= O_EXCL;
3846   }
3847   return ::open64(path, oflags, S_IREAD | S_IWRITE);
3848 }
3849 
// return current position of file pointer
jlong os::current_file_offset(int fd) {
  // lseek64 with offset 0 / SEEK_CUR simply reports the current position.
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}
3854 
// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  // Returns the resulting offset, or -1 (from lseek64) on failure.
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}
3859 
3860 // This code originates from JDK's sysAvailable
3861 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3862 
3863 int os::available(int fd, jlong *bytes) {
3864   jlong cur, end;
3865   int mode;
3866   struct stat64 buf64;
3867 
3868   if (::fstat64(fd, &buf64) >= 0) {
3869     mode = buf64.st_mode;
3870     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
3871       int n;
3872       if (::ioctl(fd, FIONREAD, &n) >= 0) {
3873         *bytes = n;
3874         return 1;
3875       }
3876     }
3877   }
3878   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
3879     return 0;
3880   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
3881     return 0;
3882   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
3883     return 0;
3884   }
3885   *bytes = end - cur;
3886   return 1;
3887 }
3888 
3889 // Map a block of memory.
3890 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3891                         char *addr, size_t bytes, bool read_only,
3892                         bool allow_exec) {
3893   int prot;
3894   int flags = MAP_PRIVATE;
3895 
3896   if (read_only) {
3897     prot = PROT_READ;
3898     flags = MAP_SHARED;
3899   } else {
3900     prot = PROT_READ | PROT_WRITE;
3901     flags = MAP_PRIVATE;
3902   }
3903 
3904   if (allow_exec) {
3905     prot |= PROT_EXEC;
3906   }
3907 
3908   if (addr != NULL) {
3909     flags |= MAP_FIXED;
3910   }
3911 
3912   // Allow anonymous mappings if 'fd' is -1.
3913   if (fd == -1) {
3914     flags |= MAP_ANONYMOUS;
3915   }
3916 
3917   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
3918                                      fd, file_offset);
3919   if (mapped_address == MAP_FAILED) {
3920     return NULL;
3921   }
3922   return mapped_address;
3923 }
3924 
// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}
3933 
// Unmap a block of memory.
// Returns true if munmap succeeded.
bool os::pd_unmap_memory(char* addr, size_t bytes) {
  return munmap(addr, bytes) == 0;
}
3938 
// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.

// Returns combined user+sys CPU time of the current thread in nanoseconds.
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
  assert(n >= 0, "negative CPU time");
  return n;
}
3952 
// Returns combined user+sys CPU time of the given thread in nanoseconds.
jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns
  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
  assert(n >= 0, "negative CPU time");
  return n;
}
3959 
// Returns CPU time of the current thread: user+sys if user_sys_cpu_time
// is true, user time only otherwise. Nanoseconds.
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  assert(n >= 0, "negative CPU time");
  return n;
}
3965 
3966 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
3967   bool error = false;
3968 
3969   jlong sys_time = 0;
3970   jlong user_time = 0;
3971 
3972   // Reimplemented using getthrds64().
3973   //
3974   // Works like this:
3975   // For the thread in question, get the kernel thread id. Then get the
3976   // kernel thread statistics using that id.
3977   //
3978   // This only works of course when no pthread scheduling is used,
3979   // i.e. there is a 1:1 relationship to kernel threads.
3980   // On AIX, see AIXTHREAD_SCOPE variable.
3981 
3982   pthread_t pthtid = thread->osthread()->pthread_id();
3983 
3984   // retrieve kernel thread id for the pthread:
3985   tid64_t tid = 0;
3986   struct __pthrdsinfo pinfo;
3987   // I just love those otherworldly IBM APIs which force me to hand down
3988   // dummy buffers for stuff I dont care for...
3989   char dummy[1];
3990   int dummy_size = sizeof(dummy);
3991   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
3992                           dummy, &dummy_size) == 0) {
3993     tid = pinfo.__pi_tid;
3994   } else {
3995     tty->print_cr("pthread_getthrds_np failed.");
3996     error = true;
3997   }
3998 
3999   // retrieve kernel timing info for that kernel thread
4000   if (!error) {
4001     struct thrdentry64 thrdentry;
4002     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4003       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4004       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4005     } else {
4006       tty->print_cr("pthread_getthrds_np failed.");
4007       error = true;
4008     }
4009   }
4010 
4011   if (p_sys_time) {
4012     *p_sys_time = sys_time;
4013   }
4014 
4015   if (p_user_time) {
4016     *p_user_time = user_time;
4017   }
4018 
4019   if (error) {
4020     return false;
4021   }
4022 
4023   return true;
4024 }
4025 
4026 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4027   jlong sys_time;
4028   jlong user_time;
4029 
4030   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4031     return -1;
4032   }
4033 
4034   return user_sys_cpu_time ? sys_time + user_time : user_time;
4035 }
4036 
// Describes the timer backing current_thread_cpu_time() for JVMTI.
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}
4043 
// Describes the timer backing thread_cpu_time() for JVMTI
// (same properties as current_thread_cpu_time_info above).
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}
4050 
// Per-thread CPU time is available on AIX (via getthrds64, see above).
bool os::is_thread_cpu_time_supported() {
  return true;
}
4054 
4055 // System loadavg support. Returns -1 if load average cannot be obtained.
4056 // For now just return the system wide load average (no processor sets).
4057 int os::loadavg(double values[], int nelem) {
4058 
4059   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4060   guarantee(values, "argument error");
4061 
4062   if (os::Aix::on_pase()) {
4063 
4064     // AS/400 PASE: use libo4 porting library
4065     double v[3] = { 0.0, 0.0, 0.0 };
4066 
4067     if (libo4::get_load_avg(v, v + 1, v + 2)) {
4068       for (int i = 0; i < nelem; i ++) {
4069         values[i] = v[i];
4070       }
4071       return nelem;
4072     } else {
4073       return -1;
4074     }
4075 
4076   } else {
4077 
4078     // AIX: use libperfstat
4079     libperfstat::cpuinfo_t ci;
4080     if (libperfstat::get_cpuinfo(&ci)) {
4081       for (int i = 0; i < nelem; i++) {
4082         values[i] = ci.loadavg[i];
4083       }
4084     } else {
4085       return -1;
4086     }
4087     return nelem;
4088   }
4089 }
4090 
4091 void os::pause() {
4092   char filename[MAX_PATH];
4093   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4094     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4095   } else {
4096     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4097   }
4098 
4099   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4100   if (fd != -1) {
4101     struct stat buf;
4102     ::close(fd);
4103     while (::stat(filename, &buf) == 0) {
4104       (void)::poll(NULL, 0, 100);
4105     }
4106   } else {
4107     trcVerbose("Could not open pause file '%s', continuing immediately.", filename);
4108   }
4109 }
4110 
4111 bool os::Aix::is_primordial_thread() {
4112   if (pthread_self() == (pthread_t)1) {
4113     return true;
4114   } else {
4115     return false;
4116   }
4117 }
4118 
4119 // OS recognitions (PASE/AIX, OS level) call this before calling any
4120 // one of Aix::on_pase(), Aix::os_version() static
4121 void os::Aix::initialize_os_info() {
4122 
4123   assert(_on_pase == -1 && _os_version == 0, "already called.");
4124 
4125   struct utsname uts;
4126   memset(&uts, 0, sizeof(uts));
4127   strcpy(uts.sysname, "?");
4128   if (::uname(&uts) == -1) {
4129     trcVerbose("uname failed (%d)", errno);
4130     guarantee(0, "Could not determine whether we run on AIX or PASE");
4131   } else {
4132     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4133                "node \"%s\" machine \"%s\"\n",
4134                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4135     const int major = atoi(uts.version);
4136     assert(major > 0, "invalid OS version");
4137     const int minor = atoi(uts.release);
4138     assert(minor > 0, "invalid OS release");
4139     _os_version = (major << 24) | (minor << 16);
4140     char ver_str[20] = {0};
4141     char *name_str = "unknown OS";
4142     if (strcmp(uts.sysname, "OS400") == 0) {
4143       // We run on AS/400 PASE. We do not support versions older than V5R4M0.
4144       _on_pase = 1;
4145       if (os_version_short() < 0x0504) {
4146         trcVerbose("OS/400 releases older than V5R4M0 not supported.");
4147         assert(false, "OS/400 release too old.");
4148       }
4149       name_str = "OS/400 (pase)";
4150       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
4151     } else if (strcmp(uts.sysname, "AIX") == 0) {
4152       // We run on AIX. We do not support versions older than AIX 5.3.
4153       _on_pase = 0;
4154       // Determine detailed AIX version: Version, Release, Modification, Fix Level.
4155       odmWrapper::determine_os_kernel_version(&_os_version);
4156       if (os_version_short() < 0x0503) {
4157         trcVerbose("AIX release older than AIX 5.3 not supported.");
4158         assert(false, "AIX release too old.");
4159       }
4160       name_str = "AIX";
4161       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
4162                    major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
4163     } else {
4164       assert(false, name_str);
4165     }
4166     trcVerbose("We run on %s %s", name_str, ver_str);
4167   }
4168 
4169   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4170 } // end: os::Aix::initialize_os_info()
4171 
// Scan environment for important settings which might affect the VM.
// Trace out settings. Warn about invalid settings and/or correct them.
//
// Must run after os::Aix::initialize_os_info().
4176 void os::Aix::scan_environment() {
4177 
4178   char* p;
4179   int rc;
4180 
4181   // Warn explicity if EXTSHM=ON is used. That switch changes how
4182   // System V shared memory behaves. One effect is that page size of
4183   // shared memory cannot be change dynamically, effectivly preventing
4184   // large pages from working.
4185   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4186   // recommendation is (in OSS notes) to switch it off.
4187   p = ::getenv("EXTSHM");
4188   trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
4189   if (p && strcasecmp(p, "ON") == 0) {
4190     _extshm = 1;
4191     trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
4192     if (!AllowExtshm) {
4193       // We allow under certain conditions the user to continue. However, we want this
4194       // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
4195       // that the VM is not able to allocate 64k pages for the heap.
4196       // We do not want to run with reduced performance.
4197       vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
4198     }
4199   } else {
4200     _extshm = 0;
4201   }
4202 
4203   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4204   // Not tested, not supported.
4205   //
4206   // Note that it might be worth the trouble to test and to require it, if only to
4207   // get useful return codes for mprotect.
4208   //
4209   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4210   // exec() ? before loading the libjvm ? ....)
4211   p = ::getenv("XPG_SUS_ENV");
4212   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4213   if (p && strcmp(p, "ON") == 0) {
4214     _xpg_sus_mode = 1;
4215     trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
4216     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4217     // clobber address ranges. If we ever want to support that, we have to do some
4218     // testing first.
4219     guarantee(false, "XPG_SUS_ENV=ON not supported");
4220   } else {
4221     _xpg_sus_mode = 0;
4222   }
4223 
4224   if (os::Aix::on_pase()) {
4225     p = ::getenv("QIBM_MULTI_THREADED");
4226     trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
4227   }
4228 
4229   p = ::getenv("LDR_CNTRL");
4230   trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
4231   if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
4232     if (p && ::strstr(p, "TEXTPSIZE")) {
4233       trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
4234         "you may experience hangs or crashes on OS/400 V7R1.");
4235     }
4236   }
4237 
4238   p = ::getenv("AIXTHREAD_GUARDPAGES");
4239   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4240 
4241 } // end: os::Aix::scan_environment()
4242 
4243 // PASE: initialize the libo4 library (PASE porting library).
4244 void os::Aix::initialize_libo4() {
4245   guarantee(os::Aix::on_pase(), "OS/400 only.");
4246   if (!libo4::init()) {
4247     trcVerbose("libo4 initialization failed.");
4248     assert(false, "libo4 initialization failed");
4249   } else {
4250     trcVerbose("libo4 initialized.");
4251   }
4252 }
4253 
4254 // AIX: initialize the libperfstat library.
4255 void os::Aix::initialize_libperfstat() {
4256   assert(os::Aix::on_aix(), "AIX only");
4257   if (!libperfstat::init()) {
4258     trcVerbose("libperfstat initialization failed.");
4259     assert(false, "libperfstat initialization failed");
4260   } else {
4261     trcVerbose("libperfstat initialized.");
4262   }
4263 }
4264 
4265 /////////////////////////////////////////////////////////////////////////////
4266 // thread stack
4267 
// Function to query the current stack size using pthread_getthrds_np.
//
// Returns the stack base (highest usable address) via *p_stack_base and the
// usable stack size via *p_stack_size; either out-parameter may be NULL if
// the caller is not interested in it. Returns false if the pthread query
// fails. Must be called on a pthread-library-created thread (see below).
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
  // This only works when invoked on a pthread. As we agreed not to use
  // primordial threads anyway, I assert here.
  guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");

  // Information about this api can be found (a) in the pthread.h header and
  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
  //
  // The use of this API to find out the current stack is kind of undefined.
  // But after a lot of tries and asking IBM about it, I concluded that it is safe
  // enough for cases where I let the pthread library create its stacks. For cases
  // where I create an own stack and pass this to pthread_create, it seems not to
  // work (the returned stack size in that case is 0).

  pthread_t tid = pthread_self();
  struct __pthrdsinfo pinfo;
  char dummy[1]; // Just needed to satisfy pthread_getthrds_np.
  int dummy_size = sizeof(dummy);

  memset(&pinfo, 0, sizeof(pinfo));

  // Query everything about the current thread; we only consume the
  // stack-related fields of pinfo below.
  const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
                                     sizeof(pinfo), dummy, &dummy_size);

  if (rc != 0) {
    assert0(false);
    trcVerbose("pthread_getthrds_np failed (%d)", rc);
    return false;
  }
  guarantee0(pinfo.__pi_stackend);

  // The following may happen when invoking pthread_getthrds_np on a pthread
  // running on a user provided stack (when handing down a stack to pthread
  // create, see pthread_attr_setstackaddr).
  // Not sure what to do then.

  guarantee0(pinfo.__pi_stacksize);

  // Note: we get three values from pthread_getthrds_np:
  //       __pi_stackaddr, __pi_stacksize, __pi_stackend
  //
  // high addr    ---------------------
  //
  //    |         pthread internal data, like ~2K
  //    |
  //    |         ---------------------   __pi_stackend   (usually not page aligned, (xxxxF890))
  //    |
  //    |
  //    |
  //    |
  //    |
  //    |
  //    |          ---------------------   (__pi_stackend - __pi_stacksize)
  //    |
  //    |          padding to align the following AIX guard pages, if enabled.
  //    |
  //    V          ---------------------   __pi_stackaddr
  //
  // low addr      AIX guard pages, if enabled (AIXTHREAD_GUARDPAGES > 0)
  //

  // Stack base is the end (highest address) as reported by the OS; the low
  // boundary is rounded up to a page so the reported size excludes any
  // partial page (and the guard/padding area stays out of the usable range).
  address stack_base = (address)(pinfo.__pi_stackend);
  address stack_low_addr = (address)align_ptr_up(pinfo.__pi_stackaddr,
    os::vm_page_size());
  size_t stack_size = stack_base - stack_low_addr;

  if (p_stack_base) {
    *p_stack_base = stack_base;
  }

  if (p_stack_size) {
    *p_stack_size = stack_size;
  }

  return true;
}
4345 
4346 // Get the current stack base from the OS (actually, the pthread library).
4347 address os::current_stack_base() {
4348   address p;
4349   query_stack_dimensions(&p, 0);
4350   return p;
4351 }
4352 
4353 // Get the current stack size from the OS (actually, the pthread library).
4354 size_t os::current_stack_size() {
4355   size_t s;
4356   query_stack_dimensions(0, &s);
4357   return s;
4358 }
4359 
4360 // Refer to the comments in os_solaris.cpp park-unpark.
4361 
4362 // utility to compute the abstime argument to timedwait:
4363 // millis is the relative timeout time
4364 // abstime will be the absolute timeout time
4365 // TODO: replace compute_abstime() with unpackTime()
4366 
4367 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4368   if (millis < 0) millis = 0;
4369   struct timeval now;
4370   int status = gettimeofday(&now, NULL);
4371   assert(status == 0, "gettimeofday");
4372   jlong seconds = millis / 1000;
4373   millis %= 1000;
4374   if (seconds > 50000000) { // see man cond_timedwait(3T)
4375     seconds = 50000000;
4376   }
4377   abstime->tv_sec = now.tv_sec  + seconds;
4378   long       usec = now.tv_usec + millis * 1000;
4379   if (usec >= 1000000) {
4380     abstime->tv_sec += 1;
4381     usec -= 1000000;
4382   }
4383   abstime->tv_nsec = usec * 1000;
4384   return abstime;
4385 }
4386 
// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).
// Returns the previous value of _Event (0 = no permit was available,
// 1 = a permit was consumed). The CAS loop retries until the swap to 0
// succeeds against concurrent updates.
int os::PlatformEvent::TryPark() {
  for (;;) {
    const int v = _Event;
    guarantee ((v == 0) || (v == 1), "invariant");
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
  }
}
4397 
void os::PlatformEvent::park() {       // AKA "down()"
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // TODO: assert that _Assoc != NULL or _Assoc == Self
  int v;
  // Atomically decrement _Event. If the previous value was positive, a
  // permit was available and we return without blocking.
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
  }
  guarantee (v >= 0, "invariant");
  if (v == 0) {
    // Do this the hard way by blocking ...
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee (_nParked == 0, "invariant");
    ++ _nParked;
    // Re-check _Event under the mutex; the while-loop filters spurious
    // wakeups - only unpark() raising _Event terminates the wait.
    while (_Event < 0) {
      status = pthread_cond_wait(_cond, _mutex);
      assert_status(status == 0 || status == ETIMEDOUT, status, "cond_timedwait");
    }
    -- _nParked;

    // In theory we could move the ST of 0 into _Event past the unlock(),
    // but then we'd need a MEMBAR after the ST.
    _Event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }
  guarantee (_Event >= 0, "invariant");
}
4428 
// Timed park: block for up to 'millis' milliseconds or until unparked.
// Returns OS_OK if a permit was consumed (immediately or after being
// unparked), OS_TIMEOUT if the deadline elapsed.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant");

  int v;
  // Atomically decrement _Event. If the previous value was positive, a
  // permit was available and we return without blocking.
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
  }
  guarantee (v >= 0, "invariant");
  if (v != 0) return OS_OK;

  // We do this the hard way, by blocking the thread.
  // Consider enforcing a minimum timeout value.
  struct timespec abst;
  compute_abstime(&abst, millis);

  int ret = OS_TIMEOUT;
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant");
  ++_nParked;

  // Object.wait(timo) will return because of
  // (a) notification
  // (b) timeout
  // (c) thread.interrupt
  //
  // Thread.interrupt and object.notify{All} both call Event::set.
  // That is, we treat thread.interrupt as a special case of notification.
  // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
  // We assume all ETIME returns are valid.
  //
  // TODO: properly differentiate simultaneous notify+interrupt.
  // In that case, we should propagate the notify to another waiter.

  while (_Event < 0) {
    status = pthread_cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break;         // previous semantics
    if (status == ETIMEDOUT) break;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked;
  // _Event >= 0 here means we were unparked before the deadline.
  if (_Event >= 0) {
     ret = OS_OK;
  }
  _Event = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  assert (_nParked == 0, "invariant");
  return ret;
}
4482 
// Make a permit available (AKA "up()"); wake the associated thread if it
// is blocked in park().
void os::PlatformEvent::unpark() {
  int v, AnyWaiters;
  for (;;) {
    v = _Event;
    if (v > 0) {
      // The LD of _Event could have reordered or be satisfied
      // by a read-aside from this processor's write buffer.
      // To avoid problems execute a barrier and then
      // ratify the value.
      OrderAccess::fence();
      if (_Event == v) return;
      continue;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
  }
  // v < 0 means a thread was (or is about to be) blocked in park();
  // grab the mutex and signal it.
  if (v < 0) {
    // Wait for the thread associated with the event to vacate
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    AnyWaiters = _nParked;

    if (AnyWaiters != 0) {
      // NOTE(review): the surrounding comments describe signalling *after*
      // dropping the lock, but this code signals while still holding the
      // mutex (which is also valid POSIX usage) - confirm the intended
      // protocol before changing either the code or the comments.
      status = pthread_cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
    // Mutex should be locked for pthread_cond_signal(_cond).
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }

  // Note that we signal() _after dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of futile wakeups. In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim will
  // simply re-test the condition and re-park itself.
}
4521 
4522 
4523 // JSR166
4524 // -------------------------------------------------------
4525 
4526 //
4527 // The solaris and linux implementations of park/unpark are fairly
4528 // conservative for now, but can be improved. They currently use a
// mutex/condvar pair, plus a count.
4530 // Park decrements count if > 0, else does a condvar wait. Unpark
4531 // sets count to 1 and signals condvar. Only one thread ever waits
4532 // on the condvar. Contention seen when trying to park implies that someone
4533 // is unparking you, so don't wait. And spurious returns are fine, so there
4534 // is no need to track notifications.
4535 //
4536 
4537 #define MAX_SECS 100000000
4538 //
4539 // This code is common to linux and solaris and will be moved to a
4540 // common place in dolphin.
4541 //
4542 // The passed in time value is either a relative time in nanoseconds
4543 // or an absolute time in milliseconds. Either way it has to be unpacked
4544 // into suitable seconds and nanoseconds components and stored in the
4545 // given timespec structure.
4546 // Given time is a 64-bit value and the time_t used in the timespec is only
4547 // a signed-32-bit value (except on 64-bit Linux) we have to watch for
4548 // overflow if times way in the future are given. Further on Solaris versions
4549 // prior to 10 there is a restriction (see cond_timedwait) that the specified
4550 // number of seconds, in abstime, is less than current_time + 100,000,000.
4551 // As it will be 28 years before "now + 100000000" will overflow we can
4552 // ignore overflow and just impose a hard-limit on seconds using the value
4553 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
4554 // years from "now".
4555 //
4556 
// Unpack 'time' into *absTime as an absolute deadline for
// pthread_cond_timedwait. 'time' is an absolute time in milliseconds when
// isAbsolute is true, otherwise a relative time in nanoseconds. Deadlines
// are clamped to now + MAX_SECS (see the block comment above) to avoid
// time_t overflow. Precondition: time > 0.
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  // Upper bound for the deadline's seconds component.
  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      // Carry nanoseconds into the seconds field if they overflow one second.
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  // Sanity checks on the computed deadline.
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}
4596 
// Park the current JavaThread until unparked, interrupted, or (time != 0)
// the deadline passes. 'time' is an absolute time in milliseconds when
// isAbsolute is true, otherwise a relative timeout in nanoseconds; see
// unpackTime(). Spurious returns are permitted (JSR166 contract).
void Parker::park(bool isAbsolute, jlong time) {
  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
    _counter = 0;
    OrderAccess::fence();
    return;
  }

  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Optional optimization -- avoid state transitions if there's an interrupt pending.
  // Check interrupt before trying to wait
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // Next, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking. Also. check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;
  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert (status == 0, "invariant");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // time == 0 means park indefinitely; otherwise wait until absTime.
  if (time == 0) {
    status = pthread_cond_wait (_cond, _mutex);
  } else {
    status = pthread_cond_timedwait (_cond, _mutex, &absTime);
  }
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  // Restore the signal mask that was in effect before the wait.
  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif

  // Consume the permit (if any) and release the leaf lock.
  _counter = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");
  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }

  OrderAccess::fence();
}
4683 
4684 void Parker::unpark() {
4685   int s, status;
4686   status = pthread_mutex_lock(_mutex);
4687   assert (status == 0, "invariant");
4688   s = _counter;
4689   _counter = 1;
4690   if (s < 1) {
4691     status = pthread_mutex_unlock(_mutex);
4692     assert (status == 0, "invariant");
4693     status = pthread_cond_signal (_cond);
4694     assert (status == 0, "invariant");
4695   } else {
4696     pthread_mutex_unlock(_mutex);
4697     assert (status == 0, "invariant");
4698   }
4699 }
4700 
4701 extern char** environ;
4702 
// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  // Argument vector for "sh -c <cmd>".
  char * argv[4] = {"sh", "-c", cmd, NULL};

  pid_t pid = fork();

  if (pid < 0) {
    // fork failed
    return -1;

  } else if (pid == 0) {
    // child process

    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
    execve("/usr/bin/sh", argv, environ);

    // execve failed
    _exit(-1);

  } else {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit. This returns immediately if
    // the child has already exited. */
    while (waitpid(pid, &status, 0) < 0) {
      switch (errno) {
        // ECHILD: someone else (e.g. a SIGCHLD handler) already reaped it.
        case ECHILD: return 0;
        // EINTR: interrupted by a signal - retry the wait.
        case EINTR: break;
        default: return -1;
      }
    }

    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal.
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through.
      return status;
    }
  }
  // Not reached; all paths above return.
  return -1;
}
4758 
4759 // is_headless_jre()
4760 //
4761 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
4762 // in order to report if we are running in a headless jre.
4763 //
4764 // Since JDK8 xawt/libmawt.so is moved into the same directory
4765 // as libawt.so, and renamed libawt_xawt.so
4766 bool os::is_headless_jre() {
4767   struct stat statbuf;
4768   char buf[MAXPATHLEN];
4769   char libmawtpath[MAXPATHLEN];
4770   const char *xawtstr = "/xawt/libmawt.so";
4771   const char *new_xawtstr = "/libawt_xawt.so";
4772 
4773   char *p;
4774 
4775   // Get path to libjvm.so
4776   os::jvm_path(buf, sizeof(buf));
4777 
4778   // Get rid of libjvm.so
4779   p = strrchr(buf, '/');
4780   if (p == NULL) return false;
4781   else *p = '\0';
4782 
4783   // Get rid of client or server
4784   p = strrchr(buf, '/');
4785   if (p == NULL) return false;
4786   else *p = '\0';
4787 
4788   // check xawt/libmawt.so
4789   strcpy(libmawtpath, buf);
4790   strcat(libmawtpath, xawtstr);
4791   if (::stat(libmawtpath, &statbuf) == 0) return false;
4792 
4793   // check libawt_xawt.so
4794   strcpy(libmawtpath, buf);
4795   strcat(libmawtpath, new_xawtstr);
4796   if (::stat(libmawtpath, &statbuf) == 0) return false;
4797 
4798   return true;
4799 }
4800 
4801 // Get the default path to the core file
4802 // Returns the length of the string
4803 int os::get_core_path(char* buffer, size_t bufferSize) {
4804   const char* p = get_current_directory(buffer, bufferSize);
4805 
4806   if (p == NULL) {
4807     assert(p != NULL, "failed to get current directory");
4808     return 0;
4809   }
4810 
4811   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4812                                                p, current_process_id());
4813 
4814   return strlen(buffer);
4815 }
4816 
#ifndef PRODUCT
// Hook for the VM-internal test runner; AIX provides no special-memory
// (large page) reservation tests, so this is intentionally a no-op.
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
#endif
4822 
4823 bool os::start_debugging(char *buf, int buflen) {
4824   int len = (int)strlen(buf);
4825   char *p = &buf[len];
4826 
4827   jio_snprintf(p, buflen -len,
4828                  "\n\n"
4829                  "Do you want to debug the problem?\n\n"
4830                  "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
4831                  "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
4832                  "Otherwise, press RETURN to abort...",
4833                  os::current_process_id(),
4834                  os::current_thread_id(), thread_self());
4835 
4836   bool yes = os::message_box("Unexpected Error", buf);
4837 
4838   if (yes) {
4839     // yes, user asked VM to launch debugger
4840     jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
4841 
4842     os::fork_and_exec(buf);
4843     yes = false;
4844   }
4845   return yes;
4846 }
4847 
4848 static inline time_t get_mtime(const char* filename) {
4849   struct stat st;
4850   int ret = os::stat(filename, &st);
4851   assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
4852   return st.st_mtime;
4853 }
4854 
4855 int os::compare_file_modified_times(const char* file1, const char* file2) {
4856   time_t t1 = get_mtime(file1);
4857   time_t t2 = get_mtime(file2);
4858   return t1 - t2;
4859 }