1 /*
   2  * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 // According to the AIX OS doc #pragma alloca must be used
  27 // with C++ compiler before referencing the function alloca()
  28 #pragma alloca
  29 
  30 // no precompiled headers
  31 #include "classfile/classLoader.hpp"
  32 #include "classfile/systemDictionary.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "jvm_aix.h"
  39 #include "logging/log.hpp"
  40 #include "libo4.hpp"
  41 #include "libperfstat_aix.hpp"
  42 #include "libodm_aix.hpp"
  43 #include "loadlib_aix.hpp"
  44 #include "memory/allocation.inline.hpp"
  45 #include "memory/filemap.hpp"
  46 #include "misc_aix.hpp"
  47 #include "oops/oop.inline.hpp"
  48 #include "os_aix.inline.hpp"
  49 #include "os_share_aix.hpp"
  50 #include "porting_aix.hpp"
  51 #include "prims/jniFastGetField.hpp"
  52 #include "prims/jvm.h"
  53 #include "prims/jvm_misc.hpp"
  54 #include "runtime/arguments.hpp"
  55 #include "runtime/atomic.hpp"
  56 #include "runtime/extendedPC.hpp"
  57 #include "runtime/globals.hpp"
  58 #include "runtime/interfaceSupport.hpp"
  59 #include "runtime/java.hpp"
  60 #include "runtime/javaCalls.hpp"
  61 #include "runtime/mutexLocker.hpp"
  62 #include "runtime/objectMonitor.hpp"
  63 #include "runtime/orderAccess.inline.hpp"
  64 #include "runtime/os.hpp"
  65 #include "runtime/osThread.hpp"
  66 #include "runtime/perfMemory.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/statSampler.hpp"
  69 #include "runtime/stubRoutines.hpp"
  70 #include "runtime/thread.inline.hpp"
  71 #include "runtime/threadCritical.hpp"
  72 #include "runtime/timer.hpp"
  73 #include "runtime/vm_version.hpp"
  74 #include "services/attachListener.hpp"
  75 #include "services/runtimeService.hpp"
  76 #include "utilities/decoder.hpp"
  77 #include "utilities/defaultStream.hpp"
  78 #include "utilities/events.hpp"
  79 #include "utilities/growableArray.hpp"
  80 #include "utilities/vmError.hpp"
  81 
  82 // put OS-includes here (sorted alphabetically)
  83 #include <errno.h>
  84 #include <fcntl.h>
  85 #include <inttypes.h>
  86 #include <poll.h>
  87 #include <procinfo.h>
  88 #include <pthread.h>
  89 #include <pwd.h>
  90 #include <semaphore.h>
  91 #include <signal.h>
  92 #include <stdint.h>
  93 #include <stdio.h>
  94 #include <string.h>
  95 #include <unistd.h>
  96 #include <sys/ioctl.h>
  97 #include <sys/ipc.h>
  98 #include <sys/mman.h>
  99 #include <sys/resource.h>
 100 #include <sys/select.h>
 101 #include <sys/shm.h>
 102 #include <sys/socket.h>
 103 #include <sys/stat.h>
 104 #include <sys/sysinfo.h>
 105 #include <sys/systemcfg.h>
 106 #include <sys/time.h>
 107 #include <sys/times.h>
 108 #include <sys/types.h>
 109 #include <sys/utsname.h>
 110 #include <sys/vminfo.h>
 111 #include <sys/wait.h>
 112 
 113 // Missing prototypes for various system APIs.
 114 extern "C"
 115 int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
 116 
 117 #if !defined(_AIXVERSION_610)
 118 extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
 119 extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
 120 extern "C" int getargs   (procsinfo*, int, char*, int);
 121 #endif
 122 
 123 #define MAX_PATH (2 * K)
 124 
 125 // for timer info max values which include all bits
 126 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 127 // for multipage initialization error analysis (in 'g_multipage_error')
 128 #define ERROR_MP_OS_TOO_OLD                          100
 129 #define ERROR_MP_EXTSHM_ACTIVE                       101
 130 #define ERROR_MP_VMGETINFO_FAILED                    102
 131 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 132 
 133 // Query dimensions of the stack of the calling thread.
 134 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
 135 static address resolve_function_descriptor_to_code_pointer(address p);
 136 
 137 static void vmembk_print_on(outputStream* os);
 138 
 139 ////////////////////////////////////////////////////////////////////////////////
 140 // global variables (for a description see os_aix.hpp)
 141 
 142 julong    os::Aix::_physical_memory = 0;
 143 
 144 pthread_t os::Aix::_main_thread = ((pthread_t)0);
 145 int       os::Aix::_page_size = -1;
 146 
 147 // -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
 148 int       os::Aix::_on_pase = -1;
 149 
 150 // 0 = uninitialized, otherwise 32 bit number:
 151 //  0xVVRRTTSS
 152 //  VV - major version
 153 //  RR - minor version
 154 //  TT - tech level, if known, 0 otherwise
 155 //  SS - service pack, if known, 0 otherwise
 156 uint32_t  os::Aix::_os_version = 0;
 157 
 158 // -1 = uninitialized, 0 - no, 1 - yes
 159 int       os::Aix::_xpg_sus_mode = -1;
 160 
 161 // -1 = uninitialized, 0 - no, 1 - yes
 162 int       os::Aix::_extshm = -1;
 163 
 164 ////////////////////////////////////////////////////////////////////////////////
 165 // local variables
 166 
 167 static jlong    initial_time_count = 0;
 168 static int      clock_tics_per_sec = 100;
 169 static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
 170 static bool     check_signals      = true;
 171 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 172 static sigset_t SR_sigset;
 173 
 174 // Process break recorded at startup.
 175 static address g_brk_at_startup = NULL;
 176 
 177 // This describes the state of multipage support of the underlying
 178 // OS. Note that this is of no interest to the outsize world and
 179 // therefore should not be defined in AIX class.
 180 //
 181 // AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
 182 // latter two (16M "large" resp. 16G "huge" pages) require special
 183 // setup and are normally not available.
 184 //
 185 // AIX supports multiple page sizes per process, for:
 186 //  - Stack (of the primordial thread, so not relevant for us)
 187 //  - Data - data, bss, heap, for us also pthread stacks
 188 //  - Text - text code
 189 //  - shared memory
 190 //
 191 // Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
 192 // and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
 193 //
 194 // For shared memory, page size can be set dynamically via
 195 // shmctl(). Different shared memory regions can have different page
 196 // sizes.
 197 //
 198 // More information can be found at AIBM info center:
 199 //   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
 200 //
 201 static struct {
 202   size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
 203   size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
 204   size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
 205   size_t pthr_stack_pagesize; // stack page size of pthread threads
 206   size_t textpsize;           // default text page size (LDR_CNTRL STACKPSIZE)
 207   bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
 208   bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
 209   int error;                  // Error describing if something went wrong at multipage init.
 210 } g_multipage_support = {
 211   (size_t) -1,
 212   (size_t) -1,
 213   (size_t) -1,
 214   (size_t) -1,
 215   (size_t) -1,
 216   false, false,
 217   0
 218 };
 219 
 220 // We must not accidentally allocate memory close to the BRK - even if
 221 // that would work - because then we prevent the BRK segment from
 222 // growing which may result in a malloc OOM even though there is
 223 // enough memory. The problem only arises if we shmat() or mmap() at
 224 // a specific wish address, e.g. to place the heap in a
 225 // compressed-oops-friendly way.
 226 static bool is_close_to_brk(address a) {
 227   assert0(g_brk_at_startup != NULL);
 228   if (a >= g_brk_at_startup &&
 229       a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
 230     return true;
 231   }
 232   return false;
 233 }
 234 
 235 julong os::available_memory() {
 236   return Aix::available_memory();
 237 }
 238 
 239 julong os::Aix::available_memory() {
 240   // Avoid expensive API call here, as returned value will always be null.
 241   if (os::Aix::on_pase()) {
 242     return 0x0LL;
 243   }
 244   os::Aix::meminfo_t mi;
 245   if (os::Aix::get_meminfo(&mi)) {
 246     return mi.real_free;
 247   } else {
 248     return ULONG_MAX;
 249   }
 250 }
 251 
 252 julong os::physical_memory() {
 253   return Aix::physical_memory();
 254 }
 255 
 256 // Return true if user is running as root.
 257 
 258 bool os::have_special_privileges() {
 259   static bool init = false;
 260   static bool privileges = false;
 261   if (!init) {
 262     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 263     init = true;
 264   }
 265   return privileges;
 266 }
 267 
 268 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 269 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 270 static bool my_disclaim64(char* addr, size_t size) {
 271 
 272   if (size == 0) {
 273     return true;
 274   }
 275 
 276   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 277   const unsigned int maxDisclaimSize = 0x40000000;
 278 
 279   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 280   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 281 
 282   char* p = addr;
 283 
 284   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
 285     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 286       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 287       return false;
 288     }
 289     p += maxDisclaimSize;
 290   }
 291 
 292   if (lastDisclaimSize > 0) {
 293     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 294       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 295       return false;
 296     }
 297   }
 298 
 299   return true;
 300 }
 301 
 302 // Cpu architecture string
 303 #if defined(PPC32)
 304 static char cpu_arch[] = "ppc";
 305 #elif defined(PPC64)
 306 static char cpu_arch[] = "ppc64";
 307 #else
 308 #error Add appropriate cpu_arch setting
 309 #endif
 310 
 311 // Wrap the function "vmgetinfo" which is not available on older OS releases.
 312 static int checked_vmgetinfo(void *out, int command, int arg) {
 313   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 314     guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
 315   }
 316   return ::vmgetinfo(out, command, arg);
 317 }
 318 
 319 // Given an address, returns the size of the page backing that address.
 320 size_t os::Aix::query_pagesize(void* addr) {
 321 
 322   if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
 323     // AS/400 older than V6R1: no vmgetinfo here, default to 4K
 324     return 4*K;
 325   }
 326 
 327   vm_page_info pi;
 328   pi.addr = (uint64_t)addr;
 329   if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
 330     return pi.pagesize;
 331   } else {
 332     assert(false, "vmgetinfo failed to retrieve page size");
 333     return 4*K;
 334   }
 335 }
 336 
 337 void os::Aix::initialize_system_info() {
 338 
 339   // Get the number of online(logical) cpus instead of configured.
 340   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
 341   assert(_processor_count > 0, "_processor_count must be > 0");
 342 
 343   // Retrieve total physical storage.
 344   os::Aix::meminfo_t mi;
 345   if (!os::Aix::get_meminfo(&mi)) {
 346     assert(false, "os::Aix::get_meminfo failed.");
 347   }
 348   _physical_memory = (julong) mi.real_total;
 349 }
 350 
 351 // Helper function for tracing page sizes.
 352 static const char* describe_pagesize(size_t pagesize) {
 353   switch (pagesize) {
 354     case 4*K : return "4K";
 355     case 64*K: return "64K";
 356     case 16*M: return "16M";
 357     case 16*G: return "16G";
 358     default:
 359       assert(false, "surprise");
 360       return "??";
 361   }
 362 }
 363 
 364 // Probe OS for multipage support.
 365 // Will fill the global g_multipage_support structure.
 366 // Must be called before calling os::large_page_init().
 367 static void query_multipage_support() {
 368 
 369   guarantee(g_multipage_support.pagesize == -1,
 370             "do not call twice");
 371 
 372   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
 373 
 374   // This really would surprise me.
 375   assert(g_multipage_support.pagesize == 4*K, "surprise!");
 376 
 377   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 378   // Default data page size is defined either by linker options (-bdatapsize)
 379   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 380   // default should be 4K.
 381   {
 382     void* p = ::malloc(16*M);
 383     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
 384     ::free(p);
 385   }
 386 
 387   // Query default shm page size (LDR_CNTRL SHMPSIZE).
 388   // Note that this is pure curiosity. We do not rely on default page size but set
 389   // our own page size after allocated.
 390   {
 391     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 392     guarantee(shmid != -1, "shmget failed");
 393     void* p = ::shmat(shmid, NULL, 0);
 394     ::shmctl(shmid, IPC_RMID, NULL);
 395     guarantee(p != (void*) -1, "shmat failed");
 396     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
 397     ::shmdt(p);
 398   }
 399 
 400   // Before querying the stack page size, make sure we are not running as primordial
 401   // thread (because primordial thread's stack may have different page size than
 402   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 403   // number of reasons so we may just as well guarantee it here.
 404   guarantee0(!os::Aix::is_primordial_thread());
 405 
 406   // Query pthread stack page size. Should be the same as data page size because
 407   // pthread stacks are allocated from C-Heap.
 408   {
 409     int dummy = 0;
 410     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
 411   }
 412 
 413   // Query default text page size (LDR_CNTRL TEXTPSIZE).
 414   {
 415     address any_function =
 416       resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
 417     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
 418   }
 419 
 420   // Now probe for support of 64K pages and 16M pages.
 421 
 422   // Before OS/400 V6R1, there is no support for pages other than 4K.
 423   if (os::Aix::on_pase_V5R4_or_older()) {
 424     trcVerbose("OS/400 < V6R1 - no large page support.");
 425     g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
 426     goto query_multipage_support_end;
 427   }
 428 
 429   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
 430   {
 431     const int MAX_PAGE_SIZES = 4;
 432     psize_t sizes[MAX_PAGE_SIZES];
 433     const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 434     if (num_psizes == -1) {
 435       trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
 436       trcVerbose("disabling multipage support.");
 437       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
 438       goto query_multipage_support_end;
 439     }
 440     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 441     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 442     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
 443     for (int i = 0; i < num_psizes; i ++) {
 444       trcVerbose(" %s ", describe_pagesize(sizes[i]));
 445     }
 446 
 447     // Can we use 64K, 16M pages?
 448     for (int i = 0; i < num_psizes; i ++) {
 449       const size_t pagesize = sizes[i];
 450       if (pagesize != 64*K && pagesize != 16*M) {
 451         continue;
 452       }
 453       bool can_use = false;
 454       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
 455       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
 456         IPC_CREAT | S_IRUSR | S_IWUSR);
 457       guarantee0(shmid != -1); // Should always work.
 458       // Try to set pagesize.
 459       struct shmid_ds shm_buf = { 0 };
 460       shm_buf.shm_pagesize = pagesize;
 461       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
 462         const int en = errno;
 463         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 464         trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%n",
 465           errno);
 466       } else {
 467         // Attach and double check pageisze.
 468         void* p = ::shmat(shmid, NULL, 0);
 469         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 470         guarantee0(p != (void*) -1); // Should always work.
 471         const size_t real_pagesize = os::Aix::query_pagesize(p);
 472         if (real_pagesize != pagesize) {
 473           trcVerbose("real page size (0x%llX) differs.", real_pagesize);
 474         } else {
 475           can_use = true;
 476         }
 477         ::shmdt(p);
 478       }
 479       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
 480       if (pagesize == 64*K) {
 481         g_multipage_support.can_use_64K_pages = can_use;
 482       } else if (pagesize == 16*M) {
 483         g_multipage_support.can_use_16M_pages = can_use;
 484       }
 485     }
 486 
 487   } // end: check which pages can be used for shared memory
 488 
 489 query_multipage_support_end:
 490 
 491   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
 492       describe_pagesize(g_multipage_support.pagesize));
 493   trcVerbose("Data page size (C-Heap, bss, etc): %s",
 494       describe_pagesize(g_multipage_support.datapsize));
 495   trcVerbose("Text page size: %s",
 496       describe_pagesize(g_multipage_support.textpsize));
 497   trcVerbose("Thread stack page size (pthread): %s",
 498       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
 499   trcVerbose("Default shared memory page size: %s",
 500       describe_pagesize(g_multipage_support.shmpsize));
 501   trcVerbose("Can use 64K pages dynamically with shared meory: %s",
 502       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
 503   trcVerbose("Can use 16M pages dynamically with shared memory: %s",
 504       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
 505   trcVerbose("Multipage error details: %d",
 506       g_multipage_support.error);
 507 
 508   // sanity checks
 509   assert0(g_multipage_support.pagesize == 4*K);
 510   assert0(g_multipage_support.datapsize == 4*K || g_multipage_support.datapsize == 64*K);
 511   assert0(g_multipage_support.textpsize == 4*K || g_multipage_support.textpsize == 64*K);
 512   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
 513   assert0(g_multipage_support.shmpsize == 4*K || g_multipage_support.shmpsize == 64*K);
 514 
 515 }
 516 
 517 void os::init_system_properties_values() {
 518 
 519 #define DEFAULT_LIBPATH "/lib:/usr/lib"
 520 #define EXTENSIONS_DIR  "/lib/ext"
 521 
 522   // Buffer that fits several sprintfs.
 523   // Note that the space for the trailing null is provided
 524   // by the nulls included by the sizeof operator.
 525   const size_t bufsize =
 526     MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
 527          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
 528   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 529 
 530   // sysclasspath, java_home, dll_dir
 531   {
 532     char *pslash;
 533     os::jvm_path(buf, bufsize);
 534 
 535     // Found the full path to libjvm.so.
 536     // Now cut the path to <java_home>/jre if we can.
 537     pslash = strrchr(buf, '/');
 538     if (pslash != NULL) {
 539       *pslash = '\0';            // Get rid of /libjvm.so.
 540     }
 541     pslash = strrchr(buf, '/');
 542     if (pslash != NULL) {
 543       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 544     }
 545     Arguments::set_dll_dir(buf);
 546 
 547     if (pslash != NULL) {
 548       pslash = strrchr(buf, '/');
 549       if (pslash != NULL) {
 550         *pslash = '\0';          // Get rid of /<arch>.
 551         pslash = strrchr(buf, '/');
 552         if (pslash != NULL) {
 553           *pslash = '\0';        // Get rid of /lib.
 554         }
 555       }
 556     }
 557     Arguments::set_java_home(buf);
 558     set_boot_path('/', ':');
 559   }
 560 
 561   // Where to look for native libraries.
 562 
 563   // On Aix we get the user setting of LIBPATH.
 564   // Eventually, all the library path setting will be done here.
 565   // Get the user setting of LIBPATH.
 566   const char *v = ::getenv("LIBPATH");
 567   const char *v_colon = ":";
 568   if (v == NULL) { v = ""; v_colon = ""; }
 569 
 570   // Concatenate user and invariant part of ld_library_path.
 571   // That's +1 for the colon and +1 for the trailing '\0'.
 572   char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
 573   sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
 574   Arguments::set_library_path(ld_library_path);
 575   FREE_C_HEAP_ARRAY(char, ld_library_path);
 576 
 577   // Extensions directories.
 578   sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
 579   Arguments::set_ext_dirs(buf);
 580 
 581   FREE_C_HEAP_ARRAY(char, buf);
 582 
 583 #undef DEFAULT_LIBPATH
 584 #undef EXTENSIONS_DIR
 585 }
 586 
 587 ////////////////////////////////////////////////////////////////////////////////
 588 // breakpoint support
 589 
 590 void os::breakpoint() {
 591   BREAKPOINT;
 592 }
 593 
 594 extern "C" void breakpoint() {
 595   // use debugger to set breakpoint here
 596 }
 597 
 598 ////////////////////////////////////////////////////////////////////////////////
 599 // signal support
 600 
 601 debug_only(static bool signal_sets_initialized = false);
 602 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
 603 
 604 bool os::Aix::is_sig_ignored(int sig) {
 605   struct sigaction oact;
 606   sigaction(sig, (struct sigaction*)NULL, &oact);
 607   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
 608     : CAST_FROM_FN_PTR(void*, oact.sa_handler);
 609   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
 610     return true;
 611   } else {
 612     return false;
 613   }
 614 }
 615 
 616 void os::Aix::signal_sets_init() {
 617   // Should also have an assertion stating we are still single-threaded.
 618   assert(!signal_sets_initialized, "Already initialized");
 619   // Fill in signals that are necessarily unblocked for all threads in
 620   // the VM. Currently, we unblock the following signals:
 621   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
 622   //                         by -Xrs (=ReduceSignalUsage));
 623   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
 624   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
 625   // the dispositions or masks wrt these signals.
 626   // Programs embedding the VM that want to use the above signals for their
 627   // own purposes must, at this time, use the "-Xrs" option to prevent
 628   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
 629   // (See bug 4345157, and other related bugs).
 630   // In reality, though, unblocking these signals is really a nop, since
 631   // these signals are not blocked by default.
 632   sigemptyset(&unblocked_sigs);
 633   sigemptyset(&allowdebug_blocked_sigs);
 634   sigaddset(&unblocked_sigs, SIGILL);
 635   sigaddset(&unblocked_sigs, SIGSEGV);
 636   sigaddset(&unblocked_sigs, SIGBUS);
 637   sigaddset(&unblocked_sigs, SIGFPE);
 638   sigaddset(&unblocked_sigs, SIGTRAP);
 639   sigaddset(&unblocked_sigs, SIGDANGER);
 640   sigaddset(&unblocked_sigs, SR_signum);
 641 
 642   if (!ReduceSignalUsage) {
 643    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
 644      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
 645      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
 646    }
 647    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
 648      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
 649      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
 650    }
 651    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
 652      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
 653      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
 654    }
 655   }
 656   // Fill in signals that are blocked by all but the VM thread.
 657   sigemptyset(&vm_sigs);
 658   if (!ReduceSignalUsage)
 659     sigaddset(&vm_sigs, BREAK_SIGNAL);
 660   debug_only(signal_sets_initialized = true);
 661 }
 662 
 663 // These are signals that are unblocked while a thread is running Java.
 664 // (For some reason, they get blocked by default.)
 665 sigset_t* os::Aix::unblocked_signals() {
 666   assert(signal_sets_initialized, "Not initialized");
 667   return &unblocked_sigs;
 668 }
 669 
 670 // These are the signals that are blocked while a (non-VM) thread is
 671 // running Java. Only the VM thread handles these signals.
 672 sigset_t* os::Aix::vm_signals() {
 673   assert(signal_sets_initialized, "Not initialized");
 674   return &vm_sigs;
 675 }
 676 
 677 // These are signals that are blocked during cond_wait to allow debugger in
 678 sigset_t* os::Aix::allowdebug_blocked_signals() {
 679   assert(signal_sets_initialized, "Not initialized");
 680   return &allowdebug_blocked_sigs;
 681 }
 682 
 683 void os::Aix::hotspot_sigmask(Thread* thread) {
 684 
 685   //Save caller's signal mask before setting VM signal mask
 686   sigset_t caller_sigmask;
 687   pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);
 688 
 689   OSThread* osthread = thread->osthread();
 690   osthread->set_caller_sigmask(caller_sigmask);
 691 
 692   pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);
 693 
 694   if (!ReduceSignalUsage) {
 695     if (thread->is_VM_thread()) {
 696       // Only the VM thread handles BREAK_SIGNAL ...
 697       pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
 698     } else {
 699       // ... all other threads block BREAK_SIGNAL
 700       pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
 701     }
 702   }
 703 }
 704 
 705 // retrieve memory information.
 706 // Returns false if something went wrong;
 707 // content of pmi undefined in this case.
 708 bool os::Aix::get_meminfo(meminfo_t* pmi) {
 709 
 710   assert(pmi, "get_meminfo: invalid parameter");
 711 
 712   memset(pmi, 0, sizeof(meminfo_t));
 713 
 714   if (os::Aix::on_pase()) {
 715     // On PASE, use the libo4 porting library.
 716 
 717     unsigned long long virt_total = 0;
 718     unsigned long long real_total = 0;
 719     unsigned long long real_free = 0;
 720     unsigned long long pgsp_total = 0;
 721     unsigned long long pgsp_free = 0;
 722     if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
 723       pmi->virt_total = virt_total;
 724       pmi->real_total = real_total;
 725       pmi->real_free = real_free;
 726       pmi->pgsp_total = pgsp_total;
 727       pmi->pgsp_free = pgsp_free;
 728       return true;
 729     }
 730     return false;
 731 
 732   } else {
 733 
 734     // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
 735     // See:
 736     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 737     //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
 738     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 739     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 740 
 741     perfstat_memory_total_t psmt;
 742     memset (&psmt, '\0', sizeof(psmt));
 743     const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
 744     if (rc == -1) {
 745       trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
 746       assert(0, "perfstat_memory_total() failed");
 747       return false;
 748     }
 749 
 750     assert(rc == 1, "perfstat_memory_total() - weird return code");
 751 
 752     // excerpt from
 753     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 754     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 755     // The fields of perfstat_memory_total_t:
 756     // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
 757     // u_longlong_t real_total         Total real memory (in 4 KB pages).
 758     // u_longlong_t real_free          Free real memory (in 4 KB pages).
 759     // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
 760     // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
 761 
 762     pmi->virt_total = psmt.virt_total * 4096;
 763     pmi->real_total = psmt.real_total * 4096;
 764     pmi->real_free = psmt.real_free * 4096;
 765     pmi->pgsp_total = psmt.pgsp_total * 4096;
 766     pmi->pgsp_free = psmt.pgsp_free * 4096;
 767 
 768     return true;
 769 
 770   }
 771 } // end os::Aix::get_meminfo
 772 
 773 //////////////////////////////////////////////////////////////////////////////
 774 // create new thread
 775 
 776 // Thread start routine for all newly created threads
 777 static void *thread_native_entry(Thread *thread) {
 778 
 779   // find out my own stack dimensions
 780   {
 781     // actually, this should do exactly the same as thread->record_stack_base_and_size...
 782     address base = 0;
 783     size_t size = 0;
 784     query_stack_dimensions(&base, &size);
 785     thread->set_stack_base(base);
 786     thread->set_stack_size(size);
 787   }
 788 
 789   const pthread_t pthread_id = ::pthread_self();
 790   const tid_t kernel_thread_id = ::thread_self();
 791 
 792   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
 793     os::current_thread_id(), (uintx) kernel_thread_id);
 794 
 795   // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
 796   // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
 797   // tools hook pthread_create(). In this case, we may run into problems establishing
 798   // guard pages on those stacks, because the stacks may reside in memory which is not
 799   // protectable (shmated).
 800   if (thread->stack_base() > ::sbrk(0)) {
 801     log_warning(os, thread)("Thread stack not in data segment.");
 802   }
 803 
 804   // Try to randomize the cache line index of hot stack frames.
 805   // This helps when threads of the same stack traces evict each other's
 806   // cache lines. The threads can be either from the same JVM instance, or
 807   // from different JVM instances. The benefit is especially true for
 808   // processors with hyperthreading technology.
 809 
 810   static int counter = 0;
 811   int pid = os::current_process_id();
 812   alloca(((pid ^ counter++) & 7) * 128);
 813 
 814   thread->initialize_thread_current();
 815 
 816   OSThread* osthread = thread->osthread();
 817 
 818   // Thread_id is pthread id.
 819   osthread->set_thread_id(pthread_id);
 820 
 821   // .. but keep kernel thread id too for diagnostics
 822   osthread->set_kernel_thread_id(kernel_thread_id);
 823 
 824   // Initialize signal mask for this thread.
 825   os::Aix::hotspot_sigmask(thread);
 826 
 827   // Initialize floating point control register.
 828   os::Aix::init_thread_fpu_state();
 829 
 830   assert(osthread->get_state() == RUNNABLE, "invalid os thread state");
 831 
 832   // Call one more level start routine.
 833   thread->run();
 834 
 835   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
 836     os::current_thread_id(), (uintx) kernel_thread_id);
 837 
 838   // If a thread has not deleted itself ("delete this") as part of its
 839   // termination sequence, we have to ensure thread-local-storage is
 840   // cleared before we actually terminate. No threads should ever be
 841   // deleted asynchronously with respect to their termination.
 842   if (Thread::current_or_null_safe() != NULL) {
 843     assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
 844     thread->clear_thread_current();
 845   }
 846 
 847   return 0;
 848 }
 849 
 850 bool os::create_thread(Thread* thread, ThreadType thr_type,
 851                        size_t req_stack_size) {
 852 
 853   assert(thread->osthread() == NULL, "caller responsible");
 854 
 855   // Allocate the OSThread object.
 856   OSThread* osthread = new OSThread(NULL, NULL);
 857   if (osthread == NULL) {
 858     return false;
 859   }
 860 
 861   // Set the correct thread state.
 862   osthread->set_thread_type(thr_type);
 863 
 864   // Initial state is ALLOCATED but not INITIALIZED
 865   osthread->set_state(ALLOCATED);
 866 
 867   thread->set_osthread(osthread);
 868 
 869   // Init thread attributes.
 870   pthread_attr_t attr;
 871   pthread_attr_init(&attr);
 872   guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");
 873 
 874   // Make sure we run in 1:1 kernel-user-thread mode.
 875   if (os::Aix::on_aix()) {
 876     guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
 877     guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
 878   }
 879 
 880   // Start in suspended state, and in os::thread_start, wake the thread up.
 881   guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");
 882 
 883   // Calculate stack size if it's not specified by caller.
 884   size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
 885   pthread_attr_setstacksize(&attr, stack_size);
 886 
 887   // libc guard page
 888   pthread_attr_setguardsize(&attr, os::Aix::default_guard_size(thr_type));
 889 
 890   pthread_t tid;
 891   int ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);
 892 
 893   char buf[64];
 894   if (ret == 0) {
 895     log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
 896       (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
 897   } else {
 898     log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.",
 899       ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
 900   }
 901 
 902   pthread_attr_destroy(&attr);
 903 
 904   if (ret != 0) {
 905     // Need to clean up stuff we've allocated so far.
 906     thread->set_osthread(NULL);
 907     delete osthread;
 908     return false;
 909   }
 910 
 911   // OSThread::thread_id is the pthread id.
 912   osthread->set_thread_id(tid);
 913 
 914   return true;
 915 }
 916 
 917 /////////////////////////////////////////////////////////////////////////////
 918 // attach existing thread
 919 
 920 // bootstrap the main thread
 921 bool os::create_main_thread(JavaThread* thread) {
 922   assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
 923   return create_attached_thread(thread);
 924 }
 925 
 926 bool os::create_attached_thread(JavaThread* thread) {
 927 #ifdef ASSERT
 928     thread->verify_not_published();
 929 #endif
 930 
 931   // Allocate the OSThread object
 932   OSThread* osthread = new OSThread(NULL, NULL);
 933 
 934   if (osthread == NULL) {
 935     return false;
 936   }
 937 
 938   const pthread_t pthread_id = ::pthread_self();
 939   const tid_t kernel_thread_id = ::thread_self();
 940 
 941   // OSThread::thread_id is the pthread id.
 942   osthread->set_thread_id(pthread_id);
 943 
 944   // .. but keep kernel thread id too for diagnostics
 945   osthread->set_kernel_thread_id(kernel_thread_id);
 946 
 947   // initialize floating point control register
 948   os::Aix::init_thread_fpu_state();
 949 
 950   // Initial thread state is RUNNABLE
 951   osthread->set_state(RUNNABLE);
 952 
 953   thread->set_osthread(osthread);
 954 
 955   if (UseNUMA) {
 956     int lgrp_id = os::numa_get_group_id();
 957     if (lgrp_id != -1) {
 958       thread->set_lgrp_id(lgrp_id);
 959     }
 960   }
 961 
 962   // initialize signal mask for this thread
 963   // and save the caller's signal mask
 964   os::Aix::hotspot_sigmask(thread);
 965 
 966   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
 967     os::current_thread_id(), (uintx) kernel_thread_id);
 968 
 969   return true;
 970 }
 971 
 972 void os::pd_start_thread(Thread* thread) {
 973   int status = pthread_continue_np(thread->osthread()->pthread_id());
 974   assert(status == 0, "thr_continue failed");
 975 }
 976 
 977 // Free OS resources related to the OSThread
 978 void os::free_thread(OSThread* osthread) {
 979   assert(osthread != NULL, "osthread not set");
 980 
 981   // We are told to free resources of the argument thread,
 982   // but we can only really operate on the current thread.
 983   assert(Thread::current()->osthread() == osthread,
 984          "os::free_thread but not current thread");
 985 
 986   // Restore caller's signal mask
 987   sigset_t sigmask = osthread->caller_sigmask();
 988   pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
 989 
 990   delete osthread;
 991 }
 992 
 993 ////////////////////////////////////////////////////////////////////////////////
 994 // time support
 995 
 996 // Time since start-up in seconds to a fine granularity.
 997 // Used by VMSelfDestructTimer and the MemProfiler.
 998 double os::elapsedTime() {
 999   return (double)(os::elapsed_counter()) * 0.000001;
1000 }
1001 
1002 jlong os::elapsed_counter() {
1003   timeval time;
1004   int status = gettimeofday(&time, NULL);
1005   return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
1006 }
1007 
1008 jlong os::elapsed_frequency() {
1009   return (1000 * 1000);
1010 }
1011 
1012 bool os::supports_vtime() { return true; }
1013 bool os::enable_vtime()   { return false; }
1014 bool os::vtime_enabled()  { return false; }
1015 
1016 double os::elapsedVTime() {
1017   struct rusage usage;
1018   int retval = getrusage(RUSAGE_THREAD, &usage);
1019   if (retval == 0) {
1020     return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
1021   } else {
1022     // better than nothing, but not much
1023     return elapsedTime();
1024   }
1025 }
1026 
1027 jlong os::javaTimeMillis() {
1028   timeval time;
1029   int status = gettimeofday(&time, NULL);
1030   assert(status != -1, "aix error at gettimeofday()");
1031   return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1032 }
1033 
1034 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1035   timeval time;
1036   int status = gettimeofday(&time, NULL);
1037   assert(status != -1, "aix error at gettimeofday()");
1038   seconds = jlong(time.tv_sec);
1039   nanos = jlong(time.tv_usec) * 1000;
1040 }
1041 
1042 jlong os::javaTimeNanos() {
1043   if (os::Aix::on_pase()) {
1044 
1045     timeval time;
1046     int status = gettimeofday(&time, NULL);
1047     assert(status != -1, "PASE error at gettimeofday()");
1048     jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
1049     return 1000 * usecs;
1050 
1051   } else {
1052     // On AIX use the precision of processors real time clock
1053     // or time base registers.
1054     timebasestruct_t time;
1055     int rc;
1056 
1057     // If the CPU has a time register, it will be used and
1058     // we have to convert to real time first. After convertion we have following data:
1059     // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
1060     // time.tb_low  [nanoseconds after the last full second above]
1061     // We better use mread_real_time here instead of read_real_time
1062     // to ensure that we will get a monotonic increasing time.
1063     if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
1064       rc = time_base_to_time(&time, TIMEBASE_SZ);
1065       assert(rc != -1, "aix error at time_base_to_time()");
1066     }
1067     return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
1068   }
1069 }
1070 
1071 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1072   info_ptr->max_value = ALL_64_BITS;
1073   // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
1074   info_ptr->may_skip_backward = false;
1075   info_ptr->may_skip_forward = false;
1076   info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
1077 }
1078 
1079 // Return the real, user, and system times in seconds from an
1080 // arbitrary fixed point in the past.
1081 bool os::getTimesSecs(double* process_real_time,
1082                       double* process_user_time,
1083                       double* process_system_time) {
1084   struct tms ticks;
1085   clock_t real_ticks = times(&ticks);
1086 
1087   if (real_ticks == (clock_t) (-1)) {
1088     return false;
1089   } else {
1090     double ticks_per_second = (double) clock_tics_per_sec;
1091     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1092     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1093     *process_real_time = ((double) real_ticks) / ticks_per_second;
1094 
1095     return true;
1096   }
1097 }
1098 
1099 char * os::local_time_string(char *buf, size_t buflen) {
1100   struct tm t;
1101   time_t long_time;
1102   time(&long_time);
1103   localtime_r(&long_time, &t);
1104   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1105                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1106                t.tm_hour, t.tm_min, t.tm_sec);
1107   return buf;
1108 }
1109 
1110 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
1111   return localtime_r(clock, res);
1112 }
1113 
1114 ////////////////////////////////////////////////////////////////////////////////
1115 // runtime exit support
1116 
1117 // Note: os::shutdown() might be called very early during initialization, or
1118 // called from signal handler. Before adding something to os::shutdown(), make
1119 // sure it is async-safe and can handle partially initialized VM.
1120 void os::shutdown() {
1121 
1122   // allow PerfMemory to attempt cleanup of any persistent resources
1123   perfMemory_exit();
1124 
1125   // needs to remove object in file system
1126   AttachListener::abort();
1127 
1128   // flush buffered output, finish log files
1129   ostream_abort();
1130 
1131   // Check for abort hook
1132   abort_hook_t abort_hook = Arguments::abort_hook();
1133   if (abort_hook != NULL) {
1134     abort_hook();
1135   }
1136 }
1137 
1138 // Note: os::abort() might be called very early during initialization, or
1139 // called from signal handler. Before adding something to os::abort(), make
1140 // sure it is async-safe and can handle partially initialized VM.
1141 void os::abort(bool dump_core, void* siginfo, const void* context) {
1142   os::shutdown();
1143   if (dump_core) {
1144 #ifndef PRODUCT
1145     fdStream out(defaultStream::output_fd());
1146     out.print_raw("Current thread is ");
1147     char buf[16];
1148     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1149     out.print_raw_cr(buf);
1150     out.print_raw_cr("Dumping core ...");
1151 #endif
1152     ::abort(); // dump core
1153   }
1154 
1155   ::exit(1);
1156 }
1157 
1158 // Die immediately, no exit hook, no abort hook, no cleanup.
1159 void os::die() {
1160   ::abort();
1161 }
1162 
1163 // This method is a copy of JDK's sysGetLastErrorString
1164 // from src/solaris/hpi/src/system_md.c
1165 
1166 size_t os::lasterror(char *buf, size_t len) {
1167   if (errno == 0) return 0;
1168 
1169   const char *s = os::strerror(errno);
1170   size_t n = ::strlen(s);
1171   if (n >= len) {
1172     n = len - 1;
1173   }
1174   ::strncpy(buf, s, n);
1175   buf[n] = '\0';
1176   return n;
1177 }
1178 
1179 intx os::current_thread_id() {
1180   return (intx)pthread_self();
1181 }
1182 
1183 int os::current_process_id() {
1184   return getpid();
1185 }
1186 
1187 // DLL functions
1188 
1189 const char* os::dll_file_extension() { return ".so"; }
1190 
1191 // This must be hard coded because it's the system's temporary
1192 // directory not the java application's temp directory, ala java.io.tmpdir.
1193 const char* os::get_temp_directory() { return "/tmp"; }
1194 
1195 static bool file_exists(const char* filename) {
1196   struct stat statbuf;
1197   if (filename == NULL || strlen(filename) == 0) {
1198     return false;
1199   }
1200   return os::stat(filename, &statbuf) == 0;
1201 }
1202 
1203 bool os::dll_build_name(char* buffer, size_t buflen,
1204                         const char* pname, const char* fname) {
1205   bool retval = false;
1206   // Copied from libhpi
1207   const size_t pnamelen = pname ? strlen(pname) : 0;
1208 
1209   // Return error on buffer overflow.
1210   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1211     *buffer = '\0';
1212     return retval;
1213   }
1214 
1215   if (pnamelen == 0) {
1216     snprintf(buffer, buflen, "lib%s.so", fname);
1217     retval = true;
1218   } else if (strchr(pname, *os::path_separator()) != NULL) {
1219     int n;
1220     char** pelements = split_path(pname, &n);
1221     if (pelements == NULL) {
1222       return false;
1223     }
1224     for (int i = 0; i < n; i++) {
1225       // Really shouldn't be NULL, but check can't hurt
1226       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1227         continue; // skip the empty path values
1228       }
1229       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1230       if (file_exists(buffer)) {
1231         retval = true;
1232         break;
1233       }
1234     }
1235     // release the storage
1236     for (int i = 0; i < n; i++) {
1237       if (pelements[i] != NULL) {
1238         FREE_C_HEAP_ARRAY(char, pelements[i]);
1239       }
1240     }
1241     if (pelements != NULL) {
1242       FREE_C_HEAP_ARRAY(char*, pelements);
1243     }
1244   } else {
1245     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1246     retval = true;
1247   }
1248   return retval;
1249 }
1250 
1251 // Check if addr is inside libjvm.so.
1252 bool os::address_is_in_vm(address addr) {
1253 
1254   // Input could be a real pc or a function pointer literal. The latter
1255   // would be a function descriptor residing in the data segment of a module.
1256   loaded_module_t lm;
1257   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
1258     return lm.is_in_vm;
1259   } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
1260     return lm.is_in_vm;
1261   } else {
1262     return false;
1263   }
1264 
1265 }
1266 
1267 // Resolve an AIX function descriptor literal to a code pointer.
1268 // If the input is a valid code pointer to a text segment of a loaded module,
1269 //   it is returned unchanged.
1270 // If the input is a valid AIX function descriptor, it is resolved to the
1271 //   code entry point.
1272 // If the input is neither a valid function descriptor nor a valid code pointer,
1273 //   NULL is returned.
1274 static address resolve_function_descriptor_to_code_pointer(address p) {
1275 
1276   if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
1277     // It is a real code pointer.
1278     return p;
1279   } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
1280     // Pointer to data segment, potential function descriptor.
1281     address code_entry = (address)(((FunctionDescriptor*)p)->entry());
1282     if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
1283       // It is a function descriptor.
1284       return code_entry;
1285     }
1286   }
1287 
1288   return NULL;
1289 }
1290 
1291 bool os::dll_address_to_function_name(address addr, char *buf,
1292                                       int buflen, int *offset,
1293                                       bool demangle) {
1294   if (offset) {
1295     *offset = -1;
1296   }
1297   // Buf is not optional, but offset is optional.
1298   assert(buf != NULL, "sanity check");
1299   buf[0] = '\0';
1300 
1301   // Resolve function ptr literals first.
1302   addr = resolve_function_descriptor_to_code_pointer(addr);
1303   if (!addr) {
1304     return false;
1305   }
1306 
1307   return AixSymbols::get_function_name(addr, buf, buflen, offset, NULL, demangle);
1308 }
1309 
1310 bool os::dll_address_to_library_name(address addr, char* buf,
1311                                      int buflen, int* offset) {
1312   if (offset) {
1313     *offset = -1;
1314   }
1315   // Buf is not optional, but offset is optional.
1316   assert(buf != NULL, "sanity check");
1317   buf[0] = '\0';
1318 
1319   // Resolve function ptr literals first.
1320   addr = resolve_function_descriptor_to_code_pointer(addr);
1321   if (!addr) {
1322     return false;
1323   }
1324 
1325   return AixSymbols::get_module_name(addr, buf, buflen);
1326 }
1327 
1328 // Loads .dll/.so and in case of error it checks if .dll/.so was built
1329 // for the same architecture as Hotspot is running on.
1330 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1331 
1332   if (ebuf && ebuflen > 0) {
1333     ebuf[0] = '\0';
1334     ebuf[ebuflen - 1] = '\0';
1335   }
1336 
1337   if (!filename || strlen(filename) == 0) {
1338     ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1339     return NULL;
1340   }
1341 
1342   // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1343   void * result= ::dlopen(filename, RTLD_LAZY);
1344   if (result != NULL) {
1345     // Reload dll cache. Don't do this in signal handling.
1346     LoadedLibraries::reload();
1347     return result;
1348   } else {
1349     // error analysis when dlopen fails
1350     const char* const error_report = ::dlerror();
1351     if (error_report && ebuf && ebuflen > 0) {
1352       snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1353                filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1354     }
1355   }
1356   return NULL;
1357 }
1358 
1359 void* os::dll_lookup(void* handle, const char* name) {
1360   void* res = dlsym(handle, name);
1361   return res;
1362 }
1363 
1364 void* os::get_default_process_handle() {
1365   return (void*)::dlopen(NULL, RTLD_LAZY);
1366 }
1367 
1368 void os::print_dll_info(outputStream *st) {
1369   st->print_cr("Dynamic libraries:");
1370   LoadedLibraries::print(st);
1371 }
1372 
1373 void os::get_summary_os_info(char* buf, size_t buflen) {
1374   // There might be something more readable than uname results for AIX.
1375   struct utsname name;
1376   uname(&name);
1377   snprintf(buf, buflen, "%s %s", name.release, name.version);
1378 }
1379 
1380 void os::print_os_info(outputStream* st) {
1381   st->print("OS:");
1382 
1383   st->print("uname:");
1384   struct utsname name;
1385   uname(&name);
1386   st->print(name.sysname); st->print(" ");
1387   st->print(name.nodename); st->print(" ");
1388   st->print(name.release); st->print(" ");
1389   st->print(name.version); st->print(" ");
1390   st->print(name.machine);
1391   st->cr();
1392 
1393   uint32_t ver = os::Aix::os_version();
1394   st->print_cr("AIX kernel version %u.%u.%u.%u",
1395                (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1396 
1397   // rlimit
1398   st->print("rlimit:");
1399   struct rlimit rlim;
1400 
1401   st->print(" STACK ");
1402   getrlimit(RLIMIT_STACK, &rlim);
1403   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1404   else st->print("%uk", rlim.rlim_cur >> 10);
1405 
1406   st->print(", CORE ");
1407   getrlimit(RLIMIT_CORE, &rlim);
1408   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1409   else st->print("%uk", rlim.rlim_cur >> 10);
1410 
1411   st->print(", NPROC ");
1412   st->print("%d", sysconf(_SC_CHILD_MAX));
1413 
1414   st->print(", NOFILE ");
1415   getrlimit(RLIMIT_NOFILE, &rlim);
1416   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1417   else st->print("%d", rlim.rlim_cur);
1418 
1419   st->print(", AS ");
1420   getrlimit(RLIMIT_AS, &rlim);
1421   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1422   else st->print("%uk", rlim.rlim_cur >> 10);
1423 
1424   // Print limits on DATA, because it limits the C-heap.
1425   st->print(", DATA ");
1426   getrlimit(RLIMIT_DATA, &rlim);
1427   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1428   else st->print("%uk", rlim.rlim_cur >> 10);
1429   st->cr();
1430 
1431   // load average
1432   st->print("load average:");
1433   double loadavg[3] = {-1.L, -1.L, -1.L};
1434   os::loadavg(loadavg, 3);
1435   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1436   st->cr();
1437 
1438   // print wpar info
1439   libperfstat::wparinfo_t wi;
1440   if (libperfstat::get_wparinfo(&wi)) {
1441     st->print_cr("wpar info");
1442     st->print_cr("name: %s", wi.name);
1443     st->print_cr("id:   %d", wi.wpar_id);
1444     st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1445   }
1446 
1447   // print partition info
1448   libperfstat::partitioninfo_t pi;
1449   if (libperfstat::get_partitioninfo(&pi)) {
1450     st->print_cr("partition info");
1451     st->print_cr(" name: %s", pi.name);
1452   }
1453 
1454 }
1455 
1456 void os::print_memory_info(outputStream* st) {
1457 
1458   st->print_cr("Memory:");
1459 
1460   st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1461     describe_pagesize(g_multipage_support.pagesize));
1462   st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1463     describe_pagesize(g_multipage_support.datapsize));
1464   st->print_cr("  Text page size:                         %s",
1465     describe_pagesize(g_multipage_support.textpsize));
1466   st->print_cr("  Thread stack page size (pthread):       %s",
1467     describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1468   st->print_cr("  Default shared memory page size:        %s",
1469     describe_pagesize(g_multipage_support.shmpsize));
1470   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1471     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1472   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1473     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1474   st->print_cr("  Multipage error: %d",
1475     g_multipage_support.error);
1476   st->cr();
1477   st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1478 
1479   // print out LDR_CNTRL because it affects the default page sizes
1480   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1481   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1482 
1483   // Print out EXTSHM because it is an unsupported setting.
1484   const char* const extshm = ::getenv("EXTSHM");
1485   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1486   if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1487     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1488   }
1489 
1490   // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1491   const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1492   st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1493       aixthread_guardpages ? aixthread_guardpages : "<unset>");
1494 
1495   os::Aix::meminfo_t mi;
1496   if (os::Aix::get_meminfo(&mi)) {
1497     char buffer[256];
1498     if (os::Aix::on_aix()) {
1499       st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1500       st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1501       st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1502       st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1503     } else {
1504       // PASE - Numbers are result of QWCRSSTS; they mean:
1505       // real_total: Sum of all system pools
1506       // real_free: always 0
1507       // pgsp_total: we take the size of the system ASP
1508       // pgsp_free: size of system ASP times percentage of system ASP unused
1509       st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1510       st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1511       st->print_cr("%% system asp used : " SIZE_FORMAT,
1512         mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1513     }
1514     st->print_raw(buffer);
1515   }
1516   st->cr();
1517 
1518   // Print segments allocated with os::reserve_memory.
1519   st->print_cr("internal virtual memory regions used by vm:");
1520   vmembk_print_on(st);
1521 }
1522 
1523 // Get a string for the cpuinfo that is a summary of the cpu type
1524 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1525   // This looks good
1526   libperfstat::cpuinfo_t ci;
1527   if (libperfstat::get_cpuinfo(&ci)) {
1528     strncpy(buf, ci.version, buflen);
1529   } else {
1530     strncpy(buf, "AIX", buflen);
1531   }
1532 }
1533 
1534 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1535   st->print("CPU:");
1536   st->print("total %d", os::processor_count());
1537   // It's not safe to query number of active processors after crash.
1538   // st->print("(active %d)", os::active_processor_count());
1539   st->print(" %s", VM_Version::features());
1540   st->cr();
1541 }
1542 
1543 static void print_signal_handler(outputStream* st, int sig,
1544                                  char* buf, size_t buflen);
1545 
1546 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1547   st->print_cr("Signal Handlers:");
1548   print_signal_handler(st, SIGSEGV, buf, buflen);
1549   print_signal_handler(st, SIGBUS , buf, buflen);
1550   print_signal_handler(st, SIGFPE , buf, buflen);
1551   print_signal_handler(st, SIGPIPE, buf, buflen);
1552   print_signal_handler(st, SIGXFSZ, buf, buflen);
1553   print_signal_handler(st, SIGILL , buf, buflen);
1554   print_signal_handler(st, SR_signum, buf, buflen);
1555   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1556   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1557   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1558   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1559   print_signal_handler(st, SIGTRAP, buf, buflen);
1560   print_signal_handler(st, SIGDANGER, buf, buflen);
1561 }
1562 
1563 static char saved_jvm_path[MAXPATHLEN] = {0};
1564 
1565 // Find the full path to the current module, libjvm.so.
1566 void os::jvm_path(char *buf, jint buflen) {
1567   // Error checking.
1568   if (buflen < MAXPATHLEN) {
1569     assert(false, "must use a large-enough buffer");
1570     buf[0] = '\0';
1571     return;
1572   }
1573   // Lazy resolve the path to current module.
1574   if (saved_jvm_path[0] != 0) {
1575     strcpy(buf, saved_jvm_path);
1576     return;
1577   }
1578 
1579   Dl_info dlinfo;
1580   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1581   assert(ret != 0, "cannot locate libjvm");
1582   char* rp = realpath((char *)dlinfo.dli_fname, buf);
1583   assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1584 
1585   strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
1586   saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
1587 }
1588 
1589 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1590   // no prefix required, not even "_"
1591 }
1592 
1593 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1594   // no suffix required
1595 }
1596 
1597 ////////////////////////////////////////////////////////////////////////////////
1598 // sun.misc.Signal support
1599 
1600 static volatile jint sigint_count = 0;
1601 
1602 static void
1603 UserHandler(int sig, void *siginfo, void *context) {
1604   // 4511530 - sem_post is serialized and handled by the manager thread. When
1605   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1606   // don't want to flood the manager thread with sem_post requests.
1607   if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1608     return;
1609 
1610   // Ctrl-C is pressed during error reporting, likely because the error
1611   // handler fails to abort. Let VM die immediately.
1612   if (sig == SIGINT && is_error_reported()) {
1613     os::die();
1614   }
1615 
1616   os::signal_notify(sig);
1617 }
1618 
1619 void* os::user_handler() {
1620   return CAST_FROM_FN_PTR(void*, UserHandler);
1621 }
1622 
1623 extern "C" {
1624   typedef void (*sa_handler_t)(int);
1625   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1626 }
1627 
1628 void* os::signal(int signal_number, void* handler) {
1629   struct sigaction sigAct, oldSigAct;
1630 
1631   sigfillset(&(sigAct.sa_mask));
1632 
1633   // Do not block out synchronous signals in the signal handler.
1634   // Blocking synchronous signals only makes sense if you can really
1635   // be sure that those signals won't happen during signal handling,
1636   // when the blocking applies. Normal signal handlers are lean and
1637   // do not cause signals. But our signal handlers tend to be "risky"
1638   // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
1639   // On AIX, PASE there was a case where a SIGSEGV happened, followed
1640   // by a SIGILL, which was blocked due to the signal mask. The process
1641   // just hung forever. Better to crash from a secondary signal than to hang.
1642   sigdelset(&(sigAct.sa_mask), SIGSEGV);
1643   sigdelset(&(sigAct.sa_mask), SIGBUS);
1644   sigdelset(&(sigAct.sa_mask), SIGILL);
1645   sigdelset(&(sigAct.sa_mask), SIGFPE);
1646   sigdelset(&(sigAct.sa_mask), SIGTRAP);
1647 
1648   sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1649 
1650   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1651 
1652   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1653     // -1 means registration failed
1654     return (void *)-1;
1655   }
1656 
1657   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1658 }
1659 
1660 void os::signal_raise(int signal_number) {
1661   ::raise(signal_number);
1662 }
1663 
1664 //
1665 // The following code is moved from os.cpp for making this
1666 // code platform specific, which it is by its very nature.
1667 //
1668 
1669 // Will be modified when max signal is changed to be dynamic
1670 int os::sigexitnum_pd() {
1671   return NSIG;
1672 }
1673 
1674 // a counter for each possible signal value
1675 static volatile jint pending_signals[NSIG+1] = { 0 };
1676 
1677 // Wrapper functions for: sem_init(), sem_post(), sem_wait()
1678 // On AIX, we use sem_init(), sem_post(), sem_wait()
1679 // On Pase, we need to use msem_lock() and msem_unlock(), because Posix Semaphores
1680 // do not seem to work at all on PASE (unimplemented, will cause SIGILL).
1681 // Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
1682 // on AIX, msem_..() calls are suspected of causing problems.
1683 static sem_t sig_sem;
1684 static msemaphore* p_sig_msem = 0;
1685 
1686 static void local_sem_init() {
1687   if (os::Aix::on_aix()) {
1688     int rc = ::sem_init(&sig_sem, 0, 0);
1689     guarantee(rc != -1, "sem_init failed");
1690   } else {
1691     // Memory semaphores must live in shared mem.
1692     guarantee0(p_sig_msem == NULL);
1693     p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
1694     guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
1695     guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
1696   }
1697 }
1698 
1699 static void local_sem_post() {
1700   static bool warn_only_once = false;
1701   if (os::Aix::on_aix()) {
1702     int rc = ::sem_post(&sig_sem);
1703     if (rc == -1 && !warn_only_once) {
1704       trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno));
1705       warn_only_once = true;
1706     }
1707   } else {
1708     guarantee0(p_sig_msem != NULL);
1709     int rc = ::msem_unlock(p_sig_msem, 0);
1710     if (rc == -1 && !warn_only_once) {
1711       trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno));
1712       warn_only_once = true;
1713     }
1714   }
1715 }
1716 
1717 static void local_sem_wait() {
1718   static bool warn_only_once = false;
1719   if (os::Aix::on_aix()) {
1720     int rc = ::sem_wait(&sig_sem);
1721     if (rc == -1 && !warn_only_once) {
1722       trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno));
1723       warn_only_once = true;
1724     }
1725   } else {
1726     guarantee0(p_sig_msem != NULL); // must init before use
1727     int rc = ::msem_lock(p_sig_msem, 0);
1728     if (rc == -1 && !warn_only_once) {
1729       trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno));
1730       warn_only_once = true;
1731     }
1732   }
1733 }
1734 
1735 void os::signal_init_pd() {
1736   // Initialize signal structures
1737   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1738 
1739   // Initialize signal semaphore
1740   local_sem_init();
1741 }
1742 
1743 void os::signal_notify(int sig) {
1744   Atomic::inc(&pending_signals[sig]);
1745   local_sem_post();
1746 }
1747 
1748 static int check_pending_signals(bool wait) {
1749   Atomic::store(0, &sigint_count);
1750   for (;;) {
1751     for (int i = 0; i < NSIG + 1; i++) {
1752       jint n = pending_signals[i];
1753       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1754         return i;
1755       }
1756     }
1757     if (!wait) {
1758       return -1;
1759     }
1760     JavaThread *thread = JavaThread::current();
1761     ThreadBlockInVM tbivm(thread);
1762 
1763     bool threadIsSuspended;
1764     do {
1765       thread->set_suspend_equivalent();
1766       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1767 
1768       local_sem_wait();
1769 
1770       // were we externally suspended while we were waiting?
1771       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1772       if (threadIsSuspended) {
1773         //
1774         // The semaphore has been incremented, but while we were waiting
1775         // another thread suspended us. We don't want to continue running
1776         // while suspended because that would surprise the thread that
1777         // suspended us.
1778         //
1779 
1780         local_sem_post();
1781 
1782         thread->java_suspend_self();
1783       }
1784     } while (threadIsSuspended);
1785   }
1786 }
1787 
1788 int os::signal_lookup() {
1789   return check_pending_signals(false);
1790 }
1791 
1792 int os::signal_wait() {
1793   return check_pending_signals(true);
1794 }
1795 
1796 ////////////////////////////////////////////////////////////////////////////////
1797 // Virtual Memory
1798 
1799 // We need to keep small simple bookkeeping for os::reserve_memory and friends.
1800 
1801 #define VMEM_MAPPED  1
1802 #define VMEM_SHMATED 2
1803 
1804 struct vmembk_t {
1805   int type;         // 1 - mmap, 2 - shmat
1806   char* addr;
1807   size_t size;      // Real size, may be larger than usersize.
1808   size_t pagesize;  // page size of area
1809   vmembk_t* next;
1810 
1811   bool contains_addr(char* p) const {
1812     return p >= addr && p < (addr + size);
1813   }
1814 
1815   bool contains_range(char* p, size_t s) const {
1816     return contains_addr(p) && contains_addr(p + s - 1);
1817   }
1818 
1819   void print_on(outputStream* os) const {
1820     os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1821       " bytes, %d %s pages), %s",
1822       addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1823       (type == VMEM_SHMATED ? "shmat" : "mmap")
1824     );
1825   }
1826 
1827   // Check that range is a sub range of memory block (or equal to memory block);
1828   // also check that range is fully page aligned to the page size if the block.
1829   void assert_is_valid_subrange(char* p, size_t s) const {
1830     if (!contains_range(p, s)) {
1831       trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1832               "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
1833               p, p + s, addr, addr + size);
1834       guarantee0(false);
1835     }
1836     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1837       trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1838               " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
1839       guarantee0(false);
1840     }
1841   }
1842 };
1843 
1844 static struct {
1845   vmembk_t* first;
1846   MiscUtils::CritSect cs;
1847 } vmem;
1848 
1849 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1850   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1851   assert0(p);
1852   if (p) {
1853     MiscUtils::AutoCritSect lck(&vmem.cs);
1854     p->addr = addr; p->size = size;
1855     p->pagesize = pagesize;
1856     p->type = type;
1857     p->next = vmem.first;
1858     vmem.first = p;
1859   }
1860 }
1861 
1862 static vmembk_t* vmembk_find(char* addr) {
1863   MiscUtils::AutoCritSect lck(&vmem.cs);
1864   for (vmembk_t* p = vmem.first; p; p = p->next) {
1865     if (p->addr <= addr && (p->addr + p->size) > addr) {
1866       return p;
1867     }
1868   }
1869   return NULL;
1870 }
1871 
1872 static void vmembk_remove(vmembk_t* p0) {
1873   MiscUtils::AutoCritSect lck(&vmem.cs);
1874   assert0(p0);
1875   assert0(vmem.first); // List should not be empty.
1876   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1877     if (*pp == p0) {
1878       *pp = p0->next;
1879       ::free(p0);
1880       return;
1881     }
1882   }
1883   assert0(false); // Not found?
1884 }
1885 
1886 static void vmembk_print_on(outputStream* os) {
1887   MiscUtils::AutoCritSect lck(&vmem.cs);
1888   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1889     vmi->print_on(os);
1890     os->cr();
1891   }
1892 }
1893 
1894 // Reserve and attach a section of System V memory.
1895 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1896 // address. Failing that, it will attach the memory anywhere.
1897 // If <requested_addr> is NULL, function will attach the memory anywhere.
1898 //
1899 // <alignment_hint> is being ignored by this function. It is very probable however that the
1900 // alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
1901 // Should this be not enogh, we can put more work into it.
1902 static char* reserve_shmated_memory (
1903   size_t bytes,
1904   char* requested_addr,
1905   size_t alignment_hint) {
1906 
1907   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1908     PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1909     bytes, requested_addr, alignment_hint);
1910 
1911   // Either give me wish address or wish alignment but not both.
1912   assert0(!(requested_addr != NULL && alignment_hint != 0));
1913 
1914   // We must prevent anyone from attaching too close to the
1915   // BRK because that may cause malloc OOM.
1916   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1917     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1918       "Will attach anywhere.", requested_addr);
1919     // Act like the OS refused to attach there.
1920     requested_addr = NULL;
1921   }
1922 
1923   // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1924   // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1925   if (os::Aix::on_pase_V5R4_or_older()) {
1926     ShouldNotReachHere();
1927   }
1928 
1929   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1930   const size_t size = align_size_up(bytes, 64*K);
1931 
1932   // Reserve the shared segment.
1933   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1934   if (shmid == -1) {
1935     trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1936     return NULL;
1937   }
1938 
1939   // Important note:
1940   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
1941   // We must right after attaching it remove it from the system. System V shm segments are global and
1942   // survive the process.
1943   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
1944 
1945   struct shmid_ds shmbuf;
1946   memset(&shmbuf, 0, sizeof(shmbuf));
1947   shmbuf.shm_pagesize = 64*K;
1948   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
1949     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
1950                size / (64*K), errno);
1951     // I want to know if this ever happens.
1952     assert(false, "failed to set page size for shmat");
1953   }
1954 
1955   // Now attach the shared segment.
1956   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
1957   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
1958   // were not a segment boundary.
1959   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
1960   const int errno_shmat = errno;
1961 
1962   // (A) Right after shmat and before handing shmat errors delete the shm segment.
1963   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
1964     trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
1965     assert(false, "failed to remove shared memory segment!");
1966   }
1967 
1968   // Handle shmat error. If we failed to attach, just return.
1969   if (addr == (char*)-1) {
1970     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
1971     return NULL;
1972   }
1973 
1974   // Just for info: query the real page size. In case setting the page size did not
1975   // work (see above), the system may have given us something other then 4K (LDR_CNTRL).
1976   const size_t real_pagesize = os::Aix::query_pagesize(addr);
1977   if (real_pagesize != shmbuf.shm_pagesize) {
1978     trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
1979   }
1980 
1981   if (addr) {
1982     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
1983       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
1984   } else {
1985     if (requested_addr != NULL) {
1986       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
1987     } else {
1988       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
1989     }
1990   }
1991 
1992   // book-keeping
1993   vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
1994   assert0(is_aligned_to(addr, os::vm_page_size()));
1995 
1996   return addr;
1997 }
1998 
1999 static bool release_shmated_memory(char* addr, size_t size) {
2000 
2001   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2002     addr, addr + size - 1);
2003 
2004   bool rc = false;
2005 
2006   // TODO: is there a way to verify shm size without doing bookkeeping?
2007   if (::shmdt(addr) != 0) {
2008     trcVerbose("error (%d).", errno);
2009   } else {
2010     trcVerbose("ok.");
2011     rc = true;
2012   }
2013   return rc;
2014 }
2015 
2016 static bool uncommit_shmated_memory(char* addr, size_t size) {
2017   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2018     addr, addr + size - 1);
2019 
2020   const bool rc = my_disclaim64(addr, size);
2021 
2022   if (!rc) {
2023     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2024     return false;
2025   }
2026   return true;
2027 }
2028 
2029 ////////////////////////////////  mmap-based routines /////////////////////////////////
2030 
2031 // Reserve memory via mmap.
2032 // If <requested_addr> is given, an attempt is made to attach at the given address.
2033 // Failing that, memory is allocated at any address.
2034 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2035 // allocate at an address aligned with the given alignment. Failing that, memory
2036 // is aligned anywhere.
2037 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2038   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2039     "alignment_hint " UINTX_FORMAT "...",
2040     bytes, requested_addr, alignment_hint);
2041 
2042   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2043   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
2044     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2045     return NULL;
2046   }
2047 
2048   // We must prevent anyone from attaching too close to the
2049   // BRK because that may cause malloc OOM.
2050   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2051     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2052       "Will attach anywhere.", requested_addr);
2053     // Act like the OS refused to attach there.
2054     requested_addr = NULL;
2055   }
2056 
2057   // Specify one or the other but not both.
2058   assert0(!(requested_addr != NULL && alignment_hint > 0));
2059 
2060   // In 64K mode, we claim the global page size (os::vm_page_size())
2061   // is 64K. This is one of the few points where that illusion may
2062   // break, because mmap() will always return memory aligned to 4K. So
2063   // we must ensure we only ever return memory aligned to 64k.
2064   if (alignment_hint) {
2065     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2066   } else {
2067     alignment_hint = os::vm_page_size();
2068   }
2069 
2070   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2071   const size_t size = align_size_up(bytes, os::vm_page_size());
2072 
2073   // alignment: Allocate memory large enough to include an aligned range of the right size and
2074   // cut off the leading and trailing waste pages.
2075   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2076   const size_t extra_size = size + alignment_hint;
2077 
2078   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2079   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2080   int flags = MAP_ANONYMOUS | MAP_SHARED;
2081 
2082   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2083   // it means if wishaddress is given but MAP_FIXED is not set.
2084   //
2085   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2086   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2087   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2088   // get clobbered.
2089   if (requested_addr != NULL) {
2090     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2091       flags |= MAP_FIXED;
2092     }
2093   }
2094 
2095   char* addr = (char*)::mmap(requested_addr, extra_size,
2096       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2097 
2098   if (addr == MAP_FAILED) {
2099     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2100     return NULL;
2101   }
2102 
2103   // Handle alignment.
2104   char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2105   const size_t waste_pre = addr_aligned - addr;
2106   char* const addr_aligned_end = addr_aligned + size;
2107   const size_t waste_post = extra_size - waste_pre - size;
2108   if (waste_pre > 0) {
2109     ::munmap(addr, waste_pre);
2110   }
2111   if (waste_post > 0) {
2112     ::munmap(addr_aligned_end, waste_post);
2113   }
2114   addr = addr_aligned;
2115 
2116   if (addr) {
2117     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2118       addr, addr + bytes, bytes);
2119   } else {
2120     if (requested_addr != NULL) {
2121       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2122     } else {
2123       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2124     }
2125   }
2126 
2127   // bookkeeping
2128   vmembk_add(addr, size, 4*K, VMEM_MAPPED);
2129 
2130   // Test alignment, see above.
2131   assert0(is_aligned_to(addr, os::vm_page_size()));
2132 
2133   return addr;
2134 }
2135 
2136 static bool release_mmaped_memory(char* addr, size_t size) {
2137   assert0(is_aligned_to(addr, os::vm_page_size()));
2138   assert0(is_aligned_to(size, os::vm_page_size()));
2139 
2140   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2141     addr, addr + size - 1);
2142   bool rc = false;
2143 
2144   if (::munmap(addr, size) != 0) {
2145     trcVerbose("failed (%d)\n", errno);
2146     rc = false;
2147   } else {
2148     trcVerbose("ok.");
2149     rc = true;
2150   }
2151 
2152   return rc;
2153 }
2154 
2155 static bool uncommit_mmaped_memory(char* addr, size_t size) {
2156 
2157   assert0(is_aligned_to(addr, os::vm_page_size()));
2158   assert0(is_aligned_to(size, os::vm_page_size()));
2159 
2160   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2161     addr, addr + size - 1);
2162   bool rc = false;
2163 
2164   // Uncommit mmap memory with msync MS_INVALIDATE.
2165   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2166     trcVerbose("failed (%d)\n", errno);
2167     rc = false;
2168   } else {
2169     trcVerbose("ok.");
2170     rc = true;
2171   }
2172 
2173   return rc;
2174 }
2175 
2176 int os::vm_page_size() {
2177   // Seems redundant as all get out.
2178   assert(os::Aix::page_size() != -1, "must call os::init");
2179   return os::Aix::page_size();
2180 }
2181 
2182 // Aix allocates memory by pages.
2183 int os::vm_allocation_granularity() {
2184   assert(os::Aix::page_size() != -1, "must call os::init");
2185   return os::Aix::page_size();
2186 }
2187 
2188 #ifdef PRODUCT
2189 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2190                                     int err) {
2191   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2192           ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2193           os::errno_name(err), err);
2194 }
2195 #endif
2196 
2197 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2198                                   const char* mesg) {
2199   assert(mesg != NULL, "mesg must be specified");
2200   if (!pd_commit_memory(addr, size, exec)) {
2201     // Add extra info in product mode for vm_exit_out_of_memory():
2202     PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2203     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2204   }
2205 }
2206 
2207 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2208 
2209   assert(is_aligned_to(addr, os::vm_page_size()),
2210     "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2211     p2i(addr), os::vm_page_size());
2212   assert(is_aligned_to(size, os::vm_page_size()),
2213     "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2214     size, os::vm_page_size());
2215 
2216   vmembk_t* const vmi = vmembk_find(addr);
2217   guarantee0(vmi);
2218   vmi->assert_is_valid_subrange(addr, size);
2219 
2220   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2221 
2222   if (UseExplicitCommit) {
2223     // AIX commits memory on touch. So, touch all pages to be committed.
2224     for (char* p = addr; p < (addr + size); p += 4*K) {
2225       *p = '\0';
2226     }
2227   }
2228 
2229   return true;
2230 }
2231 
2232 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2233   return pd_commit_memory(addr, size, exec);
2234 }
2235 
2236 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2237                                   size_t alignment_hint, bool exec,
2238                                   const char* mesg) {
2239   // Alignment_hint is ignored on this OS.
2240   pd_commit_memory_or_exit(addr, size, exec, mesg);
2241 }
2242 
2243 bool os::pd_uncommit_memory(char* addr, size_t size) {
2244   assert(is_aligned_to(addr, os::vm_page_size()),
2245     "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2246     p2i(addr), os::vm_page_size());
2247   assert(is_aligned_to(size, os::vm_page_size()),
2248     "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2249     size, os::vm_page_size());
2250 
2251   // Dynamically do different things for mmap/shmat.
2252   const vmembk_t* const vmi = vmembk_find(addr);
2253   guarantee0(vmi);
2254   vmi->assert_is_valid_subrange(addr, size);
2255 
2256   if (vmi->type == VMEM_SHMATED) {
2257     return uncommit_shmated_memory(addr, size);
2258   } else {
2259     return uncommit_mmaped_memory(addr, size);
2260   }
2261 }
2262 
2263 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2264   // Do not call this; no need to commit stack pages on AIX.
2265   ShouldNotReachHere();
2266   return true;
2267 }
2268 
2269 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2270   // Do not call this; no need to commit stack pages on AIX.
2271   ShouldNotReachHere();
2272   return true;
2273 }
2274 
2275 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2276 }
2277 
2278 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2279 }
2280 
2281 void os::numa_make_global(char *addr, size_t bytes) {
2282 }
2283 
2284 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2285 }
2286 
2287 bool os::numa_topology_changed() {
2288   return false;
2289 }
2290 
2291 size_t os::numa_get_groups_num() {
2292   return 1;
2293 }
2294 
2295 int os::numa_get_group_id() {
2296   return 0;
2297 }
2298 
2299 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2300   if (size > 0) {
2301     ids[0] = 0;
2302     return 1;
2303   }
2304   return 0;
2305 }
2306 
2307 bool os::get_page_info(char *start, page_info* info) {
2308   return false;
2309 }
2310 
2311 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2312   return end;
2313 }
2314 
2315 // Reserves and attaches a shared memory segment.
2316 // Will assert if a wish address is given and could not be obtained.
2317 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2318 
2319   // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2320   // thereby clobbering old mappings at that place. That is probably
2321   // not intended, never used and almost certainly an error were it
2322   // ever be used this way (to try attaching at a specified address
2323   // without clobbering old mappings an alternate API exists,
2324   // os::attempt_reserve_memory_at()).
2325   // Instead of mimicking the dangerous coding of the other platforms, here I
2326   // just ignore the request address (release) or assert(debug).
2327   assert0(requested_addr == NULL);
2328 
2329   // Always round to os::vm_page_size(), which may be larger than 4K.
2330   bytes = align_size_up(bytes, os::vm_page_size());
2331   const size_t alignment_hint0 =
2332     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2333 
2334   // In 4K mode always use mmap.
2335   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2336   if (os::vm_page_size() == 4*K) {
2337     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2338   } else {
2339     if (bytes >= Use64KPagesThreshold) {
2340       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2341     } else {
2342       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2343     }
2344   }
2345 }
2346 
2347 bool os::pd_release_memory(char* addr, size_t size) {
2348 
2349   // Dynamically do different things for mmap/shmat.
2350   vmembk_t* const vmi = vmembk_find(addr);
2351   guarantee0(vmi);
2352 
2353   // Always round to os::vm_page_size(), which may be larger than 4K.
2354   size = align_size_up(size, os::vm_page_size());
2355   addr = (char *)align_ptr_up(addr, os::vm_page_size());
2356 
2357   bool rc = false;
2358   bool remove_bookkeeping = false;
2359   if (vmi->type == VMEM_SHMATED) {
2360     // For shmatted memory, we do:
2361     // - If user wants to release the whole range, release the memory (shmdt).
2362     // - If user only wants to release a partial range, uncommit (disclaim) that
2363     //   range. That way, at least, we do not use memory anymore (bust still page
2364     //   table space).
2365     vmi->assert_is_valid_subrange(addr, size);
2366     if (addr == vmi->addr && size == vmi->size) {
2367       rc = release_shmated_memory(addr, size);
2368       remove_bookkeeping = true;
2369     } else {
2370       rc = uncommit_shmated_memory(addr, size);
2371     }
2372   } else {
2373     // User may unmap partial regions but region has to be fully contained.
2374 #ifdef ASSERT
2375     vmi->assert_is_valid_subrange(addr, size);
2376 #endif
2377     rc = release_mmaped_memory(addr, size);
2378     remove_bookkeeping = true;
2379   }
2380 
2381   // update bookkeeping
2382   if (rc && remove_bookkeeping) {
2383     vmembk_remove(vmi);
2384   }
2385 
2386   return rc;
2387 }
2388 
2389 static bool checked_mprotect(char* addr, size_t size, int prot) {
2390 
2391   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2392   // not tell me if protection failed when trying to protect an un-protectable range.
2393   //
2394   // This means if the memory was allocated using shmget/shmat, protection wont work
2395   // but mprotect will still return 0:
2396   //
2397   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2398 
2399   bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2400 
2401   if (!rc) {
2402     const char* const s_errno = os::errno_name(errno);
2403     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2404     return false;
2405   }
2406 
2407   // mprotect success check
2408   //
2409   // Mprotect said it changed the protection but can I believe it?
2410   //
2411   // To be sure I need to check the protection afterwards. Try to
2412   // read from protected memory and check whether that causes a segfault.
2413   //
2414   if (!os::Aix::xpg_sus_mode()) {
2415 
2416     if (CanUseSafeFetch32()) {
2417 
2418       const bool read_protected =
2419         (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2420          SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2421 
2422       if (prot & PROT_READ) {
2423         rc = !read_protected;
2424       } else {
2425         rc = read_protected;
2426       }
2427 
2428       if (!rc) {
2429         if (os::Aix::on_pase()) {
2430           // There is an issue on older PASE systems where mprotect() will return success but the
2431           // memory will not be protected.
2432           // This has nothing to do with the problem of using mproect() on SPEC1170 incompatible
2433           // machines; we only see it rarely, when using mprotect() to protect the guard page of
2434           // a stack. It is an OS error.
2435           //
2436           // A valid strategy is just to try again. This usually works. :-/
2437 
2438           ::usleep(1000);
2439           if (::mprotect(addr, size, prot) == 0) {
2440             const bool read_protected_2 =
2441               (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2442               SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2443             rc = true;
2444           }
2445         }
2446       }
2447     }
2448   }
2449 
2450   assert(rc == true, "mprotect failed.");
2451 
2452   return rc;
2453 }
2454 
2455 // Set protections specified
2456 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2457   unsigned int p = 0;
2458   switch (prot) {
2459   case MEM_PROT_NONE: p = PROT_NONE; break;
2460   case MEM_PROT_READ: p = PROT_READ; break;
2461   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2462   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2463   default:
2464     ShouldNotReachHere();
2465   }
2466   // is_committed is unused.
2467   return checked_mprotect(addr, size, p);
2468 }
2469 
2470 bool os::guard_memory(char* addr, size_t size) {
2471   return checked_mprotect(addr, size, PROT_NONE);
2472 }
2473 
2474 bool os::unguard_memory(char* addr, size_t size) {
2475   return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2476 }
2477 
2478 // Large page support
2479 
2480 static size_t _large_page_size = 0;
2481 
2482 // Enable large page support if OS allows that.
2483 void os::large_page_init() {
2484   return; // Nothing to do. See query_multipage_support and friends.
2485 }
2486 
2487 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2488   // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
2489   // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
2490   // so this is not needed.
2491   assert(false, "should not be called on AIX");
2492   return NULL;
2493 }
2494 
2495 bool os::release_memory_special(char* base, size_t bytes) {
2496   // Detaching the SHM segment will also delete it, see reserve_memory_special().
2497   Unimplemented();
2498   return false;
2499 }
2500 
2501 size_t os::large_page_size() {
2502   return _large_page_size;
2503 }
2504 
2505 bool os::can_commit_large_page_memory() {
2506   // Does not matter, we do not support huge pages.
2507   return false;
2508 }
2509 
2510 bool os::can_execute_large_page_memory() {
2511   // Does not matter, we do not support huge pages.
2512   return false;
2513 }
2514 
2515 // Reserve memory at an arbitrary address, only if that area is
2516 // available (and not reserved for something else).
2517 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2518   char* addr = NULL;
2519 
2520   // Always round to os::vm_page_size(), which may be larger than 4K.
2521   bytes = align_size_up(bytes, os::vm_page_size());
2522 
2523   // In 4K mode always use mmap.
2524   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2525   if (os::vm_page_size() == 4*K) {
2526     return reserve_mmaped_memory(bytes, requested_addr, 0);
2527   } else {
2528     if (bytes >= Use64KPagesThreshold) {
2529       return reserve_shmated_memory(bytes, requested_addr, 0);
2530     } else {
2531       return reserve_mmaped_memory(bytes, requested_addr, 0);
2532     }
2533   }
2534 
2535   return addr;
2536 }
2537 
2538 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2539   return ::read(fd, buf, nBytes);
2540 }
2541 
2542 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2543   return ::pread(fd, buf, nBytes, offset);
2544 }
2545 
2546 void os::naked_short_sleep(jlong ms) {
2547   struct timespec req;
2548 
2549   assert(ms < 1000, "Un-interruptable sleep, short time use only");
2550   req.tv_sec = 0;
2551   if (ms > 0) {
2552     req.tv_nsec = (ms % 1000) * 1000000;
2553   }
2554   else {
2555     req.tv_nsec = 1;
2556   }
2557 
2558   nanosleep(&req, NULL);
2559 
2560   return;
2561 }
2562 
2563 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2564 void os::infinite_sleep() {
2565   while (true) {    // sleep forever ...
2566     ::sleep(100);   // ... 100 seconds at a time
2567   }
2568 }
2569 
2570 // Used to convert frequent JVM_Yield() to nops
2571 bool os::dont_yield() {
2572   return DontYieldALot;
2573 }
2574 
2575 void os::naked_yield() {
2576   sched_yield();
2577 }
2578 
2579 ////////////////////////////////////////////////////////////////////////////////
2580 // thread priority support
2581 
2582 // From AIX manpage to pthread_setschedparam
2583 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2584 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2585 //
2586 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2587 // range from 40 to 80, where 40 is the least favored priority and 80
2588 // is the most favored."
2589 //
2590 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2591 // scheduling there; however, this still leaves iSeries.)
2592 //
2593 // We use the same values for AIX and PASE.
2594 int os::java_to_os_priority[CriticalPriority + 1] = {
2595   54,             // 0 Entry should never be used
2596 
2597   55,             // 1 MinPriority
2598   55,             // 2
2599   56,             // 3
2600 
2601   56,             // 4
2602   57,             // 5 NormPriority
2603   57,             // 6
2604 
2605   58,             // 7
2606   58,             // 8
2607   59,             // 9 NearMaxPriority
2608 
2609   60,             // 10 MaxPriority
2610 
2611   60              // 11 CriticalPriority
2612 };
2613 
2614 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2615   if (!UseThreadPriorities) return OS_OK;
2616   pthread_t thr = thread->osthread()->pthread_id();
2617   int policy = SCHED_OTHER;
2618   struct sched_param param;
2619   param.sched_priority = newpri;
2620   int ret = pthread_setschedparam(thr, policy, &param);
2621 
2622   if (ret != 0) {
2623     trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2624         (int)thr, newpri, ret, os::errno_name(ret));
2625   }
2626   return (ret == 0) ? OS_OK : OS_ERR;
2627 }
2628 
2629 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2630   if (!UseThreadPriorities) {
2631     *priority_ptr = java_to_os_priority[NormPriority];
2632     return OS_OK;
2633   }
2634   pthread_t thr = thread->osthread()->pthread_id();
2635   int policy = SCHED_OTHER;
2636   struct sched_param param;
2637   int ret = pthread_getschedparam(thr, &policy, &param);
2638   *priority_ptr = param.sched_priority;
2639 
2640   return (ret == 0) ? OS_OK : OS_ERR;
2641 }
2642 
2643 // Hint to the underlying OS that a task switch would not be good.
2644 // Void return because it's a hint and can fail.
2645 void os::hint_no_preempt() {}
2646 
2647 ////////////////////////////////////////////////////////////////////////////////
2648 // suspend/resume support
2649 
2650 //  the low-level signal-based suspend/resume support is a remnant from the
2651 //  old VM-suspension that used to be for java-suspension, safepoints etc,
2652 //  within hotspot. Now there is a single use-case for this:
2653 //    - calling get_thread_pc() on the VMThread by the flat-profiler task
2654 //      that runs in the watcher thread.
2655 //  The remaining code is greatly simplified from the more general suspension
2656 //  code that used to be used.
2657 //
2658 //  The protocol is quite simple:
2659 //  - suspend:
2660 //      - sends a signal to the target thread
2661 //      - polls the suspend state of the osthread using a yield loop
2662 //      - target thread signal handler (SR_handler) sets suspend state
2663 //        and blocks in sigsuspend until continued
2664 //  - resume:
2665 //      - sets target osthread state to continue
2666 //      - sends signal to end the sigsuspend loop in the SR_handler
2667 //
2668 //  Note that the SR_lock plays no role in this suspend/resume protocol,
2669 //  but is checked for NULL in SR_handler as a thread termination indicator.
2670 //
2671 
2672 static void resume_clear_context(OSThread *osthread) {
2673   osthread->set_ucontext(NULL);
2674   osthread->set_siginfo(NULL);
2675 }
2676 
2677 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2678   osthread->set_ucontext(context);
2679   osthread->set_siginfo(siginfo);
2680 }
2681 
2682 //
2683 // Handler function invoked when a thread's execution is suspended or
2684 // resumed. We have to be careful that only async-safe functions are
2685 // called here (Note: most pthread functions are not async safe and
2686 // should be avoided.)
2687 //
2688 // Note: sigwait() is a more natural fit than sigsuspend() from an
2689 // interface point of view, but sigwait() prevents the signal hander
2690 // from being run. libpthread would get very confused by not having
2691 // its signal handlers run and prevents sigwait()'s use with the
2692 // mutex granting granting signal.
2693 //
2694 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
2695 //
2696 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2697   // Save and restore errno to avoid confusing native code with EINTR
2698   // after sigsuspend.
2699   int old_errno = errno;
2700 
2701   Thread* thread = Thread::current_or_null_safe();
2702   assert(thread != NULL, "Missing current thread in SR_handler");
2703 
2704   // On some systems we have seen signal delivery get "stuck" until the signal
2705   // mask is changed as part of thread termination. Check that the current thread
2706   // has not already terminated (via SR_lock()) - else the following assertion
2707   // will fail because the thread is no longer a JavaThread as the ~JavaThread
2708   // destructor has completed.
2709 
2710   if (thread->SR_lock() == NULL) {
2711     return;
2712   }
2713 
2714   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2715 
2716   OSThread* osthread = thread->osthread();
2717 
2718   os::SuspendResume::State current = osthread->sr.state();
2719   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2720     suspend_save_context(osthread, siginfo, context);
2721 
2722     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2723     os::SuspendResume::State state = osthread->sr.suspended();
2724     if (state == os::SuspendResume::SR_SUSPENDED) {
2725       sigset_t suspend_set;  // signals for sigsuspend()
2726 
2727       // get current set of blocked signals and unblock resume signal
2728       pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2729       sigdelset(&suspend_set, SR_signum);
2730 
2731       // wait here until we are resumed
2732       while (1) {
2733         sigsuspend(&suspend_set);
2734 
2735         os::SuspendResume::State result = osthread->sr.running();
2736         if (result == os::SuspendResume::SR_RUNNING) {
2737           break;
2738         }
2739       }
2740 
2741     } else if (state == os::SuspendResume::SR_RUNNING) {
2742       // request was cancelled, continue
2743     } else {
2744       ShouldNotReachHere();
2745     }
2746 
2747     resume_clear_context(osthread);
2748   } else if (current == os::SuspendResume::SR_RUNNING) {
2749     // request was cancelled, continue
2750   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2751     // ignore
2752   } else {
2753     ShouldNotReachHere();
2754   }
2755 
2756   errno = old_errno;
2757 }
2758 
2759 static int SR_initialize() {
2760   struct sigaction act;
2761   char *s;
2762   // Get signal number to use for suspend/resume
2763   if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
2764     int sig = ::strtol(s, 0, 10);
2765     if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
2766         sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
2767       SR_signum = sig;
2768     } else {
2769       warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
2770               sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
2771     }
2772   }
2773 
2774   assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
2775         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
2776 
2777   sigemptyset(&SR_sigset);
2778   sigaddset(&SR_sigset, SR_signum);
2779 
2780   // Set up signal handler for suspend/resume.
2781   act.sa_flags = SA_RESTART|SA_SIGINFO;
2782   act.sa_handler = (void (*)(int)) SR_handler;
2783 
2784   // SR_signum is blocked by default.
2785   pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
2786 
2787   if (sigaction(SR_signum, &act, 0) == -1) {
2788     return -1;
2789   }
2790 
2791   // Save signal flag
2792   os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
2793   return 0;
2794 }
2795 
2796 static int SR_finalize() {
2797   return 0;
2798 }
2799 
2800 static int sr_notify(OSThread* osthread) {
2801   int status = pthread_kill(osthread->pthread_id(), SR_signum);
2802   assert_status(status == 0, status, "pthread_kill");
2803   return status;
2804 }
2805 
2806 // "Randomly" selected value for how long we want to spin
2807 // before bailing out on suspending a thread, also how often
2808 // we send a signal to a thread we want to resume
2809 static const int RANDOMLY_LARGE_INTEGER = 1000000;
2810 static const int RANDOMLY_LARGE_INTEGER2 = 100;
2811 
2812 // returns true on success and false on error - really an error is fatal
2813 // but this seems the normal response to library errors
2814 static bool do_suspend(OSThread* osthread) {
2815   assert(osthread->sr.is_running(), "thread should be running");
2816   // mark as suspended and send signal
2817 
2818   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
2819     // failed to switch, state wasn't running?
2820     ShouldNotReachHere();
2821     return false;
2822   }
2823 
2824   if (sr_notify(osthread) != 0) {
2825     // try to cancel, switch to running
2826 
2827     os::SuspendResume::State result = osthread->sr.cancel_suspend();
2828     if (result == os::SuspendResume::SR_RUNNING) {
2829       // cancelled
2830       return false;
2831     } else if (result == os::SuspendResume::SR_SUSPENDED) {
2832       // somehow managed to suspend
2833       return true;
2834     } else {
2835       ShouldNotReachHere();
2836       return false;
2837     }
2838   }
2839 
2840   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
2841 
2842   for (int n = 0; !osthread->sr.is_suspended(); n++) {
2843     for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
2844       os::naked_yield();
2845     }
2846 
2847     // timeout, try to cancel the request
2848     if (n >= RANDOMLY_LARGE_INTEGER) {
2849       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
2850       if (cancelled == os::SuspendResume::SR_RUNNING) {
2851         return false;
2852       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
2853         return true;
2854       } else {
2855         ShouldNotReachHere();
2856         return false;
2857       }
2858     }
2859   }
2860 
2861   guarantee(osthread->sr.is_suspended(), "Must be suspended");
2862   return true;
2863 }
2864 
2865 static void do_resume(OSThread* osthread) {
2866   //assert(osthread->sr.is_suspended(), "thread should be suspended");
2867 
2868   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
2869     // failed to switch to WAKEUP_REQUEST
2870     ShouldNotReachHere();
2871     return;
2872   }
2873 
2874   while (!osthread->sr.is_running()) {
2875     if (sr_notify(osthread) == 0) {
2876       for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
2877         for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
2878           os::naked_yield();
2879         }
2880       }
2881     } else {
2882       ShouldNotReachHere();
2883     }
2884   }
2885 
2886   guarantee(osthread->sr.is_running(), "Must be running!");
2887 }
2888 
2889 ///////////////////////////////////////////////////////////////////////////////////
2890 // signal handling (except suspend/resume)
2891 
2892 // This routine may be used by user applications as a "hook" to catch signals.
2893 // The user-defined signal handler must pass unrecognized signals to this
2894 // routine, and if it returns true (non-zero), then the signal handler must
2895 // return immediately. If the flag "abort_if_unrecognized" is true, then this
2896 // routine will never retun false (zero), but instead will execute a VM panic
2897 // routine kill the process.
2898 //
2899 // If this routine returns false, it is OK to call it again. This allows
2900 // the user-defined signal handler to perform checks either before or after
2901 // the VM performs its own checks. Naturally, the user code would be making
2902 // a serious error if it tried to handle an exception (such as a null check
2903 // or breakpoint) that the VM was generating for its own correct operation.
2904 //
2905 // This routine may recognize any of the following kinds of signals:
2906 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2907 // It should be consulted by handlers for any of those signals.
2908 //
2909 // The caller of this routine must pass in the three arguments supplied
2910 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
2911 // field of the structure passed to sigaction(). This routine assumes that
2912 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2913 //
2914 // Note that the VM will print warnings if it detects conflicting signal
2915 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2916 //
2917 extern "C" JNIEXPORT int
2918 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2919 
2920 // Set thread signal mask (for some reason on AIX sigthreadmask() seems
2921 // to be the thing to call; documentation is not terribly clear about whether
2922 // pthread_sigmask also works, and if it does, whether it does the same.
2923 bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
2924   const int rc = ::pthread_sigmask(how, set, oset);
2925   // return value semantics differ slightly for error case:
2926   // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno
2927   // (so, pthread_sigmask is more theadsafe for error handling)
2928   // But success is always 0.
2929   return rc == 0 ? true : false;
2930 }
2931 
2932 // Function to unblock all signals which are, according
2933 // to POSIX, typical program error signals. If they happen while being blocked,
2934 // they typically will bring down the process immediately.
2935 bool unblock_program_error_signals() {
2936   sigset_t set;
2937   ::sigemptyset(&set);
2938   ::sigaddset(&set, SIGILL);
2939   ::sigaddset(&set, SIGBUS);
2940   ::sigaddset(&set, SIGFPE);
2941   ::sigaddset(&set, SIGSEGV);
2942   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2943 }
2944 
2945 // Renamed from 'signalHandler' to avoid collision with other shared libs.
2946 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2947   assert(info != NULL && uc != NULL, "it must be old kernel");
2948 
2949   // Never leave program error signals blocked;
2950   // on all our platforms they would bring down the process immediately when
2951   // getting raised while being blocked.
2952   unblock_program_error_signals();
2953 
2954   int orig_errno = errno;  // Preserve errno value over signal handler.
2955   JVM_handle_aix_signal(sig, info, uc, true);
2956   errno = orig_errno;
2957 }
2958 
2959 // This boolean allows users to forward their own non-matching signals
2960 // to JVM_handle_aix_signal, harmlessly.
2961 bool os::Aix::signal_handlers_are_installed = false;
2962 
2963 // For signal-chaining
2964 struct sigaction sigact[NSIG];
2965 sigset_t sigs;
2966 bool os::Aix::libjsig_is_loaded = false;
2967 typedef struct sigaction *(*get_signal_t)(int);
2968 get_signal_t os::Aix::get_signal_action = NULL;
2969 
2970 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2971   struct sigaction *actp = NULL;
2972 
2973   if (libjsig_is_loaded) {
2974     // Retrieve the old signal handler from libjsig
2975     actp = (*get_signal_action)(sig);
2976   }
2977   if (actp == NULL) {
2978     // Retrieve the preinstalled signal handler from jvm
2979     actp = get_preinstalled_handler(sig);
2980   }
2981 
2982   return actp;
2983 }
2984 
2985 static bool call_chained_handler(struct sigaction *actp, int sig,
2986                                  siginfo_t *siginfo, void *context) {
2987   // Call the old signal handler
2988   if (actp->sa_handler == SIG_DFL) {
2989     // It's more reasonable to let jvm treat it as an unexpected exception
2990     // instead of taking the default action.
2991     return false;
2992   } else if (actp->sa_handler != SIG_IGN) {
2993     if ((actp->sa_flags & SA_NODEFER) == 0) {
2994       // automaticlly block the signal
2995       sigaddset(&(actp->sa_mask), sig);
2996     }
2997 
2998     sa_handler_t hand = NULL;
2999     sa_sigaction_t sa = NULL;
3000     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3001     // retrieve the chained handler
3002     if (siginfo_flag_set) {
3003       sa = actp->sa_sigaction;
3004     } else {
3005       hand = actp->sa_handler;
3006     }
3007 
3008     if ((actp->sa_flags & SA_RESETHAND) != 0) {
3009       actp->sa_handler = SIG_DFL;
3010     }
3011 
3012     // try to honor the signal mask
3013     sigset_t oset;
3014     pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3015 
3016     // call into the chained handler
3017     if (siginfo_flag_set) {
3018       (*sa)(sig, siginfo, context);
3019     } else {
3020       (*hand)(sig);
3021     }
3022 
3023     // restore the signal mask
3024     pthread_sigmask(SIG_SETMASK, &oset, 0);
3025   }
3026   // Tell jvm's signal handler the signal is taken care of.
3027   return true;
3028 }
3029 
3030 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3031   bool chained = false;
3032   // signal-chaining
3033   if (UseSignalChaining) {
3034     struct sigaction *actp = get_chained_signal_action(sig);
3035     if (actp != NULL) {
3036       chained = call_chained_handler(actp, sig, siginfo, context);
3037     }
3038   }
3039   return chained;
3040 }
3041 
3042 size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
3043   // Creating guard page is very expensive. Java thread has HotSpot
3044   // guard pages, only enable glibc guard page for non-Java threads.
3045   // (Remember: compiler thread is a Java thread, too!)
3046   //
3047   // Aix can have different page sizes for stack (4K) and heap (64K).
3048   // As Hotspot knows only one page size, we assume the stack has
3049   // the same page size as the heap. Returning page_size() here can
3050   // cause 16 guard pages which we want to avoid.  Thus we return 4K
3051   // which will be rounded to the real page size by the OS.
3052   return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : 4 * K);
3053 }
3054 
3055 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3056   if (sigismember(&sigs, sig)) {
3057     return &sigact[sig];
3058   }
3059   return NULL;
3060 }
3061 
3062 void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3063   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3064   sigact[sig] = oldAct;
3065   sigaddset(&sigs, sig);
3066 }
3067 
3068 // for diagnostic
3069 int sigflags[NSIG];
3070 
3071 int os::Aix::get_our_sigflags(int sig) {
3072   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3073   return sigflags[sig];
3074 }
3075 
3076 void os::Aix::set_our_sigflags(int sig, int flags) {
3077   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3078   if (sig > 0 && sig < NSIG) {
3079     sigflags[sig] = flags;
3080   }
3081 }
3082 
3083 void os::Aix::set_signal_handler(int sig, bool set_installed) {
3084   // Check for overwrite.
3085   struct sigaction oldAct;
3086   sigaction(sig, (struct sigaction*)NULL, &oldAct);
3087 
3088   void* oldhand = oldAct.sa_sigaction
3089     ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3090     : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3091   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3092       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3093       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3094     if (AllowUserSignalHandlers || !set_installed) {
3095       // Do not overwrite; user takes responsibility to forward to us.
3096       return;
3097     } else if (UseSignalChaining) {
3098       // save the old handler in jvm
3099       save_preinstalled_handler(sig, oldAct);
3100       // libjsig also interposes the sigaction() call below and saves the
3101       // old sigaction on it own.
3102     } else {
3103       fatal("Encountered unexpected pre-existing sigaction handler "
3104             "%#lx for signal %d.", (long)oldhand, sig);
3105     }
3106   }
3107 
3108   struct sigaction sigAct;
3109   sigfillset(&(sigAct.sa_mask));
3110   if (!set_installed) {
3111     sigAct.sa_handler = SIG_DFL;
3112     sigAct.sa_flags = SA_RESTART;
3113   } else {
3114     sigAct.sa_sigaction = javaSignalHandler;
3115     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3116   }
3117   // Save flags, which are set by ours
3118   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3119   sigflags[sig] = sigAct.sa_flags;
3120 
3121   int ret = sigaction(sig, &sigAct, &oldAct);
3122   assert(ret == 0, "check");
3123 
3124   void* oldhand2 = oldAct.sa_sigaction
3125                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3126                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3127   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3128 }
3129 
3130 // install signal handlers for signals that HotSpot needs to
3131 // handle in order to support Java-level exception handling.
3132 void os::Aix::install_signal_handlers() {
3133   if (!signal_handlers_are_installed) {
3134     signal_handlers_are_installed = true;
3135 
3136     // signal-chaining
3137     typedef void (*signal_setting_t)();
3138     signal_setting_t begin_signal_setting = NULL;
3139     signal_setting_t end_signal_setting = NULL;
3140     begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3141                              dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3142     if (begin_signal_setting != NULL) {
3143       end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3144                              dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3145       get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3146                             dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3147       libjsig_is_loaded = true;
3148       assert(UseSignalChaining, "should enable signal-chaining");
3149     }
3150     if (libjsig_is_loaded) {
3151       // Tell libjsig jvm is setting signal handlers.
3152       (*begin_signal_setting)();
3153     }
3154 
3155     ::sigemptyset(&sigs);
3156     set_signal_handler(SIGSEGV, true);
3157     set_signal_handler(SIGPIPE, true);
3158     set_signal_handler(SIGBUS, true);
3159     set_signal_handler(SIGILL, true);
3160     set_signal_handler(SIGFPE, true);
3161     set_signal_handler(SIGTRAP, true);
3162     set_signal_handler(SIGXFSZ, true);
3163     set_signal_handler(SIGDANGER, true);
3164 
3165     if (libjsig_is_loaded) {
3166       // Tell libjsig jvm finishes setting signal handlers.
3167       (*end_signal_setting)();
3168     }
3169 
3170     // We don't activate signal checker if libjsig is in place, we trust ourselves
3171     // and if UserSignalHandler is installed all bets are off.
3172     // Log that signal checking is off only if -verbose:jni is specified.
3173     if (CheckJNICalls) {
3174       if (libjsig_is_loaded) {
3175         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3176         check_signals = false;
3177       }
3178       if (AllowUserSignalHandlers) {
3179         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3180         check_signals = false;
3181       }
3182       // Need to initialize check_signal_done.
3183       ::sigemptyset(&check_signal_done);
3184     }
3185   }
3186 }
3187 
3188 static const char* get_signal_handler_name(address handler,
3189                                            char* buf, int buflen) {
3190   int offset;
3191   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3192   if (found) {
3193     // skip directory names
3194     const char *p1, *p2;
3195     p1 = buf;
3196     size_t len = strlen(os::file_separator());
3197     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3198     // The way os::dll_address_to_library_name is implemented on Aix
3199     // right now, it always returns -1 for the offset which is not
3200     // terribly informative.
3201     // Will fix that. For now, omit the offset.
3202     jio_snprintf(buf, buflen, "%s", p1);
3203   } else {
3204     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3205   }
3206   return buf;
3207 }
3208 
3209 static void print_signal_handler(outputStream* st, int sig,
3210                                  char* buf, size_t buflen) {
3211   struct sigaction sa;
3212   sigaction(sig, NULL, &sa);
3213 
3214   st->print("%s: ", os::exception_name(sig, buf, buflen));
3215 
3216   address handler = (sa.sa_flags & SA_SIGINFO)
3217     ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3218     : CAST_FROM_FN_PTR(address, sa.sa_handler);
3219 
3220   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3221     st->print("SIG_DFL");
3222   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3223     st->print("SIG_IGN");
3224   } else {
3225     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3226   }
3227 
3228   // Print readable mask.
3229   st->print(", sa_mask[0]=");
3230   os::Posix::print_signal_set_short(st, &sa.sa_mask);
3231 
3232   address rh = VMError::get_resetted_sighandler(sig);
3233   // May be, handler was resetted by VMError?
3234   if (rh != NULL) {
3235     handler = rh;
3236     sa.sa_flags = VMError::get_resetted_sigflags(sig);
3237   }
3238 
3239   // Print textual representation of sa_flags.
3240   st->print(", sa_flags=");
3241   os::Posix::print_sa_flags(st, sa.sa_flags);
3242 
3243   // Check: is it our handler?
3244   if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3245       handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
3246     // It is our signal handler.
3247     // Check for flags, reset system-used one!
3248     if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
3249       st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
3250                 os::Aix::get_our_sigflags(sig));
3251     }
3252   }
3253   st->cr();
3254 }
3255 
3256 #define DO_SIGNAL_CHECK(sig) \
3257   if (!sigismember(&check_signal_done, sig)) \
3258     os::Aix::check_signal_handler(sig)
3259 
3260 // This method is a periodic task to check for misbehaving JNI applications
3261 // under CheckJNI, we can add any periodic checks here
3262 
3263 void os::run_periodic_checks() {
3264 
3265   if (check_signals == false) return;
3266 
3267   // SEGV and BUS if overridden could potentially prevent
3268   // generation of hs*.log in the event of a crash, debugging
3269   // such a case can be very challenging, so we absolutely
3270   // check the following for a good measure:
3271   DO_SIGNAL_CHECK(SIGSEGV);
3272   DO_SIGNAL_CHECK(SIGILL);
3273   DO_SIGNAL_CHECK(SIGFPE);
3274   DO_SIGNAL_CHECK(SIGBUS);
3275   DO_SIGNAL_CHECK(SIGPIPE);
3276   DO_SIGNAL_CHECK(SIGXFSZ);
3277   if (UseSIGTRAP) {
3278     DO_SIGNAL_CHECK(SIGTRAP);
3279   }
3280   DO_SIGNAL_CHECK(SIGDANGER);
3281 
3282   // ReduceSignalUsage allows the user to override these handlers
3283   // see comments at the very top and jvm_solaris.h
3284   if (!ReduceSignalUsage) {
3285     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3286     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3287     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3288     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3289   }
3290 
3291   DO_SIGNAL_CHECK(SR_signum);
3292 }
3293 
3294 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3295 
3296 static os_sigaction_t os_sigaction = NULL;
3297 
3298 void os::Aix::check_signal_handler(int sig) {
3299   char buf[O_BUFLEN];
3300   address jvmHandler = NULL;
3301 
3302   struct sigaction act;
3303   if (os_sigaction == NULL) {
3304     // only trust the default sigaction, in case it has been interposed
3305     os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
3306     if (os_sigaction == NULL) return;
3307   }
3308 
3309   os_sigaction(sig, (struct sigaction*)NULL, &act);
3310 
3311   address thisHandler = (act.sa_flags & SA_SIGINFO)
3312     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3313     : CAST_FROM_FN_PTR(address, act.sa_handler);
3314 
3315   switch(sig) {
3316   case SIGSEGV:
3317   case SIGBUS:
3318   case SIGFPE:
3319   case SIGPIPE:
3320   case SIGILL:
3321   case SIGXFSZ:
3322     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3323     break;
3324 
3325   case SHUTDOWN1_SIGNAL:
3326   case SHUTDOWN2_SIGNAL:
3327   case SHUTDOWN3_SIGNAL:
3328   case BREAK_SIGNAL:
3329     jvmHandler = (address)user_handler();
3330     break;
3331 
3332   default:
3333     if (sig == SR_signum) {
3334       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3335     } else {
3336       return;
3337     }
3338     break;
3339   }
3340 
3341   if (thisHandler != jvmHandler) {
3342     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3343     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3344     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3345     // No need to check this sig any longer
3346     sigaddset(&check_signal_done, sig);
3347     // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
3348     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3349       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3350                     exception_name(sig, buf, O_BUFLEN));
3351     }
3352   } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3353     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3354     tty->print("expected:");
3355     os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
3356     tty->cr();
3357     tty->print("  found:");
3358     os::Posix::print_sa_flags(tty, act.sa_flags);
3359     tty->cr();
3360     // No need to check this sig any longer
3361     sigaddset(&check_signal_done, sig);
3362   }
3363 
3364   // Dump all the signal
3365   if (sigismember(&check_signal_done, sig)) {
3366     print_signal_handlers(tty, buf, O_BUFLEN);
3367   }
3368 }
3369 
3370 // To install functions for atexit system call
3371 extern "C" {
3372   static void perfMemory_exit_helper() {
3373     perfMemory_exit();
3374   }
3375 }
3376 
3377 // This is called _before_ the most of global arguments have been parsed.
3378 void os::init(void) {
3379   // This is basic, we want to know if that ever changes.
3380   // (Shared memory boundary is supposed to be a 256M aligned.)
3381   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3382 
3383   // Record process break at startup.
3384   g_brk_at_startup = (address) ::sbrk(0);
3385   assert(g_brk_at_startup != (address) -1, "sbrk failed");
3386 
3387   // First off, we need to know whether we run on AIX or PASE, and
3388   // the OS level we run on.
3389   os::Aix::initialize_os_info();
3390 
3391   // Scan environment (SPEC1170 behaviour, etc).
3392   os::Aix::scan_environment();
3393 
3394   // Probe multipage support.
3395   query_multipage_support();
3396 
3397   // Act like we only have one page size by eliminating corner cases which
3398   // we did not support very well anyway.
3399   // We have two input conditions:
3400   // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3401   //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3402   //    setting.
3403   //    Data segment page size is important for us because it defines the thread stack page
3404   //    size, which is needed for guard page handling, stack banging etc.
3405   // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3406   //    and should be allocated with 64k pages.
3407   //
3408   // So, we do the following:
3409   // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
3410   // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
3411   // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
3412   // 64k          no              --- AIX 5.2 ? ---
3413   // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)
3414 
3415   // We explicitly leave no option to change page size, because only upgrading would work,
3416   // not downgrading (if stack page size is 64k you cannot pretend its 4k).
3417 
3418   if (g_multipage_support.datapsize == 4*K) {
3419     // datapsize = 4K. Data segment, thread stacks are 4K paged.
3420     if (g_multipage_support.can_use_64K_pages) {
3421       // .. but we are able to use 64K pages dynamically.
3422       // This would be typical for java launchers which are not linked
3423       // with datapsize=64K (like, any other launcher but our own).
3424       //
3425       // In this case it would be smart to allocate the java heap with 64K
3426       // to get the performance benefit, and to fake 64k pages for the
3427       // data segment (when dealing with thread stacks).
3428       //
3429       // However, leave a possibility to downgrade to 4K, using
3430       // -XX:-Use64KPages.
3431       if (Use64KPages) {
3432         trcVerbose("64K page mode (faked for data segment)");
3433         Aix::_page_size = 64*K;
3434       } else {
3435         trcVerbose("4K page mode (Use64KPages=off)");
3436         Aix::_page_size = 4*K;
3437       }
3438     } else {
3439       // .. and not able to allocate 64k pages dynamically. Here, just
3440       // fall back to 4K paged mode and use mmap for everything.
3441       trcVerbose("4K page mode");
3442       Aix::_page_size = 4*K;
3443       FLAG_SET_ERGO(bool, Use64KPages, false);
3444     }
3445   } else {
3446     // datapsize = 64k. Data segment, thread stacks are 64k paged.
3447     // This normally means that we can allocate 64k pages dynamically.
3448     // (There is one special case where this may be false: EXTSHM=on.
3449     // but we decided to not support that mode).
3450     assert0(g_multipage_support.can_use_64K_pages);
3451     Aix::_page_size = 64*K;
3452     trcVerbose("64K page mode");
3453     FLAG_SET_ERGO(bool, Use64KPages, true);
3454   }
3455 
3456   // For now UseLargePages is just ignored.
3457   FLAG_SET_ERGO(bool, UseLargePages, false);
3458   _page_sizes[0] = 0;
3459 
3460   // debug trace
3461   trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));
3462 
3463   // Next, we need to initialize libo4 and libperfstat libraries.
3464   if (os::Aix::on_pase()) {
3465     os::Aix::initialize_libo4();
3466   } else {
3467     os::Aix::initialize_libperfstat();
3468   }
3469 
3470   // Reset the perfstat information provided by ODM.
3471   if (os::Aix::on_aix()) {
3472     libperfstat::perfstat_reset();
3473   }
3474 
3475   // Now initialze basic system properties. Note that for some of the values we
3476   // need libperfstat etc.
3477   os::Aix::initialize_system_info();
3478 
3479   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3480 
3481   init_random(1234567);
3482 
3483   ThreadCritical::initialize();
3484 
3485   // Main_thread points to the aboriginal thread.
3486   Aix::_main_thread = pthread_self();
3487 
3488   initial_time_count = os::elapsed_counter();
3489 }
3490 
3491 // This is called _after_ the global arguments have been parsed.
3492 jint os::init_2(void) {
3493 
3494   if (os::Aix::on_pase()) {
3495     trcVerbose("Running on PASE.");
3496   } else {
3497     trcVerbose("Running on AIX (not PASE).");
3498   }
3499 
3500   trcVerbose("processor count: %d", os::_processor_count);
3501   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3502 
3503   // Initially build up the loaded dll map.
3504   LoadedLibraries::reload();
3505   if (Verbose) {
3506     trcVerbose("Loaded Libraries: ");
3507     LoadedLibraries::print(tty);
3508   }
3509 
3510   const int page_size = Aix::page_size();
3511   const int map_size = page_size;
3512 
3513   address map_address = (address) MAP_FAILED;
3514   const int prot  = PROT_READ;
3515   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3516 
3517   // Use optimized addresses for the polling page,
3518   // e.g. map it to a special 32-bit address.
3519   if (OptimizePollingPageLocation) {
3520     // architecture-specific list of address wishes:
3521     address address_wishes[] = {
3522       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3523       // PPC64: all address wishes are non-negative 32 bit values where
3524       // the lower 16 bits are all zero. we can load these addresses
3525       // with a single ppc_lis instruction.
3526       (address) 0x30000000, (address) 0x31000000,
3527       (address) 0x32000000, (address) 0x33000000,
3528       (address) 0x40000000, (address) 0x41000000,
3529       (address) 0x42000000, (address) 0x43000000,
3530       (address) 0x50000000, (address) 0x51000000,
3531       (address) 0x52000000, (address) 0x53000000,
3532       (address) 0x60000000, (address) 0x61000000,
3533       (address) 0x62000000, (address) 0x63000000
3534     };
3535     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3536 
3537     // iterate over the list of address wishes:
3538     for (int i=0; i<address_wishes_length; i++) {
3539       // Try to map with current address wish.
3540       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3541       // fail if the address is already mapped.
3542       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3543                                      map_size, prot,
3544                                      flags | MAP_FIXED,
3545                                      -1, 0);
3546       trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",
3547                    address_wishes[i], map_address + (ssize_t)page_size);
3548 
3549       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3550         // Map succeeded and map_address is at wished address, exit loop.
3551         break;
3552       }
3553 
3554       if (map_address != (address) MAP_FAILED) {
3555         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3556         ::munmap(map_address, map_size);
3557         map_address = (address) MAP_FAILED;
3558       }
3559       // Map failed, continue loop.
3560     }
3561   } // end OptimizePollingPageLocation
3562 
3563   if (map_address == (address) MAP_FAILED) {
3564     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3565   }
3566   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3567   os::set_polling_page(map_address);
3568 
3569   if (!UseMembar) {
3570     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3571     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3572     os::set_memory_serialize_page(mem_serialize_page);
3573 
3574     trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
3575         mem_serialize_page, mem_serialize_page + Aix::page_size(),
3576         Aix::page_size(), Aix::page_size());
3577   }
3578 
3579   // initialize suspend/resume support - must do this before signal_sets_init()
3580   if (SR_initialize() != 0) {
3581     perror("SR_initialize failed");
3582     return JNI_ERR;
3583   }
3584 
3585   Aix::signal_sets_init();
3586   Aix::install_signal_handlers();
3587 
3588   // Check and sets minimum stack sizes against command line options
3589   if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
3590     return JNI_ERR;
3591   }
3592 
3593   if (UseNUMA) {
3594     UseNUMA = false;
3595     warning("NUMA optimizations are not available on this OS.");
3596   }
3597 
3598   if (MaxFDLimit) {
3599     // Set the number of file descriptors to max. print out error
3600     // if getrlimit/setrlimit fails but continue regardless.
3601     struct rlimit nbr_files;
3602     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3603     if (status != 0) {
3604       log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
3605     } else {
3606       nbr_files.rlim_cur = nbr_files.rlim_max;
3607       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3608       if (status != 0) {
3609         log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
3610       }
3611     }
3612   }
3613 
3614   if (PerfAllowAtExitRegistration) {
3615     // Only register atexit functions if PerfAllowAtExitRegistration is set.
3616     // At exit functions can be delayed until process exit time, which
3617     // can be problematic for embedded VM situations. Embedded VMs should
3618     // call DestroyJavaVM() to assure that VM resources are released.
3619 
3620     // Note: perfMemory_exit_helper atexit function may be removed in
3621     // the future if the appropriate cleanup code can be added to the
3622     // VM_Exit VMOperation's doit method.
3623     if (atexit(perfMemory_exit_helper) != 0) {
3624       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3625     }
3626   }
3627 
3628   return JNI_OK;
3629 }
3630 
3631 // Mark the polling page as unreadable
3632 void os::make_polling_page_unreadable(void) {
3633   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3634     fatal("Could not disable polling page");
3635   }
3636 };
3637 
3638 // Mark the polling page as readable
3639 void os::make_polling_page_readable(void) {
3640   // Changed according to os_linux.cpp.
3641   if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3642     fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
3643   }
3644 };
3645 
3646 int os::active_processor_count() {
3647   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3648   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3649   return online_cpus;
3650 }
3651 
3652 void os::set_native_thread_name(const char *name) {
3653   // Not yet implemented.
3654   return;
3655 }
3656 
3657 bool os::distribute_processes(uint length, uint* distribution) {
3658   // Not yet implemented.
3659   return false;
3660 }
3661 
3662 bool os::bind_to_processor(uint processor_id) {
3663   // Not yet implemented.
3664   return false;
3665 }
3666 
3667 void os::SuspendedThreadTask::internal_do_task() {
3668   if (do_suspend(_thread->osthread())) {
3669     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3670     do_task(context);
3671     do_resume(_thread->osthread());
3672   }
3673 }
3674 
3675 class PcFetcher : public os::SuspendedThreadTask {
3676 public:
3677   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3678   ExtendedPC result();
3679 protected:
3680   void do_task(const os::SuspendedThreadTaskContext& context);
3681 private:
3682   ExtendedPC _epc;
3683 };
3684 
3685 ExtendedPC PcFetcher::result() {
3686   guarantee(is_done(), "task is not done yet.");
3687   return _epc;
3688 }
3689 
3690 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3691   Thread* thread = context.thread();
3692   OSThread* osthread = thread->osthread();
3693   if (osthread->ucontext() != NULL) {
3694     _epc = os::Aix::ucontext_get_pc((const ucontext_t *) context.ucontext());
3695   } else {
3696     // NULL context is unexpected, double-check this is the VMThread.
3697     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3698   }
3699 }
3700 
3701 // Suspends the target using the signal mechanism and then grabs the PC before
3702 // resuming the target. Used by the flat-profiler only
3703 ExtendedPC os::get_thread_pc(Thread* thread) {
3704   // Make sure that it is called by the watcher for the VMThread.
3705   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3706   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3707 
3708   PcFetcher fetcher(thread);
3709   fetcher.run();
3710   return fetcher.result();
3711 }
3712 
3713 ////////////////////////////////////////////////////////////////////////////////
3714 // debug support
3715 
3716 bool os::find(address addr, outputStream* st) {
3717 
3718   st->print(PTR_FORMAT ": ", addr);
3719 
3720   loaded_module_t lm;
3721   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3722       LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3723     st->print_cr("%s", lm.path);
3724     return true;
3725   }
3726 
3727   return false;
3728 }
3729 
3730 ////////////////////////////////////////////////////////////////////////////////
3731 // misc
3732 
3733 // This does not do anything on Aix. This is basically a hook for being
3734 // able to use structured exception handling (thread-local exception filters)
3735 // on, e.g., Win32.
3736 void
3737 os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
3738                          JavaCallArguments* args, Thread* thread) {
3739   f(value, method, args, thread);
3740 }
3741 
3742 void os::print_statistics() {
3743 }
3744 
3745 bool os::message_box(const char* title, const char* message) {
3746   int i;
3747   fdStream err(defaultStream::error_fd());
3748   for (i = 0; i < 78; i++) err.print_raw("=");
3749   err.cr();
3750   err.print_raw_cr(title);
3751   for (i = 0; i < 78; i++) err.print_raw("-");
3752   err.cr();
3753   err.print_raw_cr(message);
3754   for (i = 0; i < 78; i++) err.print_raw("=");
3755   err.cr();
3756 
3757   char buf[16];
3758   // Prevent process from exiting upon "read error" without consuming all CPU
3759   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3760 
3761   return buf[0] == 'y' || buf[0] == 'Y';
3762 }
3763 
3764 int os::stat(const char *path, struct stat *sbuf) {
3765   char pathbuf[MAX_PATH];
3766   if (strlen(path) > MAX_PATH - 1) {
3767     errno = ENAMETOOLONG;
3768     return -1;
3769   }
3770   os::native_path(strcpy(pathbuf, path));
3771   return ::stat(pathbuf, sbuf);
3772 }
3773 
3774 // Is a (classpath) directory empty?
3775 bool os::dir_is_empty(const char* path) {
3776   DIR *dir = NULL;
3777   struct dirent *ptr;
3778 
3779   dir = opendir(path);
3780   if (dir == NULL) return true;
3781 
3782   /* Scan the directory */
3783   bool result = true;
3784   char buf[sizeof(struct dirent) + MAX_PATH];
3785   while (result && (ptr = ::readdir(dir)) != NULL) {
3786     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3787       result = false;
3788     }
3789   }
3790   closedir(dir);
3791   return result;
3792 }
3793 
3794 // This code originates from JDK's sysOpen and open64_w
3795 // from src/solaris/hpi/src/system_md.c
3796 
3797 int os::open(const char *path, int oflag, int mode) {
3798 
3799   if (strlen(path) > MAX_PATH - 1) {
3800     errno = ENAMETOOLONG;
3801     return -1;
3802   }
3803   int fd;
3804 
3805   fd = ::open64(path, oflag, mode);
3806   if (fd == -1) return -1;
3807 
3808   // If the open succeeded, the file might still be a directory.
3809   {
3810     struct stat64 buf64;
3811     int ret = ::fstat64(fd, &buf64);
3812     int st_mode = buf64.st_mode;
3813 
3814     if (ret != -1) {
3815       if ((st_mode & S_IFMT) == S_IFDIR) {
3816         errno = EISDIR;
3817         ::close(fd);
3818         return -1;
3819       }
3820     } else {
3821       ::close(fd);
3822       return -1;
3823     }
3824   }
3825 
3826   // All file descriptors that are opened in the JVM and not
3827   // specifically destined for a subprocess should have the
3828   // close-on-exec flag set. If we don't set it, then careless 3rd
3829   // party native code might fork and exec without closing all
3830   // appropriate file descriptors (e.g. as we do in closeDescriptors in
3831   // UNIXProcess.c), and this in turn might:
3832   //
3833   // - cause end-of-file to fail to be detected on some file
3834   //   descriptors, resulting in mysterious hangs, or
3835   //
3836   // - might cause an fopen in the subprocess to fail on a system
3837   //   suffering from bug 1085341.
3838   //
3839   // (Yes, the default setting of the close-on-exec flag is a Unix
3840   // design flaw.)
3841   //
3842   // See:
3843   // 1085341: 32-bit stdio routines should support file descriptors >255
3844   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
3845   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
3846 #ifdef FD_CLOEXEC
3847   {
3848     int flags = ::fcntl(fd, F_GETFD);
3849     if (flags != -1)
3850       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
3851   }
3852 #endif
3853 
3854   return fd;
3855 }
3856 
3857 // create binary file, rewriting existing file if required
3858 int os::create_binary_file(const char* path, bool rewrite_existing) {
3859   int oflags = O_WRONLY | O_CREAT;
3860   if (!rewrite_existing) {
3861     oflags |= O_EXCL;
3862   }
3863   return ::open64(path, oflags, S_IREAD | S_IWRITE);
3864 }
3865 
3866 // return current position of file pointer
3867 jlong os::current_file_offset(int fd) {
3868   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3869 }
3870 
3871 // move file pointer to the specified offset
3872 jlong os::seek_to_file_offset(int fd, jlong offset) {
3873   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3874 }
3875 
3876 // This code originates from JDK's sysAvailable
3877 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3878 
3879 int os::available(int fd, jlong *bytes) {
3880   jlong cur, end;
3881   int mode;
3882   struct stat64 buf64;
3883 
3884   if (::fstat64(fd, &buf64) >= 0) {
3885     mode = buf64.st_mode;
3886     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
3887       int n;
3888       if (::ioctl(fd, FIONREAD, &n) >= 0) {
3889         *bytes = n;
3890         return 1;
3891       }
3892     }
3893   }
3894   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
3895     return 0;
3896   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
3897     return 0;
3898   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
3899     return 0;
3900   }
3901   *bytes = end - cur;
3902   return 1;
3903 }
3904 
3905 // Map a block of memory.
3906 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3907                         char *addr, size_t bytes, bool read_only,
3908                         bool allow_exec) {
3909   int prot;
3910   int flags = MAP_PRIVATE;
3911 
3912   if (read_only) {
3913     prot = PROT_READ;
3914     flags = MAP_SHARED;
3915   } else {
3916     prot = PROT_READ | PROT_WRITE;
3917     flags = MAP_PRIVATE;
3918   }
3919 
3920   if (allow_exec) {
3921     prot |= PROT_EXEC;
3922   }
3923 
3924   if (addr != NULL) {
3925     flags |= MAP_FIXED;
3926   }
3927 
3928   // Allow anonymous mappings if 'fd' is -1.
3929   if (fd == -1) {
3930     flags |= MAP_ANONYMOUS;
3931   }
3932 
3933   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
3934                                      fd, file_offset);
3935   if (mapped_address == MAP_FAILED) {
3936     return NULL;
3937   }
3938   return mapped_address;
3939 }
3940 
3941 // Remap a block of memory.
3942 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
3943                           char *addr, size_t bytes, bool read_only,
3944                           bool allow_exec) {
3945   // same as map_memory() on this OS
3946   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
3947                         allow_exec);
3948 }
3949 
3950 // Unmap a block of memory.
3951 bool os::pd_unmap_memory(char* addr, size_t bytes) {
3952   return munmap(addr, bytes) == 0;
3953 }
3954 
3955 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
3956 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
3957 // of a thread.
3958 //
3959 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
3960 // the fast estimate available on the platform.
3961 
3962 jlong os::current_thread_cpu_time() {
3963   // return user + sys since the cost is the same
3964   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
3965   assert(n >= 0, "negative CPU time");
3966   return n;
3967 }
3968 
3969 jlong os::thread_cpu_time(Thread* thread) {
3970   // consistent with what current_thread_cpu_time() returns
3971   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
3972   assert(n >= 0, "negative CPU time");
3973   return n;
3974 }
3975 
3976 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
3977   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
3978   assert(n >= 0, "negative CPU time");
3979   return n;
3980 }
3981 
3982 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
3983   bool error = false;
3984 
3985   jlong sys_time = 0;
3986   jlong user_time = 0;
3987 
3988   // Reimplemented using getthrds64().
3989   //
3990   // Works like this:
3991   // For the thread in question, get the kernel thread id. Then get the
3992   // kernel thread statistics using that id.
3993   //
3994   // This only works of course when no pthread scheduling is used,
3995   // i.e. there is a 1:1 relationship to kernel threads.
3996   // On AIX, see AIXTHREAD_SCOPE variable.
3997 
3998   pthread_t pthtid = thread->osthread()->pthread_id();
3999 
4000   // retrieve kernel thread id for the pthread:
4001   tid64_t tid = 0;
4002   struct __pthrdsinfo pinfo;
4003   // I just love those otherworldly IBM APIs which force me to hand down
4004   // dummy buffers for stuff I dont care for...
4005   char dummy[1];
4006   int dummy_size = sizeof(dummy);
4007   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4008                           dummy, &dummy_size) == 0) {
4009     tid = pinfo.__pi_tid;
4010   } else {
4011     tty->print_cr("pthread_getthrds_np failed.");
4012     error = true;
4013   }
4014 
4015   // retrieve kernel timing info for that kernel thread
4016   if (!error) {
4017     struct thrdentry64 thrdentry;
4018     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4019       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4020       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4021     } else {
4022       tty->print_cr("pthread_getthrds_np failed.");
4023       error = true;
4024     }
4025   }
4026 
4027   if (p_sys_time) {
4028     *p_sys_time = sys_time;
4029   }
4030 
4031   if (p_user_time) {
4032     *p_user_time = user_time;
4033   }
4034 
4035   if (error) {
4036     return false;
4037   }
4038 
4039   return true;
4040 }
4041 
4042 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4043   jlong sys_time;
4044   jlong user_time;
4045 
4046   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4047     return -1;
4048   }
4049 
4050   return user_sys_cpu_time ? sys_time + user_time : user_time;
4051 }
4052 
4053 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4054   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4055   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4056   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4057   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4058 }
4059 
4060 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4061   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4062   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4063   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4064   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4065 }
4066 
4067 bool os::is_thread_cpu_time_supported() {
4068   return true;
4069 }
4070 
4071 // System loadavg support. Returns -1 if load average cannot be obtained.
4072 // For now just return the system wide load average (no processor sets).
4073 int os::loadavg(double values[], int nelem) {
4074 
4075   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4076   guarantee(values, "argument error");
4077 
4078   if (os::Aix::on_pase()) {
4079 
4080     // AS/400 PASE: use libo4 porting library
4081     double v[3] = { 0.0, 0.0, 0.0 };
4082 
4083     if (libo4::get_load_avg(v, v + 1, v + 2)) {
4084       for (int i = 0; i < nelem; i ++) {
4085         values[i] = v[i];
4086       }
4087       return nelem;
4088     } else {
4089       return -1;
4090     }
4091 
4092   } else {
4093 
4094     // AIX: use libperfstat
4095     libperfstat::cpuinfo_t ci;
4096     if (libperfstat::get_cpuinfo(&ci)) {
4097       for (int i = 0; i < nelem; i++) {
4098         values[i] = ci.loadavg[i];
4099       }
4100     } else {
4101       return -1;
4102     }
4103     return nelem;
4104   }
4105 }
4106 
4107 void os::pause() {
4108   char filename[MAX_PATH];
4109   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4110     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4111   } else {
4112     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4113   }
4114 
4115   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4116   if (fd != -1) {
4117     struct stat buf;
4118     ::close(fd);
4119     while (::stat(filename, &buf) == 0) {
4120       (void)::poll(NULL, 0, 100);
4121     }
4122   } else {
4123     trcVerbose("Could not open pause file '%s', continuing immediately.", filename);
4124   }
4125 }
4126 
4127 bool os::Aix::is_primordial_thread() {
4128   if (pthread_self() == (pthread_t)1) {
4129     return true;
4130   } else {
4131     return false;
4132   }
4133 }
4134 
4135 // OS recognitions (PASE/AIX, OS level) call this before calling any
4136 // one of Aix::on_pase(), Aix::os_version() static
4137 void os::Aix::initialize_os_info() {
4138 
4139   assert(_on_pase == -1 && _os_version == 0, "already called.");
4140 
4141   struct utsname uts;
4142   memset(&uts, 0, sizeof(uts));
4143   strcpy(uts.sysname, "?");
4144   if (::uname(&uts) == -1) {
4145     trcVerbose("uname failed (%d)", errno);
4146     guarantee(0, "Could not determine whether we run on AIX or PASE");
4147   } else {
4148     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4149                "node \"%s\" machine \"%s\"\n",
4150                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4151     const int major = atoi(uts.version);
4152     assert(major > 0, "invalid OS version");
4153     const int minor = atoi(uts.release);
4154     assert(minor > 0, "invalid OS release");
4155     _os_version = (major << 24) | (minor << 16);
4156     char ver_str[20] = {0};
4157     char *name_str = "unknown OS";
4158     if (strcmp(uts.sysname, "OS400") == 0) {
4159       // We run on AS/400 PASE. We do not support versions older than V5R4M0.
4160       _on_pase = 1;
4161       if (os_version_short() < 0x0504) {
4162         trcVerbose("OS/400 releases older than V5R4M0 not supported.");
4163         assert(false, "OS/400 release too old.");
4164       }
4165       name_str = "OS/400 (pase)";
4166       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
4167     } else if (strcmp(uts.sysname, "AIX") == 0) {
4168       // We run on AIX. We do not support versions older than AIX 5.3.
4169       _on_pase = 0;
4170       // Determine detailed AIX version: Version, Release, Modification, Fix Level.
4171       odmWrapper::determine_os_kernel_version(&_os_version);
4172       if (os_version_short() < 0x0503) {
4173         trcVerbose("AIX release older than AIX 5.3 not supported.");
4174         assert(false, "AIX release too old.");
4175       }
4176       name_str = "AIX";
4177       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
4178                    major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
4179     } else {
4180       assert(false, name_str);
4181     }
4182     trcVerbose("We run on %s %s", name_str, ver_str);
4183   }
4184 
4185   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4186 } // end: os::Aix::initialize_os_info()
4187 
4188 // Scan environment for important settings which might effect the VM.
4189 // Trace out settings. Warn about invalid settings and/or correct them.
4190 //
4191 // Must run after os::Aix::initialue_os_info().
4192 void os::Aix::scan_environment() {
4193 
4194   char* p;
4195   int rc;
4196 
4197   // Warn explicity if EXTSHM=ON is used. That switch changes how
4198   // System V shared memory behaves. One effect is that page size of
4199   // shared memory cannot be change dynamically, effectivly preventing
4200   // large pages from working.
4201   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4202   // recommendation is (in OSS notes) to switch it off.
4203   p = ::getenv("EXTSHM");
4204   trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
4205   if (p && strcasecmp(p, "ON") == 0) {
4206     _extshm = 1;
4207     trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
4208     if (!AllowExtshm) {
4209       // We allow under certain conditions the user to continue. However, we want this
4210       // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
4211       // that the VM is not able to allocate 64k pages for the heap.
4212       // We do not want to run with reduced performance.
4213       vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
4214     }
4215   } else {
4216     _extshm = 0;
4217   }
4218 
4219   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4220   // Not tested, not supported.
4221   //
4222   // Note that it might be worth the trouble to test and to require it, if only to
4223   // get useful return codes for mprotect.
4224   //
4225   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4226   // exec() ? before loading the libjvm ? ....)
4227   p = ::getenv("XPG_SUS_ENV");
4228   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4229   if (p && strcmp(p, "ON") == 0) {
4230     _xpg_sus_mode = 1;
4231     trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
4232     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4233     // clobber address ranges. If we ever want to support that, we have to do some
4234     // testing first.
4235     guarantee(false, "XPG_SUS_ENV=ON not supported");
4236   } else {
4237     _xpg_sus_mode = 0;
4238   }
4239 
4240   if (os::Aix::on_pase()) {
4241     p = ::getenv("QIBM_MULTI_THREADED");
4242     trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
4243   }
4244 
4245   p = ::getenv("LDR_CNTRL");
4246   trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
4247   if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
4248     if (p && ::strstr(p, "TEXTPSIZE")) {
4249       trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
4250         "you may experience hangs or crashes on OS/400 V7R1.");
4251     }
4252   }
4253 
4254   p = ::getenv("AIXTHREAD_GUARDPAGES");
4255   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4256 
4257 } // end: os::Aix::scan_environment()
4258 
4259 // PASE: initialize the libo4 library (PASE porting library).
4260 void os::Aix::initialize_libo4() {
4261   guarantee(os::Aix::on_pase(), "OS/400 only.");
4262   if (!libo4::init()) {
4263     trcVerbose("libo4 initialization failed.");
4264     assert(false, "libo4 initialization failed");
4265   } else {
4266     trcVerbose("libo4 initialized.");
4267   }
4268 }
4269 
4270 // AIX: initialize the libperfstat library.
4271 void os::Aix::initialize_libperfstat() {
4272   assert(os::Aix::on_aix(), "AIX only");
4273   if (!libperfstat::init()) {
4274     trcVerbose("libperfstat initialization failed.");
4275     assert(false, "libperfstat initialization failed");
4276   } else {
4277     trcVerbose("libperfstat initialized.");
4278   }
4279 }
4280 
4281 /////////////////////////////////////////////////////////////////////////////
4282 // thread stack
4283 
4284 // Function to query the current stack size using pthread_getthrds_np.
4285 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4286   // This only works when invoked on a pthread. As we agreed not to use
4287   // primordial threads anyway, I assert here.
4288   guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4289 
4290   // Information about this api can be found (a) in the pthread.h header and
4291   // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4292   //
4293   // The use of this API to find out the current stack is kind of undefined.
4294   // But after a lot of tries and asking IBM about it, I concluded that it is safe
4295   // enough for cases where I let the pthread library create its stacks. For cases
4296   // where I create an own stack and pass this to pthread_create, it seems not to
4297   // work (the returned stack size in that case is 0).
4298 
4299   pthread_t tid = pthread_self();
4300   struct __pthrdsinfo pinfo;
4301   char dummy[1]; // Just needed to satisfy pthread_getthrds_np.
4302   int dummy_size = sizeof(dummy);
4303 
4304   memset(&pinfo, 0, sizeof(pinfo));
4305 
4306   const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4307                                      sizeof(pinfo), dummy, &dummy_size);
4308 
4309   if (rc != 0) {
4310     assert0(false);
4311     trcVerbose("pthread_getthrds_np failed (%d)", rc);
4312     return false;
4313   }
4314   guarantee0(pinfo.__pi_stackend);
4315 
4316   // The following may happen when invoking pthread_getthrds_np on a pthread
4317   // running on a user provided stack (when handing down a stack to pthread
4318   // create, see pthread_attr_setstackaddr).
4319   // Not sure what to do then.
4320 
4321   guarantee0(pinfo.__pi_stacksize);
4322 
4323   // Note: we get three values from pthread_getthrds_np:
4324   //       __pi_stackaddr, __pi_stacksize, __pi_stackend
4325   //
4326   // high addr    ---------------------
4327   //
4328   //    |         pthread internal data, like ~2K
4329   //    |
4330   //    |         ---------------------   __pi_stackend   (usually not page aligned, (xxxxF890))
4331   //    |
4332   //    |
4333   //    |
4334   //    |
4335   //    |
4336   //    |
4337   //    |          ---------------------   (__pi_stackend - __pi_stacksize)
4338   //    |
4339   //    |          padding to align the following AIX guard pages, if enabled.
4340   //    |
4341   //    V          ---------------------   __pi_stackaddr
4342   //
4343   // low addr      AIX guard pages, if enabled (AIXTHREAD_GUARDPAGES > 0)
4344   //
4345 
4346   address stack_base = (address)(pinfo.__pi_stackend);
4347   address stack_low_addr = (address)align_ptr_up(pinfo.__pi_stackaddr,
4348     os::vm_page_size());
4349   size_t stack_size = stack_base - stack_low_addr;
4350 
4351   if (p_stack_base) {
4352     *p_stack_base = stack_base;
4353   }
4354 
4355   if (p_stack_size) {
4356     *p_stack_size = stack_size;
4357   }
4358 
4359   return true;
4360 }
4361 
4362 // Get the current stack base from the OS (actually, the pthread library).
4363 address os::current_stack_base() {
4364   address p;
4365   query_stack_dimensions(&p, 0);
4366   return p;
4367 }
4368 
4369 // Get the current stack size from the OS (actually, the pthread library).
4370 size_t os::current_stack_size() {
4371   size_t s;
4372   query_stack_dimensions(0, &s);
4373   return s;
4374 }
4375 
4376 // Refer to the comments in os_solaris.cpp park-unpark.
4377 
4378 // utility to compute the abstime argument to timedwait:
4379 // millis is the relative timeout time
4380 // abstime will be the absolute timeout time
4381 // TODO: replace compute_abstime() with unpackTime()
4382 
4383 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4384   if (millis < 0) millis = 0;
4385   struct timeval now;
4386   int status = gettimeofday(&now, NULL);
4387   assert(status == 0, "gettimeofday");
4388   jlong seconds = millis / 1000;
4389   millis %= 1000;
4390   if (seconds > 50000000) { // see man cond_timedwait(3T)
4391     seconds = 50000000;
4392   }
4393   abstime->tv_sec = now.tv_sec  + seconds;
4394   long       usec = now.tv_usec + millis * 1000;
4395   if (usec >= 1000000) {
4396     abstime->tv_sec += 1;
4397     usec -= 1000000;
4398   }
4399   abstime->tv_nsec = usec * 1000;
4400   return abstime;
4401 }
4402 
4403 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4404 // Conceptually TryPark() should be equivalent to park(0).
4405 
4406 int os::PlatformEvent::TryPark() {
4407   for (;;) {
4408     const int v = _Event;
4409     guarantee ((v == 0) || (v == 1), "invariant");
4410     if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4411   }
4412 }
4413 
4414 void os::PlatformEvent::park() {       // AKA "down()"
4415   // Invariant: Only the thread associated with the Event/PlatformEvent
4416   // may call park().
4417   // TODO: assert that _Assoc != NULL or _Assoc == Self
4418   int v;
4419   for (;;) {
4420     v = _Event;
4421     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4422   }
4423   guarantee (v >= 0, "invariant");
4424   if (v == 0) {
4425     // Do this the hard way by blocking ...
4426     int status = pthread_mutex_lock(_mutex);
4427     assert_status(status == 0, status, "mutex_lock");
4428     guarantee (_nParked == 0, "invariant");
4429     ++ _nParked;
4430     while (_Event < 0) {
4431       status = pthread_cond_wait(_cond, _mutex);
4432       assert_status(status == 0 || status == ETIMEDOUT, status, "cond_timedwait");
4433     }
4434     -- _nParked;
4435 
4436     // In theory we could move the ST of 0 into _Event past the unlock(),
4437     // but then we'd need a MEMBAR after the ST.
4438     _Event = 0;
4439     status = pthread_mutex_unlock(_mutex);
4440     assert_status(status == 0, status, "mutex_unlock");
4441   }
4442   guarantee (_Event >= 0, "invariant");
4443 }
4444 
4445 int os::PlatformEvent::park(jlong millis) {
4446   guarantee (_nParked == 0, "invariant");
4447 
4448   int v;
4449   for (;;) {
4450     v = _Event;
4451     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4452   }
4453   guarantee (v >= 0, "invariant");
4454   if (v != 0) return OS_OK;
4455 
4456   // We do this the hard way, by blocking the thread.
4457   // Consider enforcing a minimum timeout value.
4458   struct timespec abst;
4459   compute_abstime(&abst, millis);
4460 
4461   int ret = OS_TIMEOUT;
4462   int status = pthread_mutex_lock(_mutex);
4463   assert_status(status == 0, status, "mutex_lock");
4464   guarantee (_nParked == 0, "invariant");
4465   ++_nParked;
4466 
4467   // Object.wait(timo) will return because of
4468   // (a) notification
4469   // (b) timeout
4470   // (c) thread.interrupt
4471   //
4472   // Thread.interrupt and object.notify{All} both call Event::set.
4473   // That is, we treat thread.interrupt as a special case of notification.
4474   // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4475   // We assume all ETIME returns are valid.
4476   //
4477   // TODO: properly differentiate simultaneous notify+interrupt.
4478   // In that case, we should propagate the notify to another waiter.
4479 
4480   while (_Event < 0) {
4481     status = pthread_cond_timedwait(_cond, _mutex, &abst);
4482     assert_status(status == 0 || status == ETIMEDOUT,
4483                   status, "cond_timedwait");
4484     if (!FilterSpuriousWakeups) break;         // previous semantics
4485     if (status == ETIMEDOUT) break;
4486     // We consume and ignore EINTR and spurious wakeups.
4487   }
4488   --_nParked;
4489   if (_Event >= 0) {
4490      ret = OS_OK;
4491   }
4492   _Event = 0;
4493   status = pthread_mutex_unlock(_mutex);
4494   assert_status(status == 0, status, "mutex_unlock");
4495   assert (_nParked == 0, "invariant");
4496   return ret;
4497 }
4498 
4499 void os::PlatformEvent::unpark() {
4500   int v, AnyWaiters;
4501   for (;;) {
4502     v = _Event;
4503     if (v > 0) {
4504       // The LD of _Event could have reordered or be satisfied
4505       // by a read-aside from this processor's write buffer.
4506       // To avoid problems execute a barrier and then
4507       // ratify the value.
4508       OrderAccess::fence();
4509       if (_Event == v) return;
4510       continue;
4511     }
4512     if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
4513   }
4514   if (v < 0) {
4515     // Wait for the thread associated with the event to vacate
4516     int status = pthread_mutex_lock(_mutex);
4517     assert_status(status == 0, status, "mutex_lock");
4518     AnyWaiters = _nParked;
4519 
4520     if (AnyWaiters != 0) {
4521       // We intentional signal *after* dropping the lock
4522       // to avoid a common class of futile wakeups.
4523       status = pthread_cond_signal(_cond);
4524       assert_status(status == 0, status, "cond_signal");
4525     }
4526     // Mutex should be locked for pthread_cond_signal(_cond).
4527     status = pthread_mutex_unlock(_mutex);
4528     assert_status(status == 0, status, "mutex_unlock");
4529   }
4530 
4531   // Note that we signal() _after dropping the lock for "immortal" Events.
4532   // This is safe and avoids a common class of futile wakeups. In rare
4533   // circumstances this can cause a thread to return prematurely from
4534   // cond_{timed}wait() but the spurious wakeup is benign and the victim will
4535   // simply re-test the condition and re-park itself.
4536 }
4537 
4538 
4539 // JSR166
4540 // -------------------------------------------------------
4541 
4542 //
4543 // The solaris and linux implementations of park/unpark are fairly
4544 // conservative for now, but can be improved. They currently use a
4545 // mutex/condvar pair, plus a a count.
4546 // Park decrements count if > 0, else does a condvar wait. Unpark
4547 // sets count to 1 and signals condvar. Only one thread ever waits
4548 // on the condvar. Contention seen when trying to park implies that someone
4549 // is unparking you, so don't wait. And spurious returns are fine, so there
4550 // is no need to track notifications.
4551 //
4552 
4553 #define MAX_SECS 100000000
4554 //
4555 // This code is common to linux and solaris and will be moved to a
4556 // common place in dolphin.
4557 //
4558 // The passed in time value is either a relative time in nanoseconds
4559 // or an absolute time in milliseconds. Either way it has to be unpacked
4560 // into suitable seconds and nanoseconds components and stored in the
4561 // given timespec structure.
4562 // Given time is a 64-bit value and the time_t used in the timespec is only
4563 // a signed-32-bit value (except on 64-bit Linux) we have to watch for
4564 // overflow if times way in the future are given. Further on Solaris versions
4565 // prior to 10 there is a restriction (see cond_timedwait) that the specified
4566 // number of seconds, in abstime, is less than current_time + 100,000,000.
4567 // As it will be 28 years before "now + 100000000" will overflow we can
4568 // ignore overflow and just impose a hard-limit on seconds using the value
4569 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
4570 // years from "now".
4571 //
4572 
4573 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
4574   assert (time > 0, "convertTime");
4575 
4576   struct timeval now;
4577   int status = gettimeofday(&now, NULL);
4578   assert(status == 0, "gettimeofday");
4579 
4580   time_t max_secs = now.tv_sec + MAX_SECS;
4581 
4582   if (isAbsolute) {
4583     jlong secs = time / 1000;
4584     if (secs > max_secs) {
4585       absTime->tv_sec = max_secs;
4586     }
4587     else {
4588       absTime->tv_sec = secs;
4589     }
4590     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4591   }
4592   else {
4593     jlong secs = time / NANOSECS_PER_SEC;
4594     if (secs >= MAX_SECS) {
4595       absTime->tv_sec = max_secs;
4596       absTime->tv_nsec = 0;
4597     }
4598     else {
4599       absTime->tv_sec = now.tv_sec + secs;
4600       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4601       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4602         absTime->tv_nsec -= NANOSECS_PER_SEC;
4603         ++absTime->tv_sec; // note: this must be <= max_secs
4604       }
4605     }
4606   }
4607   assert(absTime->tv_sec >= 0, "tv_sec < 0");
4608   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4609   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4610   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4611 }
4612 
4613 void Parker::park(bool isAbsolute, jlong time) {
4614   // Optional fast-path check:
4615   // Return immediately if a permit is available.
4616   if (_counter > 0) {
4617     _counter = 0;
4618     OrderAccess::fence();
4619     return;
4620   }
4621 
4622   Thread* thread = Thread::current();
4623   assert(thread->is_Java_thread(), "Must be JavaThread");
4624   JavaThread *jt = (JavaThread *)thread;
4625 
4626   // Optional optimization -- avoid state transitions if there's an interrupt pending.
4627   // Check interrupt before trying to wait
4628   if (Thread::is_interrupted(thread, false)) {
4629     return;
4630   }
4631 
4632   // Next, demultiplex/decode time arguments
4633   timespec absTime;
4634   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
4635     return;
4636   }
4637   if (time > 0) {
4638     unpackTime(&absTime, isAbsolute, time);
4639   }
4640 
4641   // Enter safepoint region
4642   // Beware of deadlocks such as 6317397.
4643   // The per-thread Parker:: mutex is a classic leaf-lock.
4644   // In particular a thread must never block on the Threads_lock while
4645   // holding the Parker:: mutex. If safepoints are pending both the
4646   // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
4647   ThreadBlockInVM tbivm(jt);
4648 
4649   // Don't wait if cannot get lock since interference arises from
4650   // unblocking. Also. check interrupt before trying wait
4651   if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4652     return;
4653   }
4654 
4655   int status;
4656   if (_counter > 0) { // no wait needed
4657     _counter = 0;
4658     status = pthread_mutex_unlock(_mutex);
4659     assert (status == 0, "invariant");
4660     OrderAccess::fence();
4661     return;
4662   }
4663 
4664 #ifdef ASSERT
4665   // Don't catch signals while blocked; let the running threads have the signals.
4666   // (This allows a debugger to break into the running thread.)
4667   sigset_t oldsigs;
4668   sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
4669   pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4670 #endif
4671 
4672   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4673   jt->set_suspend_equivalent();
4674   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4675 
4676   if (time == 0) {
4677     status = pthread_cond_wait (_cond, _mutex);
4678   } else {
4679     status = pthread_cond_timedwait (_cond, _mutex, &absTime);
4680   }
4681   assert_status(status == 0 || status == EINTR ||
4682                 status == ETIME || status == ETIMEDOUT,
4683                 status, "cond_timedwait");
4684 
4685 #ifdef ASSERT
4686   pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4687 #endif
4688 
4689   _counter = 0;
4690   status = pthread_mutex_unlock(_mutex);
4691   assert_status(status == 0, status, "invariant");
4692   // If externally suspended while waiting, re-suspend
4693   if (jt->handle_special_suspend_equivalent_condition()) {
4694     jt->java_suspend_self();
4695   }
4696 
4697   OrderAccess::fence();
4698 }
4699 
4700 void Parker::unpark() {
4701   int s, status;
4702   status = pthread_mutex_lock(_mutex);
4703   assert (status == 0, "invariant");
4704   s = _counter;
4705   _counter = 1;
4706   if (s < 1) {
4707     status = pthread_mutex_unlock(_mutex);
4708     assert (status == 0, "invariant");
4709     status = pthread_cond_signal (_cond);
4710     assert (status == 0, "invariant");
4711   } else {
4712     pthread_mutex_unlock(_mutex);
4713     assert (status == 0, "invariant");
4714   }
4715 }
4716 
4717 extern char** environ;
4718 
4719 // Run the specified command in a separate process. Return its exit value,
4720 // or -1 on failure (e.g. can't fork a new process).
4721 // Unlike system(), this function can be called from signal handler. It
4722 // doesn't block SIGINT et al.
4723 int os::fork_and_exec(char* cmd) {
4724   char * argv[4] = {"sh", "-c", cmd, NULL};
4725 
4726   pid_t pid = fork();
4727 
4728   if (pid < 0) {
4729     // fork failed
4730     return -1;
4731 
4732   } else if (pid == 0) {
4733     // child process
4734 
4735     // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4736     execve("/usr/bin/sh", argv, environ);
4737 
4738     // execve failed
4739     _exit(-1);
4740 
4741   } else {
4742     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4743     // care about the actual exit code, for now.
4744 
4745     int status;
4746 
4747     // Wait for the child process to exit. This returns immediately if
4748     // the child has already exited. */
4749     while (waitpid(pid, &status, 0) < 0) {
4750       switch (errno) {
4751         case ECHILD: return 0;
4752         case EINTR: break;
4753         default: return -1;
4754       }
4755     }
4756 
4757     if (WIFEXITED(status)) {
4758       // The child exited normally; get its exit code.
4759       return WEXITSTATUS(status);
4760     } else if (WIFSIGNALED(status)) {
4761       // The child exited because of a signal.
4762       // The best value to return is 0x80 + signal number,
4763       // because that is what all Unix shells do, and because
4764       // it allows callers to distinguish between process exit and
4765       // process death by signal.
4766       return 0x80 + WTERMSIG(status);
4767     } else {
4768       // Unknown exit code; pass it through.
4769       return status;
4770     }
4771   }
4772   return -1;
4773 }
4774 
4775 // is_headless_jre()
4776 //
4777 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
4778 // in order to report if we are running in a headless jre.
4779 //
4780 // Since JDK8 xawt/libmawt.so is moved into the same directory
4781 // as libawt.so, and renamed libawt_xawt.so
4782 bool os::is_headless_jre() {
4783   struct stat statbuf;
4784   char buf[MAXPATHLEN];
4785   char libmawtpath[MAXPATHLEN];
4786   const char *xawtstr = "/xawt/libmawt.so";
4787   const char *new_xawtstr = "/libawt_xawt.so";
4788 
4789   char *p;
4790 
4791   // Get path to libjvm.so
4792   os::jvm_path(buf, sizeof(buf));
4793 
4794   // Get rid of libjvm.so
4795   p = strrchr(buf, '/');
4796   if (p == NULL) return false;
4797   else *p = '\0';
4798 
4799   // Get rid of client or server
4800   p = strrchr(buf, '/');
4801   if (p == NULL) return false;
4802   else *p = '\0';
4803 
4804   // check xawt/libmawt.so
4805   strcpy(libmawtpath, buf);
4806   strcat(libmawtpath, xawtstr);
4807   if (::stat(libmawtpath, &statbuf) == 0) return false;
4808 
4809   // check libawt_xawt.so
4810   strcpy(libmawtpath, buf);
4811   strcat(libmawtpath, new_xawtstr);
4812   if (::stat(libmawtpath, &statbuf) == 0) return false;
4813 
4814   return true;
4815 }
4816 
4817 // Get the default path to the core file
4818 // Returns the length of the string
4819 int os::get_core_path(char* buffer, size_t bufferSize) {
4820   const char* p = get_current_directory(buffer, bufferSize);
4821 
4822   if (p == NULL) {
4823     assert(p != NULL, "failed to get current directory");
4824     return 0;
4825   }
4826 
4827   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4828                                                p, current_process_id());
4829 
4830   return strlen(buffer);
4831 }
4832 
4833 #ifndef PRODUCT
4834 void TestReserveMemorySpecial_test() {
4835   // No tests available for this platform
4836 }
4837 #endif
4838 
4839 bool os::start_debugging(char *buf, int buflen) {
4840   int len = (int)strlen(buf);
4841   char *p = &buf[len];
4842 
4843   jio_snprintf(p, buflen -len,
4844                  "\n\n"
4845                  "Do you want to debug the problem?\n\n"
4846                  "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
4847                  "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
4848                  "Otherwise, press RETURN to abort...",
4849                  os::current_process_id(),
4850                  os::current_thread_id(), thread_self());
4851 
4852   bool yes = os::message_box("Unexpected Error", buf);
4853 
4854   if (yes) {
4855     // yes, user asked VM to launch debugger
4856     jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
4857 
4858     os::fork_and_exec(buf);
4859     yes = false;
4860   }
4861   return yes;
4862 }
4863 
4864 static inline time_t get_mtime(const char* filename) {
4865   struct stat st;
4866   int ret = os::stat(filename, &st);
4867   assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
4868   return st.st_mtime;
4869 }
4870 
4871 int os::compare_file_modified_times(const char* file1, const char* file2) {
4872   time_t t1 = get_mtime(file1);
4873   time_t t2 = get_mtime(file2);
4874   return t1 - t2;
4875 }