1 /*
   2  * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright 2012, 2015 SAP AG. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 // According to the AIX OS doc #pragma alloca must be used
  27 // with C++ compiler before referencing the function alloca()
  28 #pragma alloca
  29 
  30 // no precompiled headers
  31 #include "classfile/classLoader.hpp"
  32 #include "classfile/systemDictionary.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "jvm_aix.h"
  39 #include "libperfstat_aix.hpp"
  40 #include "loadlib_aix.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "memory/filemap.hpp"
  43 #include "misc_aix.hpp"
  44 #include "mutex_aix.inline.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "os_aix.inline.hpp"
  47 #include "os_share_aix.hpp"
  48 #include "porting_aix.hpp"
  49 #include "prims/jniFastGetField.hpp"
  50 #include "prims/jvm.h"
  51 #include "prims/jvm_misc.hpp"
  52 #include "runtime/arguments.hpp"
  53 #include "runtime/atomic.inline.hpp"
  54 #include "runtime/extendedPC.hpp"
  55 #include "runtime/globals.hpp"
  56 #include "runtime/interfaceSupport.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/javaCalls.hpp"
  59 #include "runtime/mutexLocker.hpp"
  60 #include "runtime/objectMonitor.hpp"
  61 #include "runtime/orderAccess.inline.hpp"
  62 #include "runtime/os.hpp"
  63 #include "runtime/osThread.hpp"
  64 #include "runtime/perfMemory.hpp"
  65 #include "runtime/sharedRuntime.hpp"
  66 #include "runtime/statSampler.hpp"
  67 #include "runtime/stubRoutines.hpp"
  68 #include "runtime/thread.inline.hpp"
  69 #include "runtime/threadCritical.hpp"
  70 #include "runtime/timer.hpp"
  71 #include "runtime/vm_version.hpp"
  72 #include "services/attachListener.hpp"
  73 #include "services/runtimeService.hpp"
  74 #include "utilities/decoder.hpp"
  75 #include "utilities/defaultStream.hpp"
  76 #include "utilities/events.hpp"
  77 #include "utilities/growableArray.hpp"
  78 #include "utilities/vmError.hpp"
  79 
  80 // put OS-includes here (sorted alphabetically)
  81 #include <errno.h>
  82 #include <fcntl.h>
  83 #include <inttypes.h>
  84 #include <poll.h>
  85 #include <procinfo.h>
  86 #include <pthread.h>
  87 #include <pwd.h>
  88 #include <semaphore.h>
  89 #include <signal.h>
  90 #include <stdint.h>
  91 #include <stdio.h>
  92 #include <string.h>
  93 #include <unistd.h>
  94 #include <sys/ioctl.h>
  95 #include <sys/ipc.h>
  96 #include <sys/mman.h>
  97 #include <sys/resource.h>
  98 #include <sys/select.h>
  99 #include <sys/shm.h>
 100 #include <sys/socket.h>
 101 #include <sys/stat.h>
 102 #include <sys/sysinfo.h>
 103 #include <sys/systemcfg.h>
 104 #include <sys/time.h>
 105 #include <sys/times.h>
 106 #include <sys/types.h>
 107 #include <sys/utsname.h>
 108 #include <sys/vminfo.h>
 109 #include <sys/wait.h>
 110 
 111 // If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
 112 // getrusage() is prepared to handle the associated failure.
 113 #ifndef RUSAGE_THREAD
 114 #define RUSAGE_THREAD   (1)               /* only the calling thread */
 115 #endif
 116 
 117 // PPC port
 118 static const uintx Use64KPagesThreshold       = 1*M;
 119 static const uintx MaxExpectedDataSegmentSize = SIZE_4G*2;
 120 
 121 // Add missing declarations (should be in procinfo.h but isn't until AIX 6.1).
 122 #if !defined(_AIXVERSION_610)
 123 extern "C" {
 124   int getthrds64(pid_t ProcessIdentifier,
 125                  struct thrdentry64* ThreadBuffer,
 126                  int ThreadSize,
 127                  tid64_t* IndexPointer,
 128                  int Count);
 129 }
 130 #endif
 131 
 132 #define MAX_PATH (2 * K)
 133 
 134 // for timer info max values which include all bits
 135 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 136 // for multipage initialization error analysis (in 'g_multipage_error')
 137 #define ERROR_MP_OS_TOO_OLD                          100
 138 #define ERROR_MP_EXTSHM_ACTIVE                       101
 139 #define ERROR_MP_VMGETINFO_FAILED                    102
 140 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 141 
 142 // The semantics in this file are thus that codeptr_t is a *real code ptr*.
 143 // This means that any function taking codeptr_t as arguments will assume
 144 // a real codeptr and won't handle function descriptors (eg getFuncName),
 145 // whereas functions taking address as args will deal with function
 146 // descriptors (eg os::dll_address_to_library_name).
 147 typedef unsigned int* codeptr_t;
 148 
 149 // Typedefs for stackslots, stack pointers, pointers to op codes.
 150 typedef unsigned long stackslot_t;
 151 typedef stackslot_t* stackptr_t;
 152 
 153 // Excerpts from systemcfg.h definitions newer than AIX 5.3.
 154 #ifndef PV_7
 155 #define PV_7 0x200000          /* Power PC 7 */
 156 #define PV_7_Compat 0x208000   /* Power PC 7 */
 157 #endif
 158 #ifndef PV_8
 159 #define PV_8 0x300000          /* Power PC 8 */
 160 #define PV_8_Compat 0x308000   /* Power PC 8 */
 161 #endif
 162 
 163 // Query dimensions of the stack of the calling thread.
 164 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
 165 
 166 // Function to check a given stack pointer against given stack limits.
 167 inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
 168   if (((uintptr_t)sp) & 0x7) {
 169     return false;
 170   }
 171   if (sp > stack_base) {
 172     return false;
 173   }
 174   if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
 175     return false;
 176   }
 177   return true;
 178 }
 179 
 180 // Returns true if function is a valid codepointer.
 181 inline bool is_valid_codepointer(codeptr_t p) {
 182   if (!p) {
 183     return false;
 184   }
 185   if (((uintptr_t)p) & 0x3) {
 186     return false;
 187   }
 188   if (!LoadedLibraries::find_for_text_address(p, NULL)) {
 189     return false;
 190   }
 191   return true;
 192 }
 193 
 194 // Macro to check a given stack pointer against given stack limits and to die if test fails.
 195 #define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
 196     guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
 197 }
 198 
 199 // Macro to check the current stack pointer against given stacklimits.
 200 #define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
 201   address sp; \
 202   sp = os::current_stack_pointer(); \
 203   CHECK_STACK_PTR(sp, stack_base, stack_size); \
 204 }
 205 
 206 ////////////////////////////////////////////////////////////////////////////////
 207 // global variables (for a description see os_aix.hpp)
 208 
 209 julong    os::Aix::_physical_memory = 0;
 210 pthread_t os::Aix::_main_thread = ((pthread_t)0);
 211 int       os::Aix::_page_size = -1;
 212 int       os::Aix::_on_pase = -1;
 213 int       os::Aix::_os_version = -1;
 214 int       os::Aix::_stack_page_size = -1;
 215 int       os::Aix::_xpg_sus_mode = -1;
 216 int       os::Aix::_extshm = -1;
 217 int       os::Aix::_logical_cpus = -1;
 218 
 219 ////////////////////////////////////////////////////////////////////////////////
 220 // local variables
 221 
 222 static int      g_multipage_error  = -1;   // error analysis for multipage initialization
 223 static jlong    initial_time_count = 0;
 224 static int      clock_tics_per_sec = 100;
 225 static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
 226 static bool     check_signals      = true;
 227 static pid_t    _initial_pid       = 0;
 228 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 229 static sigset_t SR_sigset;
 230 
 231 // This describes the state of multipage support of the underlying
 232 // OS. Note that this is of no interest to the outsize world and
 233 // therefore should not be defined in AIX class.
 234 //
 235 // AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
 236 // latter two (16M "large" resp. 16G "huge" pages) require special
 237 // setup and are normally not available.
 238 //
 239 // AIX supports multiple page sizes per process, for:
 240 //  - Stack (of the primordial thread, so not relevant for us)
 241 //  - Data - data, bss, heap, for us also pthread stacks
 242 //  - Text - text code
 243 //  - shared memory
 244 //
 245 // Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
 246 // and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
 247 //
 248 // For shared memory, page size can be set dynamically via
 249 // shmctl(). Different shared memory regions can have different page
 250 // sizes.
 251 //
 252 // More information can be found at AIBM info center:
 253 //   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
 254 //
 255 static struct {
 256   size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
 257   size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
 258   size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
 259   size_t pthr_stack_pagesize; // stack page size of pthread threads
 260   size_t textpsize;           // default text page size (LDR_CNTRL STACKPSIZE)
 261   bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
 262   bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
 263   int error;                  // Error describing if something went wrong at multipage init.
 264 } g_multipage_support = {
 265   (size_t) -1,
 266   (size_t) -1,
 267   (size_t) -1,
 268   (size_t) -1,
 269   (size_t) -1,
 270   false, false,
 271   0
 272 };
 273 
 274 // We must not accidentally allocate memory close to the BRK - even if
 275 // that would work - because then we prevent the BRK segment from
 276 // growing which may result in a malloc OOM even though there is
 277 // enough memory. The problem only arises if we shmat() or mmap() at
 278 // a specific wish address, e.g. to place the heap in a
 279 // compressed-oops-friendly way.
 280 static bool is_close_to_brk(address a) {
 281   address a1 = (address) sbrk(0);
 282   if (a >= a1 && a < (a1 + MaxExpectedDataSegmentSize)) {
 283     return true;
 284   }
 285   return false;
 286 }
 287 
 288 julong os::available_memory() {
 289   return Aix::available_memory();
 290 }
 291 
 292 julong os::Aix::available_memory() {
 293   os::Aix::meminfo_t mi;
 294   if (os::Aix::get_meminfo(&mi)) {
 295     return mi.real_free;
 296   } else {
 297     return 0xFFFFFFFFFFFFFFFFLL;
 298   }
 299 }
 300 
 301 julong os::physical_memory() {
 302   return Aix::physical_memory();
 303 }
 304 
 305 // Return true if user is running as root.
 306 
 307 bool os::have_special_privileges() {
 308   static bool init = false;
 309   static bool privileges = false;
 310   if (!init) {
 311     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 312     init = true;
 313   }
 314   return privileges;
 315 }
 316 
 317 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 318 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 319 static bool my_disclaim64(char* addr, size_t size) {
 320 
 321   if (size == 0) {
 322     return true;
 323   }
 324 
 325   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 326   const unsigned int maxDisclaimSize = 0x40000000;
 327 
 328   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 329   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 330 
 331   char* p = addr;
 332 
 333   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
 334     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 335       trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 336       return false;
 337     }
 338     p += maxDisclaimSize;
 339   }
 340 
 341   if (lastDisclaimSize > 0) {
 342     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 343       trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 344       return false;
 345     }
 346   }
 347 
 348   return true;
 349 }
 350 
 351 // Cpu architecture string
 352 #if defined(PPC32)
 353 static char cpu_arch[] = "ppc";
 354 #elif defined(PPC64)
 355 static char cpu_arch[] = "ppc64";
 356 #else
 357 #error Add appropriate cpu_arch setting
 358 #endif
 359 
 360 
 361 // Given an address, returns the size of the page backing that address.
 362 size_t os::Aix::query_pagesize(void* addr) {
 363 
 364   vm_page_info pi;
 365   pi.addr = (uint64_t)addr;
 366   if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
 367     return pi.pagesize;
 368   } else {
 369     fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
 370     assert(false, "vmgetinfo failed to retrieve page size");
 371     return SIZE_4K;
 372   }
 373 
 374 }
 375 
 376 // Returns the kernel thread id of the currently running thread.
 377 pid_t os::Aix::gettid() {
 378   return (pid_t) thread_self();
 379 }
 380 
 381 void os::Aix::initialize_system_info() {
 382 
 383   // Get the number of online(logical) cpus instead of configured.
 384   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
 385   assert(_processor_count > 0, "_processor_count must be > 0");
 386 
 387   // Retrieve total physical storage.
 388   os::Aix::meminfo_t mi;
 389   if (!os::Aix::get_meminfo(&mi)) {
 390     fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
 391     assert(false, "os::Aix::get_meminfo failed.");
 392   }
 393   _physical_memory = (julong) mi.real_total;
 394 }
 395 
 396 // Helper function for tracing page sizes.
 397 static const char* describe_pagesize(size_t pagesize) {
 398   switch (pagesize) {
 399     case SIZE_4K : return "4K";
 400     case SIZE_64K: return "64K";
 401     case SIZE_16M: return "16M";
 402     case SIZE_16G: return "16G";
 403     case -1:       return "not set";
 404     default:
 405       assert(false, "surprise");
 406       return "??";
 407   }
 408 }
 409 
 410 // Probe OS for multipage support.
 411 // Will fill the global g_multipage_support structure.
 412 // Must be called before calling os::large_page_init().
 413 static void query_multipage_support() {
 414 
 415   guarantee(g_multipage_support.pagesize == -1,
 416             "do not call twice");
 417 
 418   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
 419 
 420   // This really would surprise me.
 421   assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");
 422 
 423   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 424   // Default data page size is defined either by linker options (-bdatapsize)
 425   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 426   // default should be 4K.
 427   {
 428     void* p = ::malloc(SIZE_16M);
 429     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
 430     ::free(p);
 431   }
 432 
 433   // Query default shm page size (LDR_CNTRL SHMPSIZE).
 434   {
 435     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 436     guarantee(shmid != -1, "shmget failed");
 437     void* p = ::shmat(shmid, NULL, 0);
 438     ::shmctl(shmid, IPC_RMID, NULL);
 439     guarantee(p != (void*) -1, "shmat failed");
 440     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
 441     ::shmdt(p);
 442   }
 443 
 444   // Before querying the stack page size, make sure we are not running as primordial
 445   // thread (because primordial thread's stack may have different page size than
 446   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 447   // number of reasons so we may just as well guarantee it here.
 448   guarantee0(!os::Aix::is_primordial_thread());
 449 
 450   // Query pthread stack page size.
 451   {
 452     int dummy = 0;
 453     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
 454   }
 455 
 456   // Query default text page size (LDR_CNTRL TEXTPSIZE).
 457   /* PPC port: so far unused.
 458   {
 459     address any_function =
 460       (address) resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
 461     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
 462   }
 463   */
 464 
 465   // Now probe for support of 64K pages and 16M pages.
 466 
 467   // Before OS/400 V6R1, there is no support for pages other than 4K.
 468   if (os::Aix::on_pase_V5R4_or_older()) {
 469     Unimplemented();
 470     goto query_multipage_support_end;
 471   }
 472 
 473   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
 474   {
 475     const int MAX_PAGE_SIZES = 4;
 476     psize_t sizes[MAX_PAGE_SIZES];
 477     const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 478     if (num_psizes == -1) {
 479       trc("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
 480       trc("disabling multipage support.\n");
 481       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
 482       goto query_multipage_support_end;
 483     }
 484     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 485     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 486     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
 487     for (int i = 0; i < num_psizes; i ++) {
 488       trcVerbose(" %s ", describe_pagesize(sizes[i]));
 489     }
 490 
 491     // Can we use 64K, 16M pages?
 492     for (int i = 0; i < num_psizes; i ++) {
 493       const size_t pagesize = sizes[i];
 494       if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
 495         continue;
 496       }
 497       bool can_use = false;
 498       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
 499       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
 500         IPC_CREAT | S_IRUSR | S_IWUSR);
 501       guarantee0(shmid != -1); // Should always work.
 502       // Try to set pagesize.
 503       struct shmid_ds shm_buf = { 0 };
 504       shm_buf.shm_pagesize = pagesize;
 505       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
 506         const int en = errno;
 507         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 508         // PPC port trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
 509         // PPC port  MiscUtils::describe_errno(en));
 510       } else {
 511         // Attach and double check pageisze.
 512         void* p = ::shmat(shmid, NULL, 0);
 513         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 514         guarantee0(p != (void*) -1); // Should always work.
 515         const size_t real_pagesize = os::Aix::query_pagesize(p);
 516         if (real_pagesize != pagesize) {
 517           trcVerbose("real page size (0x%llX) differs.", real_pagesize);
 518         } else {
 519           can_use = true;
 520         }
 521         ::shmdt(p);
 522       }
 523       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
 524       if (pagesize == SIZE_64K) {
 525         g_multipage_support.can_use_64K_pages = can_use;
 526       } else if (pagesize == SIZE_16M) {
 527         g_multipage_support.can_use_16M_pages = can_use;
 528       }
 529     }
 530 
 531   } // end: check which pages can be used for shared memory
 532 
 533 query_multipage_support_end:
 534 
 535   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s\n",
 536       describe_pagesize(g_multipage_support.pagesize));
 537   trcVerbose("Data page size (C-Heap, bss, etc): %s\n",
 538       describe_pagesize(g_multipage_support.datapsize));
 539   trcVerbose("Text page size: %s\n",
 540       describe_pagesize(g_multipage_support.textpsize));
 541   trcVerbose("Thread stack page size (pthread): %s\n",
 542       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
 543   trcVerbose("Default shared memory page size: %s\n",
 544       describe_pagesize(g_multipage_support.shmpsize));
 545   trcVerbose("Can use 64K pages dynamically with shared meory: %s\n",
 546       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
 547   trcVerbose("Can use 16M pages dynamically with shared memory: %s\n",
 548       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
 549   trcVerbose("Multipage error details: %d\n",
 550       g_multipage_support.error);
 551 
 552   // sanity checks
 553   assert0(g_multipage_support.pagesize == SIZE_4K);
 554   assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
 555   // PPC port: so far unused.assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
 556   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
 557   assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);
 558 
 559 } // end os::Aix::query_multipage_support()
 560 
 561 void os::init_system_properties_values() {
 562 
 563 #define DEFAULT_LIBPATH "/usr/lib:/lib"
 564 #define EXTENSIONS_DIR  "/lib/ext"
 565 
 566   // Buffer that fits several sprintfs.
 567   // Note that the space for the trailing null is provided
 568   // by the nulls included by the sizeof operator.
 569   const size_t bufsize =
 570     MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
 571          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
 572   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 573 
 574   // sysclasspath, java_home, dll_dir
 575   {
 576     char *pslash;
 577     os::jvm_path(buf, bufsize);
 578 
 579     // Found the full path to libjvm.so.
 580     // Now cut the path to <java_home>/jre if we can.
 581     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
 582     pslash = strrchr(buf, '/');
 583     if (pslash != NULL) {
 584       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 585     }
 586     Arguments::set_dll_dir(buf);
 587 
 588     if (pslash != NULL) {
 589       pslash = strrchr(buf, '/');
 590       if (pslash != NULL) {
 591         *pslash = '\0';          // Get rid of /<arch>.
 592         pslash = strrchr(buf, '/');
 593         if (pslash != NULL) {
 594           *pslash = '\0';        // Get rid of /lib.
 595         }
 596       }
 597     }
 598     Arguments::set_java_home(buf);
 599     set_boot_path('/', ':');
 600   }
 601 
 602   // Where to look for native libraries.
 603 
 604   // On Aix we get the user setting of LIBPATH.
 605   // Eventually, all the library path setting will be done here.
 606   // Get the user setting of LIBPATH.
 607   const char *v = ::getenv("LIBPATH");
 608   const char *v_colon = ":";
 609   if (v == NULL) { v = ""; v_colon = ""; }
 610 
 611   // Concatenate user and invariant part of ld_library_path.
 612   // That's +1 for the colon and +1 for the trailing '\0'.
 613   char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
 614   sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
 615   Arguments::set_library_path(ld_library_path);
 616   FREE_C_HEAP_ARRAY(char, ld_library_path);
 617 
 618   // Extensions directories.
 619   sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
 620   Arguments::set_ext_dirs(buf);
 621 
 622   FREE_C_HEAP_ARRAY(char, buf);
 623 
 624 #undef DEFAULT_LIBPATH
 625 #undef EXTENSIONS_DIR
 626 }
 627 
 628 ////////////////////////////////////////////////////////////////////////////////
 629 // breakpoint support
 630 
 631 void os::breakpoint() {
 632   BREAKPOINT;
 633 }
 634 
 635 extern "C" void breakpoint() {
 636   // use debugger to set breakpoint here
 637 }
 638 
 639 ////////////////////////////////////////////////////////////////////////////////
 640 // signal support
 641 
 642 debug_only(static bool signal_sets_initialized = false);
 643 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
 644 
 645 bool os::Aix::is_sig_ignored(int sig) {
 646   struct sigaction oact;
 647   sigaction(sig, (struct sigaction*)NULL, &oact);
 648   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
 649     : CAST_FROM_FN_PTR(void*, oact.sa_handler);
 650   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
 651     return true;
 652   } else {
 653     return false;
 654   }
 655 }
 656 
 657 void os::Aix::signal_sets_init() {
 658   // Should also have an assertion stating we are still single-threaded.
 659   assert(!signal_sets_initialized, "Already initialized");
 660   // Fill in signals that are necessarily unblocked for all threads in
 661   // the VM. Currently, we unblock the following signals:
 662   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
 663   //                         by -Xrs (=ReduceSignalUsage));
 664   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
 665   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
 666   // the dispositions or masks wrt these signals.
 667   // Programs embedding the VM that want to use the above signals for their
 668   // own purposes must, at this time, use the "-Xrs" option to prevent
 669   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
 670   // (See bug 4345157, and other related bugs).
 671   // In reality, though, unblocking these signals is really a nop, since
 672   // these signals are not blocked by default.
 673   sigemptyset(&unblocked_sigs);
 674   sigemptyset(&allowdebug_blocked_sigs);
 675   sigaddset(&unblocked_sigs, SIGILL);
 676   sigaddset(&unblocked_sigs, SIGSEGV);
 677   sigaddset(&unblocked_sigs, SIGBUS);
 678   sigaddset(&unblocked_sigs, SIGFPE);
 679   sigaddset(&unblocked_sigs, SIGTRAP);
 680   sigaddset(&unblocked_sigs, SIGDANGER);
 681   sigaddset(&unblocked_sigs, SR_signum);
 682 
 683   if (!ReduceSignalUsage) {
 684    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
 685      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
 686      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
 687    }
 688    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
 689      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
 690      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
 691    }
 692    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
 693      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
 694      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
 695    }
 696   }
 697   // Fill in signals that are blocked by all but the VM thread.
 698   sigemptyset(&vm_sigs);
 699   if (!ReduceSignalUsage)
 700     sigaddset(&vm_sigs, BREAK_SIGNAL);
 701   debug_only(signal_sets_initialized = true);
 702 }
 703 
 704 // These are signals that are unblocked while a thread is running Java.
 705 // (For some reason, they get blocked by default.)
 706 sigset_t* os::Aix::unblocked_signals() {
 707   assert(signal_sets_initialized, "Not initialized");
 708   return &unblocked_sigs;
 709 }
 710 
 711 // These are the signals that are blocked while a (non-VM) thread is
 712 // running Java. Only the VM thread handles these signals.
 713 sigset_t* os::Aix::vm_signals() {
 714   assert(signal_sets_initialized, "Not initialized");
 715   return &vm_sigs;
 716 }
 717 
 718 // These are signals that are blocked during cond_wait to allow debugger in
 719 sigset_t* os::Aix::allowdebug_blocked_signals() {
 720   assert(signal_sets_initialized, "Not initialized");
 721   return &allowdebug_blocked_sigs;
 722 }
 723 
 724 void os::Aix::hotspot_sigmask(Thread* thread) {
 725 
 726   //Save caller's signal mask before setting VM signal mask
 727   sigset_t caller_sigmask;
 728   pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);
 729 
 730   OSThread* osthread = thread->osthread();
 731   osthread->set_caller_sigmask(caller_sigmask);
 732 
 733   pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);
 734 
 735   if (!ReduceSignalUsage) {
 736     if (thread->is_VM_thread()) {
 737       // Only the VM thread handles BREAK_SIGNAL ...
 738       pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
 739     } else {
 740       // ... all other threads block BREAK_SIGNAL
 741       pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
 742     }
 743   }
 744 }
 745 
 746 // retrieve memory information.
 747 // Returns false if something went wrong;
 748 // content of pmi undefined in this case.
 749 bool os::Aix::get_meminfo(meminfo_t* pmi) {
 750 
 751   assert(pmi, "get_meminfo: invalid parameter");
 752 
 753   memset(pmi, 0, sizeof(meminfo_t));
 754 
 755   if (os::Aix::on_pase()) {
 756 
 757     Unimplemented();
 758     return false;
 759 
 760   } else {
 761 
 762     // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
 763     // See:
 764     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 765     //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
 766     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 767     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 768 
 769     perfstat_memory_total_t psmt;
 770     memset (&psmt, '\0', sizeof(psmt));
 771     const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
 772     if (rc == -1) {
 773       fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
 774       assert(0, "perfstat_memory_total() failed");
 775       return false;
 776     }
 777 
 778     assert(rc == 1, "perfstat_memory_total() - weird return code");
 779 
 780     // excerpt from
 781     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 782     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 783     // The fields of perfstat_memory_total_t:
 784     // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
 785     // u_longlong_t real_total         Total real memory (in 4 KB pages).
 786     // u_longlong_t real_free          Free real memory (in 4 KB pages).
 787     // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
 788     // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
 789 
 790     pmi->virt_total = psmt.virt_total * 4096;
 791     pmi->real_total = psmt.real_total * 4096;
 792     pmi->real_free = psmt.real_free * 4096;
 793     pmi->pgsp_total = psmt.pgsp_total * 4096;
 794     pmi->pgsp_free = psmt.pgsp_free * 4096;
 795 
 796     return true;
 797 
 798   }
 799 } // end os::Aix::get_meminfo
 800 
 801 // Retrieve global cpu information.
 802 // Returns false if something went wrong;
 803 // the content of pci is undefined in this case.
 804 bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
 805   assert(pci, "get_cpuinfo: invalid parameter");
 806   memset(pci, 0, sizeof(cpuinfo_t));
 807 
 808   perfstat_cpu_total_t psct;
 809   memset (&psct, '\0', sizeof(psct));
 810 
 811   if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
 812     fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
 813     assert(0, "perfstat_cpu_total() failed");
 814     return false;
 815   }
 816 
 817   // global cpu information
 818   strcpy (pci->description, psct.description);
 819   pci->processorHZ = psct.processorHZ;
 820   pci->ncpus = psct.ncpus;
 821   os::Aix::_logical_cpus = psct.ncpus;
 822   for (int i = 0; i < 3; i++) {
 823     pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
 824   }
 825 
 826   // get the processor version from _system_configuration
 827   switch (_system_configuration.version) {
 828   case PV_8:
 829     strcpy(pci->version, "Power PC 8");
 830     break;
 831   case PV_7:
 832     strcpy(pci->version, "Power PC 7");
 833     break;
 834   case PV_6_1:
 835     strcpy(pci->version, "Power PC 6 DD1.x");
 836     break;
 837   case PV_6:
 838     strcpy(pci->version, "Power PC 6");
 839     break;
 840   case PV_5:
 841     strcpy(pci->version, "Power PC 5");
 842     break;
 843   case PV_5_2:
 844     strcpy(pci->version, "Power PC 5_2");
 845     break;
 846   case PV_5_3:
 847     strcpy(pci->version, "Power PC 5_3");
 848     break;
 849   case PV_5_Compat:
 850     strcpy(pci->version, "PV_5_Compat");
 851     break;
 852   case PV_6_Compat:
 853     strcpy(pci->version, "PV_6_Compat");
 854     break;
 855   case PV_7_Compat:
 856     strcpy(pci->version, "PV_7_Compat");
 857     break;
 858   case PV_8_Compat:
 859     strcpy(pci->version, "PV_8_Compat");
 860     break;
 861   default:
 862     strcpy(pci->version, "unknown");
 863   }
 864 
 865   return true;
 866 
 867 } //end os::Aix::get_cpuinfo
 868 
 869 //////////////////////////////////////////////////////////////////////////////
 870 // detecting pthread library
 871 
 872 void os::Aix::libpthread_init() {
 873   return;
 874 }
 875 
 876 //////////////////////////////////////////////////////////////////////////////
 877 // create new thread
 878 
 879 // Thread start routine for all newly created threads
 880 static void *java_start(Thread *thread) {
 881 
 882   // find out my own stack dimensions
 883   {
 884     // actually, this should do exactly the same as thread->record_stack_base_and_size...
 885     address base = 0;
 886     size_t size = 0;
 887     query_stack_dimensions(&base, &size);
 888     thread->set_stack_base(base);
 889     thread->set_stack_size(size);
 890   }
 891 
 892   // Do some sanity checks.
 893   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
 894 
 895   // Try to randomize the cache line index of hot stack frames.
 896   // This helps when threads of the same stack traces evict each other's
 897   // cache lines. The threads can be either from the same JVM instance, or
 898   // from different JVM instances. The benefit is especially true for
 899   // processors with hyperthreading technology.
 900 
 901   static int counter = 0;
 902   int pid = os::current_process_id();
 903   alloca(((pid ^ counter++) & 7) * 128);
 904 
 905   ThreadLocalStorage::set_thread(thread);
 906 
 907   OSThread* osthread = thread->osthread();
 908 
 909   // thread_id is kernel thread id (similar to Solaris LWP id)
 910   osthread->set_thread_id(os::Aix::gettid());
 911 
 912   // initialize signal mask for this thread
 913   os::Aix::hotspot_sigmask(thread);
 914 
 915   // initialize floating point control register
 916   os::Aix::init_thread_fpu_state();
 917 
 918   assert(osthread->get_state() == RUNNABLE, "invalid os thread state");
 919 
 920   // call one more level start routine
 921   thread->run();
 922 
 923   return 0;
 924 }
 925 
 926 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 927 
 928   // We want the whole function to be synchronized.
 929   ThreadCritical cs;
 930 
 931   assert(thread->osthread() == NULL, "caller responsible");
 932 
 933   // Allocate the OSThread object
 934   OSThread* osthread = new OSThread(NULL, NULL);
 935   if (osthread == NULL) {
 936     return false;
 937   }
 938 
 939   // set the correct thread state
 940   osthread->set_thread_type(thr_type);
 941 
 942   // Initial state is ALLOCATED but not INITIALIZED
 943   osthread->set_state(ALLOCATED);
 944 
 945   thread->set_osthread(osthread);
 946 
 947   // init thread attributes
 948   pthread_attr_t attr;
 949   pthread_attr_init(&attr);
 950   guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");
 951 
 952   // Make sure we run in 1:1 kernel-user-thread mode.
 953   if (os::Aix::on_aix()) {
 954     guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
 955     guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
 956   } // end: aix
 957 
 958   // Start in suspended state, and in os::thread_start, wake the thread up.
 959   guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");
 960 
 961   // calculate stack size if it's not specified by caller
 962   if (stack_size == 0) {
 963     stack_size = os::Aix::default_stack_size(thr_type);
 964 
 965     switch (thr_type) {
 966     case os::java_thread:
 967       // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
 968       assert(JavaThread::stack_size_at_create() > 0, "this should be set");
 969       stack_size = JavaThread::stack_size_at_create();
 970       break;
 971     case os::compiler_thread:
 972       if (CompilerThreadStackSize > 0) {
 973         stack_size = (size_t)(CompilerThreadStackSize * K);
 974         break;
 975       } // else fall through:
 976         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 977     case os::vm_thread:
 978     case os::pgc_thread:
 979     case os::cgc_thread:
 980     case os::watcher_thread:
 981       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 982       break;
 983     }
 984   }
 985 
 986   stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
 987   pthread_attr_setstacksize(&attr, stack_size);
 988 
 989   pthread_t tid;
 990   int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
 991 
 992   pthread_attr_destroy(&attr);
 993 
 994   if (ret == 0) {
 995     // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
 996   } else {
 997     if (PrintMiscellaneous && (Verbose || WizardMode)) {
 998       perror("pthread_create()");
 999     }
1000     // Need to clean up stuff we've allocated so far
1001     thread->set_osthread(NULL);
1002     delete osthread;
1003     return false;
1004   }
1005 
1006   // Store pthread info into the OSThread
1007   osthread->set_pthread_id(tid);
1008 
1009   return true;
1010 }
1011 
1012 /////////////////////////////////////////////////////////////////////////////
1013 // attach existing thread
1014 
1015 // bootstrap the main thread
1016 bool os::create_main_thread(JavaThread* thread) {
1017   assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
1018   return create_attached_thread(thread);
1019 }
1020 
1021 bool os::create_attached_thread(JavaThread* thread) {
1022 #ifdef ASSERT
1023     thread->verify_not_published();
1024 #endif
1025 
1026   // Allocate the OSThread object
1027   OSThread* osthread = new OSThread(NULL, NULL);
1028 
1029   if (osthread == NULL) {
1030     return false;
1031   }
1032 
1033   // Store pthread info into the OSThread
1034   osthread->set_thread_id(os::Aix::gettid());
1035   osthread->set_pthread_id(::pthread_self());
1036 
1037   // initialize floating point control register
1038   os::Aix::init_thread_fpu_state();
1039 
1040   // some sanity checks
1041   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
1042 
1043   // Initial thread state is RUNNABLE
1044   osthread->set_state(RUNNABLE);
1045 
1046   thread->set_osthread(osthread);
1047 
1048   if (UseNUMA) {
1049     int lgrp_id = os::numa_get_group_id();
1050     if (lgrp_id != -1) {
1051       thread->set_lgrp_id(lgrp_id);
1052     }
1053   }
1054 
1055   // initialize signal mask for this thread
1056   // and save the caller's signal mask
1057   os::Aix::hotspot_sigmask(thread);
1058 
1059   return true;
1060 }
1061 
1062 void os::pd_start_thread(Thread* thread) {
1063   int status = pthread_continue_np(thread->osthread()->pthread_id());
1064   assert(status == 0, "thr_continue failed");
1065 }
1066 
1067 // Free OS resources related to the OSThread
1068 void os::free_thread(OSThread* osthread) {
1069   assert(osthread != NULL, "osthread not set");
1070 
1071   if (Thread::current()->osthread() == osthread) {
1072     // Restore caller's signal mask
1073     sigset_t sigmask = osthread->caller_sigmask();
1074     pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
1075    }
1076 
1077   delete osthread;
1078 }
1079 
1080 //////////////////////////////////////////////////////////////////////////////
1081 // thread local storage
1082 
1083 int os::allocate_thread_local_storage() {
1084   pthread_key_t key;
1085   int rslt = pthread_key_create(&key, NULL);
1086   assert(rslt == 0, "cannot allocate thread local storage");
1087   return (int)key;
1088 }
1089 
1090 // Note: This is currently not used by VM, as we don't destroy TLS key
1091 // on VM exit.
1092 void os::free_thread_local_storage(int index) {
1093   int rslt = pthread_key_delete((pthread_key_t)index);
1094   assert(rslt == 0, "invalid index");
1095 }
1096 
1097 void os::thread_local_storage_at_put(int index, void* value) {
1098   int rslt = pthread_setspecific((pthread_key_t)index, value);
1099   assert(rslt == 0, "pthread_setspecific failed");
1100 }
1101 
1102 extern "C" Thread* get_thread() {
1103   return ThreadLocalStorage::thread();
1104 }
1105 
1106 ////////////////////////////////////////////////////////////////////////////////
1107 // time support
1108 
1109 // Time since start-up in seconds to a fine granularity.
1110 // Used by VMSelfDestructTimer and the MemProfiler.
1111 double os::elapsedTime() {
1112   return (double)(os::elapsed_counter()) * 0.000001;
1113 }
1114 
1115 jlong os::elapsed_counter() {
1116   timeval time;
1117   int status = gettimeofday(&time, NULL);
1118   return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
1119 }
1120 
1121 jlong os::elapsed_frequency() {
1122   return (1000 * 1000);
1123 }
1124 
1125 bool os::supports_vtime() { return true; }
1126 bool os::enable_vtime()   { return false; }
1127 bool os::vtime_enabled()  { return false; }
1128 
1129 double os::elapsedVTime() {
1130   struct rusage usage;
1131   int retval = getrusage(RUSAGE_THREAD, &usage);
1132   if (retval == 0) {
1133     return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
1134   } else {
1135     // better than nothing, but not much
1136     return elapsedTime();
1137   }
1138 }
1139 
1140 jlong os::javaTimeMillis() {
1141   timeval time;
1142   int status = gettimeofday(&time, NULL);
1143   assert(status != -1, "aix error at gettimeofday()");
1144   return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1145 }
1146 
1147 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1148   timeval time;
1149   int status = gettimeofday(&time, NULL);
1150   assert(status != -1, "aix error at gettimeofday()");
1151   seconds = jlong(time.tv_sec);
1152   nanos = jlong(time.tv_usec) * 1000;
1153 }
1154 
1155 
1156 // We need to manually declare mread_real_time,
1157 // because IBM didn't provide a prototype in time.h.
1158 // (they probably only ever tested in C, not C++)
1159 extern "C"
1160 int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
1161 
1162 jlong os::javaTimeNanos() {
1163   if (os::Aix::on_pase()) {
1164     Unimplemented();
1165     return 0;
1166   } else {
1167     // On AIX use the precision of processors real time clock
1168     // or time base registers.
1169     timebasestruct_t time;
1170     int rc;
1171 
1172     // If the CPU has a time register, it will be used and
1173     // we have to convert to real time first. After convertion we have following data:
1174     // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
1175     // time.tb_low  [nanoseconds after the last full second above]
1176     // We better use mread_real_time here instead of read_real_time
1177     // to ensure that we will get a monotonic increasing time.
1178     if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
1179       rc = time_base_to_time(&time, TIMEBASE_SZ);
1180       assert(rc != -1, "aix error at time_base_to_time()");
1181     }
1182     return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
1183   }
1184 }
1185 
1186 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1187   info_ptr->max_value = ALL_64_BITS;
1188   // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
1189   info_ptr->may_skip_backward = false;
1190   info_ptr->may_skip_forward = false;
1191   info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
1192 }
1193 
1194 // Return the real, user, and system times in seconds from an
1195 // arbitrary fixed point in the past.
1196 bool os::getTimesSecs(double* process_real_time,
1197                       double* process_user_time,
1198                       double* process_system_time) {
1199   struct tms ticks;
1200   clock_t real_ticks = times(&ticks);
1201 
1202   if (real_ticks == (clock_t) (-1)) {
1203     return false;
1204   } else {
1205     double ticks_per_second = (double) clock_tics_per_sec;
1206     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1207     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1208     *process_real_time = ((double) real_ticks) / ticks_per_second;
1209 
1210     return true;
1211   }
1212 }
1213 
1214 char * os::local_time_string(char *buf, size_t buflen) {
1215   struct tm t;
1216   time_t long_time;
1217   time(&long_time);
1218   localtime_r(&long_time, &t);
1219   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1220                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1221                t.tm_hour, t.tm_min, t.tm_sec);
1222   return buf;
1223 }
1224 
1225 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
1226   return localtime_r(clock, res);
1227 }
1228 
1229 ////////////////////////////////////////////////////////////////////////////////
1230 // runtime exit support
1231 
1232 // Note: os::shutdown() might be called very early during initialization, or
1233 // called from signal handler. Before adding something to os::shutdown(), make
1234 // sure it is async-safe and can handle partially initialized VM.
1235 void os::shutdown() {
1236 
1237   // allow PerfMemory to attempt cleanup of any persistent resources
1238   perfMemory_exit();
1239 
1240   // needs to remove object in file system
1241   AttachListener::abort();
1242 
1243   // flush buffered output, finish log files
1244   ostream_abort();
1245 
1246   // Check for abort hook
1247   abort_hook_t abort_hook = Arguments::abort_hook();
1248   if (abort_hook != NULL) {
1249     abort_hook();
1250   }
1251 }
1252 
1253 // Note: os::abort() might be called very early during initialization, or
1254 // called from signal handler. Before adding something to os::abort(), make
1255 // sure it is async-safe and can handle partially initialized VM.
1256 void os::abort(bool dump_core, void* siginfo, void* context) {
1257   os::shutdown();
1258   if (dump_core) {
1259 #ifndef PRODUCT
1260     fdStream out(defaultStream::output_fd());
1261     out.print_raw("Current thread is ");
1262     char buf[16];
1263     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1264     out.print_raw_cr(buf);
1265     out.print_raw_cr("Dumping core ...");
1266 #endif
1267     ::abort(); // dump core
1268   }
1269 
1270   ::exit(1);
1271 }
1272 
1273 // Die immediately, no exit hook, no abort hook, no cleanup.
1274 void os::die() {
1275   ::abort();
1276 }
1277 
1278 // This method is a copy of JDK's sysGetLastErrorString
1279 // from src/solaris/hpi/src/system_md.c
1280 
1281 size_t os::lasterror(char *buf, size_t len) {
1282   if (errno == 0) return 0;
1283 
1284   const char *s = ::strerror(errno);
1285   size_t n = ::strlen(s);
1286   if (n >= len) {
1287     n = len - 1;
1288   }
1289   ::strncpy(buf, s, n);
1290   buf[n] = '\0';
1291   return n;
1292 }
1293 
1294 intx os::current_thread_id() { return (intx)pthread_self(); }
1295 
1296 int os::current_process_id() {
1297 
1298   // This implementation returns a unique pid, the pid of the
1299   // launcher thread that starts the vm 'process'.
1300 
1301   // Under POSIX, getpid() returns the same pid as the
1302   // launcher thread rather than a unique pid per thread.
1303   // Use gettid() if you want the old pre NPTL behaviour.
1304 
1305   // if you are looking for the result of a call to getpid() that
1306   // returns a unique pid for the calling thread, then look at the
1307   // OSThread::thread_id() method in osThread_linux.hpp file
1308 
1309   return (int)(_initial_pid ? _initial_pid : getpid());
1310 }
1311 
1312 // DLL functions
1313 
1314 const char* os::dll_file_extension() { return ".so"; }
1315 
1316 // This must be hard coded because it's the system's temporary
1317 // directory not the java application's temp directory, ala java.io.tmpdir.
1318 const char* os::get_temp_directory() { return "/tmp"; }
1319 
1320 static bool file_exists(const char* filename) {
1321   struct stat statbuf;
1322   if (filename == NULL || strlen(filename) == 0) {
1323     return false;
1324   }
1325   return os::stat(filename, &statbuf) == 0;
1326 }
1327 
1328 bool os::dll_build_name(char* buffer, size_t buflen,
1329                         const char* pname, const char* fname) {
1330   bool retval = false;
1331   // Copied from libhpi
1332   const size_t pnamelen = pname ? strlen(pname) : 0;
1333 
1334   // Return error on buffer overflow.
1335   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1336     *buffer = '\0';
1337     return retval;
1338   }
1339 
1340   if (pnamelen == 0) {
1341     snprintf(buffer, buflen, "lib%s.so", fname);
1342     retval = true;
1343   } else if (strchr(pname, *os::path_separator()) != NULL) {
1344     int n;
1345     char** pelements = split_path(pname, &n);
1346     for (int i = 0; i < n; i++) {
1347       // Really shouldn't be NULL, but check can't hurt
1348       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1349         continue; // skip the empty path values
1350       }
1351       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1352       if (file_exists(buffer)) {
1353         retval = true;
1354         break;
1355       }
1356     }
1357     // release the storage
1358     for (int i = 0; i < n; i++) {
1359       if (pelements[i] != NULL) {
1360         FREE_C_HEAP_ARRAY(char, pelements[i]);
1361       }
1362     }
1363     if (pelements != NULL) {
1364       FREE_C_HEAP_ARRAY(char*, pelements);
1365     }
1366   } else {
1367     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1368     retval = true;
1369   }
1370   return retval;
1371 }
1372 
1373 // Check if addr is inside libjvm.so.
1374 bool os::address_is_in_vm(address addr) {
1375 
1376   // Input could be a real pc or a function pointer literal. The latter
1377   // would be a function descriptor residing in the data segment of a module.
1378   loaded_module_t lm;
1379   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
1380     return lm.is_in_vm;
1381   } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
1382     return lm.is_in_vm;
1383   } else {
1384     return false;
1385   }
1386 
1387 }
1388 
1389 // Resolve an AIX function descriptor literal to a code pointer.
1390 // If the input is a valid code pointer to a text segment of a loaded module,
1391 //   it is returned unchanged.
1392 // If the input is a valid AIX function descriptor, it is resolved to the
1393 //   code entry point.
1394 // If the input is neither a valid function descriptor nor a valid code pointer,
1395 //   NULL is returned.
1396 static address resolve_function_descriptor_to_code_pointer(address p) {
1397 
1398   if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
1399     // It is a real code pointer.
1400     return p;
1401   } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
1402     // Pointer to data segment, potential function descriptor.
1403     address code_entry = (address)(((FunctionDescriptor*)p)->entry());
1404     if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
1405       // It is a function descriptor.
1406       return code_entry;
1407     }
1408   }
1409 
1410   return NULL;
1411 }
1412 
1413 bool os::dll_address_to_function_name(address addr, char *buf,
1414                                       int buflen, int *offset,
1415                                       bool demangle) {
1416   if (offset) {
1417     *offset = -1;
1418   }
1419   // Buf is not optional, but offset is optional.
1420   assert(buf != NULL, "sanity check");
1421   buf[0] = '\0';
1422 
1423   // Resolve function ptr literals first.
1424   addr = resolve_function_descriptor_to_code_pointer(addr);
1425   if (!addr) {
1426     return false;
1427   }
1428 
1429   // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
1430   return Decoder::decode(addr, buf, buflen, offset, demangle);
1431 }
1432 
1433 static int getModuleName(codeptr_t pc,                    // [in] program counter
1434                          char* p_name, size_t namelen,    // [out] optional: function name
1435                          char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
1436                          ) {
1437 
1438   if (p_name && namelen > 0) {
1439     *p_name = '\0';
1440   }
1441   if (p_errmsg && errmsglen > 0) {
1442     *p_errmsg = '\0';
1443   }
1444 
1445   if (p_name && namelen > 0) {
1446     loaded_module_t lm;
1447     if (LoadedLibraries::find_for_text_address(pc, &lm) != NULL) {
1448       strncpy(p_name, lm.shortname, namelen);
1449       p_name[namelen - 1] = '\0';
1450     }
1451     return 0;
1452   }
1453 
1454   return -1;
1455 }
1456 
1457 bool os::dll_address_to_library_name(address addr, char* buf,
1458                                      int buflen, int* offset) {
1459   if (offset) {
1460     *offset = -1;
1461   }
1462   // Buf is not optional, but offset is optional.
1463   assert(buf != NULL, "sanity check");
1464   buf[0] = '\0';
1465 
1466   // Resolve function ptr literals first.
1467   addr = resolve_function_descriptor_to_code_pointer(addr);
1468   if (!addr) {
1469     return false;
1470   }
1471 
1472   if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
1473     return true;
1474   }
1475   return false;
1476 }
1477 
1478 // Loads .dll/.so and in case of error it checks if .dll/.so was built
1479 // for the same architecture as Hotspot is running on.
1480 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1481 
1482   if (ebuf && ebuflen > 0) {
1483     ebuf[0] = '\0';
1484     ebuf[ebuflen - 1] = '\0';
1485   }
1486 
1487   if (!filename || strlen(filename) == 0) {
1488     ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1489     return NULL;
1490   }
1491 
1492   // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1493   void * result= ::dlopen(filename, RTLD_LAZY);
1494   if (result != NULL) {
1495     // Reload dll cache. Don't do this in signal handling.
1496     LoadedLibraries::reload();
1497     return result;
1498   } else {
1499     // error analysis when dlopen fails
1500     const char* const error_report = ::dlerror();
1501     if (error_report && ebuf && ebuflen > 0) {
1502       snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1503                filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1504     }
1505   }
1506   return NULL;
1507 }
1508 
1509 void* os::dll_lookup(void* handle, const char* name) {
1510   void* res = dlsym(handle, name);
1511   return res;
1512 }
1513 
1514 void* os::get_default_process_handle() {
1515   return (void*)::dlopen(NULL, RTLD_LAZY);
1516 }
1517 
1518 void os::print_dll_info(outputStream *st) {
1519   st->print_cr("Dynamic libraries:");
1520   LoadedLibraries::print(st);
1521 }
1522 
1523 void os::get_summary_os_info(char* buf, size_t buflen) {
1524   // There might be something more readable than uname results for AIX.
1525   struct utsname name;
1526   uname(&name);
1527   snprintf(buf, buflen, "%s %s", name.release, name.version);
1528 }
1529 
1530 void os::print_os_info(outputStream* st) {
1531   st->print("OS:");
1532 
1533   st->print("uname:");
1534   struct utsname name;
1535   uname(&name);
1536   st->print(name.sysname); st->print(" ");
1537   st->print(name.nodename); st->print(" ");
1538   st->print(name.release); st->print(" ");
1539   st->print(name.version); st->print(" ");
1540   st->print(name.machine);
1541   st->cr();
1542 
1543   // rlimit
1544   st->print("rlimit:");
1545   struct rlimit rlim;
1546 
1547   st->print(" STACK ");
1548   getrlimit(RLIMIT_STACK, &rlim);
1549   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1550   else st->print("%uk", rlim.rlim_cur >> 10);
1551 
1552   st->print(", CORE ");
1553   getrlimit(RLIMIT_CORE, &rlim);
1554   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1555   else st->print("%uk", rlim.rlim_cur >> 10);
1556 
1557   st->print(", NPROC ");
1558   st->print("%d", sysconf(_SC_CHILD_MAX));
1559 
1560   st->print(", NOFILE ");
1561   getrlimit(RLIMIT_NOFILE, &rlim);
1562   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1563   else st->print("%d", rlim.rlim_cur);
1564 
1565   st->print(", AS ");
1566   getrlimit(RLIMIT_AS, &rlim);
1567   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1568   else st->print("%uk", rlim.rlim_cur >> 10);
1569 
1570   // Print limits on DATA, because it limits the C-heap.
1571   st->print(", DATA ");
1572   getrlimit(RLIMIT_DATA, &rlim);
1573   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1574   else st->print("%uk", rlim.rlim_cur >> 10);
1575   st->cr();
1576 
1577   // load average
1578   st->print("load average:");
1579   double loadavg[3] = {-1.L, -1.L, -1.L};
1580   os::loadavg(loadavg, 3);
1581   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1582   st->cr();
1583 }
1584 
1585 void os::print_memory_info(outputStream* st) {
1586 
1587   st->print_cr("Memory:");
1588 
1589   st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
1590   st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
1591   st->print_cr("  Default shared memory page size:        %s",
1592     describe_pagesize(g_multipage_support.shmpsize));
1593   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1594     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1595   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1596     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1597   if (g_multipage_error != 0) {
1598     st->print_cr("  multipage error: %d", g_multipage_error);
1599   }
1600 
1601   // print out LDR_CNTRL because it affects the default page sizes
1602   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1603   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1604 
1605   const char* const extshm = ::getenv("EXTSHM");
1606   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1607   if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1608     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1609   }
1610 
1611   // Call os::Aix::get_meminfo() to retrieve memory statistics.
1612   os::Aix::meminfo_t mi;
1613   if (os::Aix::get_meminfo(&mi)) {
1614     char buffer[256];
1615     if (os::Aix::on_aix()) {
1616       jio_snprintf(buffer, sizeof(buffer),
1617                    "  physical total : %llu\n"
1618                    "  physical free  : %llu\n"
1619                    "  swap total     : %llu\n"
1620                    "  swap free      : %llu\n",
1621                    mi.real_total,
1622                    mi.real_free,
1623                    mi.pgsp_total,
1624                    mi.pgsp_free);
1625     } else {
1626       Unimplemented();
1627     }
1628     st->print_raw(buffer);
1629   } else {
1630     st->print_cr("  (no more information available)");
1631   }
1632 }
1633 
1634 // Get a string for the cpuinfo that is a summary of the cpu type
1635 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1636   // This looks good
1637   os::Aix::cpuinfo_t ci;
1638   if (os::Aix::get_cpuinfo(&ci)) {
1639     strncpy(buf, ci.version, buflen);
1640   } else {
1641     strncpy(buf, "AIX", buflen);
1642   }
1643 }
1644 
1645 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1646 }
1647 
1648 void os::print_siginfo(outputStream* st, void* siginfo) {
1649   // Use common posix version.
1650   os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
1651   st->cr();
1652 }
1653 
1654 static void print_signal_handler(outputStream* st, int sig,
1655                                  char* buf, size_t buflen);
1656 
1657 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1658   st->print_cr("Signal Handlers:");
1659   print_signal_handler(st, SIGSEGV, buf, buflen);
1660   print_signal_handler(st, SIGBUS , buf, buflen);
1661   print_signal_handler(st, SIGFPE , buf, buflen);
1662   print_signal_handler(st, SIGPIPE, buf, buflen);
1663   print_signal_handler(st, SIGXFSZ, buf, buflen);
1664   print_signal_handler(st, SIGILL , buf, buflen);
1665   print_signal_handler(st, SR_signum, buf, buflen);
1666   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1667   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1668   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1669   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1670   print_signal_handler(st, SIGTRAP, buf, buflen);
1671   print_signal_handler(st, SIGDANGER, buf, buflen);
1672 }
1673 
1674 static char saved_jvm_path[MAXPATHLEN] = {0};
1675 
1676 // Find the full path to the current module, libjvm.so.
1677 void os::jvm_path(char *buf, jint buflen) {
1678   // Error checking.
1679   if (buflen < MAXPATHLEN) {
1680     assert(false, "must use a large-enough buffer");
1681     buf[0] = '\0';
1682     return;
1683   }
1684   // Lazy resolve the path to current module.
1685   if (saved_jvm_path[0] != 0) {
1686     strcpy(buf, saved_jvm_path);
1687     return;
1688   }
1689 
1690   Dl_info dlinfo;
1691   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1692   assert(ret != 0, "cannot locate libjvm");
1693   char* rp = realpath((char *)dlinfo.dli_fname, buf);
1694   assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1695 
1696   strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
1697   saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
1698 }
1699 
1700 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1701   // no prefix required, not even "_"
1702 }
1703 
1704 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1705   // no suffix required
1706 }
1707 
1708 ////////////////////////////////////////////////////////////////////////////////
1709 // sun.misc.Signal support
1710 
1711 static volatile jint sigint_count = 0;
1712 
1713 static void
1714 UserHandler(int sig, void *siginfo, void *context) {
1715   // 4511530 - sem_post is serialized and handled by the manager thread. When
1716   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1717   // don't want to flood the manager thread with sem_post requests.
1718   if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1719     return;
1720 
1721   // Ctrl-C is pressed during error reporting, likely because the error
1722   // handler fails to abort. Let VM die immediately.
1723   if (sig == SIGINT && is_error_reported()) {
1724     os::die();
1725   }
1726 
1727   os::signal_notify(sig);
1728 }
1729 
1730 void* os::user_handler() {
1731   return CAST_FROM_FN_PTR(void*, UserHandler);
1732 }
1733 
1734 extern "C" {
1735   typedef void (*sa_handler_t)(int);
1736   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1737 }
1738 
1739 void* os::signal(int signal_number, void* handler) {
1740   struct sigaction sigAct, oldSigAct;
1741 
1742   sigfillset(&(sigAct.sa_mask));
1743 
1744   // Do not block out synchronous signals in the signal handler.
1745   // Blocking synchronous signals only makes sense if you can really
1746   // be sure that those signals won't happen during signal handling,
1747   // when the blocking applies. Normal signal handlers are lean and
1748   // do not cause signals. But our signal handlers tend to be "risky"
1749   // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
1750   // On AIX, PASE there was a case where a SIGSEGV happened, followed
1751   // by a SIGILL, which was blocked due to the signal mask. The process
1752   // just hung forever. Better to crash from a secondary signal than to hang.
1753   sigdelset(&(sigAct.sa_mask), SIGSEGV);
1754   sigdelset(&(sigAct.sa_mask), SIGBUS);
1755   sigdelset(&(sigAct.sa_mask), SIGILL);
1756   sigdelset(&(sigAct.sa_mask), SIGFPE);
1757   sigdelset(&(sigAct.sa_mask), SIGTRAP);
1758 
1759   sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1760 
1761   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1762 
1763   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1764     // -1 means registration failed
1765     return (void *)-1;
1766   }
1767 
1768   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1769 }
1770 
1771 void os::signal_raise(int signal_number) {
1772   ::raise(signal_number);
1773 }
1774 
1775 //
1776 // The following code is moved from os.cpp for making this
1777 // code platform specific, which it is by its very nature.
1778 //
1779 
1780 // Will be modified when max signal is changed to be dynamic
1781 int os::sigexitnum_pd() {
1782   return NSIG;
1783 }
1784 
1785 // a counter for each possible signal value
1786 static volatile jint pending_signals[NSIG+1] = { 0 };
1787 
1788 // Linux(POSIX) specific hand shaking semaphore.
1789 static sem_t sig_sem;
1790 
1791 void os::signal_init_pd() {
1792   // Initialize signal structures
1793   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1794 
1795   // Initialize signal semaphore
1796   int rc = ::sem_init(&sig_sem, 0, 0);
1797   guarantee(rc != -1, "sem_init failed");
1798 }
1799 
1800 void os::signal_notify(int sig) {
1801   Atomic::inc(&pending_signals[sig]);
1802   ::sem_post(&sig_sem);
1803 }
1804 
1805 static int check_pending_signals(bool wait) {
1806   Atomic::store(0, &sigint_count);
1807   for (;;) {
1808     for (int i = 0; i < NSIG + 1; i++) {
1809       jint n = pending_signals[i];
1810       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1811         return i;
1812       }
1813     }
1814     if (!wait) {
1815       return -1;
1816     }
1817     JavaThread *thread = JavaThread::current();
1818     ThreadBlockInVM tbivm(thread);
1819 
1820     bool threadIsSuspended;
1821     do {
1822       thread->set_suspend_equivalent();
1823       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1824 
1825       ::sem_wait(&sig_sem);
1826 
1827       // were we externally suspended while we were waiting?
1828       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1829       if (threadIsSuspended) {
1830         //
1831         // The semaphore has been incremented, but while we were waiting
1832         // another thread suspended us. We don't want to continue running
1833         // while suspended because that would surprise the thread that
1834         // suspended us.
1835         //
1836         ::sem_post(&sig_sem);
1837 
1838         thread->java_suspend_self();
1839       }
1840     } while (threadIsSuspended);
1841   }
1842 }
1843 
1844 int os::signal_lookup() {
1845   return check_pending_signals(false);
1846 }
1847 
1848 int os::signal_wait() {
1849   return check_pending_signals(true);
1850 }
1851 
1852 ////////////////////////////////////////////////////////////////////////////////
1853 // Virtual Memory
1854 
1855 // We need to keep small simple bookkeeping for os::reserve_memory and friends.
1856 
1857 #define VMEM_MAPPED  1
1858 #define VMEM_SHMATED 2
1859 
1860 struct vmembk_t {
1861   int type;         // 1 - mmap, 2 - shmat
1862   char* addr;
1863   size_t size;      // Real size, may be larger than usersize.
1864   size_t pagesize;  // page size of area
1865   vmembk_t* next;
1866 
1867   bool contains_addr(char* p) const {
1868     return p >= addr && p < (addr + size);
1869   }
1870 
1871   bool contains_range(char* p, size_t s) const {
1872     return contains_addr(p) && contains_addr(p + s - 1);
1873   }
1874 
1875   void print_on(outputStream* os) const {
1876     os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1877       " bytes, %d %s pages), %s",
1878       addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1879       (type == VMEM_SHMATED ? "shmat" : "mmap")
1880     );
1881   }
1882 
1883   // Check that range is a sub range of memory block (or equal to memory block);
1884   // also check that range is fully page aligned to the page size if the block.
1885   void assert_is_valid_subrange(char* p, size_t s) const {
1886     if (!contains_range(p, s)) {
1887       fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1888               "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
1889               p, p + s - 1, addr, addr + size - 1);
1890       guarantee0(false);
1891     }
1892     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1893       fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1894               " aligned to pagesize (%s)\n", p, p + s);
1895       guarantee0(false);
1896     }
1897   }
1898 };
1899 
1900 static struct {
1901   vmembk_t* first;
1902   MiscUtils::CritSect cs;
1903 } vmem;
1904 
1905 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1906   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1907   assert0(p);
1908   if (p) {
1909     MiscUtils::AutoCritSect lck(&vmem.cs);
1910     p->addr = addr; p->size = size;
1911     p->pagesize = pagesize;
1912     p->type = type;
1913     p->next = vmem.first;
1914     vmem.first = p;
1915   }
1916 }
1917 
1918 static vmembk_t* vmembk_find(char* addr) {
1919   MiscUtils::AutoCritSect lck(&vmem.cs);
1920   for (vmembk_t* p = vmem.first; p; p = p->next) {
1921     if (p->addr <= addr && (p->addr + p->size) > addr) {
1922       return p;
1923     }
1924   }
1925   return NULL;
1926 }
1927 
1928 static void vmembk_remove(vmembk_t* p0) {
1929   MiscUtils::AutoCritSect lck(&vmem.cs);
1930   assert0(p0);
1931   assert0(vmem.first); // List should not be empty.
1932   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1933     if (*pp == p0) {
1934       *pp = p0->next;
1935       ::free(p0);
1936       return;
1937     }
1938   }
1939   assert0(false); // Not found?
1940 }
1941 
1942 static void vmembk_print_on(outputStream* os) {
1943   MiscUtils::AutoCritSect lck(&vmem.cs);
1944   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1945     vmi->print_on(os);
1946     os->cr();
1947   }
1948 }
1949 
1950 // Reserve and attach a section of System V memory.
1951 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1952 // address. Failing that, it will attach the memory anywhere.
1953 // If <requested_addr> is NULL, function will attach the memory anywhere.
1954 //
1955 // <alignment_hint> is being ignored by this function. It is very probable however that the
1956 // alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
1957 // Should this be not enogh, we can put more work into it.
1958 static char* reserve_shmated_memory (
1959   size_t bytes,
1960   char* requested_addr,
1961   size_t alignment_hint) {
1962 
1963   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1964     PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1965     bytes, requested_addr, alignment_hint);
1966 
1967   // Either give me wish address or wish alignment but not both.
1968   assert0(!(requested_addr != NULL && alignment_hint != 0));
1969 
1970   // We must prevent anyone from attaching too close to the
1971   // BRK because that may cause malloc OOM.
1972   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1973     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1974       "Will attach anywhere.", requested_addr);
1975     // Act like the OS refused to attach there.
1976     requested_addr = NULL;
1977   }
1978 
1979   // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1980   // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1981   if (os::Aix::on_pase_V5R4_or_older()) {
1982     ShouldNotReachHere();
1983   }
1984 
1985   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1986   const size_t size = align_size_up(bytes, SIZE_64K);
1987 
1988   // Reserve the shared segment.
1989   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1990   if (shmid == -1) {
1991     trc("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1992     return NULL;
1993   }
1994 
1995   // Important note:
1996   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
1997   // We must right after attaching it remove it from the system. System V shm segments are global and
1998   // survive the process.
1999   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
2000 
2001   struct shmid_ds shmbuf;
2002   memset(&shmbuf, 0, sizeof(shmbuf));
2003   shmbuf.shm_pagesize = SIZE_64K;
2004   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
2005     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
2006                size / SIZE_64K, errno);
2007     // I want to know if this ever happens.
2008     assert(false, "failed to set page size for shmat");
2009   }
2010 
2011   // Now attach the shared segment.
2012   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
2013   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
2014   // were not a segment boundary.
2015   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
2016   const int errno_shmat = errno;
2017 
2018   // (A) Right after shmat and before handing shmat errors delete the shm segment.
2019   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2020     trc("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2021     assert(false, "failed to remove shared memory segment!");
2022   }
2023 
2024   // Handle shmat error. If we failed to attach, just return.
2025   if (addr == (char*)-1) {
2026     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
2027     return NULL;
2028   }
2029 
2030   // Just for info: query the real page size. In case setting the page size did not
2031   // work (see above), the system may have given us something other then 4K (LDR_CNTRL).
2032   const size_t real_pagesize = os::Aix::query_pagesize(addr);
2033   if (real_pagesize != shmbuf.shm_pagesize) {
2034     trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
2035   }
2036 
2037   if (addr) {
2038     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
2039       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
2040   } else {
2041     if (requested_addr != NULL) {
2042       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
2043     } else {
2044       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
2045     }
2046   }
2047 
2048   // book-keeping
2049   vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
2050   assert0(is_aligned_to(addr, os::vm_page_size()));
2051 
2052   return addr;
2053 }
2054 
2055 static bool release_shmated_memory(char* addr, size_t size) {
2056 
2057   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2058     addr, addr + size - 1);
2059 
2060   bool rc = false;
2061 
2062   // TODO: is there a way to verify shm size without doing bookkeeping?
2063   if (::shmdt(addr) != 0) {
2064     trcVerbose("error (%d).", errno);
2065   } else {
2066     trcVerbose("ok.");
2067     rc = true;
2068   }
2069   return rc;
2070 }
2071 
2072 static bool uncommit_shmated_memory(char* addr, size_t size) {
2073   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2074     addr, addr + size - 1);
2075 
2076   const bool rc = my_disclaim64(addr, size);
2077 
2078   if (!rc) {
2079     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2080     return false;
2081   }
2082   return true;
2083 }
2084 
2085 // Reserve memory via mmap.
2086 // If <requested_addr> is given, an attempt is made to attach at the given address.
2087 // Failing that, memory is allocated at any address.
2088 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2089 // allocate at an address aligned with the given alignment. Failing that, memory
2090 // is aligned anywhere.
2091 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2092   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2093     "alignment_hint " UINTX_FORMAT "...",
2094     bytes, requested_addr, alignment_hint);
2095 
2096   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2097   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
2098     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2099     return NULL;
2100   }
2101 
2102   // We must prevent anyone from attaching too close to the
2103   // BRK because that may cause malloc OOM.
2104   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2105     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2106       "Will attach anywhere.", requested_addr);
2107     // Act like the OS refused to attach there.
2108     requested_addr = NULL;
2109   }
2110 
2111   // Specify one or the other but not both.
2112   assert0(!(requested_addr != NULL && alignment_hint > 0));
2113 
2114   // In 64K mode, we claim the global page size (os::vm_page_size())
2115   // is 64K. This is one of the few points where that illusion may
2116   // break, because mmap() will always return memory aligned to 4K. So
2117   // we must ensure we only ever return memory aligned to 64k.
2118   if (alignment_hint) {
2119     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2120   } else {
2121     alignment_hint = os::vm_page_size();
2122   }
2123 
2124   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2125   const size_t size = align_size_up(bytes, os::vm_page_size());
2126 
2127   // alignment: Allocate memory large enough to include an aligned range of the right size and
2128   // cut off the leading and trailing waste pages.
2129   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2130   const size_t extra_size = size + alignment_hint;
2131 
2132   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2133   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2134   int flags = MAP_ANONYMOUS | MAP_SHARED;
2135 
2136   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2137   // it means if wishaddress is given but MAP_FIXED is not set.
2138   //
2139   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2140   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2141   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2142   // get clobbered.
2143   if (requested_addr != NULL) {
2144     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2145       flags |= MAP_FIXED;
2146     }
2147   }
2148 
2149   char* addr = (char*)::mmap(requested_addr, extra_size,
2150       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2151 
2152   if (addr == MAP_FAILED) {
2153     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2154     return NULL;
2155   }
2156 
2157   // Handle alignment.
2158   char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2159   const size_t waste_pre = addr_aligned - addr;
2160   char* const addr_aligned_end = addr_aligned + size;
2161   const size_t waste_post = extra_size - waste_pre - size;
2162   if (waste_pre > 0) {
2163     ::munmap(addr, waste_pre);
2164   }
2165   if (waste_post > 0) {
2166     ::munmap(addr_aligned_end, waste_post);
2167   }
2168   addr = addr_aligned;
2169 
2170   if (addr) {
2171     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2172       addr, addr + bytes, bytes);
2173   } else {
2174     if (requested_addr != NULL) {
2175       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2176     } else {
2177       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2178     }
2179   }
2180 
2181   // bookkeeping
2182   vmembk_add(addr, size, SIZE_4K, VMEM_MAPPED);
2183 
2184   // Test alignment, see above.
2185   assert0(is_aligned_to(addr, os::vm_page_size()));
2186 
2187   return addr;
2188 }
2189 
2190 static bool release_mmaped_memory(char* addr, size_t size) {
2191   assert0(is_aligned_to(addr, os::vm_page_size()));
2192   assert0(is_aligned_to(size, os::vm_page_size()));
2193 
2194   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2195     addr, addr + size - 1);
2196   bool rc = false;
2197 
2198   if (::munmap(addr, size) != 0) {
2199     trcVerbose("failed (%d)\n", errno);
2200     rc = false;
2201   } else {
2202     trcVerbose("ok.");
2203     rc = true;
2204   }
2205 
2206   return rc;
2207 }
2208 
2209 static bool uncommit_mmaped_memory(char* addr, size_t size) {
2210 
2211   assert0(is_aligned_to(addr, os::vm_page_size()));
2212   assert0(is_aligned_to(size, os::vm_page_size()));
2213 
2214   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2215     addr, addr + size - 1);
2216   bool rc = false;
2217 
2218   // Uncommit mmap memory with msync MS_INVALIDATE.
2219   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2220     trcVerbose("failed (%d)\n", errno);
2221     rc = false;
2222   } else {
2223     trcVerbose("ok.");
2224     rc = true;
2225   }
2226 
2227   return rc;
2228 }
2229 
2230 // End: shared memory bookkeeping
2231 ////////////////////////////////////////////////////////////////////////////////////////////////////
2232 
2233 int os::vm_page_size() {
2234   // Seems redundant as all get out.
2235   assert(os::Aix::page_size() != -1, "must call os::init");
2236   return os::Aix::page_size();
2237 }
2238 
2239 // Aix allocates memory by pages.
2240 int os::vm_allocation_granularity() {
2241   assert(os::Aix::page_size() != -1, "must call os::init");
2242   return os::Aix::page_size();
2243 }
2244 
2245 #ifdef PRODUCT
2246 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2247                                     int err) {
2248   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2249           ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2250           strerror(err), err);
2251 }
2252 #endif
2253 
2254 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2255                                   const char* mesg) {
2256   assert(mesg != NULL, "mesg must be specified");
2257   if (!pd_commit_memory(addr, size, exec)) {
2258     // Add extra info in product mode for vm_exit_out_of_memory():
2259     PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2260     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2261   }
2262 }
2263 
2264 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2265 
2266   assert0(is_aligned_to(addr, os::vm_page_size()));
2267   assert0(is_aligned_to(size, os::vm_page_size()));
2268 
2269   vmembk_t* const vmi = vmembk_find(addr);
2270   assert0(vmi);
2271   vmi->assert_is_valid_subrange(addr, size);
2272 
2273   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2274 
2275   return true;
2276 }
2277 
2278 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2279   return pd_commit_memory(addr, size, exec);
2280 }
2281 
2282 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2283                                   size_t alignment_hint, bool exec,
2284                                   const char* mesg) {
2285   // Alignment_hint is ignored on this OS.
2286   pd_commit_memory_or_exit(addr, size, exec, mesg);
2287 }
2288 
2289 bool os::pd_uncommit_memory(char* addr, size_t size) {
2290   assert0(is_aligned_to(addr, os::vm_page_size()));
2291   assert0(is_aligned_to(size, os::vm_page_size()));
2292 
2293   // Dynamically do different things for mmap/shmat.
2294   const vmembk_t* const vmi = vmembk_find(addr);
2295   assert0(vmi);
2296   vmi->assert_is_valid_subrange(addr, size);
2297 
2298   if (vmi->type == VMEM_SHMATED) {
2299     return uncommit_shmated_memory(addr, size);
2300   } else {
2301     return uncommit_mmaped_memory(addr, size);
2302   }
2303 }
2304 
2305 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2306   // Do not call this; no need to commit stack pages on AIX.
2307   ShouldNotReachHere();
2308   return true;
2309 }
2310 
2311 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2312   // Do not call this; no need to commit stack pages on AIX.
2313   ShouldNotReachHere();
2314   return true;
2315 }
2316 
2317 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2318 }
2319 
2320 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2321 }
2322 
2323 void os::numa_make_global(char *addr, size_t bytes) {
2324 }
2325 
2326 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2327 }
2328 
2329 bool os::numa_topology_changed() {
2330   return false;
2331 }
2332 
2333 size_t os::numa_get_groups_num() {
2334   return 1;
2335 }
2336 
2337 int os::numa_get_group_id() {
2338   return 0;
2339 }
2340 
2341 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2342   if (size > 0) {
2343     ids[0] = 0;
2344     return 1;
2345   }
2346   return 0;
2347 }
2348 
2349 bool os::get_page_info(char *start, page_info* info) {
2350   return false;
2351 }
2352 
2353 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2354   return end;
2355 }
2356 
2357 // Reserves and attaches a shared memory segment.
2358 // Will assert if a wish address is given and could not be obtained.
2359 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2360 
2361   // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2362   // thereby clobbering old mappings at that place. That is probably
2363   // not intended, never used and almost certainly an error were it
2364   // ever be used this way (to try attaching at a specified address
2365   // without clobbering old mappings an alternate API exists,
2366   // os::attempt_reserve_memory_at()).
2367   // Instead of mimicking the dangerous coding of the other platforms, here I
2368   // just ignore the request address (release) or assert(debug).
2369   assert0(requested_addr == NULL);
2370 
2371   // Always round to os::vm_page_size(), which may be larger than 4K.
2372   bytes = align_size_up(bytes, os::vm_page_size());
2373   const size_t alignment_hint0 =
2374     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2375 
2376   // In 4K mode always use mmap.
2377   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2378   if (os::vm_page_size() == SIZE_4K) {
2379     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2380   } else {
2381     if (bytes >= Use64KPagesThreshold) {
2382       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2383     } else {
2384       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2385     }
2386   }
2387 }
2388 
2389 bool os::pd_release_memory(char* addr, size_t size) {
2390 
2391   // Dynamically do different things for mmap/shmat.
2392   vmembk_t* const vmi = vmembk_find(addr);
2393   assert0(vmi);
2394 
2395   // Always round to os::vm_page_size(), which may be larger than 4K.
2396   size = align_size_up(size, os::vm_page_size());
2397   addr = (char *)align_ptr_up(addr, os::vm_page_size());
2398 
2399   bool rc = false;
2400   bool remove_bookkeeping = false;
2401   if (vmi->type == VMEM_SHMATED) {
2402     // For shmatted memory, we do:
2403     // - If user wants to release the whole range, release the memory (shmdt).
2404     // - If user only wants to release a partial range, uncommit (disclaim) that
2405     //   range. That way, at least, we do not use memory anymore (bust still page
2406     //   table space).
2407     vmi->assert_is_valid_subrange(addr, size);
2408     if (addr == vmi->addr && size == vmi->size) {
2409       rc = release_shmated_memory(addr, size);
2410       remove_bookkeeping = true;
2411     } else {
2412       rc = uncommit_shmated_memory(addr, size);
2413     }
2414   } else {
2415     // User may unmap partial regions but region has to be fully contained.
2416 #ifdef ASSERT
2417     vmi->assert_is_valid_subrange(addr, size);
2418 #endif
2419     rc = release_mmaped_memory(addr, size);
2420     remove_bookkeeping = true;
2421   }
2422 
2423   // update bookkeeping
2424   if (rc && remove_bookkeeping) {
2425     vmembk_remove(vmi);
2426   }
2427 
2428   return rc;
2429 }
2430 
2431 static bool checked_mprotect(char* addr, size_t size, int prot) {
2432 
2433   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2434   // not tell me if protection failed when trying to protect an un-protectable range.
2435   //
2436   // This means if the memory was allocated using shmget/shmat, protection wont work
2437   // but mprotect will still return 0:
2438   //
2439   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2440 
2441   bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2442 
2443   if (!rc) {
2444     const char* const s_errno = strerror(errno);
2445     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2446     return false;
2447   }
2448 
2449   // mprotect success check
2450   //
2451   // Mprotect said it changed the protection but can I believe it?
2452   //
2453   // To be sure I need to check the protection afterwards. Try to
2454   // read from protected memory and check whether that causes a segfault.
2455   //
2456   if (!os::Aix::xpg_sus_mode()) {
2457 
2458     if (CanUseSafeFetch32()) {
2459 
2460       const bool read_protected =
2461         (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2462          SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2463 
2464       if (prot & PROT_READ) {
2465         rc = !read_protected;
2466       } else {
2467         rc = read_protected;
2468       }
2469     }
2470   }
2471   if (!rc) {
2472     assert(false, "mprotect failed.");
2473   }
2474   return rc;
2475 }
2476 
2477 // Set protections specified
2478 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2479   unsigned int p = 0;
2480   switch (prot) {
2481   case MEM_PROT_NONE: p = PROT_NONE; break;
2482   case MEM_PROT_READ: p = PROT_READ; break;
2483   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2484   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2485   default:
2486     ShouldNotReachHere();
2487   }
2488   // is_committed is unused.
2489   return checked_mprotect(addr, size, p);
2490 }
2491 
2492 bool os::guard_memory(char* addr, size_t size) {
2493   return checked_mprotect(addr, size, PROT_NONE);
2494 }
2495 
2496 bool os::unguard_memory(char* addr, size_t size) {
2497   return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2498 }
2499 
2500 // Large page support
2501 
2502 static size_t _large_page_size = 0;
2503 
2504 // Enable large page support if OS allows that.
2505 void os::large_page_init() {
2506   return; // Nothing to do. See query_multipage_support and friends.
2507 }
2508 
2509 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2510   // "exec" is passed in but not used. Creating the shared image for
2511   // the code cache doesn't have an SHM_X executable permission to check.
2512   Unimplemented();
2513   return 0;
2514 }
2515 
2516 bool os::release_memory_special(char* base, size_t bytes) {
2517   // Detaching the SHM segment will also delete it, see reserve_memory_special().
2518   Unimplemented();
2519   return false;
2520 }
2521 
2522 size_t os::large_page_size() {
2523   return _large_page_size;
2524 }
2525 
2526 bool os::can_commit_large_page_memory() {
2527   // Does not matter, we do not support huge pages.
2528   return false;
2529 }
2530 
2531 bool os::can_execute_large_page_memory() {
2532   // Does not matter, we do not support huge pages.
2533   return false;
2534 }
2535 
2536 // Reserve memory at an arbitrary address, only if that area is
2537 // available (and not reserved for something else).
2538 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2539   char* addr = NULL;
2540 
2541   // Always round to os::vm_page_size(), which may be larger than 4K.
2542   bytes = align_size_up(bytes, os::vm_page_size());
2543 
2544   // In 4K mode always use mmap.
2545   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2546   if (os::vm_page_size() == SIZE_4K) {
2547     return reserve_mmaped_memory(bytes, requested_addr, 0);
2548   } else {
2549     if (bytes >= Use64KPagesThreshold) {
2550       return reserve_shmated_memory(bytes, requested_addr, 0);
2551     } else {
2552       return reserve_mmaped_memory(bytes, requested_addr, 0);
2553     }
2554   }
2555 
2556   return addr;
2557 }
2558 
2559 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2560   return ::read(fd, buf, nBytes);
2561 }
2562 
2563 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2564   return ::pread(fd, buf, nBytes, offset);
2565 }
2566 
2567 void os::naked_short_sleep(jlong ms) {
2568   struct timespec req;
2569 
2570   assert(ms < 1000, "Un-interruptable sleep, short time use only");
2571   req.tv_sec = 0;
2572   if (ms > 0) {
2573     req.tv_nsec = (ms % 1000) * 1000000;
2574   }
2575   else {
2576     req.tv_nsec = 1;
2577   }
2578 
2579   nanosleep(&req, NULL);
2580 
2581   return;
2582 }
2583 
2584 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2585 void os::infinite_sleep() {
2586   while (true) {    // sleep forever ...
2587     ::sleep(100);   // ... 100 seconds at a time
2588   }
2589 }
2590 
2591 // Used to convert frequent JVM_Yield() to nops
2592 bool os::dont_yield() {
2593   return DontYieldALot;
2594 }
2595 
2596 void os::naked_yield() {
2597   sched_yield();
2598 }
2599 
2600 ////////////////////////////////////////////////////////////////////////////////
2601 // thread priority support
2602 
2603 // From AIX manpage to pthread_setschedparam
2604 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2605 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2606 //
2607 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2608 // range from 40 to 80, where 40 is the least favored priority and 80
2609 // is the most favored."
2610 //
2611 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2612 // scheduling there; however, this still leaves iSeries.)
2613 //
2614 // We use the same values for AIX and PASE.
2615 int os::java_to_os_priority[CriticalPriority + 1] = {
2616   54,             // 0 Entry should never be used
2617 
2618   55,             // 1 MinPriority
2619   55,             // 2
2620   56,             // 3
2621 
2622   56,             // 4
2623   57,             // 5 NormPriority
2624   57,             // 6
2625 
2626   58,             // 7
2627   58,             // 8
2628   59,             // 9 NearMaxPriority
2629 
2630   60,             // 10 MaxPriority
2631 
2632   60              // 11 CriticalPriority
2633 };
2634 
2635 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2636   if (!UseThreadPriorities) return OS_OK;
2637   pthread_t thr = thread->osthread()->pthread_id();
2638   int policy = SCHED_OTHER;
2639   struct sched_param param;
2640   param.sched_priority = newpri;
2641   int ret = pthread_setschedparam(thr, policy, &param);
2642 
2643   if (ret != 0) {
2644     trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2645         (int)thr, newpri, ret, strerror(ret));
2646   }
2647   return (ret == 0) ? OS_OK : OS_ERR;
2648 }
2649 
2650 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2651   if (!UseThreadPriorities) {
2652     *priority_ptr = java_to_os_priority[NormPriority];
2653     return OS_OK;
2654   }
2655   pthread_t thr = thread->osthread()->pthread_id();
2656   int policy = SCHED_OTHER;
2657   struct sched_param param;
2658   int ret = pthread_getschedparam(thr, &policy, &param);
2659   *priority_ptr = param.sched_priority;
2660 
2661   return (ret == 0) ? OS_OK : OS_ERR;
2662 }
2663 
2664 // Hint to the underlying OS that a task switch would not be good.
2665 // Void return because it's a hint and can fail.
2666 void os::hint_no_preempt() {}
2667 
2668 ////////////////////////////////////////////////////////////////////////////////
2669 // suspend/resume support
2670 
2671 //  the low-level signal-based suspend/resume support is a remnant from the
2672 //  old VM-suspension that used to be for java-suspension, safepoints etc,
2673 //  within hotspot. Now there is a single use-case for this:
2674 //    - calling get_thread_pc() on the VMThread by the flat-profiler task
2675 //      that runs in the watcher thread.
2676 //  The remaining code is greatly simplified from the more general suspension
2677 //  code that used to be used.
2678 //
2679 //  The protocol is quite simple:
2680 //  - suspend:
2681 //      - sends a signal to the target thread
2682 //      - polls the suspend state of the osthread using a yield loop
2683 //      - target thread signal handler (SR_handler) sets suspend state
2684 //        and blocks in sigsuspend until continued
2685 //  - resume:
2686 //      - sets target osthread state to continue
2687 //      - sends signal to end the sigsuspend loop in the SR_handler
2688 //
2689 //  Note that the SR_lock plays no role in this suspend/resume protocol.
2690 //
2691 
2692 static void resume_clear_context(OSThread *osthread) {
2693   osthread->set_ucontext(NULL);
2694   osthread->set_siginfo(NULL);
2695 }
2696 
2697 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2698   osthread->set_ucontext(context);
2699   osthread->set_siginfo(siginfo);
2700 }
2701 
2702 //
2703 // Handler function invoked when a thread's execution is suspended or
2704 // resumed. We have to be careful that only async-safe functions are
2705 // called here (Note: most pthread functions are not async safe and
2706 // should be avoided.)
2707 //
2708 // Note: sigwait() is a more natural fit than sigsuspend() from an
2709 // interface point of view, but sigwait() prevents the signal hander
2710 // from being run. libpthread would get very confused by not having
2711 // its signal handlers run and prevents sigwait()'s use with the
2712 // mutex granting granting signal.
2713 //
2714 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
2715 //
2716 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2717   // Save and restore errno to avoid confusing native code with EINTR
2718   // after sigsuspend.
2719   int old_errno = errno;
2720 
2721   Thread* thread = Thread::current();
2722   OSThread* osthread = thread->osthread();
2723   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2724 
2725   os::SuspendResume::State current = osthread->sr.state();
2726   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2727     suspend_save_context(osthread, siginfo, context);
2728 
2729     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2730     os::SuspendResume::State state = osthread->sr.suspended();
2731     if (state == os::SuspendResume::SR_SUSPENDED) {
2732       sigset_t suspend_set;  // signals for sigsuspend()
2733 
2734       // get current set of blocked signals and unblock resume signal
2735       pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2736       sigdelset(&suspend_set, SR_signum);
2737 
2738       // wait here until we are resumed
2739       while (1) {
2740         sigsuspend(&suspend_set);
2741 
2742         os::SuspendResume::State result = osthread->sr.running();
2743         if (result == os::SuspendResume::SR_RUNNING) {
2744           break;
2745         }
2746       }
2747 
2748     } else if (state == os::SuspendResume::SR_RUNNING) {
2749       // request was cancelled, continue
2750     } else {
2751       ShouldNotReachHere();
2752     }
2753 
2754     resume_clear_context(osthread);
2755   } else if (current == os::SuspendResume::SR_RUNNING) {
2756     // request was cancelled, continue
2757   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2758     // ignore
2759   } else {
2760     ShouldNotReachHere();
2761   }
2762 
2763   errno = old_errno;
2764 }
2765 
2766 static int SR_initialize() {
2767   struct sigaction act;
2768   char *s;
2769   // Get signal number to use for suspend/resume
2770   if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
2771     int sig = ::strtol(s, 0, 10);
2772     if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
2773         sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
2774       SR_signum = sig;
2775     } else {
2776       warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
2777               sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
2778     }
2779   }
2780 
2781   assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
2782         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
2783 
2784   sigemptyset(&SR_sigset);
2785   sigaddset(&SR_sigset, SR_signum);
2786 
2787   // Set up signal handler for suspend/resume.
2788   act.sa_flags = SA_RESTART|SA_SIGINFO;
2789   act.sa_handler = (void (*)(int)) SR_handler;
2790 
2791   // SR_signum is blocked by default.
2792   // 4528190 - We also need to block pthread restart signal (32 on all
2793   // supported Linux platforms). Note that LinuxThreads need to block
2794   // this signal for all threads to work properly. So we don't have
2795   // to use hard-coded signal number when setting up the mask.
2796   pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
2797 
2798   if (sigaction(SR_signum, &act, 0) == -1) {
2799     return -1;
2800   }
2801 
2802   // Save signal flag
2803   os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
2804   return 0;
2805 }
2806 
2807 static int SR_finalize() {
2808   return 0;
2809 }
2810 
2811 static int sr_notify(OSThread* osthread) {
2812   int status = pthread_kill(osthread->pthread_id(), SR_signum);
2813   assert_status(status == 0, status, "pthread_kill");
2814   return status;
2815 }
2816 
2817 // "Randomly" selected value for how long we want to spin
2818 // before bailing out on suspending a thread, also how often
2819 // we send a signal to a thread we want to resume
2820 static const int RANDOMLY_LARGE_INTEGER = 1000000;
2821 static const int RANDOMLY_LARGE_INTEGER2 = 100;
2822 
2823 // returns true on success and false on error - really an error is fatal
2824 // but this seems the normal response to library errors
2825 static bool do_suspend(OSThread* osthread) {
2826   assert(osthread->sr.is_running(), "thread should be running");
2827   // mark as suspended and send signal
2828 
2829   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
2830     // failed to switch, state wasn't running?
2831     ShouldNotReachHere();
2832     return false;
2833   }
2834 
2835   if (sr_notify(osthread) != 0) {
2836     // try to cancel, switch to running
2837 
2838     os::SuspendResume::State result = osthread->sr.cancel_suspend();
2839     if (result == os::SuspendResume::SR_RUNNING) {
2840       // cancelled
2841       return false;
2842     } else if (result == os::SuspendResume::SR_SUSPENDED) {
2843       // somehow managed to suspend
2844       return true;
2845     } else {
2846       ShouldNotReachHere();
2847       return false;
2848     }
2849   }
2850 
2851   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
2852 
2853   for (int n = 0; !osthread->sr.is_suspended(); n++) {
2854     for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
2855       os::naked_yield();
2856     }
2857 
2858     // timeout, try to cancel the request
2859     if (n >= RANDOMLY_LARGE_INTEGER) {
2860       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
2861       if (cancelled == os::SuspendResume::SR_RUNNING) {
2862         return false;
2863       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
2864         return true;
2865       } else {
2866         ShouldNotReachHere();
2867         return false;
2868       }
2869     }
2870   }
2871 
2872   guarantee(osthread->sr.is_suspended(), "Must be suspended");
2873   return true;
2874 }
2875 
2876 static void do_resume(OSThread* osthread) {
2877   //assert(osthread->sr.is_suspended(), "thread should be suspended");
2878 
2879   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
2880     // failed to switch to WAKEUP_REQUEST
2881     ShouldNotReachHere();
2882     return;
2883   }
2884 
2885   while (!osthread->sr.is_running()) {
2886     if (sr_notify(osthread) == 0) {
2887       for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
2888         for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
2889           os::naked_yield();
2890         }
2891       }
2892     } else {
2893       ShouldNotReachHere();
2894     }
2895   }
2896 
2897   guarantee(osthread->sr.is_running(), "Must be running!");
2898 }
2899 
2900 ///////////////////////////////////////////////////////////////////////////////////
2901 // signal handling (except suspend/resume)
2902 
2903 // This routine may be used by user applications as a "hook" to catch signals.
2904 // The user-defined signal handler must pass unrecognized signals to this
2905 // routine, and if it returns true (non-zero), then the signal handler must
2906 // return immediately. If the flag "abort_if_unrecognized" is true, then this
2907 // routine will never retun false (zero), but instead will execute a VM panic
2908 // routine kill the process.
2909 //
2910 // If this routine returns false, it is OK to call it again. This allows
2911 // the user-defined signal handler to perform checks either before or after
2912 // the VM performs its own checks. Naturally, the user code would be making
2913 // a serious error if it tried to handle an exception (such as a null check
2914 // or breakpoint) that the VM was generating for its own correct operation.
2915 //
2916 // This routine may recognize any of the following kinds of signals:
2917 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2918 // It should be consulted by handlers for any of those signals.
2919 //
2920 // The caller of this routine must pass in the three arguments supplied
2921 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
2922 // field of the structure passed to sigaction(). This routine assumes that
2923 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2924 //
2925 // Note that the VM will print warnings if it detects conflicting signal
2926 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2927 //
2928 extern "C" JNIEXPORT int
2929 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2930 
2931 // Set thread signal mask (for some reason on AIX sigthreadmask() seems
2932 // to be the thing to call; documentation is not terribly clear about whether
2933 // pthread_sigmask also works, and if it does, whether it does the same.
2934 bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
2935   const int rc = ::pthread_sigmask(how, set, oset);
2936   // return value semantics differ slightly for error case:
2937   // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno
2938   // (so, pthread_sigmask is more theadsafe for error handling)
2939   // But success is always 0.
2940   return rc == 0 ? true : false;
2941 }
2942 
2943 // Function to unblock all signals which are, according
2944 // to POSIX, typical program error signals. If they happen while being blocked,
2945 // they typically will bring down the process immediately.
2946 bool unblock_program_error_signals() {
2947   sigset_t set;
2948   ::sigemptyset(&set);
2949   ::sigaddset(&set, SIGILL);
2950   ::sigaddset(&set, SIGBUS);
2951   ::sigaddset(&set, SIGFPE);
2952   ::sigaddset(&set, SIGSEGV);
2953   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2954 }
2955 
2956 // Renamed from 'signalHandler' to avoid collision with other shared libs.
2957 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2958   assert(info != NULL && uc != NULL, "it must be old kernel");
2959 
2960   // Never leave program error signals blocked;
2961   // on all our platforms they would bring down the process immediately when
2962   // getting raised while being blocked.
2963   unblock_program_error_signals();
2964 
2965   JVM_handle_aix_signal(sig, info, uc, true);
2966 }
2967 
2968 // This boolean allows users to forward their own non-matching signals
2969 // to JVM_handle_aix_signal, harmlessly.
2970 bool os::Aix::signal_handlers_are_installed = false;
2971 
2972 // For signal-chaining
2973 struct sigaction sigact[NSIG];
2974 sigset_t sigs;
2975 bool os::Aix::libjsig_is_loaded = false;
2976 typedef struct sigaction *(*get_signal_t)(int);
2977 get_signal_t os::Aix::get_signal_action = NULL;
2978 
2979 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2980   struct sigaction *actp = NULL;
2981 
2982   if (libjsig_is_loaded) {
2983     // Retrieve the old signal handler from libjsig
2984     actp = (*get_signal_action)(sig);
2985   }
2986   if (actp == NULL) {
2987     // Retrieve the preinstalled signal handler from jvm
2988     actp = get_preinstalled_handler(sig);
2989   }
2990 
2991   return actp;
2992 }
2993 
2994 static bool call_chained_handler(struct sigaction *actp, int sig,
2995                                  siginfo_t *siginfo, void *context) {
2996   // Call the old signal handler
2997   if (actp->sa_handler == SIG_DFL) {
2998     // It's more reasonable to let jvm treat it as an unexpected exception
2999     // instead of taking the default action.
3000     return false;
3001   } else if (actp->sa_handler != SIG_IGN) {
3002     if ((actp->sa_flags & SA_NODEFER) == 0) {
3003       // automaticlly block the signal
3004       sigaddset(&(actp->sa_mask), sig);
3005     }
3006 
3007     sa_handler_t hand = NULL;
3008     sa_sigaction_t sa = NULL;
3009     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3010     // retrieve the chained handler
3011     if (siginfo_flag_set) {
3012       sa = actp->sa_sigaction;
3013     } else {
3014       hand = actp->sa_handler;
3015     }
3016 
3017     if ((actp->sa_flags & SA_RESETHAND) != 0) {
3018       actp->sa_handler = SIG_DFL;
3019     }
3020 
3021     // try to honor the signal mask
3022     sigset_t oset;
3023     pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3024 
3025     // call into the chained handler
3026     if (siginfo_flag_set) {
3027       (*sa)(sig, siginfo, context);
3028     } else {
3029       (*hand)(sig);
3030     }
3031 
3032     // restore the signal mask
3033     pthread_sigmask(SIG_SETMASK, &oset, 0);
3034   }
3035   // Tell jvm's signal handler the signal is taken care of.
3036   return true;
3037 }
3038 
3039 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3040   bool chained = false;
3041   // signal-chaining
3042   if (UseSignalChaining) {
3043     struct sigaction *actp = get_chained_signal_action(sig);
3044     if (actp != NULL) {
3045       chained = call_chained_handler(actp, sig, siginfo, context);
3046     }
3047   }
3048   return chained;
3049 }
3050 
3051 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3052   if (sigismember(&sigs, sig)) {
3053     return &sigact[sig];
3054   }
3055   return NULL;
3056 }
3057 
3058 void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3059   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3060   sigact[sig] = oldAct;
3061   sigaddset(&sigs, sig);
3062 }
3063 
3064 // for diagnostic
3065 int sigflags[NSIG];
3066 
3067 int os::Aix::get_our_sigflags(int sig) {
3068   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3069   return sigflags[sig];
3070 }
3071 
3072 void os::Aix::set_our_sigflags(int sig, int flags) {
3073   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3074   if (sig > 0 && sig < NSIG) {
3075     sigflags[sig] = flags;
3076   }
3077 }
3078 
3079 void os::Aix::set_signal_handler(int sig, bool set_installed) {
3080   // Check for overwrite.
3081   struct sigaction oldAct;
3082   sigaction(sig, (struct sigaction*)NULL, &oldAct);
3083 
3084   void* oldhand = oldAct.sa_sigaction
3085     ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3086     : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3087   // Renamed 'signalHandler' to avoid collision with other shared libs.
3088   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3089       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3090       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3091     if (AllowUserSignalHandlers || !set_installed) {
3092       // Do not overwrite; user takes responsibility to forward to us.
3093       return;
3094     } else if (UseSignalChaining) {
3095       // save the old handler in jvm
3096       save_preinstalled_handler(sig, oldAct);
3097       // libjsig also interposes the sigaction() call below and saves the
3098       // old sigaction on it own.
3099     } else {
3100       fatal("Encountered unexpected pre-existing sigaction handler "
3101             "%#lx for signal %d.", (long)oldhand, sig);
3102     }
3103   }
3104 
3105   struct sigaction sigAct;
3106   sigfillset(&(sigAct.sa_mask));
3107   if (!set_installed) {
3108     sigAct.sa_handler = SIG_DFL;
3109     sigAct.sa_flags = SA_RESTART;
3110   } else {
3111     // Renamed 'signalHandler' to avoid collision with other shared libs.
3112     sigAct.sa_sigaction = javaSignalHandler;
3113     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3114   }
3115   // Save flags, which are set by ours
3116   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3117   sigflags[sig] = sigAct.sa_flags;
3118 
3119   int ret = sigaction(sig, &sigAct, &oldAct);
3120   assert(ret == 0, "check");
3121 
3122   void* oldhand2 = oldAct.sa_sigaction
3123                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3124                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3125   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3126 }
3127 
3128 // install signal handlers for signals that HotSpot needs to
3129 // handle in order to support Java-level exception handling.
3130 void os::Aix::install_signal_handlers() {
3131   if (!signal_handlers_are_installed) {
3132     signal_handlers_are_installed = true;
3133 
3134     // signal-chaining
3135     typedef void (*signal_setting_t)();
3136     signal_setting_t begin_signal_setting = NULL;
3137     signal_setting_t end_signal_setting = NULL;
3138     begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3139                              dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3140     if (begin_signal_setting != NULL) {
3141       end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3142                              dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3143       get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3144                             dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3145       libjsig_is_loaded = true;
3146       assert(UseSignalChaining, "should enable signal-chaining");
3147     }
3148     if (libjsig_is_loaded) {
3149       // Tell libjsig jvm is setting signal handlers.
3150       (*begin_signal_setting)();
3151     }
3152 
3153     ::sigemptyset(&sigs);
3154     set_signal_handler(SIGSEGV, true);
3155     set_signal_handler(SIGPIPE, true);
3156     set_signal_handler(SIGBUS, true);
3157     set_signal_handler(SIGILL, true);
3158     set_signal_handler(SIGFPE, true);
3159     set_signal_handler(SIGTRAP, true);
3160     set_signal_handler(SIGXFSZ, true);
3161     set_signal_handler(SIGDANGER, true);
3162 
3163     if (libjsig_is_loaded) {
3164       // Tell libjsig jvm finishes setting signal handlers.
3165       (*end_signal_setting)();
3166     }
3167 
3168     // We don't activate signal checker if libjsig is in place, we trust ourselves
3169     // and if UserSignalHandler is installed all bets are off.
3170     // Log that signal checking is off only if -verbose:jni is specified.
3171     if (CheckJNICalls) {
3172       if (libjsig_is_loaded) {
3173         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3174         check_signals = false;
3175       }
3176       if (AllowUserSignalHandlers) {
3177         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3178         check_signals = false;
3179       }
3180       // Need to initialize check_signal_done.
3181       ::sigemptyset(&check_signal_done);
3182     }
3183   }
3184 }
3185 
3186 static const char* get_signal_handler_name(address handler,
3187                                            char* buf, int buflen) {
3188   int offset;
3189   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3190   if (found) {
3191     // skip directory names
3192     const char *p1, *p2;
3193     p1 = buf;
3194     size_t len = strlen(os::file_separator());
3195     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3196     // The way os::dll_address_to_library_name is implemented on Aix
3197     // right now, it always returns -1 for the offset which is not
3198     // terribly informative.
3199     // Will fix that. For now, omit the offset.
3200     jio_snprintf(buf, buflen, "%s", p1);
3201   } else {
3202     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3203   }
3204   return buf;
3205 }
3206 
3207 static void print_signal_handler(outputStream* st, int sig,
3208                                  char* buf, size_t buflen) {
3209   struct sigaction sa;
3210   sigaction(sig, NULL, &sa);
3211 
3212   st->print("%s: ", os::exception_name(sig, buf, buflen));
3213 
3214   address handler = (sa.sa_flags & SA_SIGINFO)
3215     ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3216     : CAST_FROM_FN_PTR(address, sa.sa_handler);
3217 
3218   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3219     st->print("SIG_DFL");
3220   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3221     st->print("SIG_IGN");
3222   } else {
3223     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3224   }
3225 
3226   // Print readable mask.
3227   st->print(", sa_mask[0]=");
3228   os::Posix::print_signal_set_short(st, &sa.sa_mask);
3229 
3230   address rh = VMError::get_resetted_sighandler(sig);
3231   // May be, handler was resetted by VMError?
3232   if (rh != NULL) {
3233     handler = rh;
3234     sa.sa_flags = VMError::get_resetted_sigflags(sig);
3235   }
3236 
3237   // Print textual representation of sa_flags.
3238   st->print(", sa_flags=");
3239   os::Posix::print_sa_flags(st, sa.sa_flags);
3240 
3241   // Check: is it our handler?
3242   if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3243       handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
3244     // It is our signal handler.
3245     // Check for flags, reset system-used one!
3246     if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
3247       st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
3248                 os::Aix::get_our_sigflags(sig));
3249     }
3250   }
3251   st->cr();
3252 }
3253 
3254 #define DO_SIGNAL_CHECK(sig) \
3255   if (!sigismember(&check_signal_done, sig)) \
3256     os::Aix::check_signal_handler(sig)
3257 
3258 // This method is a periodic task to check for misbehaving JNI applications
3259 // under CheckJNI, we can add any periodic checks here
3260 
3261 void os::run_periodic_checks() {
3262 
3263   if (check_signals == false) return;
3264 
3265   // SEGV and BUS if overridden could potentially prevent
3266   // generation of hs*.log in the event of a crash, debugging
3267   // such a case can be very challenging, so we absolutely
3268   // check the following for a good measure:
3269   DO_SIGNAL_CHECK(SIGSEGV);
3270   DO_SIGNAL_CHECK(SIGILL);
3271   DO_SIGNAL_CHECK(SIGFPE);
3272   DO_SIGNAL_CHECK(SIGBUS);
3273   DO_SIGNAL_CHECK(SIGPIPE);
3274   DO_SIGNAL_CHECK(SIGXFSZ);
3275   if (UseSIGTRAP) {
3276     DO_SIGNAL_CHECK(SIGTRAP);
3277   }
3278   DO_SIGNAL_CHECK(SIGDANGER);
3279 
3280   // ReduceSignalUsage allows the user to override these handlers
3281   // see comments at the very top and jvm_solaris.h
3282   if (!ReduceSignalUsage) {
3283     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3284     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3285     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3286     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3287   }
3288 
3289   DO_SIGNAL_CHECK(SR_signum);
3290 }
3291 
3292 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3293 
3294 static os_sigaction_t os_sigaction = NULL;
3295 
3296 void os::Aix::check_signal_handler(int sig) {
3297   char buf[O_BUFLEN];
3298   address jvmHandler = NULL;
3299 
3300   struct sigaction act;
3301   if (os_sigaction == NULL) {
3302     // only trust the default sigaction, in case it has been interposed
3303     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3304     if (os_sigaction == NULL) return;
3305   }
3306 
3307   os_sigaction(sig, (struct sigaction*)NULL, &act);
3308 
3309   address thisHandler = (act.sa_flags & SA_SIGINFO)
3310     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3311     : CAST_FROM_FN_PTR(address, act.sa_handler);
3312 
3313   switch(sig) {
3314   case SIGSEGV:
3315   case SIGBUS:
3316   case SIGFPE:
3317   case SIGPIPE:
3318   case SIGILL:
3319   case SIGXFSZ:
3320     // Renamed 'signalHandler' to avoid collision with other shared libs.
3321     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3322     break;
3323 
3324   case SHUTDOWN1_SIGNAL:
3325   case SHUTDOWN2_SIGNAL:
3326   case SHUTDOWN3_SIGNAL:
3327   case BREAK_SIGNAL:
3328     jvmHandler = (address)user_handler();
3329     break;
3330 
3331   default:
3332     if (sig == SR_signum) {
3333       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3334     } else {
3335       return;
3336     }
3337     break;
3338   }
3339 
3340   if (thisHandler != jvmHandler) {
3341     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3342     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3343     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3344     // No need to check this sig any longer
3345     sigaddset(&check_signal_done, sig);
3346     // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
3347     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3348       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3349                     exception_name(sig, buf, O_BUFLEN));
3350     }
3351   } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3352     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3353     tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
3354     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
3355     // No need to check this sig any longer
3356     sigaddset(&check_signal_done, sig);
3357   }
3358 
3359   // Dump all the signal
3360   if (sigismember(&check_signal_done, sig)) {
3361     print_signal_handlers(tty, buf, O_BUFLEN);
3362   }
3363 }
3364 
3365 extern bool signal_name(int signo, char* buf, size_t len);
3366 
3367 const char* os::exception_name(int exception_code, char* buf, size_t size) {
3368   if (0 < exception_code && exception_code <= SIGRTMAX) {
3369     // signal
3370     if (!signal_name(exception_code, buf, size)) {
3371       jio_snprintf(buf, size, "SIG%d", exception_code);
3372     }
3373     return buf;
3374   } else {
3375     return NULL;
3376   }
3377 }
3378 
3379 // To install functions for atexit system call
3380 extern "C" {
3381   static void perfMemory_exit_helper() {
3382     perfMemory_exit();
3383   }
3384 }
3385 
3386 // This is called _before_ the most of global arguments have been parsed.
3387 void os::init(void) {
3388   // This is basic, we want to know if that ever changes.
3389   // (Shared memory boundary is supposed to be a 256M aligned.)
3390   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3391 
3392   // First off, we need to know whether we run on AIX or PASE, and
3393   // the OS level we run on.
3394   os::Aix::initialize_os_info();
3395 
3396   // Scan environment (SPEC1170 behaviour, etc).
3397   os::Aix::scan_environment();
3398 
3399   // Check which pages are supported by AIX.
3400   query_multipage_support();
3401 
3402   // Act like we only have one page size by eliminating corner cases which
3403   // we did not support very well anyway.
3404   // We have two input conditions:
3405   // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3406   //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3407   //    setting.
3408   //    Data segment page size is important for us because it defines the thread stack page
3409   //    size, which is needed for guard page handling, stack banging etc.
3410   // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3411   //    and should be allocated with 64k pages.
3412   //
3413   // So, we do the following:
3414   // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
3415   // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
3416   // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
3417   // 64k          no              --- AIX 5.2 ? ---
3418   // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)
3419 
3420   // We explicitly leave no option to change page size, because only upgrading would work,
3421   // not downgrading (if stack page size is 64k you cannot pretend its 4k).
3422 
3423   if (g_multipage_support.datapsize == SIZE_4K) {
3424     // datapsize = 4K. Data segment, thread stacks are 4K paged.
3425     if (g_multipage_support.can_use_64K_pages) {
3426       // .. but we are able to use 64K pages dynamically.
3427       // This would be typical for java launchers which are not linked
3428       // with datapsize=64K (like, any other launcher but our own).
3429       //
3430       // In this case it would be smart to allocate the java heap with 64K
3431       // to get the performance benefit, and to fake 64k pages for the
3432       // data segment (when dealing with thread stacks).
3433       //
3434       // However, leave a possibility to downgrade to 4K, using
3435       // -XX:-Use64KPages.
3436       if (Use64KPages) {
3437         trcVerbose("64K page mode (faked for data segment)");
3438         Aix::_page_size = SIZE_64K;
3439       } else {
3440         trcVerbose("4K page mode (Use64KPages=off)");
3441         Aix::_page_size = SIZE_4K;
3442       }
3443     } else {
3444       // .. and not able to allocate 64k pages dynamically. Here, just
3445       // fall back to 4K paged mode and use mmap for everything.
3446       trcVerbose("4K page mode");
3447       Aix::_page_size = SIZE_4K;
3448       FLAG_SET_ERGO(bool, Use64KPages, false);
3449     }
3450   } else {
3451     // datapsize = 64k. Data segment, thread stacks are 64k paged.
3452     //   This normally means that we can allocate 64k pages dynamically.
3453     //   (There is one special case where this may be false: EXTSHM=on.
3454     //    but we decided to not support that mode).
3455     assert0(g_multipage_support.can_use_64K_pages);
3456     Aix::_page_size = SIZE_64K;
3457     trcVerbose("64K page mode");
3458     FLAG_SET_ERGO(bool, Use64KPages, true);
3459   }
3460 
3461   // Short-wire stack page size to base page size; if that works, we just remove
3462   // that stack page size altogether.
3463   Aix::_stack_page_size = Aix::_page_size;
3464 
3465   // For now UseLargePages is just ignored.
3466   FLAG_SET_ERGO(bool, UseLargePages, false);
3467   _page_sizes[0] = 0;
3468 
3469   // debug trace
3470   trcVerbose("os::vm_page_size %s\n", describe_pagesize(os::vm_page_size()));
3471 
3472   // Next, we need to initialize libo4 and libperfstat libraries.
3473   if (os::Aix::on_pase()) {
3474     os::Aix::initialize_libo4();
3475   } else {
3476     os::Aix::initialize_libperfstat();
3477   }
3478 
3479   // Reset the perfstat information provided by ODM.
3480   if (os::Aix::on_aix()) {
3481     libperfstat::perfstat_reset();
3482   }
3483 
3484   // Now initialze basic system properties. Note that for some of the values we
3485   // need libperfstat etc.
3486   os::Aix::initialize_system_info();
3487 
3488   _initial_pid = getpid();
3489 
3490   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3491 
3492   init_random(1234567);
3493 
3494   ThreadCritical::initialize();
3495 
3496   // Main_thread points to the aboriginal thread.
3497   Aix::_main_thread = pthread_self();
3498 
3499   initial_time_count = os::elapsed_counter();
3500 
3501   // If the pagesize of the VM is greater than 8K determine the appropriate
3502   // number of initial guard pages. The user can change this with the
3503   // command line arguments, if needed.
3504   if (vm_page_size() > (int)Aix::vm_default_page_size()) {
3505     StackYellowPages = 1;
3506     StackRedPages = 1;
3507     StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
3508   }
3509 }
3510 
3511 // This is called _after_ the global arguments have been parsed.
3512 jint os::init_2(void) {
3513 
3514   trcVerbose("processor count: %d", os::_processor_count);
3515   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3516 
3517   // Initially build up the loaded dll map.
3518   LoadedLibraries::reload();
3519 
3520   const int page_size = Aix::page_size();
3521   const int map_size = page_size;
3522 
3523   address map_address = (address) MAP_FAILED;
3524   const int prot  = PROT_READ;
3525   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3526 
3527   // Use optimized addresses for the polling page,
3528   // e.g. map it to a special 32-bit address.
3529   if (OptimizePollingPageLocation) {
3530     // architecture-specific list of address wishes:
3531     address address_wishes[] = {
3532       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3533       // PPC64: all address wishes are non-negative 32 bit values where
3534       // the lower 16 bits are all zero. we can load these addresses
3535       // with a single ppc_lis instruction.
3536       (address) 0x30000000, (address) 0x31000000,
3537       (address) 0x32000000, (address) 0x33000000,
3538       (address) 0x40000000, (address) 0x41000000,
3539       (address) 0x42000000, (address) 0x43000000,
3540       (address) 0x50000000, (address) 0x51000000,
3541       (address) 0x52000000, (address) 0x53000000,
3542       (address) 0x60000000, (address) 0x61000000,
3543       (address) 0x62000000, (address) 0x63000000
3544     };
3545     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3546 
3547     // iterate over the list of address wishes:
3548     for (int i=0; i<address_wishes_length; i++) {
3549       // Try to map with current address wish.
3550       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3551       // fail if the address is already mapped.
3552       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3553                                      map_size, prot,
3554                                      flags | MAP_FIXED,
3555                                      -1, 0);
3556       if (Verbose) {
3557         fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3558                 address_wishes[i], map_address + (ssize_t)page_size);
3559       }
3560 
3561       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3562         // Map succeeded and map_address is at wished address, exit loop.
3563         break;
3564       }
3565 
3566       if (map_address != (address) MAP_FAILED) {
3567         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3568         ::munmap(map_address, map_size);
3569         map_address = (address) MAP_FAILED;
3570       }
3571       // Map failed, continue loop.
3572     }
3573   } // end OptimizePollingPageLocation
3574 
3575   if (map_address == (address) MAP_FAILED) {
3576     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3577   }
3578   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3579   os::set_polling_page(map_address);
3580 
3581   if (!UseMembar) {
3582     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3583     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3584     os::set_memory_serialize_page(mem_serialize_page);
3585 
3586 #ifndef PRODUCT
3587     if (Verbose && PrintMiscellaneous) {
3588       tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3589     }
3590 #endif
3591   }
3592 
3593   // initialize suspend/resume support - must do this before signal_sets_init()
3594   if (SR_initialize() != 0) {
3595     perror("SR_initialize failed");
3596     return JNI_ERR;
3597   }
3598 
3599   Aix::signal_sets_init();
3600   Aix::install_signal_handlers();
3601 
3602   // Check minimum allowable stack size for thread creation and to initialize
3603   // the java system classes, including StackOverflowError - depends on page
3604   // size. Add a page for compiler2 recursion in main thread.
3605   // Add in 2*BytesPerWord times page size to account for VM stack during
3606   // class initialization depending on 32 or 64 bit VM.
3607   os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3608             (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
3609                      (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
3610 
3611   os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
3612 
3613   size_t threadStackSizeInBytes = ThreadStackSize * K;
3614   if (threadStackSizeInBytes != 0 &&
3615       threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3616     tty->print_cr("\nThe stack size specified is too small, "
3617                   "Specify at least %dk",
3618                   os::Aix::min_stack_allowed / K);
3619     return JNI_ERR;
3620   }
3621 
3622   // Make the stack size a multiple of the page size so that
3623   // the yellow/red zones can be guarded.
3624   // Note that this can be 0, if no default stacksize was set.
3625   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3626 
3627   Aix::libpthread_init();
3628 
3629   if (MaxFDLimit) {
3630     // Set the number of file descriptors to max. print out error
3631     // if getrlimit/setrlimit fails but continue regardless.
3632     struct rlimit nbr_files;
3633     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3634     if (status != 0) {
3635       if (PrintMiscellaneous && (Verbose || WizardMode))
3636         perror("os::init_2 getrlimit failed");
3637     } else {
3638       nbr_files.rlim_cur = nbr_files.rlim_max;
3639       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3640       if (status != 0) {
3641         if (PrintMiscellaneous && (Verbose || WizardMode))
3642           perror("os::init_2 setrlimit failed");
3643       }
3644     }
3645   }
3646 
3647   if (PerfAllowAtExitRegistration) {
3648     // Only register atexit functions if PerfAllowAtExitRegistration is set.
3649     // Atexit functions can be delayed until process exit time, which
3650     // can be problematic for embedded VM situations. Embedded VMs should
3651     // call DestroyJavaVM() to assure that VM resources are released.
3652 
3653     // Note: perfMemory_exit_helper atexit function may be removed in
3654     // the future if the appropriate cleanup code can be added to the
3655     // VM_Exit VMOperation's doit method.
3656     if (atexit(perfMemory_exit_helper) != 0) {
3657       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3658     }
3659   }
3660 
3661   return JNI_OK;
3662 }
3663 
3664 // Mark the polling page as unreadable
3665 void os::make_polling_page_unreadable(void) {
3666   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3667     fatal("Could not disable polling page");
3668   }
3669 };
3670 
3671 // Mark the polling page as readable
3672 void os::make_polling_page_readable(void) {
3673   // Changed according to os_linux.cpp.
3674   if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3675     fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
3676   }
3677 };
3678 
3679 int os::active_processor_count() {
3680   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3681   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3682   return online_cpus;
3683 }
3684 
3685 void os::set_native_thread_name(const char *name) {
3686   // Not yet implemented.
3687   return;
3688 }
3689 
3690 bool os::distribute_processes(uint length, uint* distribution) {
3691   // Not yet implemented.
3692   return false;
3693 }
3694 
3695 bool os::bind_to_processor(uint processor_id) {
3696   // Not yet implemented.
3697   return false;
3698 }
3699 
3700 void os::SuspendedThreadTask::internal_do_task() {
3701   if (do_suspend(_thread->osthread())) {
3702     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3703     do_task(context);
3704     do_resume(_thread->osthread());
3705   }
3706 }
3707 
3708 class PcFetcher : public os::SuspendedThreadTask {
3709 public:
3710   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3711   ExtendedPC result();
3712 protected:
3713   void do_task(const os::SuspendedThreadTaskContext& context);
3714 private:
3715   ExtendedPC _epc;
3716 };
3717 
3718 ExtendedPC PcFetcher::result() {
3719   guarantee(is_done(), "task is not done yet.");
3720   return _epc;
3721 }
3722 
3723 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3724   Thread* thread = context.thread();
3725   OSThread* osthread = thread->osthread();
3726   if (osthread->ucontext() != NULL) {
3727     _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
3728   } else {
3729     // NULL context is unexpected, double-check this is the VMThread.
3730     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3731   }
3732 }
3733 
3734 // Suspends the target using the signal mechanism and then grabs the PC before
3735 // resuming the target. Used by the flat-profiler only
3736 ExtendedPC os::get_thread_pc(Thread* thread) {
3737   // Make sure that it is called by the watcher for the VMThread.
3738   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3739   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3740 
3741   PcFetcher fetcher(thread);
3742   fetcher.run();
3743   return fetcher.result();
3744 }
3745 
3746 ////////////////////////////////////////////////////////////////////////////////
3747 // debug support
3748 
3749 static address same_page(address x, address y) {
3750   intptr_t page_bits = -os::vm_page_size();
3751   if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
3752     return x;
3753   else if (x > y)
3754     return (address)(intptr_t(y) | ~page_bits) + 1;
3755   else
3756     return (address)(intptr_t(y) & page_bits);
3757 }
3758 
3759 bool os::find(address addr, outputStream* st) {
3760 
3761   st->print(PTR_FORMAT ": ", addr);
3762 
3763   loaded_module_t lm;
3764   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3765       LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3766     st->print("%s", lm.path);
3767     return true;
3768   }
3769 
3770   return false;
3771 }
3772 
3773 ////////////////////////////////////////////////////////////////////////////////
3774 // misc
3775 
3776 // This does not do anything on Aix. This is basically a hook for being
3777 // able to use structured exception handling (thread-local exception filters)
3778 // on, e.g., Win32.
3779 void
3780 os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
3781                          JavaCallArguments* args, Thread* thread) {
3782   f(value, method, args, thread);
3783 }
3784 
3785 void os::print_statistics() {
3786 }
3787 
3788 int os::message_box(const char* title, const char* message) {
3789   int i;
3790   fdStream err(defaultStream::error_fd());
3791   for (i = 0; i < 78; i++) err.print_raw("=");
3792   err.cr();
3793   err.print_raw_cr(title);
3794   for (i = 0; i < 78; i++) err.print_raw("-");
3795   err.cr();
3796   err.print_raw_cr(message);
3797   for (i = 0; i < 78; i++) err.print_raw("=");
3798   err.cr();
3799 
3800   char buf[16];
3801   // Prevent process from exiting upon "read error" without consuming all CPU
3802   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3803 
3804   return buf[0] == 'y' || buf[0] == 'Y';
3805 }
3806 
3807 int os::stat(const char *path, struct stat *sbuf) {
3808   char pathbuf[MAX_PATH];
3809   if (strlen(path) > MAX_PATH - 1) {
3810     errno = ENAMETOOLONG;
3811     return -1;
3812   }
3813   os::native_path(strcpy(pathbuf, path));
3814   return ::stat(pathbuf, sbuf);
3815 }
3816 
3817 bool os::check_heap(bool force) {
3818   return true;
3819 }
3820 
3821 // Is a (classpath) directory empty?
3822 bool os::dir_is_empty(const char* path) {
3823   DIR *dir = NULL;
3824   struct dirent *ptr;
3825 
3826   dir = opendir(path);
3827   if (dir == NULL) return true;
3828 
3829   /* Scan the directory */
3830   bool result = true;
3831   char buf[sizeof(struct dirent) + MAX_PATH];
3832   while (result && (ptr = ::readdir(dir)) != NULL) {
3833     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3834       result = false;
3835     }
3836   }
3837   closedir(dir);
3838   return result;
3839 }
3840 
3841 // This code originates from JDK's sysOpen and open64_w
3842 // from src/solaris/hpi/src/system_md.c
3843 
3844 int os::open(const char *path, int oflag, int mode) {
3845 
3846   if (strlen(path) > MAX_PATH - 1) {
3847     errno = ENAMETOOLONG;
3848     return -1;
3849   }
3850   int fd;
3851 
3852   fd = ::open64(path, oflag, mode);
3853   if (fd == -1) return -1;
3854 
3855   // If the open succeeded, the file might still be a directory.
3856   {
3857     struct stat64 buf64;
3858     int ret = ::fstat64(fd, &buf64);
3859     int st_mode = buf64.st_mode;
3860 
3861     if (ret != -1) {
3862       if ((st_mode & S_IFMT) == S_IFDIR) {
3863         errno = EISDIR;
3864         ::close(fd);
3865         return -1;
3866       }
3867     } else {
3868       ::close(fd);
3869       return -1;
3870     }
3871   }
3872 
3873   // All file descriptors that are opened in the JVM and not
3874   // specifically destined for a subprocess should have the
3875   // close-on-exec flag set. If we don't set it, then careless 3rd
3876   // party native code might fork and exec without closing all
3877   // appropriate file descriptors (e.g. as we do in closeDescriptors in
3878   // UNIXProcess.c), and this in turn might:
3879   //
3880   // - cause end-of-file to fail to be detected on some file
3881   //   descriptors, resulting in mysterious hangs, or
3882   //
3883   // - might cause an fopen in the subprocess to fail on a system
3884   //   suffering from bug 1085341.
3885   //
3886   // (Yes, the default setting of the close-on-exec flag is a Unix
3887   // design flaw.)
3888   //
3889   // See:
3890   // 1085341: 32-bit stdio routines should support file descriptors >255
3891   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
3892   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
3893 #ifdef FD_CLOEXEC
3894   {
3895     int flags = ::fcntl(fd, F_GETFD);
3896     if (flags != -1)
3897       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
3898   }
3899 #endif
3900 
3901   return fd;
3902 }
3903 
3904 // create binary file, rewriting existing file if required
3905 int os::create_binary_file(const char* path, bool rewrite_existing) {
3906   int oflags = O_WRONLY | O_CREAT;
3907   if (!rewrite_existing) {
3908     oflags |= O_EXCL;
3909   }
3910   return ::open64(path, oflags, S_IREAD | S_IWRITE);
3911 }
3912 
3913 // return current position of file pointer
3914 jlong os::current_file_offset(int fd) {
3915   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3916 }
3917 
3918 // move file pointer to the specified offset
3919 jlong os::seek_to_file_offset(int fd, jlong offset) {
3920   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3921 }
3922 
3923 // This code originates from JDK's sysAvailable
3924 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3925 
3926 int os::available(int fd, jlong *bytes) {
3927   jlong cur, end;
3928   int mode;
3929   struct stat64 buf64;
3930 
3931   if (::fstat64(fd, &buf64) >= 0) {
3932     mode = buf64.st_mode;
3933     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
3934       int n;
3935       if (::ioctl(fd, FIONREAD, &n) >= 0) {
3936         *bytes = n;
3937         return 1;
3938       }
3939     }
3940   }
3941   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
3942     return 0;
3943   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
3944     return 0;
3945   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
3946     return 0;
3947   }
3948   *bytes = end - cur;
3949   return 1;
3950 }
3951 
3952 // Map a block of memory.
3953 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3954                         char *addr, size_t bytes, bool read_only,
3955                         bool allow_exec) {
3956   int prot;
3957   int flags = MAP_PRIVATE;
3958 
3959   if (read_only) {
3960     prot = PROT_READ;
3961     flags = MAP_SHARED;
3962   } else {
3963     prot = PROT_READ | PROT_WRITE;
3964     flags = MAP_PRIVATE;
3965   }
3966 
3967   if (allow_exec) {
3968     prot |= PROT_EXEC;
3969   }
3970 
3971   if (addr != NULL) {
3972     flags |= MAP_FIXED;
3973   }
3974 
3975   // Allow anonymous mappings if 'fd' is -1.
3976   if (fd == -1) {
3977     flags |= MAP_ANONYMOUS;
3978   }
3979 
3980   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
3981                                      fd, file_offset);
3982   if (mapped_address == MAP_FAILED) {
3983     return NULL;
3984   }
3985   return mapped_address;
3986 }
3987 
3988 // Remap a block of memory.
3989 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
3990                           char *addr, size_t bytes, bool read_only,
3991                           bool allow_exec) {
3992   // same as map_memory() on this OS
3993   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
3994                         allow_exec);
3995 }
3996 
3997 // Unmap a block of memory.
3998 bool os::pd_unmap_memory(char* addr, size_t bytes) {
3999   return munmap(addr, bytes) == 0;
4000 }
4001 
4002 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4003 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4004 // of a thread.
4005 //
4006 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4007 // the fast estimate available on the platform.
4008 
4009 jlong os::current_thread_cpu_time() {
4010   // return user + sys since the cost is the same
4011   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
4012   assert(n >= 0, "negative CPU time");
4013   return n;
4014 }
4015 
4016 jlong os::thread_cpu_time(Thread* thread) {
4017   // consistent with what current_thread_cpu_time() returns
4018   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4019   assert(n >= 0, "negative CPU time");
4020   return n;
4021 }
4022 
4023 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4024   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4025   assert(n >= 0, "negative CPU time");
4026   return n;
4027 }
4028 
4029 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4030   bool error = false;
4031 
4032   jlong sys_time = 0;
4033   jlong user_time = 0;
4034 
4035   // Reimplemented using getthrds64().
4036   //
4037   // Works like this:
4038   // For the thread in question, get the kernel thread id. Then get the
4039   // kernel thread statistics using that id.
4040   //
4041   // This only works of course when no pthread scheduling is used,
4042   // i.e. there is a 1:1 relationship to kernel threads.
4043   // On AIX, see AIXTHREAD_SCOPE variable.
4044 
4045   pthread_t pthtid = thread->osthread()->pthread_id();
4046 
4047   // retrieve kernel thread id for the pthread:
4048   tid64_t tid = 0;
4049   struct __pthrdsinfo pinfo;
4050   // I just love those otherworldly IBM APIs which force me to hand down
4051   // dummy buffers for stuff I dont care for...
4052   char dummy[1];
4053   int dummy_size = sizeof(dummy);
4054   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4055                           dummy, &dummy_size) == 0) {
4056     tid = pinfo.__pi_tid;
4057   } else {
4058     tty->print_cr("pthread_getthrds_np failed.");
4059     error = true;
4060   }
4061 
4062   // retrieve kernel timing info for that kernel thread
4063   if (!error) {
4064     struct thrdentry64 thrdentry;
4065     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4066       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4067       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4068     } else {
4069       tty->print_cr("pthread_getthrds_np failed.");
4070       error = true;
4071     }
4072   }
4073 
4074   if (p_sys_time) {
4075     *p_sys_time = sys_time;
4076   }
4077 
4078   if (p_user_time) {
4079     *p_user_time = user_time;
4080   }
4081 
4082   if (error) {
4083     return false;
4084   }
4085 
4086   return true;
4087 }
4088 
4089 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4090   jlong sys_time;
4091   jlong user_time;
4092 
4093   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4094     return -1;
4095   }
4096 
4097   return user_sys_cpu_time ? sys_time + user_time : user_time;
4098 }
4099 
4100 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4101   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4102   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4103   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4104   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4105 }
4106 
4107 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4108   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4109   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4110   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4111   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4112 }
4113 
4114 bool os::is_thread_cpu_time_supported() {
4115   return true;
4116 }
4117 
4118 // System loadavg support. Returns -1 if load average cannot be obtained.
4119 // For now just return the system wide load average (no processor sets).
4120 int os::loadavg(double values[], int nelem) {
4121 
4122   // Implemented using libperfstat on AIX.
4123 
4124   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4125   guarantee(values, "argument error");
4126 
4127   if (os::Aix::on_pase()) {
4128     Unimplemented();
4129     return -1;
4130   } else {
4131     // AIX: use libperfstat
4132     //
4133     // See also:
4134     // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4135     // /usr/include/libperfstat.h:
4136 
4137     // Use the already AIX version independent get_cpuinfo.
4138     os::Aix::cpuinfo_t ci;
4139     if (os::Aix::get_cpuinfo(&ci)) {
4140       for (int i = 0; i < nelem; i++) {
4141         values[i] = ci.loadavg[i];
4142       }
4143     } else {
4144       return -1;
4145     }
4146     return nelem;
4147   }
4148 }
4149 
4150 void os::pause() {
4151   char filename[MAX_PATH];
4152   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4153     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4154   } else {
4155     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4156   }
4157 
4158   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4159   if (fd != -1) {
4160     struct stat buf;
4161     ::close(fd);
4162     while (::stat(filename, &buf) == 0) {
4163       (void)::poll(NULL, 0, 100);
4164     }
4165   } else {
4166     jio_fprintf(stderr,
4167       "Could not open pause file '%s', continuing immediately.\n", filename);
4168   }
4169 }
4170 
4171 bool os::Aix::is_primordial_thread() {
4172   if (pthread_self() == (pthread_t)1) {
4173     return true;
4174   } else {
4175     return false;
4176   }
4177 }
4178 
4179 // OS recognitions (PASE/AIX, OS level) call this before calling any
4180 // one of Aix::on_pase(), Aix::os_version() static
4181 void os::Aix::initialize_os_info() {
4182 
4183   assert(_on_pase == -1 && _os_version == -1, "already called.");
4184 
4185   struct utsname uts;
4186   memset(&uts, 0, sizeof(uts));
4187   strcpy(uts.sysname, "?");
4188   if (::uname(&uts) == -1) {
4189     trc("uname failed (%d)", errno);
4190     guarantee(0, "Could not determine whether we run on AIX or PASE");
4191   } else {
4192     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4193                "node \"%s\" machine \"%s\"\n",
4194                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4195     const int major = atoi(uts.version);
4196     assert(major > 0, "invalid OS version");
4197     const int minor = atoi(uts.release);
4198     assert(minor > 0, "invalid OS release");
4199     _os_version = (major << 8) | minor;
4200     if (strcmp(uts.sysname, "OS400") == 0) {
4201       Unimplemented();
4202     } else if (strcmp(uts.sysname, "AIX") == 0) {
4203       // We run on AIX. We do not support versions older than AIX 5.3.
4204       _on_pase = 0;
4205       if (_os_version < 0x0503) {
4206         trc("AIX release older than AIX 5.3 not supported.");
4207         assert(false, "AIX release too old.");
4208       } else {
4209         trcVerbose("We run on AIX %d.%d\n", major, minor);
4210       }
4211     } else {
4212       assert(false, "unknown OS");
4213     }
4214   }
4215 
4216   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4217 } // end: os::Aix::initialize_os_info()
4218 
4219 // Scan environment for important settings which might effect the VM.
4220 // Trace out settings. Warn about invalid settings and/or correct them.
4221 //
4222 // Must run after os::Aix::initialue_os_info().
4223 void os::Aix::scan_environment() {
4224 
4225   char* p;
4226   int rc;
4227 
4228   // Warn explicity if EXTSHM=ON is used. That switch changes how
4229   // System V shared memory behaves. One effect is that page size of
4230   // shared memory cannot be change dynamically, effectivly preventing
4231   // large pages from working.
4232   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4233   // recommendation is (in OSS notes) to switch it off.
4234   p = ::getenv("EXTSHM");
4235   if (Verbose) {
4236     fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4237   }
4238   if (p && strcasecmp(p, "ON") == 0) {
4239     fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4240     _extshm = 1;
4241   } else {
4242     _extshm = 0;
4243   }
4244 
4245   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4246   // Not tested, not supported.
4247   //
4248   // Note that it might be worth the trouble to test and to require it, if only to
4249   // get useful return codes for mprotect.
4250   //
4251   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4252   // exec() ? before loading the libjvm ? ....)
4253   p = ::getenv("XPG_SUS_ENV");
4254   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4255   if (p && strcmp(p, "ON") == 0) {
4256     _xpg_sus_mode = 1;
4257     trc("Unsupported setting: XPG_SUS_ENV=ON");
4258     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4259     // clobber address ranges. If we ever want to support that, we have to do some
4260     // testing first.
4261     guarantee(false, "XPG_SUS_ENV=ON not supported");
4262   } else {
4263     _xpg_sus_mode = 0;
4264   }
4265 
4266   // Switch off AIX internal (pthread) guard pages. This has
4267   // immediate effect for any pthread_create calls which follow.
4268   p = ::getenv("AIXTHREAD_GUARDPAGES");
4269   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4270   rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4271   guarantee(rc == 0, "");
4272 
4273 } // end: os::Aix::scan_environment()
4274 
4275 // PASE: initialize the libo4 library (AS400 PASE porting library).
4276 void os::Aix::initialize_libo4() {
4277   Unimplemented();
4278 }
4279 
4280 // AIX: initialize the libperfstat library (we load this dynamically
4281 // because it is only available on AIX.
4282 void os::Aix::initialize_libperfstat() {
4283 
4284   assert(os::Aix::on_aix(), "AIX only");
4285 
4286   if (!libperfstat::init()) {
4287     trc("libperfstat initialization failed.");
4288     assert(false, "libperfstat initialization failed");
4289   } else {
4290     if (Verbose) {
4291       fprintf(stderr, "libperfstat initialized.\n");
4292     }
4293   }
4294 } // end: os::Aix::initialize_libperfstat
4295 
4296 /////////////////////////////////////////////////////////////////////////////
4297 // thread stack
4298 
4299 // Function to query the current stack size using pthread_getthrds_np.
4300 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4301   // This only works when invoked on a pthread. As we agreed not to use
4302   // primordial threads anyway, I assert here.
4303   guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4304 
4305   // Information about this api can be found (a) in the pthread.h header and
4306   // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4307   //
4308   // The use of this API to find out the current stack is kind of undefined.
4309   // But after a lot of tries and asking IBM about it, I concluded that it is safe
4310   // enough for cases where I let the pthread library create its stacks. For cases
4311   // where I create an own stack and pass this to pthread_create, it seems not to
4312   // work (the returned stack size in that case is 0).
4313 
4314   pthread_t tid = pthread_self();
4315   struct __pthrdsinfo pinfo;
4316   char dummy[1]; // We only need this to satisfy the api and to not get E.
4317   int dummy_size = sizeof(dummy);
4318 
4319   memset(&pinfo, 0, sizeof(pinfo));
4320 
4321   const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4322                                      sizeof(pinfo), dummy, &dummy_size);
4323 
4324   if (rc != 0) {
4325     assert0(false);
4326     trcVerbose("pthread_getthrds_np failed (%d)", rc);
4327     return false;
4328   }
4329   guarantee0(pinfo.__pi_stackend);
4330 
4331   // The following can happen when invoking pthread_getthrds_np on a pthread running
4332   // on a user provided stack (when handing down a stack to pthread create, see
4333   // pthread_attr_setstackaddr).
4334   // Not sure what to do here - I feel inclined to forbid this use case completely.
4335   guarantee0(pinfo.__pi_stacksize);
4336 
4337   // Note: the pthread stack on AIX seems to look like this:
4338   //
4339   // ---------------------   real base ? at page border ?
4340   //
4341   //     pthread internal data, like ~2K, see also
4342   //     http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/thread_supp_tun_params.htm
4343   //
4344   // ---------------------   __pi_stackend - not page aligned, (xxxxF890)
4345   //
4346   //     stack
4347   //      ....
4348   //
4349   //     stack
4350   //
4351   // ---------------------   __pi_stackend  - __pi_stacksize
4352   //
4353   //     padding due to AIX guard pages (?) see AIXTHREAD_GUARDPAGES
4354   // ---------------------   __pi_stackaddr  (page aligned if AIXTHREAD_GUARDPAGES > 0)
4355   //
4356   //   AIX guard pages (?)
4357   //
4358 
4359   // So, the safe thing to do is to use the area from __pi_stackend to __pi_stackaddr;
4360   // __pi_stackend however is almost never page aligned.
4361   //
4362 
4363   if (p_stack_base) {
4364     (*p_stack_base) = (address) (pinfo.__pi_stackend);
4365   }
4366 
4367   if (p_stack_size) {
4368     (*p_stack_size) = pinfo.__pi_stackend - pinfo.__pi_stackaddr;
4369   }
4370 
4371   return true;
4372 }
4373 
4374 // Get the current stack base from the OS (actually, the pthread library).
4375 address os::current_stack_base() {
4376   address p;
4377   query_stack_dimensions(&p, 0);
4378   return p;
4379 }
4380 
4381 // Get the current stack size from the OS (actually, the pthread library).
4382 size_t os::current_stack_size() {
4383   size_t s;
4384   query_stack_dimensions(0, &s);
4385   return s;
4386 }
4387 
4388 // Refer to the comments in os_solaris.cpp park-unpark.
4389 //
4390 // Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4391 // hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4392 // For specifics regarding the bug see GLIBC BUGID 261237 :
4393 //    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4394 // Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4395 // will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4396 // is used. (The simple C test-case provided in the GLIBC bug report manifests the
4397 // hang). The JVM is vulernable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4398 // and monitorenter when we're using 1-0 locking. All those operations may result in
4399 // calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4400 // of libpthread avoids the problem, but isn't practical.
4401 //
4402 // Possible remedies:
4403 //
4404 // 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
4405 //      This is palliative and probabilistic, however. If the thread is preempted
4406 //      between the call to compute_abstime() and pthread_cond_timedwait(), more
4407 //      than the minimum period may have passed, and the abstime may be stale (in the
4408 //      past) resultin in a hang. Using this technique reduces the odds of a hang
4409 //      but the JVM is still vulnerable, particularly on heavily loaded systems.
4410 //
4411 // 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4412 //      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
4413 //      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4414 //      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
4415 //      thread.
4416 //
4417 // 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
4418 //      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
4419 //      a timeout request to the chron thread and then blocking via pthread_cond_wait().
4420 //      This also works well. In fact it avoids kernel-level scalability impediments
4421 //      on certain platforms that don't handle lots of active pthread_cond_timedwait()
4422 //      timers in a graceful fashion.
4423 //
4424 // 4.   When the abstime value is in the past it appears that control returns
4425 //      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4426 //      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
4427 //      can avoid the problem by reinitializing the condvar -- by cond_destroy()
4428 //      followed by cond_init() -- after all calls to pthread_cond_timedwait().
4429 //      It may be possible to avoid reinitialization by checking the return
4430 //      value from pthread_cond_timedwait(). In addition to reinitializing the
4431 //      condvar we must establish the invariant that cond_signal() is only called
4432 //      within critical sections protected by the adjunct mutex. This prevents
4433 //      cond_signal() from "seeing" a condvar that's in the midst of being
4434 //      reinitialized or that is corrupt. Sadly, this invariant obviates the
4435 //      desirable signal-after-unlock optimization that avoids futile context switching.
4436 //
4437 //      I'm also concerned that some versions of NTPL might allocate an auxilliary
4438 //      structure when a condvar is used or initialized. cond_destroy() would
4439 //      release the helper structure. Our reinitialize-after-timedwait fix
4440 //      put excessive stress on malloc/free and locks protecting the c-heap.
4441 //
4442 // We currently use (4). See the WorkAroundNTPLTimedWaitHang flag.
4443 // It may be possible to refine (4) by checking the kernel and NTPL verisons
4444 // and only enabling the work-around for vulnerable environments.
4445 
4446 // utility to compute the abstime argument to timedwait:
4447 // millis is the relative timeout time
4448 // abstime will be the absolute timeout time
4449 // TODO: replace compute_abstime() with unpackTime()
4450 
4451 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4452   if (millis < 0) millis = 0;
4453   struct timeval now;
4454   int status = gettimeofday(&now, NULL);
4455   assert(status == 0, "gettimeofday");
4456   jlong seconds = millis / 1000;
4457   millis %= 1000;
4458   if (seconds > 50000000) { // see man cond_timedwait(3T)
4459     seconds = 50000000;
4460   }
4461   abstime->tv_sec = now.tv_sec  + seconds;
4462   long       usec = now.tv_usec + millis * 1000;
4463   if (usec >= 1000000) {
4464     abstime->tv_sec += 1;
4465     usec -= 1000000;
4466   }
4467   abstime->tv_nsec = usec * 1000;
4468   return abstime;
4469 }
4470 
4471 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4472 // Conceptually TryPark() should be equivalent to park(0).
4473 
4474 int os::PlatformEvent::TryPark() {
4475   for (;;) {
4476     const int v = _Event;
4477     guarantee ((v == 0) || (v == 1), "invariant");
4478     if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4479   }
4480 }
4481 
4482 void os::PlatformEvent::park() {       // AKA "down()"
4483   // Invariant: Only the thread associated with the Event/PlatformEvent
4484   // may call park().
4485   // TODO: assert that _Assoc != NULL or _Assoc == Self
4486   int v;
4487   for (;;) {
4488     v = _Event;
4489     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4490   }
4491   guarantee (v >= 0, "invariant");
4492   if (v == 0) {
4493     // Do this the hard way by blocking ...
4494     int status = pthread_mutex_lock(_mutex);
4495     assert_status(status == 0, status, "mutex_lock");
4496     guarantee (_nParked == 0, "invariant");
4497     ++ _nParked;
4498     while (_Event < 0) {
4499       status = pthread_cond_wait(_cond, _mutex);
4500       assert_status(status == 0 || status == ETIMEDOUT, status, "cond_timedwait");
4501     }
4502     -- _nParked;
4503 
4504     // In theory we could move the ST of 0 into _Event past the unlock(),
4505     // but then we'd need a MEMBAR after the ST.
4506     _Event = 0;
4507     status = pthread_mutex_unlock(_mutex);
4508     assert_status(status == 0, status, "mutex_unlock");
4509   }
4510   guarantee (_Event >= 0, "invariant");
4511 }
4512 
4513 int os::PlatformEvent::park(jlong millis) {
4514   guarantee (_nParked == 0, "invariant");
4515 
4516   int v;
4517   for (;;) {
4518     v = _Event;
4519     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4520   }
4521   guarantee (v >= 0, "invariant");
4522   if (v != 0) return OS_OK;
4523 
4524   // We do this the hard way, by blocking the thread.
4525   // Consider enforcing a minimum timeout value.
4526   struct timespec abst;
4527   compute_abstime(&abst, millis);
4528 
4529   int ret = OS_TIMEOUT;
4530   int status = pthread_mutex_lock(_mutex);
4531   assert_status(status == 0, status, "mutex_lock");
4532   guarantee (_nParked == 0, "invariant");
4533   ++_nParked;
4534 
4535   // Object.wait(timo) will return because of
4536   // (a) notification
4537   // (b) timeout
4538   // (c) thread.interrupt
4539   //
4540   // Thread.interrupt and object.notify{All} both call Event::set.
4541   // That is, we treat thread.interrupt as a special case of notification.
4542   // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4543   // We assume all ETIME returns are valid.
4544   //
4545   // TODO: properly differentiate simultaneous notify+interrupt.
4546   // In that case, we should propagate the notify to another waiter.
4547 
4548   while (_Event < 0) {
4549     status = pthread_cond_timedwait(_cond, _mutex, &abst);
4550     assert_status(status == 0 || status == ETIMEDOUT,
4551                   status, "cond_timedwait");
4552     if (!FilterSpuriousWakeups) break;         // previous semantics
4553     if (status == ETIMEDOUT) break;
4554     // We consume and ignore EINTR and spurious wakeups.
4555   }
4556   --_nParked;
4557   if (_Event >= 0) {
4558      ret = OS_OK;
4559   }
4560   _Event = 0;
4561   status = pthread_mutex_unlock(_mutex);
4562   assert_status(status == 0, status, "mutex_unlock");
4563   assert (_nParked == 0, "invariant");
4564   return ret;
4565 }
4566 
4567 void os::PlatformEvent::unpark() {
4568   int v, AnyWaiters;
4569   for (;;) {
4570     v = _Event;
4571     if (v > 0) {
4572       // The LD of _Event could have reordered or be satisfied
4573       // by a read-aside from this processor's write buffer.
4574       // To avoid problems execute a barrier and then
4575       // ratify the value.
4576       OrderAccess::fence();
4577       if (_Event == v) return;
4578       continue;
4579     }
4580     if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
4581   }
4582   if (v < 0) {
4583     // Wait for the thread associated with the event to vacate
4584     int status = pthread_mutex_lock(_mutex);
4585     assert_status(status == 0, status, "mutex_lock");
4586     AnyWaiters = _nParked;
4587 
4588     if (AnyWaiters != 0) {
4589       // We intentional signal *after* dropping the lock
4590       // to avoid a common class of futile wakeups.
4591       status = pthread_cond_signal(_cond);
4592       assert_status(status == 0, status, "cond_signal");
4593     }
4594     // Mutex should be locked for pthread_cond_signal(_cond).
4595     status = pthread_mutex_unlock(_mutex);
4596     assert_status(status == 0, status, "mutex_unlock");
4597   }
4598 
4599   // Note that we signal() _after dropping the lock for "immortal" Events.
4600   // This is safe and avoids a common class of futile wakeups. In rare
4601   // circumstances this can cause a thread to return prematurely from
4602   // cond_{timed}wait() but the spurious wakeup is benign and the victim will
4603   // simply re-test the condition and re-park itself.
4604 }
4605 
4606 
4607 // JSR166
4608 // -------------------------------------------------------
4609 
4610 //
4611 // The solaris and linux implementations of park/unpark are fairly
4612 // conservative for now, but can be improved. They currently use a
4613 // mutex/condvar pair, plus a a count.
4614 // Park decrements count if > 0, else does a condvar wait. Unpark
4615 // sets count to 1 and signals condvar. Only one thread ever waits
4616 // on the condvar. Contention seen when trying to park implies that someone
4617 // is unparking you, so don't wait. And spurious returns are fine, so there
4618 // is no need to track notifications.
4619 //
4620 
4621 #define MAX_SECS 100000000
4622 //
4623 // This code is common to linux and solaris and will be moved to a
4624 // common place in dolphin.
4625 //
4626 // The passed in time value is either a relative time in nanoseconds
4627 // or an absolute time in milliseconds. Either way it has to be unpacked
4628 // into suitable seconds and nanoseconds components and stored in the
4629 // given timespec structure.
4630 // Given time is a 64-bit value and the time_t used in the timespec is only
4631 // a signed-32-bit value (except on 64-bit Linux) we have to watch for
4632 // overflow if times way in the future are given. Further on Solaris versions
4633 // prior to 10 there is a restriction (see cond_timedwait) that the specified
4634 // number of seconds, in abstime, is less than current_time + 100,000,000.
4635 // As it will be 28 years before "now + 100000000" will overflow we can
4636 // ignore overflow and just impose a hard-limit on seconds using the value
4637 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
4638 // years from "now".
4639 //
4640 
4641 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
4642   assert (time > 0, "convertTime");
4643 
4644   struct timeval now;
4645   int status = gettimeofday(&now, NULL);
4646   assert(status == 0, "gettimeofday");
4647 
4648   time_t max_secs = now.tv_sec + MAX_SECS;
4649 
4650   if (isAbsolute) {
4651     jlong secs = time / 1000;
4652     if (secs > max_secs) {
4653       absTime->tv_sec = max_secs;
4654     }
4655     else {
4656       absTime->tv_sec = secs;
4657     }
4658     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4659   }
4660   else {
4661     jlong secs = time / NANOSECS_PER_SEC;
4662     if (secs >= MAX_SECS) {
4663       absTime->tv_sec = max_secs;
4664       absTime->tv_nsec = 0;
4665     }
4666     else {
4667       absTime->tv_sec = now.tv_sec + secs;
4668       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4669       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4670         absTime->tv_nsec -= NANOSECS_PER_SEC;
4671         ++absTime->tv_sec; // note: this must be <= max_secs
4672       }
4673     }
4674   }
4675   assert(absTime->tv_sec >= 0, "tv_sec < 0");
4676   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4677   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4678   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4679 }
4680 
4681 void Parker::park(bool isAbsolute, jlong time) {
4682   // Optional fast-path check:
4683   // Return immediately if a permit is available.
4684   if (_counter > 0) {
4685     _counter = 0;
4686     OrderAccess::fence();
4687     return;
4688   }
4689 
4690   Thread* thread = Thread::current();
4691   assert(thread->is_Java_thread(), "Must be JavaThread");
4692   JavaThread *jt = (JavaThread *)thread;
4693 
4694   // Optional optimization -- avoid state transitions if there's an interrupt pending.
4695   // Check interrupt before trying to wait
4696   if (Thread::is_interrupted(thread, false)) {
4697     return;
4698   }
4699 
4700   // Next, demultiplex/decode time arguments
4701   timespec absTime;
4702   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
4703     return;
4704   }
4705   if (time > 0) {
4706     unpackTime(&absTime, isAbsolute, time);
4707   }
4708 
4709   // Enter safepoint region
4710   // Beware of deadlocks such as 6317397.
4711   // The per-thread Parker:: mutex is a classic leaf-lock.
4712   // In particular a thread must never block on the Threads_lock while
4713   // holding the Parker:: mutex. If safepoints are pending both the
4714   // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
4715   ThreadBlockInVM tbivm(jt);
4716 
4717   // Don't wait if cannot get lock since interference arises from
4718   // unblocking. Also. check interrupt before trying wait
4719   if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4720     return;
4721   }
4722 
4723   int status;
4724   if (_counter > 0) { // no wait needed
4725     _counter = 0;
4726     status = pthread_mutex_unlock(_mutex);
4727     assert (status == 0, "invariant");
4728     OrderAccess::fence();
4729     return;
4730   }
4731 
4732 #ifdef ASSERT
4733   // Don't catch signals while blocked; let the running threads have the signals.
4734   // (This allows a debugger to break into the running thread.)
4735   sigset_t oldsigs;
4736   sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
4737   pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4738 #endif
4739 
4740   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4741   jt->set_suspend_equivalent();
4742   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4743 
4744   if (time == 0) {
4745     status = pthread_cond_wait (_cond, _mutex);
4746   } else {
4747     status = pthread_cond_timedwait (_cond, _mutex, &absTime);
4748     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4749       pthread_cond_destroy (_cond);
4750       pthread_cond_init    (_cond, NULL);
4751     }
4752   }
4753   assert_status(status == 0 || status == EINTR ||
4754                 status == ETIME || status == ETIMEDOUT,
4755                 status, "cond_timedwait");
4756 
4757 #ifdef ASSERT
4758   pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4759 #endif
4760 
4761   _counter = 0;
4762   status = pthread_mutex_unlock(_mutex);
4763   assert_status(status == 0, status, "invariant");
4764   // If externally suspended while waiting, re-suspend
4765   if (jt->handle_special_suspend_equivalent_condition()) {
4766     jt->java_suspend_self();
4767   }
4768 
4769   OrderAccess::fence();
4770 }
4771 
4772 void Parker::unpark() {
4773   int s, status;
4774   status = pthread_mutex_lock(_mutex);
4775   assert (status == 0, "invariant");
4776   s = _counter;
4777   _counter = 1;
4778   if (s < 1) {
4779     if (WorkAroundNPTLTimedWaitHang) {
4780       status = pthread_cond_signal (_cond);
4781       assert (status == 0, "invariant");
4782       status = pthread_mutex_unlock(_mutex);
4783       assert (status == 0, "invariant");
4784     } else {
4785       status = pthread_mutex_unlock(_mutex);
4786       assert (status == 0, "invariant");
4787       status = pthread_cond_signal (_cond);
4788       assert (status == 0, "invariant");
4789     }
4790   } else {
4791     pthread_mutex_unlock(_mutex);
4792     assert (status == 0, "invariant");
4793   }
4794 }
4795 
4796 extern char** environ;
4797 
4798 // Run the specified command in a separate process. Return its exit value,
4799 // or -1 on failure (e.g. can't fork a new process).
4800 // Unlike system(), this function can be called from signal handler. It
4801 // doesn't block SIGINT et al.
4802 int os::fork_and_exec(char* cmd) {
4803   char * argv[4] = {"sh", "-c", cmd, NULL};
4804 
4805   pid_t pid = fork();
4806 
4807   if (pid < 0) {
4808     // fork failed
4809     return -1;
4810 
4811   } else if (pid == 0) {
4812     // child process
4813 
4814     // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4815     execve("/usr/bin/sh", argv, environ);
4816 
4817     // execve failed
4818     _exit(-1);
4819 
4820   } else {
4821     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4822     // care about the actual exit code, for now.
4823 
4824     int status;
4825 
4826     // Wait for the child process to exit. This returns immediately if
4827     // the child has already exited. */
4828     while (waitpid(pid, &status, 0) < 0) {
4829       switch (errno) {
4830         case ECHILD: return 0;
4831         case EINTR: break;
4832         default: return -1;
4833       }
4834     }
4835 
4836     if (WIFEXITED(status)) {
4837       // The child exited normally; get its exit code.
4838       return WEXITSTATUS(status);
4839     } else if (WIFSIGNALED(status)) {
4840       // The child exited because of a signal.
4841       // The best value to return is 0x80 + signal number,
4842       // because that is what all Unix shells do, and because
4843       // it allows callers to distinguish between process exit and
4844       // process death by signal.
4845       return 0x80 + WTERMSIG(status);
4846     } else {
4847       // Unknown exit code; pass it through.
4848       return status;
4849     }
4850   }
4851   return -1;
4852 }
4853 
4854 // is_headless_jre()
4855 //
4856 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
4857 // in order to report if we are running in a headless jre.
4858 //
4859 // Since JDK8 xawt/libmawt.so is moved into the same directory
4860 // as libawt.so, and renamed libawt_xawt.so
4861 bool os::is_headless_jre() {
4862   struct stat statbuf;
4863   char buf[MAXPATHLEN];
4864   char libmawtpath[MAXPATHLEN];
4865   const char *xawtstr = "/xawt/libmawt.so";
4866   const char *new_xawtstr = "/libawt_xawt.so";
4867 
4868   char *p;
4869 
4870   // Get path to libjvm.so
4871   os::jvm_path(buf, sizeof(buf));
4872 
4873   // Get rid of libjvm.so
4874   p = strrchr(buf, '/');
4875   if (p == NULL) return false;
4876   else *p = '\0';
4877 
4878   // Get rid of client or server
4879   p = strrchr(buf, '/');
4880   if (p == NULL) return false;
4881   else *p = '\0';
4882 
4883   // check xawt/libmawt.so
4884   strcpy(libmawtpath, buf);
4885   strcat(libmawtpath, xawtstr);
4886   if (::stat(libmawtpath, &statbuf) == 0) return false;
4887 
4888   // check libawt_xawt.so
4889   strcpy(libmawtpath, buf);
4890   strcat(libmawtpath, new_xawtstr);
4891   if (::stat(libmawtpath, &statbuf) == 0) return false;
4892 
4893   return true;
4894 }
4895 
4896 // Get the default path to the core file
4897 // Returns the length of the string
4898 int os::get_core_path(char* buffer, size_t bufferSize) {
4899   const char* p = get_current_directory(buffer, bufferSize);
4900 
4901   if (p == NULL) {
4902     assert(p != NULL, "failed to get current directory");
4903     return 0;
4904   }
4905 
4906   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4907                                                p, current_process_id());
4908 
4909   return strlen(buffer);
4910 }
4911 
4912 #ifndef PRODUCT
4913 void TestReserveMemorySpecial_test() {
4914   // No tests available for this platform
4915 }
4916 #endif