/*
 * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// According to the AIX OS doc, #pragma alloca must be used
// with the C++ compiler before referencing the function alloca().
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "libperfstat_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
// getrusage() is prepared to handle the associated failure.
#ifndef RUSAGE_THREAD
#define RUSAGE_THREAD   (1)               /* only the calling thread */
#endif
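// (If the fallback value above is wrong for this platform, getrusage(RUSAGE_THREAD, ...)
// simply fails at runtime and os::elapsedVTime() below falls back to os::elapsedTime().)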

// Add missing declarations (they should be in procinfo.h but are missing until AIX 6.1).
#if !defined(_AIXVERSION_610)
extern "C" {
  int getthrds64(pid_t ProcessIdentifier,
                 struct thrdentry64* ThreadBuffer,
                 int ThreadSize,
                 tid64_t* IndexPointer,
                 int Count);
}
#endif

// Excerpts from systemcfg.h definitions newer than AIX 5.3.
#ifndef PV_7
# define PV_7 0x200000          // Power PC 7
# define PV_7_Compat 0x208000   // Power PC 7
#endif

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// The semantics in this file are such that codeptr_t is a *real code pointer*:
// any function taking codeptr_t arguments assumes a real code pointer and
// won't handle function descriptors (e.g. getFuncName), whereas functions
// taking address arguments will deal with function descriptors
// (e.g. os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;
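// For reference: on AIX/PPC, taking the address of a function in C/C++ yields
// a pointer to its *function descriptor* in the data segment, a triple of
// { code entry point, TOC pointer, environment pointer }, whose first slot
// holds the real code pointer in the sense above.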

// typedefs for stackslots, stack pointers, pointers to op codes
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;

// query dimensions of the stack of the calling thread
static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

// function to check a given stack pointer against given stack limits
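// The stack grows downward, so a valid sp is 8-byte aligned and lies in the
// range [stack_base - stack_size, stack_base].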
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
  if (((uintptr_t)sp) & 0x7) {
    return false;
  }
  if (sp > stack_base) {
    return false;
  }
  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
    return false;
  }
  return true;
}

// returns true if function is a valid codepointer
inline bool is_valid_codepointer(codeptr_t p) {
  if (!p) {
    return false;
  }
  if (((uintptr_t)p) & 0x3) {
    return false;
  }
  if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
    return false;
  }
  return true;
}

// macro to check a given stack pointer against given stack limits and to die if test fails
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// macro to check the current stack pointer against given stacklimits
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
  address sp; \
  sp = os::current_stack_pointer(); \
  CHECK_STACK_PTR(sp, stack_base, stack_size); \
}

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;
pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;
int       os::Aix::_on_pase = -1;
int       os::Aix::_os_version = -1;
int       os::Aix::_stack_page_size = -1;
size_t    os::Aix::_shm_default_page_size = -1;
int       os::Aix::_can_use_64K_pages = -1;
int       os::Aix::_can_use_16M_pages = -1;
int       os::Aix::_xpg_sus_mode = -1;
int       os::Aix::_extshm = -1;
int       os::Aix::_logical_cpus = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static int      g_multipage_error  = -1;   // error analysis for multipage initialization
static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static pid_t    _initial_pid       = 0;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;
static pthread_mutex_t dl_mutex;           // Used to protect dlsym() calls.

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return 0xFFFFFFFFFFFFFFFFLL;
  }
}

julong os::physical_memory() {
  return Aix::physical_memory();
}

////////////////////////////////////////////////////////////////////////////////
// environment support

bool os::getenv(const char* name, char* buf, int len) {
  const char* val = ::getenv(name);
  if (val != NULL && strlen(val) < (size_t)len) {
    strcpy(buf, val);
    return true;
  }
  if (len > 0) buf[0] = 0;  // return an empty string
  return false;
}


// Return true if the process runs with special (setuid/setgid) privileges,
// i.e. the real and effective user or group ids differ.

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function, emulates disclaim64 using multiple 32bit disclaims
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
  const unsigned int maxDisclaimSize = 0x80000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
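  // Example: for size == 5 GB and maxDisclaimSize == 2 GB this yields
  // numFullDisclaimsNeeded == 2 and lastDisclaimSize == 1 GB.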

  char* p = addr;

  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i ++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      // if (Verbose)
      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      // if (Verbose)
      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}

// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif


// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
    assert(false, "vmgetinfo failed to retrieve page size");
    return SIZE_4K;
  }

}

// Returns the kernel thread id of the currently running thread.
pid_t os::Aix::gettid() {
  return (pid_t) thread_self();
}

void os::Aix::initialize_system_info() {

  // get the number of online (logical) cpus instead of configured
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // retrieve total physical storage
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case SIZE_4K : return "4K";
    case SIZE_64K: return "64K";
    case SIZE_16M: return "16M";
    case SIZE_16G: return "16G";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Retrieve information about multipage size support. Will initialize
// Aix::_page_size, Aix::_stack_page_size, Aix::_can_use_64K_pages,
// Aix::_can_use_16M_pages.
// Must be called before calling os::large_page_init().
void os::Aix::query_multipage_support() {

  guarantee(_page_size == -1 &&
            _stack_page_size == -1 &&
            _can_use_64K_pages == -1 &&
            _can_use_16M_pages == -1 &&
            g_multipage_error == -1,
            "do not call twice");

  _page_size = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(_page_size == SIZE_4K, "surprise!");


  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // Default data page size is influenced either by linker options (-bdatapsize)
  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // default should be 4K.
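  // (Illustrative example: starting the VM with LDR_CNTRL=DATAPSIZE=64K@SHMPSIZE=64K
  // would make the probes below report 64K data and shm page sizes instead.)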
  size_t data_page_size = SIZE_4K;
  {
    void* p = os::malloc(SIZE_16M, mtInternal);
    guarantee(p != NULL, "malloc failed");
    data_page_size = os::Aix::query_pagesize(p);
    os::free(p);
  }

  // query default shm page size (LDR_CNTRL SHMPSIZE)
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    _shm_default_page_size = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // before querying the stack page size, make sure we are not running as primordial
  // thread (because primordial thread's stack may have different page size than
  // pthread thread stacks). Running a VM on the primordial thread won't work for a
  // number of reasons so we may just as well guarantee it here
  guarantee(!os::Aix::is_primordial_thread(), "Must not be called for primordial thread");

  // query stack page size
  {
    int dummy = 0;
    _stack_page_size = os::Aix::query_pagesize(&dummy);
    // everything else would surprise me and should be looked into
    guarantee(_stack_page_size == SIZE_4K || _stack_page_size == SIZE_64K, "Wrong page size");
    // also, just for completeness: pthread stacks are allocated from C heap, so
    // stack page size should be the same as data page size
    guarantee(_stack_page_size == data_page_size, "stack page size should be the same as data page size");
  }

  // EXTSHM is bad: among other things, it prevents setting pagesize dynamically
  // for system V shm.
  if (Aix::extshm()) {
    if (Verbose) {
      fprintf(stderr, "EXTSHM is active - will disable large page support.\n"
                      "Please make sure EXTSHM is OFF for large page support.\n");
    }
    g_multipage_error = ERROR_MP_EXTSHM_ACTIVE;
    _can_use_64K_pages = _can_use_16M_pages = 0;
    goto query_multipage_support_end;
  }

  // now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      if (Verbose) {
        fprintf(stderr, "vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
        fprintf(stderr, "disabling multipage support.\n");
      }
      g_multipage_error = ERROR_MP_VMGETINFO_FAILED;
      _can_use_64K_pages = _can_use_16M_pages = 0;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    if (Verbose) {
      fprintf(stderr, "vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
      for (int i = 0; i < num_psizes; i ++) {
        fprintf(stderr, " %s ", describe_pagesize(sizes[i]));
      }
      fprintf(stderr, " .\n");
    }

    // Can we use 64K, 16M pages?
    _can_use_64K_pages = 0;
    _can_use_16M_pages = 0;
    for (int i = 0; i < num_psizes; i ++) {
      if (sizes[i] == SIZE_64K) {
        _can_use_64K_pages = 1;
      } else if (sizes[i] == SIZE_16M) {
        _can_use_16M_pages = 1;
      }
    }

    if (!_can_use_64K_pages) {
      g_multipage_error = ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K;
    }

    // Double-check for 16M pages: Even if AIX claims to be able to use 16M pages,
    // there must be an actual 16M page pool, and we must run with enough rights.
    if (_can_use_16M_pages) {
      const int shmid = ::shmget(IPC_PRIVATE, SIZE_16M, IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee(shmid != -1, "shmget failed");
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = SIZE_16M;
      const bool can_set_pagesize = ::shmctl(shmid, SHM_PAGESIZE, &shm_buf) == 0 ? true : false;
      const int en = errno;
      ::shmctl(shmid, IPC_RMID, NULL);
      if (!can_set_pagesize) {
        if (Verbose) {
          fprintf(stderr, "Failed to allocate even one measly 16M page. shmctl failed with %d (%s).\n"
                          "Will deactivate 16M support.\n", en, strerror(en));
        }
        _can_use_16M_pages = 0;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  guarantee(_page_size != -1 &&
            _stack_page_size != -1 &&
            _can_use_64K_pages != -1 &&
            _can_use_16M_pages != -1, "Page sizes not properly initialized");

  if (_can_use_64K_pages) {
    g_multipage_error = 0;
  }

  if (Verbose) {
    fprintf(stderr, "Data page size (C-Heap, bss, etc): %s\n", describe_pagesize(data_page_size));
    fprintf(stderr, "Thread stack page size (pthread): %s\n", describe_pagesize(_stack_page_size));
    fprintf(stderr, "Default shared memory page size: %s\n", describe_pagesize(_shm_default_page_size));
    fprintf(stderr, "Can use 64K pages dynamically with shared memory: %s\n", (_can_use_64K_pages ? "yes" : "no"));
    fprintf(stderr, "Can use 16M pages dynamically with shared memory: %s\n", (_can_use_16M_pages ? "yes" : "no"));
    fprintf(stderr, "Multipage error details: %d\n", g_multipage_error);
  }

} // end os::Aix::query_multipage_support()

// The code for this method was initially derived from the version in os_linux.cpp.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR  "/lib/ext"
#define ENDORSED_DIR    "/lib/endorsed"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR), // extensions dir
         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
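    // Illustrative walk-through (hypothetical layout):
    //   buf       = "/opt/java8/jre/lib/ppc64/server/libjvm.so"  (from jvm_path)
    //   dll_dir   = "/opt/java8/jre/lib/ppc64"
    //   java_home = "/opt/java8/jre"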
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  // Endorsed standards default directory.
  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
  Arguments::set_endorsed_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf, mtInternal);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
    return true;
  else
    return false;
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
    }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow the debugger in.
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Retrieve memory information.
// Returns false if something went wrong;
// content of pmi undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {

    Unimplemented();
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics.
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
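    // All counters above are in units of 4 KB pages; e.g. real_total == 524288
    // corresponds to 2 GB of real memory, hence the scaling by 4096 below.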

    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo

// Retrieve global cpu information.
// Returns false if something went wrong;
// the content of pci is undefined in this case.
bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
  assert(pci, "get_cpuinfo: invalid parameter");
  memset(pci, 0, sizeof(cpuinfo_t));

  perfstat_cpu_total_t psct;
  memset (&psct, '\0', sizeof(psct));

  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
    assert(0, "perfstat_cpu_total() failed");
    return false;
  }

  // global cpu information
  strcpy (pci->description, psct.description);
  pci->processorHZ = psct.processorHZ;
  pci->ncpus = psct.ncpus;
  os::Aix::_logical_cpus = psct.ncpus;
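  // psct.loadavg[] is a fixed-point value with SBITS fractional bits, so
  // divide by 2^SBITS (e.g. with SBITS == 16, a raw value of 98304 is a
  // load average of 1.5).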
  for (int i = 0; i < 3; i++) {
    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
  }

  // get the processor version from _system_configuration
  switch (_system_configuration.version) {
  case PV_7:
    strcpy(pci->version, "Power PC 7");
    break;
  case PV_6_1:
    strcpy(pci->version, "Power PC 6 DD1.x");
    break;
  case PV_6:
    strcpy(pci->version, "Power PC 6");
    break;
  case PV_5:
    strcpy(pci->version, "Power PC 5");
    break;
  case PV_5_2:
    strcpy(pci->version, "Power PC 5_2");
    break;
  case PV_5_3:
    strcpy(pci->version, "Power PC 5_3");
    break;
  case PV_5_Compat:
    strcpy(pci->version, "PV_5_Compat");
    break;
  case PV_6_Compat:
    strcpy(pci->version, "PV_6_Compat");
    break;
  case PV_7_Compat:
    strcpy(pci->version, "PV_7_Compat");
    break;
  default:
    strcpy(pci->version, "unknown");
  }

  return true;

} // end os::Aix::get_cpuinfo

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Aix::libpthread_init() {
  return;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
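  // ((pid ^ counter) & 7) yields 0..7, so the alloca below shifts this
  // thread's stack pointer by 0..896 bytes in 128-byte steps.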

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is kernel thread id (similar to Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level start routine
  thread->run();

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (os::Aix::supports_variable_stack_size()) {
    if (stack_size == 0) {
      stack_size = os::Aix::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } // else let thread_create() pick the default value (96 K on AIX)

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  pthread_attr_destroy(&attr);

  if (ret != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "pthread_continue_np failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// thread local storage

int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, NULL);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}

// Note: This is currently not used by VM, as we don't destroy TLS key
// on VM exit.
void os::free_thread_local_storage(int index) {
  int rslt = pthread_key_delete((pthread_key_t)index);
  assert(rslt == 0, "invalid index");
}

void os::thread_local_storage_at_put(int index, void* value) {
  int rslt = pthread_setspecific((pthread_key_t)index, value);
  assert(rslt == 0, "pthread_setspecific failed");
}

extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
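  // os::elapsed_counter() (below) counts microseconds, hence the 1e-6 factor.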
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }

double os::elapsedVTime() {
  struct rusage usage;
  int retval = getrusage(RUSAGE_THREAD, &usage);
  if (retval == 0) {
    return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
  } else {
    // better than nothing, but not much
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

// We need to manually declare mread_real_time,
// because IBM didn't provide a prototype in time.h.
// (They probably only ever tested in C, not C++.)
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {
    Unimplemented();
    return 0;
  } else {
    // On AIX use the precision of the processor's real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We use mread_real_time here instead of read_real_time
    // to ensure that we get a monotonically increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}

char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }

}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {

  if (errno == 0) return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() { return (intx)pthread_self(); }

int os::current_process_id() {

  // This implementation returns a unique pid, the pid of the
  // launcher thread that starts the vm 'process'.

  // Under POSIX, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre NPTL behaviour.

  // if you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in osThread_linux.hpp file

  return (int)(_initial_pid ? _initial_pid : getpid());
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}
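// Builds a platform-dependent library name: with pname = "/a:/b" and
// fname = "net" (hypothetical values), the path-list branch below probes
// "/a/libnet.so" then "/b/libnet.so" and keeps the first file that exists;
// an empty pname yields just "libnet.so".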
bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
  if (lib) {
    if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
      return true;
    } else {
      return false;
    }
  } else {
    lib = LoadedLibraries::find_for_data_address(addr);
    if (lib) {
      if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
        return true;
      } else {
        return false;
      }
    } else {
      return false;
    }
  }
}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
//   it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
//   code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
//   NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
  if (lib) {
    // It's a real code pointer.
    return p;
  } else {
    lib = LoadedLibraries::find_for_data_address(p);
    if (lib) {
      // pointer to data segment, potential function descriptor
      address code_entry = (address)(((FunctionDescriptor*)p)->entry());
      if (LoadedLibraries::find_for_text_address(code_entry)) {
        // It's a function descriptor.
        return code_entry;
      }
    }
  }
  return NULL;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  if (offset) {
    *offset = -1;
  }
  if (buf) {
    buf[0] = '\0';
  }

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
  return Decoder::decode(addr, buf, buflen, offset);
}

static int getModuleName(codeptr_t pc,                    // [in] program counter
                         char* p_name, size_t namelen,    // [out] optional: module name
                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
                         ) {

  // initialize output parameters
  if (p_name && namelen > 0) {
    *p_name = '\0';
  }
  if (p_errmsg && errmsglen > 0) {
    *p_errmsg = '\0';
  }

  const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);
  if (lib) {
    if (p_name && namelen > 0) {
      // %.*s expects an int precision, so cast the size_t explicitly.
      sprintf(p_name, "%.*s", (int) namelen, lib->get_shortname());
    }
    return 0;
  }

  if (Verbose) {
    fprintf(stderr, "pc outside any module\n");
  }

  return -1;

}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  if (offset) {
    *offset = -1;
  }
  if (buf) {
    buf[0] = '\0';
  }

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
    return true;
  }
  return false;
}

// Loads .dll/.so and in case of error it checks if .dll/.so was built
// for the same architecture as Hotspot is running on.
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {

  if (ebuf && ebuflen > 0) {
    ebuf[0] = '\0';
    ebuf[ebuflen - 1] = '\0';
  }

  if (!filename || strlen(filename) == 0) {
    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
    return NULL;
  }

  // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependents.
  void* result = ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Reload dll cache. Don't do this in signal handling.
    LoadedLibraries::reload();
    return result;
  } else {
    // error analysis when dlopen fails
    const char* const error_report = ::dlerror();
    if (error_report && ebuf && ebuflen > 0) {
      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
    }
  }
  return NULL;
}

// dlsym() is not guaranteed to be MT-safe with every libdl implementation,
// so conservatively serialize all lookups through dl_mutex.
void* os::dll_lookup(void* handle, const char* name) {
  pthread_mutex_lock(&dl_mutex);
  void* res = dlsym(handle, name);
  pthread_mutex_unlock(&dl_mutex);
  return res;
}

void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}
1495 
1496 void os::print_os_info(outputStream* st) {
1497   st->print("OS:");
1498 
1499   st->print("uname:");
1500   struct utsname name;
1501   uname(&name);
1502   st->print(name.sysname); st->print(" ");
1503   st->print(name.nodename); st->print(" ");
1504   st->print(name.release); st->print(" ");
1505   st->print(name.version); st->print(" ");
1506   st->print(name.machine);
1507   st->cr();
1508 
1509   // rlimit
1510   st->print("rlimit:");
1511   struct rlimit rlim;
1512 
1513   st->print(" STACK ");
1514   getrlimit(RLIMIT_STACK, &rlim);
1515   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1516   else st->print("%uk", rlim.rlim_cur >> 10);
1517 
1518   st->print(", CORE ");
1519   getrlimit(RLIMIT_CORE, &rlim);
1520   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1521   else st->print("%uk", rlim.rlim_cur >> 10);
1522 
1523   st->print(", NPROC ");
1524   st->print("%d", sysconf(_SC_CHILD_MAX));
1525 
1526   st->print(", NOFILE ");
1527   getrlimit(RLIMIT_NOFILE, &rlim);
1528   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1529   else st->print("%d", rlim.rlim_cur);
1530 
1531   st->print(", AS ");
1532   getrlimit(RLIMIT_AS, &rlim);
1533   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1534   else st->print("%uk", rlim.rlim_cur >> 10);
1535 
1536   // Print limits on DATA, because it limits the C-heap.
1537   st->print(", DATA ");
1538   getrlimit(RLIMIT_DATA, &rlim);
1539   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1540   else st->print("%uk", rlim.rlim_cur >> 10);
1541   st->cr();
1542 
1543   // load average
1544   st->print("load average:");
  double loadavg[3] = {-1.0, -1.0, -1.0};
1546   os::loadavg(loadavg, 3);
1547   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1548   st->cr();
1549 }
1550 
1551 void os::print_memory_info(outputStream* st) {
1552 
1553   st->print_cr("Memory:");
1554 
1555   st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
1556   st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
1557   st->print_cr("  default shm page size: %s", describe_pagesize(os::Aix::shm_default_page_size()));
1558   st->print_cr("  can use 64K pages dynamically: %s", (os::Aix::can_use_64K_pages() ? "yes" :"no"));
1559   st->print_cr("  can use 16M pages dynamically: %s", (os::Aix::can_use_16M_pages() ? "yes" :"no"));
1560   if (g_multipage_error != 0) {
1561     st->print_cr("  multipage error: %d", g_multipage_error);
1562   }
1563 
1564   // print out LDR_CNTRL because it affects the default page sizes
1565   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1566   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1567 
1568   const char* const extshm = ::getenv("EXTSHM");
1569   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1570 
1571   // Call os::Aix::get_meminfo() to retrieve memory statistics.
1572   os::Aix::meminfo_t mi;
1573   if (os::Aix::get_meminfo(&mi)) {
1574     char buffer[256];
1575     if (os::Aix::on_aix()) {
1576       jio_snprintf(buffer, sizeof(buffer),
1577                    "  physical total : %llu\n"
1578                    "  physical free  : %llu\n"
1579                    "  swap total     : %llu\n"
1580                    "  swap free      : %llu\n",
1581                    mi.real_total,
1582                    mi.real_free,
1583                    mi.pgsp_total,
1584                    mi.pgsp_free);
1585     } else {
1586       Unimplemented();
1587     }
1588     st->print_raw(buffer);
1589   } else {
1590     st->print_cr("  (no more information available)");
1591   }
1592 }
1593 
1594 void os::pd_print_cpu_info(outputStream* st) {
1595   // cpu
1596   st->print("CPU:");
1597   st->print("total %d", os::processor_count());
  // It's not safe to query the number of active processors after a crash.
1599   // st->print("(active %d)", os::active_processor_count());
1600   st->print(" %s", VM_Version::cpu_features());
1601   st->cr();
1602 }
1603 
1604 void os::print_siginfo(outputStream* st, void* siginfo) {
1605   // Use common posix version.
1606   os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
1607   st->cr();
1608 }
1609 
1610 
1611 static void print_signal_handler(outputStream* st, int sig,
1612                                  char* buf, size_t buflen);
1613 
1614 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1615   st->print_cr("Signal Handlers:");
1616   print_signal_handler(st, SIGSEGV, buf, buflen);
1617   print_signal_handler(st, SIGBUS , buf, buflen);
1618   print_signal_handler(st, SIGFPE , buf, buflen);
1619   print_signal_handler(st, SIGPIPE, buf, buflen);
1620   print_signal_handler(st, SIGXFSZ, buf, buflen);
1621   print_signal_handler(st, SIGILL , buf, buflen);
1622   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
1623   print_signal_handler(st, SR_signum, buf, buflen);
1624   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1625   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1626   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1627   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1628   print_signal_handler(st, SIGTRAP, buf, buflen);
1629   print_signal_handler(st, SIGDANGER, buf, buflen);
1630 }
1631 
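// Buffer for os::jvm_path(); filled lazily on the first call and reused afterwards.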
1632 static char saved_jvm_path[MAXPATHLEN] = {0};
1633 
1634 // Find the full path to the current module, libjvm.so or libjvm_g.so
1635 void os::jvm_path(char *buf, jint buflen) {
1636   // Error checking.
1637   if (buflen < MAXPATHLEN) {
1638     assert(false, "must use a large-enough buffer");
1639     buf[0] = '\0';
1640     return;
1641   }
1642   // Lazy resolve the path to current module.
1643   if (saved_jvm_path[0] != 0) {
1644     strcpy(buf, saved_jvm_path);
1645     return;
1646   }
1647 
1648   Dl_info dlinfo;
1649   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1650   assert(ret != 0, "cannot locate libjvm");
1651   char* rp = realpath((char *)dlinfo.dli_fname, buf);
1652   assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1653 
1654   strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
1655   saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
1656 }
1657 
1658 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1659   // no prefix required, not even "_"
1660 }
1661 
1662 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1663   // no suffix required
1664 }
1665 
1666 ////////////////////////////////////////////////////////////////////////////////
1667 // sun.misc.Signal support
1668 
1669 static volatile jint sigint_count = 0;
1670 
1671 static void
1672 UserHandler(int sig, void *siginfo, void *context) {
1673   // 4511530 - sem_post is serialized and handled by the manager thread. When
1674   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1675   // don't want to flood the manager thread with sem_post requests.
1676   if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1677     return;
1678 
1679   // Ctrl-C is pressed during error reporting, likely because the error
1680   // handler fails to abort. Let VM die immediately.
1681   if (sig == SIGINT && is_error_reported()) {
1682     os::die();
1683   }
1684 
1685   os::signal_notify(sig);
1686 }
1687 
1688 void* os::user_handler() {
1689   return CAST_FROM_FN_PTR(void*, UserHandler);
1690 }
1691 
1692 extern "C" {
1693   typedef void (*sa_handler_t)(int);
1694   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1695 }
1696 
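// Installs 'handler' for 'signal_number' analogously to ::signal(): returns the
// previously installed handler on success, or (void*)-1 if registration failed.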
1697 void* os::signal(int signal_number, void* handler) {
1698   struct sigaction sigAct, oldSigAct;
1699 
1700   sigfillset(&(sigAct.sa_mask));
1701 
1702   // Do not block out synchronous signals in the signal handler.
1703   // Blocking synchronous signals only makes sense if you can really
1704   // be sure that those signals won't happen during signal handling,
1705   // when the blocking applies.  Normal signal handlers are lean and
1706   // do not cause signals. But our signal handlers tend to be "risky"
1707   // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
  // On AIX (PASE) there was a case where a SIGSEGV happened, followed
1709   // by a SIGILL, which was blocked due to the signal mask. The process
1710   // just hung forever. Better to crash from a secondary signal than to hang.
1711   sigdelset(&(sigAct.sa_mask), SIGSEGV);
1712   sigdelset(&(sigAct.sa_mask), SIGBUS);
1713   sigdelset(&(sigAct.sa_mask), SIGILL);
1714   sigdelset(&(sigAct.sa_mask), SIGFPE);
1715   sigdelset(&(sigAct.sa_mask), SIGTRAP);
1716 
1717   sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1718 
1719   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1720 
1721   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1722     // -1 means registration failed
1723     return (void *)-1;
1724   }
1725 
1726   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1727 }
1728 
1729 void os::signal_raise(int signal_number) {
1730   ::raise(signal_number);
1731 }
1732 
1733 //
1734 // The following code is moved from os.cpp for making this
1735 // code platform specific, which it is by its very nature.
1736 //
1737 
1738 // Will be modified when max signal is changed to be dynamic
1739 int os::sigexitnum_pd() {
1740   return NSIG;
1741 }
1742 
1743 // a counter for each possible signal value
1744 static volatile jint pending_signals[NSIG+1] = { 0 };
1745 
// POSIX-style handshaking semaphore.
1747 static sem_t sig_sem;
1748 
1749 void os::signal_init_pd() {
1750   // Initialize signal structures
1751   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1752 
1753   // Initialize signal semaphore
1754   int rc = ::sem_init(&sig_sem, 0, 0);
1755   guarantee(rc != -1, "sem_init failed");
1756 }
1757 
1758 void os::signal_notify(int sig) {
1759   Atomic::inc(&pending_signals[sig]);
1760   ::sem_post(&sig_sem);
1761 }
1762 
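// Scans the pending_signals table for a raised signal and claims it with a
// lock-free cmpxchg decrement. If none is pending and 'wait' is true, blocks
// on sig_sem; the loop around sem_wait() re-posts the semaphore and calls
// java_suspend_self() if the thread was externally suspended while waiting.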
1763 static int check_pending_signals(bool wait) {
1764   Atomic::store(0, &sigint_count);
1765   for (;;) {
1766     for (int i = 0; i < NSIG + 1; i++) {
1767       jint n = pending_signals[i];
1768       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1769         return i;
1770       }
1771     }
1772     if (!wait) {
1773       return -1;
1774     }
1775     JavaThread *thread = JavaThread::current();
1776     ThreadBlockInVM tbivm(thread);
1777 
1778     bool threadIsSuspended;
1779     do {
1780       thread->set_suspend_equivalent();
1781       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1782 
1783       ::sem_wait(&sig_sem);
1784 
1785       // were we externally suspended while we were waiting?
1786       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1787       if (threadIsSuspended) {
1788         //
1789         // The semaphore has been incremented, but while we were waiting
1790         // another thread suspended us. We don't want to continue running
1791         // while suspended because that would surprise the thread that
1792         // suspended us.
1793         //
1794         ::sem_post(&sig_sem);
1795 
1796         thread->java_suspend_self();
1797       }
1798     } while (threadIsSuspended);
1799   }
1800 }
1801 
1802 int os::signal_lookup() {
1803   return check_pending_signals(false);
1804 }
1805 
1806 int os::signal_wait() {
1807   return check_pending_signals(true);
1808 }
1809 
1810 ////////////////////////////////////////////////////////////////////////////////
1811 // Virtual Memory
1812 
1813 // AddrRange describes an immutable address range
1814 //
1815 // This is a helper class for the 'shared memory bookkeeping' below.
1816 class AddrRange {
1817   friend class ShmBkBlock;
1818 
1819   char* _start;
1820   size_t _size;
1821 
1822 public:
1823 
1824   AddrRange(char* start, size_t size)
1825     : _start(start), _size(size)
1826   {}
1827 
1828   AddrRange(const AddrRange& r)
1829     : _start(r.start()), _size(r.size())
1830   {}
1831 
1832   char* start() const { return _start; }
1833   size_t size() const { return _size; }
1834   char* end() const { return _start + _size; }
  bool is_empty() const { return _size == 0; }
1836 
1837   static AddrRange empty_range() { return AddrRange(NULL, 0); }
1838 
1839   bool contains(const char* p) const {
1840     return start() <= p && end() > p;
1841   }
1842 
1843   bool contains(const AddrRange& range) const {
1844     return start() <= range.start() && end() >= range.end();
1845   }
1846 
1847   bool intersects(const AddrRange& range) const {
1848     return (range.start() <= start() && range.end() > start()) ||
1849            (range.start() < end() && range.end() >= end()) ||
1850            contains(range);
1851   }
1852 
1853   bool is_same_range(const AddrRange& range) const {
1854     return start() == range.start() && size() == range.size();
1855   }
1856 
1857   // return the closest inside range consisting of whole pages
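  // (Example: with pagesize 0x1000, the range [0x1100 .. 0x4F00) yields
  // [0x2000 .. 0x4000).)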
1858   AddrRange find_closest_aligned_range(size_t pagesize) const {
1859     if (pagesize == 0 || is_empty()) {
1860       return empty_range();
1861     }
1862     char* const from = (char*)align_size_up((intptr_t)_start, pagesize);
1863     char* const to = (char*)align_size_down((intptr_t)end(), pagesize);
1864     if (from > to) {
1865       return empty_range();
1866     }
1867     return AddrRange(from, to - from);
1868   }
1869 };
1870 
1871 ////////////////////////////////////////////////////////////////////////////
1872 // shared memory bookkeeping
1873 //
1874 // the os::reserve_memory() API and friends hand out different kind of memory, depending
1875 // on need and circumstances. Memory may be allocated with mmap() or with shmget/shmat.
1876 //
1877 // But these memory types have to be treated differently. For example, to uncommit
1878 // mmap-based memory, msync(MS_INVALIDATE) is needed, to uncommit shmat-based memory,
1879 // disclaim64() is needed.
1880 //
1881 // Therefore we need to keep track of the allocated memory segments and their
1882 // properties.
1883 
1884 // ShmBkBlock: base class for all blocks in the shared memory bookkeeping
1885 class ShmBkBlock : public CHeapObj<mtInternal> {
1886 
1887   ShmBkBlock* _next;
1888 
1889 protected:
1890 
1891   AddrRange _range;
1892   const size_t _pagesize;
1893   const bool _pinned;
1894 
1895 public:
1896 
1897   ShmBkBlock(AddrRange range, size_t pagesize, bool pinned)
1898     : _range(range), _pagesize(pagesize), _pinned(pinned) , _next(NULL) {
1899 
1900     assert(_pagesize == SIZE_4K || _pagesize == SIZE_64K || _pagesize == SIZE_16M, "invalid page size");
1901     assert(!_range.is_empty(), "invalid range");
1902   }
1903 
1904   virtual void print(outputStream* st) const {
1905     st->print("0x%p ... 0x%p (%llu) - %d %s pages - %s",
1906               _range.start(), _range.end(), _range.size(),
1907               _range.size() / _pagesize, describe_pagesize(_pagesize),
1908               _pinned ? "pinned" : "");
1909   }
1910 
1911   enum Type { MMAP, SHMAT };
1912   virtual Type getType() = 0;
1913 
1914   char* base() const { return _range.start(); }
1915   size_t size() const { return _range.size(); }
1916 
1917   void setAddrRange(AddrRange range) {
1918     _range = range;
1919   }
1920 
1921   bool containsAddress(const char* p) const {
1922     return _range.contains(p);
1923   }
1924 
1925   bool containsRange(const char* p, size_t size) const {
1926     return _range.contains(AddrRange((char*)p, size));
1927   }
1928 
1929   bool isSameRange(const char* p, size_t size) const {
1930     return _range.is_same_range(AddrRange((char*)p, size));
1931   }
1932 
1933   virtual bool disclaim(char* p, size_t size) = 0;
1934   virtual bool release() = 0;
1935 
1936   // blocks live in a list.
1937   ShmBkBlock* next() const { return _next; }
1938   void set_next(ShmBkBlock* blk) { _next = blk; }
1939 
1940 }; // end: ShmBkBlock
1941 
1942 
// ShmBkMappedBlock: describes a block allocated with mmap()
1944 class ShmBkMappedBlock : public ShmBkBlock {
1945 public:
1946 
1947   ShmBkMappedBlock(AddrRange range)
1948     : ShmBkBlock(range, SIZE_4K, false) {} // mmap: always 4K, never pinned
1949 
1950   void print(outputStream* st) const {
1951     ShmBkBlock::print(st);
1952     st->print_cr(" - mmap'ed");
1953   }
1954 
1955   Type getType() {
1956     return MMAP;
1957   }
1958 
1959   bool disclaim(char* p, size_t size) {
1960 
1961     AddrRange r(p, size);
1962 
1963     guarantee(_range.contains(r), "invalid disclaim");
1964 
1965     // only disclaim whole ranges.
1966     const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
1967     if (r2.is_empty()) {
1968       return true;
1969     }
1970 
1971     const int rc = ::msync(r2.start(), r2.size(), MS_INVALIDATE);
1972 
1973     if (rc != 0) {
1974       warning("msync(0x%p, %llu, MS_INVALIDATE) failed (%d)\n", r2.start(), r2.size(), errno);
1975     }
1976 
    return rc == 0;
1978   }
1979 
1980   bool release() {
1981     // mmap'ed blocks are released using munmap
1982     if (::munmap(_range.start(), _range.size()) != 0) {
1983       warning("munmap(0x%p, %llu) failed (%d)\n", _range.start(), _range.size(), errno);
1984       return false;
1985     }
1986     return true;
1987   }
1988 }; // end: ShmBkMappedBlock
1989 
// ShmBkShmatedBlock: describes a block allocated with shmget/shmat()
1991 class ShmBkShmatedBlock : public ShmBkBlock {
1992 public:
1993 
1994   ShmBkShmatedBlock(AddrRange range, size_t pagesize, bool pinned)
1995     : ShmBkBlock(range, pagesize, pinned) {}
1996 
1997   void print(outputStream* st) const {
1998     ShmBkBlock::print(st);
1999     st->print_cr(" - shmat'ed");
2000   }
2001 
2002   Type getType() {
2003     return SHMAT;
2004   }
2005 
2006   bool disclaim(char* p, size_t size) {
2007 
2008     AddrRange r(p, size);
2009 
2010     if (_pinned) {
2011       return true;
2012     }
2013 
2014     // shmat'ed blocks are disclaimed using disclaim64
2015     guarantee(_range.contains(r), "invalid disclaim");
2016 
2017     // only disclaim whole ranges.
2018     const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
2019     if (r2.is_empty()) {
2020       return true;
2021     }
2022 
2023     const bool rc = my_disclaim64(r2.start(), r2.size());
2024 
2025     if (Verbose && !rc) {
2026       warning("failed to disclaim shm %p-%p\n", r2.start(), r2.end());
2027     }
2028 
2029     return rc;
2030   }
2031 
2032   bool release() {
2033     bool rc = false;
2034     if (::shmdt(_range.start()) != 0) {
2035       warning("shmdt(0x%p) failed (%d)\n", _range.start(), errno);
2036     } else {
2037       rc = true;
2038     }
2039     return rc;
2040   }
2041 
2042 }; // end: ShmBkShmatedBlock
2043 
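// Anchor of the singly linked bookkeeping list. All accesses must be bracketed
// by LOCK_SHMBK/UNLOCK_SHMBK (defined below), which enter a ThreadCritical section.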
2044 static ShmBkBlock* g_shmbk_list = NULL;
2045 static volatile jint g_shmbk_table_lock = 0;
2046 
2047 // keep some usage statistics
2048 static struct {
2049   int nodes;    // number of nodes in list
2050   size_t bytes; // reserved - not committed - bytes.
2051   int reserves; // how often reserve was called
2052   int lookups;  // how often a lookup was made
2053 } g_shmbk_stats = { 0, 0, 0, 0 };
2054 
2055 // add information about a shared memory segment to the bookkeeping
2056 static void shmbk_register(ShmBkBlock* p_block) {
2057   guarantee(p_block, "logic error");
2058   p_block->set_next(g_shmbk_list);
2059   g_shmbk_list = p_block;
2060   g_shmbk_stats.reserves ++;
2061   g_shmbk_stats.bytes += p_block->size();
2062   g_shmbk_stats.nodes ++;
2063 }
2064 
2065 // remove information about a shared memory segment by its starting address
2066 static void shmbk_unregister(ShmBkBlock* p_block) {
2067   ShmBkBlock* p = g_shmbk_list;
2068   ShmBkBlock* prev = NULL;
2069   while (p) {
2070     if (p == p_block) {
2071       if (prev) {
2072         prev->set_next(p->next());
2073       } else {
2074         g_shmbk_list = p->next();
2075       }
2076       g_shmbk_stats.nodes --;
2077       g_shmbk_stats.bytes -= p->size();
2078       return;
2079     }
2080     prev = p;
2081     p = p->next();
2082   }
2083   assert(false, "should not happen");
2084 }
2085 
2086 // given a pointer, return shared memory bookkeeping record for the segment it points into
2087 // using the returned block info must happen under lock protection
2088 static ShmBkBlock* shmbk_find_by_containing_address(const char* addr) {
2089   g_shmbk_stats.lookups ++;
2090   ShmBkBlock* p = g_shmbk_list;
2091   while (p) {
2092     if (p->containsAddress(addr)) {
2093       return p;
2094     }
2095     p = p->next();
2096   }
2097   return NULL;
2098 }
2099 
2100 // dump all information about all memory segments allocated with os::reserve_memory()
2101 void shmbk_dump_info() {
  tty->print_cr("-- shared mem bookkeeping (alive: %d segments, %llu bytes, "
    "total reserves: %d total lookups: %d)",
    g_shmbk_stats.nodes, (unsigned long long) g_shmbk_stats.bytes,
    g_shmbk_stats.reserves, g_shmbk_stats.lookups);
2105   const ShmBkBlock* p = g_shmbk_list;
2106   int i = 0;
2107   while (p) {
2108     p->print(tty);
2109     p = p->next();
2110     i ++;
2111   }
2112 }
2113 
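// Brackets a critical section around the bookkeeping list. ThreadCritical is a
// scoped (RAII) lock, so early returns inside the bracket still release it.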
2114 #define LOCK_SHMBK     { ThreadCritical _LOCK_SHMBK;
2115 #define UNLOCK_SHMBK   }
2116 
2117 // End: shared memory bookkeeping
2118 ////////////////////////////////////////////////////////////////////////////////////////////////////
2119 
2120 int os::vm_page_size() {
2121   // Seems redundant as all get out
2122   assert(os::Aix::page_size() != -1, "must call os::init");
2123   return os::Aix::page_size();
2124 }
2125 
2126 // Aix allocates memory by pages.
2127 int os::vm_allocation_granularity() {
2128   assert(os::Aix::page_size() != -1, "must call os::init");
2129   return os::Aix::page_size();
2130 }
2131 
2132 int os::Aix::commit_memory_impl(char* addr, size_t size, bool exec) {
2133 
2134   // Commit is a noop. There is no explicit commit
2135   // needed on AIX. Memory is committed when touched.
2136   //
2137   // Debug : check address range for validity
2138 #ifdef ASSERT
2139   LOCK_SHMBK
2140     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2141     if (!block) {
      fprintf(stderr, "invalid pointer: " INTPTR_FORMAT "\n", (intptr_t) addr);
2143       shmbk_dump_info();
2144       assert(false, "invalid pointer");
2145       return false;
2146     } else if (!block->containsRange(addr, size)) {
      fprintf(stderr, "invalid range: " INTPTR_FORMAT " .. " INTPTR_FORMAT "\n",
              (intptr_t) addr, (intptr_t) (addr + size));
2148       shmbk_dump_info();
2149       assert(false, "invalid range");
2150       return false;
2151     }
2152   UNLOCK_SHMBK
2153 #endif // ASSERT
2154 
2155   return 0;
2156 }
2157 
2158 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2159   return os::Aix::commit_memory_impl(addr, size, exec) == 0;
2160 }
2161 
2162 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2163                                   const char* mesg) {
2164   assert(mesg != NULL, "mesg must be specified");
2165   os::Aix::commit_memory_impl(addr, size, exec);
2166 }
2167 
2168 int os::Aix::commit_memory_impl(char* addr, size_t size,
2169                                 size_t alignment_hint, bool exec) {
2170   return os::Aix::commit_memory_impl(addr, size, exec);
2171 }
2172 
2173 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
2174                           bool exec) {
2175   return os::Aix::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
2176 }
2177 
2178 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2179                                   size_t alignment_hint, bool exec,
2180                                   const char* mesg) {
2181   os::Aix::commit_memory_impl(addr, size, alignment_hint, exec);
2182 }
2183 
2184 bool os::pd_uncommit_memory(char* addr, size_t size) {
2185 
2186   // Delegate to ShmBkBlock class which knows how to uncommit its memory.
2187 
2188   bool rc = false;
2189   LOCK_SHMBK
2190     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2191     if (!block) {
2192       fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
2193       shmbk_dump_info();
2194       assert(false, "invalid pointer");
2195       return false;
2196     } else if (!block->containsRange(addr, size)) {
2197       fprintf(stderr, "invalid range: 0x%p .. 0x%p.\n", addr, addr + size);
2198       shmbk_dump_info();
2199       assert(false, "invalid range");
2200       return false;
2201     }
2202     rc = block->disclaim(addr, size);
2203   UNLOCK_SHMBK
2204 
2205   if (Verbose && !rc) {
2206     warning("failed to disclaim 0x%p .. 0x%p (0x%llX bytes).", addr, addr + size, size);
2207   }
2208   return rc;
2209 }
2210 
2211 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2212   return os::guard_memory(addr, size);
2213 }
2214 
2215 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2216   return os::unguard_memory(addr, size);
2217 }
2218 
2219 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2220 }
2221 
2222 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2223 }
2224 
2225 void os::numa_make_global(char *addr, size_t bytes) {
2226 }
2227 
2228 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2229 }
2230 
2231 bool os::numa_topology_changed() {
2232   return false;
2233 }
2234 
2235 size_t os::numa_get_groups_num() {
2236   return 1;
2237 }
2238 
2239 int os::numa_get_group_id() {
2240   return 0;
2241 }
2242 
2243 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2244   if (size > 0) {
2245     ids[0] = 0;
2246     return 1;
2247   }
2248   return 0;
2249 }
2250 
2251 bool os::get_page_info(char *start, page_info* info) {
2252   return false;
2253 }
2254 
2255 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2256   return end;
2257 }
2258 
2259 // Flags for reserve_shmatted_memory:
2260 #define RESSHM_WISHADDR_OR_FAIL                     1
2261 #define RESSHM_TRY_16M_PAGES                        2
2262 #define RESSHM_16M_PAGES_OR_FAIL                    4
2263 
2264 // Result of reserve_shmatted_memory:
2265 struct shmatted_memory_info_t {
2266   char* addr;
2267   size_t pagesize;
2268   bool pinned;
2269 };
2270 
2271 // Reserve a section of shmatted memory.
2272 // params:
2273 // bytes [in]: size of memory, in bytes
2274 // requested_addr [in]: wish address.
2275 //                      NULL = no wish.
2276 //                      If RESSHM_WISHADDR_OR_FAIL is set in flags and wish address cannot
2277 //                      be obtained, function will fail. Otherwise wish address is treated as hint and
2278 //                      another pointer is returned.
2279 // flags [in]:          some flags. Valid flags are:
2280 //                      RESSHM_WISHADDR_OR_FAIL - fail if wish address is given and cannot be obtained.
2281 //                      RESSHM_TRY_16M_PAGES - try to allocate from 16M page pool
2282 //                          (requires UseLargePages and Use16MPages)
2283 //                      RESSHM_16M_PAGES_OR_FAIL - if you cannot allocate from 16M page pool, fail.
2284 //                          Otherwise any other page size will do.
2285 // p_info [out] :       holds information about the created shared memory segment.
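//
// Example call (hypothetical): reserve at a fixed address, preferring 16M pages:
//   shmatted_memory_info_t info;
//   if (reserve_shmatted_memory(bytes, wish_addr,
//                               RESSHM_WISHADDR_OR_FAIL | RESSHM_TRY_16M_PAGES, &info)) {
//     // info.addr == wish_addr; info.pagesize/info.pinned describe the segment.
//   }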
2286 static bool reserve_shmatted_memory(size_t bytes, char* requested_addr, int flags, shmatted_memory_info_t* p_info) {
2287 
2288   assert(p_info, "parameter error");
2289 
2290   // init output struct.
2291   p_info->addr = NULL;
2292 
  // We should never be here for EXTSHM=ON.
2294   if (os::Aix::extshm()) {
2295     ShouldNotReachHere();
2296   }
2297 
2298   // extract flags. sanity checks.
2299   const bool wishaddr_or_fail =
2300     flags & RESSHM_WISHADDR_OR_FAIL;
2301   const bool try_16M_pages =
2302     flags & RESSHM_TRY_16M_PAGES;
2303   const bool f16M_pages_or_fail =
2304     flags & RESSHM_16M_PAGES_OR_FAIL;
2305 
2306   // first check: if a wish address is given and it is mandatory, but not aligned to segment boundary,
2307   // shmat will fail anyway, so save some cycles by failing right away
  if (requested_addr && ((uintptr_t)requested_addr % SIZE_256M != 0)) {
2309     if (wishaddr_or_fail) {
2310       return false;
2311     } else {
2312       requested_addr = NULL;
2313     }
2314   }
2315 
2316   char* addr = NULL;
2317 
2318   // Align size of shm up to the largest possible page size, to avoid errors later on when we try to change
2319   // pagesize dynamically.
2320   const size_t size = align_size_up(bytes, SIZE_16M);
2321 
2322   // reserve the shared segment
2323   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
2324   if (shmid == -1) {
2325     warning("shmget(.., %lld, ..) failed (errno: %d).", size, errno);
2326     return false;
2327   }
2328 
2329   // Important note:
2330   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
2331   // We must right after attaching it remove it from the system. System V shm segments are global and
2332   // survive the process.
2333   // So, from here on: Do not assert. Do not return. Always do a "goto cleanup_shm".
2334 
2335   // try forcing the page size
2336   size_t pagesize = -1; // unknown so far
2337 
2338   if (UseLargePages) {
2339 
2340     struct shmid_ds shmbuf;
2341     memset(&shmbuf, 0, sizeof(shmbuf));
2342 
2343     // First, try to take from 16M page pool if...
2344     if (os::Aix::can_use_16M_pages()  // we can ...
2345         && Use16MPages                // we are not explicitly forbidden to do so (-XX:-Use16MPages)..
2346         && try_16M_pages) {           // caller wants us to.
2347       shmbuf.shm_pagesize = SIZE_16M;
2348       if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
2349         pagesize = SIZE_16M;
2350       } else {
2351         warning("Failed to allocate %d 16M pages. 16M page pool might be exhausted. (shmctl failed with %d)",
2352                 size / SIZE_16M, errno);
2353         if (f16M_pages_or_fail) {
2354           goto cleanup_shm;
2355         }
2356       }
2357     }
2358 
2359     // Nothing yet? Try setting 64K pages. Note that I never saw this fail, but in theory it might,
2360     // because the 64K page pool may also be exhausted.
2361     if (pagesize == -1) {
2362       shmbuf.shm_pagesize = SIZE_64K;
2363       if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
2364         pagesize = SIZE_64K;
2365       } else {
2366         warning("Failed to allocate %d 64K pages. (shmctl failed with %d)",
2367                 size / SIZE_64K, errno);
2368         // here I give up. leave page_size -1 - later, after attaching, we will query the
2369         // real page size of the attached memory. (in theory, it may be something different
2370         // from 4K if LDR_CNTRL SHM_PSIZE is set)
2371       }
2372     }
2373   }
2374 
2375   // sanity point
2376   assert(pagesize == -1 || pagesize == SIZE_16M || pagesize == SIZE_64K, "wrong page size");
2377 
2378   // Now attach the shared segment.
2379   addr = (char*) shmat(shmid, requested_addr, 0);
2380   if (addr == (char*)-1) {
2381     // How to handle attach failure:
2382     // If it failed for a specific wish address, tolerate this: in that case, if wish address was
2383     // mandatory, fail, if not, retry anywhere.
2384     // If it failed for any other reason, treat that as fatal error.
2385     addr = NULL;
2386     if (requested_addr) {
2387       if (wishaddr_or_fail) {
2388         goto cleanup_shm;
2389       } else {
2390         addr = (char*) shmat(shmid, NULL, 0);
2391         if (addr == (char*)-1) { // fatal
2392           addr = NULL;
2393           warning("shmat failed (errno: %d)", errno);
2394           goto cleanup_shm;
2395         }
2396       }
2397     } else { // fatal
2398       addr = NULL;
2399       warning("shmat failed (errno: %d)", errno);
2400       goto cleanup_shm;
2401     }
2402   }
2403 
2404   // sanity point
2405   assert(addr && addr != (char*) -1, "wrong address");
2406 
2407   // after successful Attach remove the segment - right away.
2408   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2409     warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2410     guarantee(false, "failed to remove shared memory segment!");
2411   }
2412   shmid = -1;
2413 
  // Query the real page size. In case setting the page size did not work (see above),
  // the system may have given us something other than 4K (LDR_CNTRL).
2416   {
2417     const size_t real_pagesize = os::Aix::query_pagesize(addr);
2418     if (pagesize != -1) {
2419       assert(pagesize == real_pagesize, "unexpected pagesize after shmat");
2420     } else {
2421       pagesize = real_pagesize;
2422     }
2423   }
2424 
2425   // Now register the reserved block with internal book keeping.
2426   LOCK_SHMBK
    const bool pinned = (pagesize >= SIZE_16M);
2428     ShmBkShmatedBlock* const p_block = new ShmBkShmatedBlock(AddrRange(addr, size), pagesize, pinned);
2429     assert(p_block, "");
2430     shmbk_register(p_block);
2431   UNLOCK_SHMBK
2432 
2433 cleanup_shm:
2434 
2435   // if we have not done so yet, remove the shared memory segment. This is very important.
2436   if (shmid != -1) {
2437     if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2438       warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2439       guarantee(false, "failed to remove shared memory segment!");
2440     }
2441     shmid = -1;
2442   }
2443 
2444   // trace
2445   if (Verbose && !addr) {
    if (requested_addr != NULL) {
      warning("failed to shm-allocate 0x%llX bytes at wish address 0x%p.",
              (unsigned long long) size, requested_addr);
    } else {
      warning("failed to shm-allocate 0x%llX bytes at any address.",
              (unsigned long long) size);
    }
2451   }
2452 
2453   // hand info to caller
2454   if (addr) {
2455     p_info->addr = addr;
2456     p_info->pagesize = pagesize;
    p_info->pinned = (pagesize == SIZE_16M);
2458   }
2459 
2460   // sanity test:
2461   if (requested_addr && addr && wishaddr_or_fail) {
2462     guarantee(addr == requested_addr, "shmat error");
2463   }
2464 
2465   // just one more test to really make sure we have no dangling shm segments.
2466   guarantee(shmid == -1, "dangling shm segments");
2467 
  return addr != NULL;
2469 
2470 } // end: reserve_shmatted_memory
2471 
2472 // Reserve memory using mmap. Behaves the same as reserve_shmatted_memory():
2473 // will return NULL in case of an error.
2474 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
2475 
2476   // if a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2477   if (requested_addr && ((uintptr_t)requested_addr % os::vm_page_size() != 0)) {
2478     warning("Wish address 0x%p not aligned to page boundary.", requested_addr);
2479     return NULL;
2480   }
2481 
2482   const size_t size = align_size_up(bytes, SIZE_4K);
2483 
2484   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2485   // msync(MS_INVALIDATE) (see os::uncommit_memory)
2486   int flags = MAP_ANONYMOUS | MAP_SHARED;
2487 
2488   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2489   // it means if wishaddress is given but MAP_FIXED is not set.
2490   //
2491   // Note however that this changes semantics in SPEC1170 mode insofar as MAP_FIXED
2492   // clobbers the address range, which is probably not what the caller wants. That's
2493   // why I assert here (again) that the SPEC1170 compat mode is off.
2494   // If we want to be able to run under SPEC1170, we have to do some porting and
2495   // testing.
2496   if (requested_addr != NULL) {
2497     assert(!os::Aix::xpg_sus_mode(), "SPEC1170 mode not allowed.");
2498     flags |= MAP_FIXED;
2499   }
2500 
2501   char* addr = (char*)::mmap(requested_addr, size, PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2502 
2503   if (addr == MAP_FAILED) {
    // mmap failed: tolerate this for specific wish addresses; not being able to
    // map anywhere at all is a fatal error.
    if (requested_addr == NULL) {
      // It's ok to fail here if the machine does not have enough memory.
      warning("mmap(NULL, 0x%llX, ..) failed (%d)", (unsigned long long) size, errno);
2509     }
2510     addr = NULL;
2511     goto cleanup_mmap;
2512   }
2513 
  // With MAP_FIXED, a successful mmap must have returned the requested address.
2515   if (addr && requested_addr) {
2516     guarantee(addr == requested_addr, "unexpected");
2517   }
2518 
2519   // register this mmap'ed segment with book keeping
2520   LOCK_SHMBK
2521     ShmBkMappedBlock* const p_block = new ShmBkMappedBlock(AddrRange(addr, size));
2522     assert(p_block, "");
2523     shmbk_register(p_block);
2524   UNLOCK_SHMBK
2525 
2526 cleanup_mmap:
2527 
2528   // trace
2529   if (Verbose) {
2530     if (addr) {
2531       fprintf(stderr, "mmap-allocated 0x%p .. 0x%p (0x%llX bytes)\n", addr, addr + bytes, bytes);
2532     }
2533     else {
      if (requested_addr != NULL) {
        warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.",
                (unsigned long long) bytes, requested_addr);
      } else {
        warning("failed to mmap-allocate 0x%llX bytes at any address.",
                (unsigned long long) bytes);
2538       }
2539     }
2540   }
2541 
2542   return addr;
2543 
2544 } // end: reserve_mmaped_memory
2545 
2546 // Reserves and attaches a shared memory segment.
2547 // Will assert if a wish address is given and could not be obtained.
2548 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2549   return os::attempt_reserve_memory_at(bytes, requested_addr);
2550 }
2551 
2552 bool os::pd_release_memory(char* addr, size_t size) {
2553 
2554   // delegate to ShmBkBlock class which knows how to uncommit its memory.
2555 
2556   bool rc = false;
2557   LOCK_SHMBK
2558     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2559     if (!block) {
2560       fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
2561       shmbk_dump_info();
2562       assert(false, "invalid pointer");
2563       return false;
2564     }
2565     else if (!block->isSameRange(addr, size)) {
2566       if (block->getType() == ShmBkBlock::MMAP) {
        // Release only the same range, or a part at the beginning or at the end of a range.
2568         if (block->base() == addr && size < block->size()) {
2569           ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base() + size, block->size() - size));
2570           assert(b, "");
2571           shmbk_register(b);
2572           block->setAddrRange(AddrRange(addr, size));
2573         }
2574         else if (addr > block->base() && addr + size == block->base() + block->size()) {
2575           ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base(), block->size() - size));
2576           assert(b, "");
2577           shmbk_register(b);
2578           block->setAddrRange(AddrRange(addr, size));
2579         }
2580         else {
2581           fprintf(stderr, "invalid mmap range: 0x%p .. 0x%p.\n", addr, addr + size);
2582           shmbk_dump_info();
2583           assert(false, "invalid mmap range");
2584           return false;
2585         }
2586       }
2587       else {
2588         // Release only the same range. No partial release allowed.
        // Soften the requirement a bit, because the user may think they own a smaller
        // size than the block actually has, due to alignment etc.
2591         if (block->base() != addr || block->size() < size) {
2592           fprintf(stderr, "invalid shmget range: 0x%p .. 0x%p.\n", addr, addr + size);
2593           shmbk_dump_info();
2594           assert(false, "invalid shmget range");
2595           return false;
2596         }
2597       }
2598     }
2599     rc = block->release();
2600     assert(rc, "release failed");
2601     // remove block from bookkeeping
2602     shmbk_unregister(block);
2603     delete block;
2604   UNLOCK_SHMBK
2605 
2606   if (!rc) {
2607     warning("failed to released %lu bytes at 0x%p", size, addr);
2608   }
2609 
2610   return rc;
2611 }
2612 
2613 static bool checked_mprotect(char* addr, size_t size, int prot) {
2614 
2615   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2616   // not tell me if protection failed when trying to protect an un-protectable range.
2617   //
  // This means if the memory was allocated using shmget/shmat, protection won't work
2619   // but mprotect will still return 0:
2620   //
2621   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2622 
  bool rc = (::mprotect(addr, size, prot) == 0);
2624 
2625   if (!rc) {
2626     const char* const s_errno = strerror(errno);
2627     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2628     return false;
2629   }
2630 
2631   // mprotect success check
2632   //
2633   // Mprotect said it changed the protection but can I believe it?
2634   //
2635   // To be sure I need to check the protection afterwards. Try to
2636   // read from protected memory and check whether that causes a segfault.
2637   //
2638   if (!os::Aix::xpg_sus_mode()) {
2639 
2640     if (StubRoutines::SafeFetch32_stub()) {
2641 
      const bool read_protected =
        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
         SafeFetch32((int*)addr, 0x76543210) == 0x76543210);
2645 
2646       if (prot & PROT_READ) {
2647         rc = !read_protected;
2648       } else {
2649         rc = read_protected;
2650       }
2651     }
2652   }
2653   if (!rc) {
2654     assert(false, "mprotect failed.");
2655   }
2656   return rc;
2657 }
2658 
2659 // Set protections specified
2660 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2661   unsigned int p = 0;
2662   switch (prot) {
2663   case MEM_PROT_NONE: p = PROT_NONE; break;
2664   case MEM_PROT_READ: p = PROT_READ; break;
2665   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2666   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2667   default:
2668     ShouldNotReachHere();
2669   }
2670   // is_committed is unused.
2671   return checked_mprotect(addr, size, p);
2672 }
2673 
2674 bool os::guard_memory(char* addr, size_t size) {
2675   return checked_mprotect(addr, size, PROT_NONE);
2676 }
2677 
2678 bool os::unguard_memory(char* addr, size_t size) {
2679   return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2680 }
2681 
2682 // Large page support
2683 
2684 static size_t _large_page_size = 0;
2685 
2686 // Enable large page support if OS allows that.
2687 void os::large_page_init() {
2688 
2689   // Note: os::Aix::query_multipage_support must run first.
2690 
2691   if (!UseLargePages) {
2692     return;
2693   }
2694 
2695   if (!Aix::can_use_64K_pages()) {
2696     assert(!Aix::can_use_16M_pages(), "64K is a precondition for 16M.");
2697     UseLargePages = false;
2698     return;
2699   }
2700 
2701   if (!Aix::can_use_16M_pages() && Use16MPages) {
2702     fprintf(stderr, "Cannot use 16M pages. Please ensure that there is a 16M page pool "
2703             " and that the VM runs with CAP_BYPASS_RAC_VMM and CAP_PROPAGATE capabilities.\n");
2704   }
2705 
2706   // Do not report 16M page alignment as part of os::_page_sizes if we are
2707   // explicitly forbidden from using 16M pages. Doing so would increase the
2708   // alignment the garbage collector calculates with, slightly increasing
2709   // heap usage. We should only pay for 16M alignment if we really want to
2710   // use 16M pages.
2711   if (Use16MPages && Aix::can_use_16M_pages()) {
2712     _large_page_size = SIZE_16M;
2713     _page_sizes[0] = SIZE_16M;
2714     _page_sizes[1] = SIZE_64K;
2715     _page_sizes[2] = SIZE_4K;
2716     _page_sizes[3] = 0;
2717   } else if (Aix::can_use_64K_pages()) {
2718     _large_page_size = SIZE_64K;
2719     _page_sizes[0] = SIZE_64K;
2720     _page_sizes[1] = SIZE_4K;
2721     _page_sizes[2] = 0;
2722   }
2723 
  if (Verbose) {
    fprintf(stderr, "Default large page size is 0x%llX.\n",
            (unsigned long long) _large_page_size);
  }
2727 } // end: os::large_page_init()
2728 
2729 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2730   // "exec" is passed in but not used. Creating the shared image for
2731   // the code cache doesn't have an SHM_X executable permission to check.
2732   Unimplemented();
2733   return 0;
2734 }
2735 
2736 bool os::release_memory_special(char* base, size_t bytes) {
2737   // detaching the SHM segment will also delete it, see reserve_memory_special()
2738   Unimplemented();
2739   return false;
2740 }
2741 
2742 size_t os::large_page_size() {
2743   return _large_page_size;
2744 }
2745 
2746 bool os::can_commit_large_page_memory() {
  // Well, sadly we cannot commit anything at all (see comment in
  // os::commit_memory), but we claim that we can so that we can make
  // use of large pages.
2750 }
2751 
2752 bool os::can_execute_large_page_memory() {
2753   // We can do that
2754   return true;
2755 }
2756 
2757 // Reserve memory at an arbitrary address, only if that area is
2758 // available (and not reserved for something else).
2759 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2760 
2761   bool use_mmap = false;
2762 
2763   // mmap: smaller graining, no large page support
2764   // shm: large graining (256M), large page support, limited number of shm segments
2765   //
2766   // Prefer mmap wherever we either do not need large page support or have OS limits
2767 
2768   if (!UseLargePages || bytes < SIZE_16M) {
2769     use_mmap = true;
2770   }
2771 
2772   char* addr = NULL;
2773   if (use_mmap) {
2774     addr = reserve_mmaped_memory(bytes, requested_addr);
2775   } else {
2776     // shmat: wish address is mandatory, and do not try 16M pages here.
2777     shmatted_memory_info_t info;
2778     const int flags = RESSHM_WISHADDR_OR_FAIL;
2779     if (reserve_shmatted_memory(bytes, requested_addr, flags, &info)) {
2780       addr = info.addr;
2781     }
2782   }
2783 
2784   return addr;
2785 }
2786 
2787 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2788   return ::read(fd, buf, nBytes);
2789 }
2790 
2791 void os::naked_short_sleep(jlong ms) {
2792   struct timespec req;
2793 
  assert(ms < 1000, "Uninterruptible sleep, short time use only");
2795   req.tv_sec = 0;
2796   if (ms > 0) {
2797     req.tv_nsec = (ms % 1000) * 1000000;
2798   }
2799   else {
2800     req.tv_nsec = 1;
2801   }
2802 
2803   nanosleep(&req, NULL);
2804 
2805   return;
2806 }
2807 
2808 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2809 void os::infinite_sleep() {
2810   while (true) {    // sleep forever ...
2811     ::sleep(100);   // ... 100 seconds at a time
2812   }
2813 }
2814 
2815 // Used to convert frequent JVM_Yield() to nops
2816 bool os::dont_yield() {
2817   return DontYieldALot;
2818 }
2819 
2820 void os::naked_yield() {
2821   sched_yield();
2822 }
2823 
2824 ////////////////////////////////////////////////////////////////////////////////
2825 // thread priority support
2826 
2827 // From AIX manpage to pthread_setschedparam
2828 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2829 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2830 //
2831 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2832 // range from 40 to 80, where 40 is the least favored priority and 80
2833 // is the most favored."
2834 //
2835 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2836 // scheduling there; however, this still leaves iSeries.)
2837 //
2838 // We use the same values for AIX and PASE.
2839 int os::java_to_os_priority[CriticalPriority + 1] = {
2840   54,             // 0 Entry should never be used
2841 
2842   55,             // 1 MinPriority
2843   55,             // 2
2844   56,             // 3
2845 
2846   56,             // 4
2847   57,             // 5 NormPriority
2848   57,             // 6
2849 
2850   58,             // 7
2851   58,             // 8
2852   59,             // 9 NearMaxPriority
2853 
2854   60,             // 10 MaxPriority
2855 
2856   60              // 11 CriticalPriority
2857 };
2858 
2859 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2860   if (!UseThreadPriorities) return OS_OK;
2861   pthread_t thr = thread->osthread()->pthread_id();
2862   int policy = SCHED_OTHER;
2863   struct sched_param param;
2864   param.sched_priority = newpri;
2865   int ret = pthread_setschedparam(thr, policy, &param);
2866 
2867   if (Verbose) {
2868     if (ret == 0) {
2869       fprintf(stderr, "changed priority of thread %d to %d\n", (int)thr, newpri);
2870     } else {
2871       fprintf(stderr, "Could not changed priority for thread %d to %d (error %d, %s)\n",
2872               (int)thr, newpri, ret, strerror(ret));
2873     }
2874   }
2875   return (ret == 0) ? OS_OK : OS_ERR;
2876 }
2877 
2878 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2879   if (!UseThreadPriorities) {
2880     *priority_ptr = java_to_os_priority[NormPriority];
2881     return OS_OK;
2882   }
2883   pthread_t thr = thread->osthread()->pthread_id();
2884   int policy = SCHED_OTHER;
2885   struct sched_param param;
2886   int ret = pthread_getschedparam(thr, &policy, &param);
2887   *priority_ptr = param.sched_priority;
2888 
2889   return (ret == 0) ? OS_OK : OS_ERR;
2890 }
2891 
2892 // Hint to the underlying OS that a task switch would not be good.
2893 // Void return because it's a hint and can fail.
2894 void os::hint_no_preempt() {}
2895 
2896 ////////////////////////////////////////////////////////////////////////////////
2897 // suspend/resume support
2898 
2899 //  the low-level signal-based suspend/resume support is a remnant from the
2900 //  old VM-suspension that used to be for java-suspension, safepoints etc,
2901 //  within hotspot. Now there is a single use-case for this:
2902 //    - calling get_thread_pc() on the VMThread by the flat-profiler task
2903 //      that runs in the watcher thread.
2904 //  The remaining code is greatly simplified from the more general suspension
2905 //  code that used to be used.
2906 //
2907 //  The protocol is quite simple:
2908 //  - suspend:
2909 //      - sends a signal to the target thread
2910 //      - polls the suspend state of the osthread using a yield loop
2911 //      - target thread signal handler (SR_handler) sets suspend state
2912 //        and blocks in sigsuspend until continued
2913 //  - resume:
2914 //      - sets target osthread state to continue
2915 //      - sends signal to end the sigsuspend loop in the SR_handler
2916 //
2917 //  Note that the SR_lock plays no role in this suspend/resume protocol.
2918 //
2919 
2920 static void resume_clear_context(OSThread *osthread) {
2921   osthread->set_ucontext(NULL);
2922   osthread->set_siginfo(NULL);
2923 }
2924 
2925 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2926   osthread->set_ucontext(context);
2927   osthread->set_siginfo(siginfo);
2928 }
2929 
2930 //
2931 // Handler function invoked when a thread's execution is suspended or
2932 // resumed. We have to be careful that only async-safe functions are
2933 // called here (Note: most pthread functions are not async safe and
2934 // should be avoided.)
2935 //
// Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal handler
// from being run. libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal.
2941 //
2942 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
2943 //
2944 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2945   // Save and restore errno to avoid confusing native code with EINTR
2946   // after sigsuspend.
2947   int old_errno = errno;
2948 
2949   Thread* thread = Thread::current();
2950   OSThread* osthread = thread->osthread();
2951   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2952 
2953   os::SuspendResume::State current = osthread->sr.state();
2954   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2955     suspend_save_context(osthread, siginfo, context);
2956 
2957     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2958     os::SuspendResume::State state = osthread->sr.suspended();
2959     if (state == os::SuspendResume::SR_SUSPENDED) {
2960       sigset_t suspend_set;  // signals for sigsuspend()
2961 
2962       // get current set of blocked signals and unblock resume signal
2963       pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2964       sigdelset(&suspend_set, SR_signum);
2965 
2966       // wait here until we are resumed
2967       while (1) {
2968         sigsuspend(&suspend_set);
2969 
2970         os::SuspendResume::State result = osthread->sr.running();
2971         if (result == os::SuspendResume::SR_RUNNING) {
2972           break;
2973         }
2974       }
2975 
2976     } else if (state == os::SuspendResume::SR_RUNNING) {
2977       // request was cancelled, continue
2978     } else {
2979       ShouldNotReachHere();
2980     }
2981 
2982     resume_clear_context(osthread);
2983   } else if (current == os::SuspendResume::SR_RUNNING) {
2984     // request was cancelled, continue
2985   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2986     // ignore
2987   } else {
2988     ShouldNotReachHere();
2989   }
2990 
2991   errno = old_errno;
2992 }
2993 
2994 
2995 static int SR_initialize() {
2996   struct sigaction act;
2997   char *s;
2998   // Get signal number to use for suspend/resume
2999   if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
3000     int sig = ::strtol(s, 0, 10);
    if (sig > 0 && sig < NSIG) {
3002       SR_signum = sig;
3003     }
3004   }
3005 
3006   assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
3007         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
3008 
3009   sigemptyset(&SR_sigset);
3010   sigaddset(&SR_sigset, SR_signum);
3011 
3012   // Set up signal handler for suspend/resume.
3013   act.sa_flags = SA_RESTART|SA_SIGINFO;
3014   act.sa_handler = (void (*)(int)) SR_handler;
3015 
3016   // SR_signum is blocked by default.
  // 4528190 - We also need to block the pthread restart signal (32 on all
  // supported Linux platforms). Note that LinuxThreads needs to block
  // this signal for all threads to work properly. So we don't have to
  // use a hard-coded signal number when setting up the mask.
3021   pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
3022 
3023   if (sigaction(SR_signum, &act, 0) == -1) {
3024     return -1;
3025   }
3026 
3027   // Save signal flag
3028   os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
3029   return 0;
3030 }
3031 
3032 static int SR_finalize() {
3033   return 0;
3034 }
3035 
3036 static int sr_notify(OSThread* osthread) {
3037   int status = pthread_kill(osthread->pthread_id(), SR_signum);
3038   assert_status(status == 0, status, "pthread_kill");
3039   return status;
3040 }
3041 
3042 // "Randomly" selected value for how long we want to spin
3043 // before bailing out on suspending a thread, also how often
3044 // we send a signal to a thread we want to resume
3045 static const int RANDOMLY_LARGE_INTEGER = 1000000;
3046 static const int RANDOMLY_LARGE_INTEGER2 = 100;
3047 
// Returns true on success and false on error - really, an error is fatal,
// but this seems to be the normal response to library errors.
3050 static bool do_suspend(OSThread* osthread) {
3051   assert(osthread->sr.is_running(), "thread should be running");
3052   // mark as suspended and send signal
3053 
3054   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3055     // failed to switch, state wasn't running?
3056     ShouldNotReachHere();
3057     return false;
3058   }
3059 
3060   if (sr_notify(osthread) != 0) {
3061     // try to cancel, switch to running
3062 
3063     os::SuspendResume::State result = osthread->sr.cancel_suspend();
3064     if (result == os::SuspendResume::SR_RUNNING) {
3065       // cancelled
3066       return false;
3067     } else if (result == os::SuspendResume::SR_SUSPENDED) {
3068       // somehow managed to suspend
3069       return true;
3070     } else {
3071       ShouldNotReachHere();
3072       return false;
3073     }
3074   }
3075 
3076   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3077 
3078   for (int n = 0; !osthread->sr.is_suspended(); n++) {
3079     for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
3080       os::naked_yield();
3081     }
3082 
3083     // timeout, try to cancel the request
3084     if (n >= RANDOMLY_LARGE_INTEGER) {
3085       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3086       if (cancelled == os::SuspendResume::SR_RUNNING) {
3087         return false;
3088       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3089         return true;
3090       } else {
3091         ShouldNotReachHere();
3092         return false;
3093       }
3094     }
3095   }
3096 
3097   guarantee(osthread->sr.is_suspended(), "Must be suspended");
3098   return true;
3099 }
3100 
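// Moves the target thread from SUSPENDED to WAKEUP_REQUEST and keeps signalling
// it until SR_handler has left its sigsuspend() loop and reports SR_RUNNING.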
3101 static void do_resume(OSThread* osthread) {
3102   //assert(osthread->sr.is_suspended(), "thread should be suspended");
3103 
3104   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3105     // failed to switch to WAKEUP_REQUEST
3106     ShouldNotReachHere();
3107     return;
3108   }
3109 
3110   while (!osthread->sr.is_running()) {
3111     if (sr_notify(osthread) == 0) {
3112       for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
3113         for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
3114           os::naked_yield();
3115         }
3116       }
3117     } else {
3118       ShouldNotReachHere();
3119     }
3120   }
3121 
3122   guarantee(osthread->sr.is_running(), "Must be running!");
3123 }
3124 
3125 ///////////////////////////////////////////////////////////////////////////////////
3126 // signal handling (except suspend/resume)
3127 
3128 // This routine may be used by user applications as a "hook" to catch signals.
3129 // The user-defined signal handler must pass unrecognized signals to this
3130 // routine, and if it returns true (non-zero), then the signal handler must
3131 // return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
3134 //
3135 // If this routine returns false, it is OK to call it again. This allows
3136 // the user-defined signal handler to perform checks either before or after
3137 // the VM performs its own checks. Naturally, the user code would be making
3138 // a serious error if it tried to handle an exception (such as a null check
3139 // or breakpoint) that the VM was generating for its own correct operation.
3140 //
3141 // This routine may recognize any of the following kinds of signals:
3142 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
3143 // It should be consulted by handlers for any of those signals.
3144 //
3145 // The caller of this routine must pass in the three arguments supplied
3146 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
3147 // field of the structure passed to sigaction(). This routine assumes that
3148 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3149 //
3150 // Note that the VM will print warnings if it detects conflicting signal
3151 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3152 //
3153 extern "C" JNIEXPORT int
3154 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
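
// A minimal sketch (illustrative only, not part of the VM) of how a
// user-defined handler might chain into this hook:
//
//   static void my_handler(int sig, siginfo_t* info, void* uc) {
//     if (JVM_handle_aix_signal(sig, info, uc, 0 /* !abort_if_unrecognized */)) {
//       return; // the VM recognized and handled the signal
//     }
//     // ... application-specific handling ...
//   }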
3155 
// Set thread signal mask (for some reason on AIX sigthreadmask() seems
// to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same).
3159 bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
3160   const int rc = ::pthread_sigmask(how, set, oset);
  // Return value semantics differ slightly in the error case:
  // pthread_sigmask returns the error number, sigthreadmask returns -1 and
  // sets the global errno (so pthread_sigmask is more thread-safe for error
  // handling). But success is always 0.
  return rc == 0;
3166 }
3167 
3168 // Function to unblock all signals which are, according
3169 // to POSIX, typical program error signals. If they happen while being blocked,
3170 // they typically will bring down the process immediately.
3171 bool unblock_program_error_signals() {
3172   sigset_t set;
3173   ::sigemptyset(&set);
3174   ::sigaddset(&set, SIGILL);
3175   ::sigaddset(&set, SIGBUS);
3176   ::sigaddset(&set, SIGFPE);
3177   ::sigaddset(&set, SIGSEGV);
3178   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
3179 }
3180 
3181 // Renamed from 'signalHandler' to avoid collision with other shared libs.
3182 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
3183   assert(info != NULL && uc != NULL, "it must be old kernel");
3184 
3185   // Never leave program error signals blocked;
3186   // on all our platforms they would bring down the process immediately when
3187   // getting raised while being blocked.
3188   unblock_program_error_signals();
3189 
3190   JVM_handle_aix_signal(sig, info, uc, true);
3191 }
3192 
3193 
3194 // This boolean allows users to forward their own non-matching signals
3195 // to JVM_handle_aix_signal, harmlessly.
3196 bool os::Aix::signal_handlers_are_installed = false;
3197 
3198 // For signal-chaining
3199 struct sigaction os::Aix::sigact[MAXSIGNUM];
3200 unsigned int os::Aix::sigs = 0;
3201 bool os::Aix::libjsig_is_loaded = false;
3202 typedef struct sigaction *(*get_signal_t)(int);
3203 get_signal_t os::Aix::get_signal_action = NULL;
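
// Together these implement signal chaining: sigact[] holds the handlers that
// were installed before ours (an entry is valid only if the corresponding bit
// in 'sigs' is set), and get_signal_action, when libjsig is loaded, resolves
// to libjsig's JVM_get_signal_action (see install_signal_handlers() below).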
3204 
3205 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
3206   struct sigaction *actp = NULL;
3207 
3208   if (libjsig_is_loaded) {
3209     // Retrieve the old signal handler from libjsig
3210     actp = (*get_signal_action)(sig);
3211   }
3212   if (actp == NULL) {
3213     // Retrieve the preinstalled signal handler from jvm
3214     actp = get_preinstalled_handler(sig);
3215   }
3216 
3217   return actp;
3218 }
3219 
3220 static bool call_chained_handler(struct sigaction *actp, int sig,
3221                                  siginfo_t *siginfo, void *context) {
3222   // Call the old signal handler
3223   if (actp->sa_handler == SIG_DFL) {
3224     // It's more reasonable to let jvm treat it as an unexpected exception
3225     // instead of taking the default action.
3226     return false;
3227   } else if (actp->sa_handler != SIG_IGN) {
3228     if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
3230       sigaddset(&(actp->sa_mask), sig);
3231     }
3232 
3233     sa_handler_t hand = NULL;
3234     sa_sigaction_t sa = NULL;
3235     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3236     // retrieve the chained handler
3237     if (siginfo_flag_set) {
3238       sa = actp->sa_sigaction;
3239     } else {
3240       hand = actp->sa_handler;
3241     }
3242 
3243     if ((actp->sa_flags & SA_RESETHAND) != 0) {
3244       actp->sa_handler = SIG_DFL;
3245     }
3246 
3247     // try to honor the signal mask
3248     sigset_t oset;
3249     pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3250 
3251     // call into the chained handler
3252     if (siginfo_flag_set) {
3253       (*sa)(sig, siginfo, context);
3254     } else {
3255       (*hand)(sig);
3256     }
3257 
3258     // restore the signal mask
3259     pthread_sigmask(SIG_SETMASK, &oset, 0);
3260   }
3261   // Tell jvm's signal handler the signal is taken care of.
3262   return true;
3263 }
3264 
3265 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3266   bool chained = false;
3267   // signal-chaining
3268   if (UseSignalChaining) {
3269     struct sigaction *actp = get_chained_signal_action(sig);
3270     if (actp != NULL) {
3271       chained = call_chained_handler(actp, sig, siginfo, context);
3272     }
3273   }
3274   return chained;
3275 }
3276 
3277 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3278   if ((((unsigned int)1 << sig) & sigs) != 0) {
3279     return &sigact[sig];
3280   }
3281   return NULL;
3282 }
3283 
3284 void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3285   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3286   sigact[sig] = oldAct;
3287   sigs |= (unsigned int)1 << sig;
3288 }
3289 
// For diagnostics.
3291 int os::Aix::sigflags[MAXSIGNUM];
3292 
3293 int os::Aix::get_our_sigflags(int sig) {
3294   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3295   return sigflags[sig];
3296 }
3297 
3298 void os::Aix::set_our_sigflags(int sig, int flags) {
3299   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3300   sigflags[sig] = flags;
3301 }
3302 
3303 void os::Aix::set_signal_handler(int sig, bool set_installed) {
3304   // Check for overwrite.
3305   struct sigaction oldAct;
3306   sigaction(sig, (struct sigaction*)NULL, &oldAct);
3307 
3308   void* oldhand = oldAct.sa_sigaction
3309     ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3310     : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3311   // Renamed 'signalHandler' to avoid collision with other shared libs.
3312   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3313       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3314       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3315     if (AllowUserSignalHandlers || !set_installed) {
3316       // Do not overwrite; user takes responsibility to forward to us.
3317       return;
3318     } else if (UseSignalChaining) {
3319       // save the old handler in jvm
3320       save_preinstalled_handler(sig, oldAct);
3321       // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
3323     } else {
3324       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
3325                     "%#lx for signal %d.", (long)oldhand, sig));
3326     }
3327   }
3328 
3329   struct sigaction sigAct;
3330   sigfillset(&(sigAct.sa_mask));
3331   if (!set_installed) {
3332     sigAct.sa_handler = SIG_DFL;
3333     sigAct.sa_flags = SA_RESTART;
3334   } else {
3335     // Renamed 'signalHandler' to avoid collision with other shared libs.
3336     sigAct.sa_sigaction = javaSignalHandler;
3337     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3338   }
  // Save the flags we are setting ourselves.
3340   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3341   sigflags[sig] = sigAct.sa_flags;
3342 
3343   int ret = sigaction(sig, &sigAct, &oldAct);
3344   assert(ret == 0, "check");
3345 
3346   void* oldhand2 = oldAct.sa_sigaction
3347                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3348                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3349   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3350 }
3351 
3352 // install signal handlers for signals that HotSpot needs to
3353 // handle in order to support Java-level exception handling.
3354 void os::Aix::install_signal_handlers() {
3355   if (!signal_handlers_are_installed) {
3356     signal_handlers_are_installed = true;
3357 
3358     // signal-chaining
3359     typedef void (*signal_setting_t)();
3360     signal_setting_t begin_signal_setting = NULL;
3361     signal_setting_t end_signal_setting = NULL;
3362     begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3363                              dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3364     if (begin_signal_setting != NULL) {
3365       end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3366                              dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3367       get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3368                             dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3369       libjsig_is_loaded = true;
3370       assert(UseSignalChaining, "should enable signal-chaining");
3371     }
3372     if (libjsig_is_loaded) {
3373       // Tell libjsig jvm is setting signal handlers
3374       (*begin_signal_setting)();
3375     }
3376 
3377     set_signal_handler(SIGSEGV, true);
3378     set_signal_handler(SIGPIPE, true);
3379     set_signal_handler(SIGBUS, true);
3380     set_signal_handler(SIGILL, true);
3381     set_signal_handler(SIGFPE, true);
3382     set_signal_handler(SIGTRAP, true);
3383     set_signal_handler(SIGXFSZ, true);
3384     set_signal_handler(SIGDANGER, true);
3385 
3386     if (libjsig_is_loaded) {
3387       // Tell libjsig jvm finishes setting signal handlers
3388       (*end_signal_setting)();
3389     }
3390 
    // We don't activate the signal checker if libjsig is in place; we trust
    // ourselves, and if user signal handlers are installed all bets are off.
3393     // Log that signal checking is off only if -verbose:jni is specified.
3394     if (CheckJNICalls) {
3395       if (libjsig_is_loaded) {
3396         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3397         check_signals = false;
3398       }
3399       if (AllowUserSignalHandlers) {
3400         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3401         check_signals = false;
3402       }
3403       // need to initialize check_signal_done
3404       ::sigemptyset(&check_signal_done);
3405     }
3406   }
3407 }
3408 
3409 static const char* get_signal_handler_name(address handler,
3410                                            char* buf, int buflen) {
3411   int offset;
3412   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3413   if (found) {
3414     // skip directory names
3415     const char *p1, *p2;
3416     p1 = buf;
3417     size_t len = strlen(os::file_separator());
3418     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    // The way os::dll_address_to_library_name is implemented on AIX
    // right now, it always returns -1 for the offset, which is not
    // terribly informative.
    // Will fix that. For now, omit the offset.
3423     jio_snprintf(buf, buflen, "%s", p1);
3424   } else {
3425     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3426   }
3427   return buf;
3428 }
3429 
3430 static void print_signal_handler(outputStream* st, int sig,
3431                                  char* buf, size_t buflen) {
3432   struct sigaction sa;
3433   sigaction(sig, NULL, &sa);
3434 
3435   st->print("%s: ", os::exception_name(sig, buf, buflen));
3436 
3437   address handler = (sa.sa_flags & SA_SIGINFO)
3438     ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3439     : CAST_FROM_FN_PTR(address, sa.sa_handler);
3440 
3441   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3442     st->print("SIG_DFL");
3443   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3444     st->print("SIG_IGN");
3445   } else {
3446     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3447   }
3448 
3449   // Print readable mask.
3450   st->print(", sa_mask[0]=");
3451   os::Posix::print_signal_set_short(st, &sa.sa_mask);
3452 
3453   address rh = VMError::get_resetted_sighandler(sig);
  // Maybe the handler was reset by VMError?
3455   if (rh != NULL) {
3456     handler = rh;
3457     sa.sa_flags = VMError::get_resetted_sigflags(sig);
3458   }
3459 
3460   // Print textual representation of sa_flags.
3461   st->print(", sa_flags=");
3462   os::Posix::print_sa_flags(st, sa.sa_flags);
3463 
3464   // Check: is it our handler?
3465   if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3466       handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Check the flags: warn if they differ from the ones we set.
3469     if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags were changed from " PTR32_FORMAT ", consider using jsig library",
3471                 os::Aix::get_our_sigflags(sig));
3472     }
3473   }
3474   st->cr();
3475 }
3476 
3477 
3478 #define DO_SIGNAL_CHECK(sig) \
3479   if (!sigismember(&check_signal_done, sig)) \
3480     os::Aix::check_signal_handler(sig)
3481 
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI; we can add any other periodic checks here.
3484 
3485 void os::run_periodic_checks() {
3486 
3487   if (check_signals == false) return;
3488 
  // SEGV and BUS, if overridden, could potentially prevent the
  // generation of hs*.log in the event of a crash; debugging
  // such a case can be very challenging, so we absolutely
  // check the following for good measure:
3493   DO_SIGNAL_CHECK(SIGSEGV);
3494   DO_SIGNAL_CHECK(SIGILL);
3495   DO_SIGNAL_CHECK(SIGFPE);
3496   DO_SIGNAL_CHECK(SIGBUS);
3497   DO_SIGNAL_CHECK(SIGPIPE);
3498   DO_SIGNAL_CHECK(SIGXFSZ);
3499   if (UseSIGTRAP) {
3500     DO_SIGNAL_CHECK(SIGTRAP);
3501   }
3502   DO_SIGNAL_CHECK(SIGDANGER);
3503 
3504   // ReduceSignalUsage allows the user to override these handlers
3505   // see comments at the very top and jvm_solaris.h
3506   if (!ReduceSignalUsage) {
3507     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3508     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3509     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3510     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3511   }
3512 
3513   DO_SIGNAL_CHECK(SR_signum);
3514   DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
3515 }
3516 
3517 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3518 
3519 static os_sigaction_t os_sigaction = NULL;
3520 
3521 void os::Aix::check_signal_handler(int sig) {
3522   char buf[O_BUFLEN];
3523   address jvmHandler = NULL;
3524 
3525   struct sigaction act;
3526   if (os_sigaction == NULL) {
3527     // only trust the default sigaction, in case it has been interposed
3528     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3529     if (os_sigaction == NULL) return;
3530   }
3531 
3532   os_sigaction(sig, (struct sigaction*)NULL, &act);
3533 
3534   address thisHandler = (act.sa_flags & SA_SIGINFO)
3535     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3536     : CAST_FROM_FN_PTR(address, act.sa_handler);
3537 
3538 
3539   switch(sig) {
3540   case SIGSEGV:
3541   case SIGBUS:
3542   case SIGFPE:
3543   case SIGPIPE:
3544   case SIGILL:
3545   case SIGXFSZ:
3546     // Renamed 'signalHandler' to avoid collision with other shared libs.
3547     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3548     break;
3549 
3550   case SHUTDOWN1_SIGNAL:
3551   case SHUTDOWN2_SIGNAL:
3552   case SHUTDOWN3_SIGNAL:
3553   case BREAK_SIGNAL:
3554     jvmHandler = (address)user_handler();
3555     break;
3556 
3557   case INTERRUPT_SIGNAL:
3558     jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
3559     break;
3560 
3561   default:
3562     if (sig == SR_signum) {
3563       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3564     } else {
3565       return;
3566     }
3567     break;
3568   }
3569 
3570   if (thisHandler != jvmHandler) {
3571     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3572     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3573     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3574     // No need to check this sig any longer
3575     sigaddset(&check_signal_done, sig);
    // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN.
3577     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3578       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3579                     exception_name(sig, buf, O_BUFLEN));
3580     }
3581   } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3582     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3583     tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
3584     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
3585     // No need to check this sig any longer
3586     sigaddset(&check_signal_done, sig);
3587   }
3588 
  // Dump all the signal handlers.
3590   if (sigismember(&check_signal_done, sig)) {
3591     print_signal_handlers(tty, buf, O_BUFLEN);
3592   }
3593 }
3594 
3595 extern bool signal_name(int signo, char* buf, size_t len);
3596 
3597 const char* os::exception_name(int exception_code, char* buf, size_t size) {
3598   if (0 < exception_code && exception_code <= SIGRTMAX) {
3599     // signal
3600     if (!signal_name(exception_code, buf, size)) {
3601       jio_snprintf(buf, size, "SIG%d", exception_code);
3602     }
3603     return buf;
3604   } else {
3605     return NULL;
3606   }
3607 }
3608 
3609 // To install functions for atexit system call
3610 extern "C" {
3611   static void perfMemory_exit_helper() {
3612     perfMemory_exit();
3613   }
3614 }
3615 
// This is called _before_ most of the global arguments have been parsed.
3617 void os::init(void) {
  // This is basic; we want to know if this ever changes.
  // (The shared memory boundary is supposed to be 256M aligned.)
3620   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3621 
3622   // First off, we need to know whether we run on AIX or PASE, and
3623   // the OS level we run on.
3624   os::Aix::initialize_os_info();
3625 
3626   // Scan environment (SPEC1170 behaviour, etc)
3627   os::Aix::scan_environment();
3628 
3629   // Check which pages are supported by AIX.
3630   os::Aix::query_multipage_support();
3631 
3632   // Next, we need to initialize libo4 and libperfstat libraries.
3633   if (os::Aix::on_pase()) {
3634     os::Aix::initialize_libo4();
3635   } else {
3636     os::Aix::initialize_libperfstat();
3637   }
3638 
3639   // Reset the perfstat information provided by ODM.
3640   if (os::Aix::on_aix()) {
3641     libperfstat::perfstat_reset();
3642   }
3643 
  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
3646   os::Aix::initialize_system_info();
3647 
3648   // Initialize large page support.
3649   if (UseLargePages) {
3650     os::large_page_init();
3651     if (!UseLargePages) {
3652       // initialize os::_page_sizes
3653       _page_sizes[0] = Aix::page_size();
3654       _page_sizes[1] = 0;
3655       if (Verbose) {
3656         fprintf(stderr, "Large Page initialization failed: setting UseLargePages=0.\n");
3657       }
3658     }
3659   } else {
3660     // initialize os::_page_sizes
3661     _page_sizes[0] = Aix::page_size();
3662     _page_sizes[1] = 0;
3663   }
3664 
3665   // debug trace
3666   if (Verbose) {
3667     fprintf(stderr, "os::vm_page_size 0x%llX\n", os::vm_page_size());
3668     fprintf(stderr, "os::large_page_size 0x%llX\n", os::large_page_size());
3669     fprintf(stderr, "os::_page_sizes = ( ");
3670     for (int i = 0; _page_sizes[i]; i ++) {
3671       fprintf(stderr, " %s ", describe_pagesize(_page_sizes[i]));
3672     }
3673     fprintf(stderr, ")\n");
3674   }
3675 
3676   _initial_pid = getpid();
3677 
3678   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3679 
3680   init_random(1234567);
3681 
3682   ThreadCritical::initialize();
3683 
3684   // Main_thread points to the aboriginal thread.
3685   Aix::_main_thread = pthread_self();
3686 
3687   initial_time_count = os::elapsed_counter();
3688   pthread_mutex_init(&dl_mutex, NULL);
3689 }
3690 
3691 // this is called _after_ the global arguments have been parsed
3692 jint os::init_2(void) {
3693 
3694   if (Verbose) {
3695     fprintf(stderr, "processor count: %d\n", os::_processor_count);
3696     fprintf(stderr, "physical memory: %lu\n", Aix::_physical_memory);
3697   }
3698 
3699   // initially build up the loaded dll map
3700   LoadedLibraries::reload();
3701 
3702   const int page_size = Aix::page_size();
3703   const int map_size = page_size;
3704 
3705   address map_address = (address) MAP_FAILED;
3706   const int prot  = PROT_READ;
3707   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3708 
3709   // use optimized addresses for the polling page,
3710   // e.g. map it to a special 32-bit address.
3711   if (OptimizePollingPageLocation) {
3712     // architecture-specific list of address wishes:
3713     address address_wishes[] = {
3714       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3715       // PPC64: all address wishes are non-negative 32 bit values where
3716       // the lower 16 bits are all zero. we can load these addresses
3717       // with a single ppc_lis instruction.
3718       (address) 0x30000000, (address) 0x31000000,
3719       (address) 0x32000000, (address) 0x33000000,
3720       (address) 0x40000000, (address) 0x41000000,
3721       (address) 0x42000000, (address) 0x43000000,
3722       (address) 0x50000000, (address) 0x51000000,
3723       (address) 0x52000000, (address) 0x53000000,
3724       (address) 0x60000000, (address) 0x61000000,
3725       (address) 0x62000000, (address) 0x63000000
3726     };
3727     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3728 
3729     // iterate over the list of address wishes:
3730     for (int i=0; i<address_wishes_length; i++) {
3731       // try to map with current address wish.
3732       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3733       // fail if the address is already mapped.
3734       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3735                                      map_size, prot,
3736                                      flags | MAP_FIXED,
3737                                      -1, 0);
3738       if (Verbose) {
3739         fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3740                 address_wishes[i], map_address + (ssize_t)page_size);
3741       }
3742 
3743       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3744         // map succeeded and map_address is at wished address, exit loop.
3745         break;
3746       }
3747 
3748       if (map_address != (address) MAP_FAILED) {
3749         // map succeeded, but polling_page is not at wished address, unmap and continue.
3750         ::munmap(map_address, map_size);
3751         map_address = (address) MAP_FAILED;
3752       }
3753       // map failed, continue loop.
3754     }
3755   } // end OptimizePollingPageLocation
3756 
3757   if (map_address == (address) MAP_FAILED) {
3758     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3759   }
3760   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3761   os::set_polling_page(map_address);
3762 
3763   if (!UseMembar) {
3764     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    guarantee(mem_serialize_page != (address) MAP_FAILED, "mmap failed for memory serialize page");
3766     os::set_memory_serialize_page(mem_serialize_page);
3767 
3768 #ifndef PRODUCT
3769     if (Verbose && PrintMiscellaneous)
3770       tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3771 #endif
3772   }
3773 
3774   // initialize suspend/resume support - must do this before signal_sets_init()
3775   if (SR_initialize() != 0) {
3776     perror("SR_initialize failed");
3777     return JNI_ERR;
3778   }
3779 
3780   Aix::signal_sets_init();
3781   Aix::install_signal_handlers();
3782 
3783   // Check minimum allowable stack size for thread creation and to initialize
3784   // the java system classes, including StackOverflowError - depends on page
3785   // size. Add a page for compiler2 recursion in main thread.
3786   // Add in 2*BytesPerWord times page size to account for VM stack during
3787   // class initialization depending on 32 or 64 bit VM.
3788   os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3789             (size_t)(StackYellowPages+StackRedPages+StackShadowPages +
3790                      2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::page_size());
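  // For illustration only (the real values are platform- and flag-dependent):
  // with 4K pages and, say, 2 yellow + 1 red + 20 shadow pages on a 64-bit
  // VM, this reserves at least (2 + 1 + 20 + 2*8 (+1 with compiler2)) * 4K.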
3791 
3792   size_t threadStackSizeInBytes = ThreadStackSize * K;
3793   if (threadStackSizeInBytes != 0 &&
3794       threadStackSizeInBytes < os::Aix::min_stack_allowed) {
        tty->print_cr("\nThe stack size specified is too small. "
                      "Specify at least %dk",
3797                       os::Aix::min_stack_allowed / K);
3798         return JNI_ERR;
3799   }
3800 
3801   // Make the stack size a multiple of the page size so that
3802   // the yellow/red zones can be guarded.
3803   // note that this can be 0, if no default stacksize was set
3804   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3805 
3806   Aix::libpthread_init();
3807 
3808   if (MaxFDLimit) {
    // Set the number of file descriptors to the maximum. Print an error
    // if getrlimit/setrlimit fails, but continue regardless.
3811     struct rlimit nbr_files;
3812     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3813     if (status != 0) {
3814       if (PrintMiscellaneous && (Verbose || WizardMode))
3815         perror("os::init_2 getrlimit failed");
3816     } else {
3817       nbr_files.rlim_cur = nbr_files.rlim_max;
3818       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3819       if (status != 0) {
3820         if (PrintMiscellaneous && (Verbose || WizardMode))
3821           perror("os::init_2 setrlimit failed");
3822       }
3823     }
3824   }
3825 
3826   if (PerfAllowAtExitRegistration) {
3827     // only register atexit functions if PerfAllowAtExitRegistration is set.
3828     // atexit functions can be delayed until process exit time, which
3829     // can be problematic for embedded VM situations. Embedded VMs should
3830     // call DestroyJavaVM() to assure that VM resources are released.
3831 
3832     // note: perfMemory_exit_helper atexit function may be removed in
3833     // the future if the appropriate cleanup code can be added to the
3834     // VM_Exit VMOperation's doit method.
3835     if (atexit(perfMemory_exit_helper) != 0) {
3836       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3837     }
3838   }
3839 
3840   return JNI_OK;
3841 }
3842 
3843 // Mark the polling page as unreadable
3844 void os::make_polling_page_unreadable(void) {
3845   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3846     fatal("Could not disable polling page");
3847   }
3848 };
3849 
3850 // Mark the polling page as readable
3851 void os::make_polling_page_readable(void) {
3852   // Changed according to os_linux.cpp.
3853   if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3854     fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
3855   }
3856 };
3857 
3858 int os::active_processor_count() {
3859   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3860   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3861   return online_cpus;
3862 }
3863 
3864 void os::set_native_thread_name(const char *name) {
3865   // Not yet implemented.
3866   return;
3867 }
3868 
3869 bool os::distribute_processes(uint length, uint* distribution) {
3870   // Not yet implemented.
3871   return false;
3872 }
3873 
3874 bool os::bind_to_processor(uint processor_id) {
3875   // Not yet implemented.
3876   return false;
3877 }
3878 
3879 void os::SuspendedThreadTask::internal_do_task() {
3880   if (do_suspend(_thread->osthread())) {
3881     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3882     do_task(context);
3883     do_resume(_thread->osthread());
3884   }
3885 }
3886 
3887 class PcFetcher : public os::SuspendedThreadTask {
3888 public:
3889   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3890   ExtendedPC result();
3891 protected:
3892   void do_task(const os::SuspendedThreadTaskContext& context);
3893 private:
3894   ExtendedPC _epc;
3895 };
3896 
3897 ExtendedPC PcFetcher::result() {
3898   guarantee(is_done(), "task is not done yet.");
3899   return _epc;
3900 }
3901 
3902 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3903   Thread* thread = context.thread();
3904   OSThread* osthread = thread->osthread();
3905   if (osthread->ucontext() != NULL) {
3906     _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
3907   } else {
3908     // NULL context is unexpected, double-check this is the VMThread.
3909     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3910   }
3911 }
3912 
3913 // Suspends the target using the signal mechanism and then grabs the PC before
3914 // resuming the target. Used by the flat-profiler only
3915 ExtendedPC os::get_thread_pc(Thread* thread) {
3916   // Make sure that it is called by the watcher for the VMThread.
3917   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3918   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3919 
3920   PcFetcher fetcher(thread);
3921   fetcher.run();
3922   return fetcher.result();
3923 }
3924 
// Not needed on AIX.
3926 // int os::Aix::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) {
3927 // }
3928 
3929 ////////////////////////////////////////////////////////////////////////////////
3930 // debug support
3931 
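// Returns x if x and y lie on the same page; otherwise returns the boundary
// of y's page nearest to x (-os::vm_page_size() is the mask of the
// page-address bits, assuming a power-of-two page size).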
3932 static address same_page(address x, address y) {
3933   intptr_t page_bits = -os::vm_page_size();
3934   if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
3935     return x;
3936   else if (x > y)
3937     return (address)(intptr_t(y) | ~page_bits) + 1;
3938   else
3939     return (address)(intptr_t(y) & page_bits);
3940 }
3941 
3942 bool os::find(address addr, outputStream* st) {
3943 
3944   st->print(PTR_FORMAT ": ", addr);
3945 
3946   const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
3947   if (lib) {
3948     lib->print(st);
3949     return true;
3950   } else {
3951     lib = LoadedLibraries::find_for_data_address(addr);
3952     if (lib) {
3953       lib->print(st);
3954       return true;
3955     } else {
3956       st->print_cr("(outside any module)");
3957     }
3958   }
3959 
3960   return false;
3961 }
3962 
3963 ////////////////////////////////////////////////////////////////////////////////
3964 // misc
3965 
3966 // This does not do anything on Aix. This is basically a hook for being
3967 // able to use structured exception handling (thread-local exception filters)
3968 // on, e.g., Win32.
3969 void
3970 os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
3971                          JavaCallArguments* args, Thread* thread) {
3972   f(value, method, args, thread);
3973 }
3974 
3975 void os::print_statistics() {
3976 }
3977 
3978 int os::message_box(const char* title, const char* message) {
3979   int i;
3980   fdStream err(defaultStream::error_fd());
3981   for (i = 0; i < 78; i++) err.print_raw("=");
3982   err.cr();
3983   err.print_raw_cr(title);
3984   for (i = 0; i < 78; i++) err.print_raw("-");
3985   err.cr();
3986   err.print_raw_cr(message);
3987   for (i = 0; i < 78; i++) err.print_raw("=");
3988   err.cr();
3989 
3990   char buf[16];
3991   // Prevent process from exiting upon "read error" without consuming all CPU
3992   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3993 
3994   return buf[0] == 'y' || buf[0] == 'Y';
3995 }
3996 
3997 int os::stat(const char *path, struct stat *sbuf) {
3998   char pathbuf[MAX_PATH];
3999   if (strlen(path) > MAX_PATH - 1) {
4000     errno = ENAMETOOLONG;
4001     return -1;
4002   }
4003   os::native_path(strcpy(pathbuf, path));
4004   return ::stat(pathbuf, sbuf);
4005 }
4006 
4007 bool os::check_heap(bool force) {
4008   return true;
4009 }
4010 
4011 // Is a (classpath) directory empty?
4012 bool os::dir_is_empty(const char* path) {
4013   DIR *dir = NULL;
4014   struct dirent *ptr;
4015 
4016   dir = opendir(path);
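  // Could not open the directory: treat it as empty.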
4017   if (dir == NULL) return true;
4018 
4019   /* Scan the directory */
4020   bool result = true;
4021   char buf[sizeof(struct dirent) + MAX_PATH];
4022   while (result && (ptr = ::readdir(dir)) != NULL) {
4023     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4024       result = false;
4025     }
4026   }
4027   closedir(dir);
4028   return result;
4029 }
4030 
4031 // This code originates from JDK's sysOpen and open64_w
4032 // from src/solaris/hpi/src/system_md.c
4033 
4034 int os::open(const char *path, int oflag, int mode) {
4035 
4036   if (strlen(path) > MAX_PATH - 1) {
4037     errno = ENAMETOOLONG;
4038     return -1;
4039   }
4040   int fd;
4041 
4042   fd = ::open64(path, oflag, mode);
4043   if (fd == -1) return -1;
4044 
4045   // If the open succeeded, the file might still be a directory.
4046   {
4047     struct stat64 buf64;
4048     int ret = ::fstat64(fd, &buf64);
4049     int st_mode = buf64.st_mode;
4050 
4051     if (ret != -1) {
4052       if ((st_mode & S_IFMT) == S_IFDIR) {
4053         errno = EISDIR;
4054         ::close(fd);
4055         return -1;
4056       }
4057     } else {
4058       ::close(fd);
4059       return -1;
4060     }
4061   }
4062 
4063   // All file descriptors that are opened in the JVM and not
4064   // specifically destined for a subprocess should have the
4065   // close-on-exec flag set. If we don't set it, then careless 3rd
4066   // party native code might fork and exec without closing all
4067   // appropriate file descriptors (e.g. as we do in closeDescriptors in
4068   // UNIXProcess.c), and this in turn might:
4069   //
4070   // - cause end-of-file to fail to be detected on some file
4071   //   descriptors, resulting in mysterious hangs, or
4072   //
4073   // - might cause an fopen in the subprocess to fail on a system
4074   //   suffering from bug 1085341.
4075   //
4076   // (Yes, the default setting of the close-on-exec flag is a Unix
4077   // design flaw.)
4078   //
4079   // See:
4080   // 1085341: 32-bit stdio routines should support file descriptors >255
4081   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4082   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4083 #ifdef FD_CLOEXEC
4084   {
4085     int flags = ::fcntl(fd, F_GETFD);
4086     if (flags != -1)
4087       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4088   }
4089 #endif
4090 
4091   return fd;
4092 }
4093 
4094 
4095 // create binary file, rewriting existing file if required
4096 int os::create_binary_file(const char* path, bool rewrite_existing) {
4097   int oflags = O_WRONLY | O_CREAT;
4098   if (!rewrite_existing) {
4099     oflags |= O_EXCL;
4100   }
4101   return ::open64(path, oflags, S_IREAD | S_IWRITE);
4102 }
4103 
4104 // return current position of file pointer
4105 jlong os::current_file_offset(int fd) {
4106   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4107 }
4108 
4109 // move file pointer to the specified offset
4110 jlong os::seek_to_file_offset(int fd, jlong offset) {
4111   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4112 }
4113 
4114 // This code originates from JDK's sysAvailable
4115 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
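//
// For character devices, FIFOs and sockets we ask the driver via
// ioctl(FIONREAD); for everything else we compute (end - current position)
// with lseek64.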
4116 
4117 int os::available(int fd, jlong *bytes) {
4118   jlong cur, end;
4119   int mode;
4120   struct stat64 buf64;
4121 
4122   if (::fstat64(fd, &buf64) >= 0) {
4123     mode = buf64.st_mode;
4124     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4125       // XXX: is the following call interruptible? If so, this might
4126       // need to go through the INTERRUPT_IO() wrapper as for other
4127       // blocking, interruptible calls in this file.
4128       int n;
4129       if (::ioctl(fd, FIONREAD, &n) >= 0) {
4130         *bytes = n;
4131         return 1;
4132       }
4133     }
4134   }
4135   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4136     return 0;
4137   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4138     return 0;
4139   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4140     return 0;
4141   }
4142   *bytes = end - cur;
4143   return 1;
4144 }
4145 
4146 // Map a block of memory.
4147 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4148                         char *addr, size_t bytes, bool read_only,
4149                         bool allow_exec) {
4150   Unimplemented();
4151   return NULL;
4152 }
4153 
4154 
4155 // Remap a block of memory.
4156 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4157                           char *addr, size_t bytes, bool read_only,
4158                           bool allow_exec) {
4159   // same as map_memory() on this OS
4160   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4161                         allow_exec);
4162 }
4163 
4164 // Unmap a block of memory.
4165 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4166   return munmap(addr, bytes) == 0;
4167 }
4168 
4169 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4170 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4171 // of a thread.
4172 //
4173 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4174 // the fast estimate available on the platform.
4175 
4176 jlong os::current_thread_cpu_time() {
4177   // return user + sys since the cost is the same
4178   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
4179   assert(n >= 0, "negative CPU time");
4180   return n;
4181 }
4182 
4183 jlong os::thread_cpu_time(Thread* thread) {
4184   // consistent with what current_thread_cpu_time() returns
4185   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4186   assert(n >= 0, "negative CPU time");
4187   return n;
4188 }
4189 
4190 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4191   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4192   assert(n >= 0, "negative CPU time");
4193   return n;
4194 }
4195 
4196 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4197   bool error = false;
4198 
4199   jlong sys_time = 0;
4200   jlong user_time = 0;
4201 
  // Reimplemented using getthrds64().
  //
  // It goes like this:
  // For the thread in question, get the kernel thread id. Then get the
  // kernel thread statistics using that id.
  //
  // Of course this only works when no pthread scheduling is used,
  // i.e. there is a 1:1 relationship between pthreads and kernel threads.
  // On AIX, see the AIXTHREAD_SCOPE variable.
4211 
4212   pthread_t pthtid = thread->osthread()->pthread_id();
4213 
4214   // retrieve kernel thread id for the pthread:
4215   tid64_t tid = 0;
4216   struct __pthrdsinfo pinfo;
  // I just love those otherworldly IBM APIs which force me to hand down
  // dummy buffers for stuff I don't care for...
4219   char dummy[1];
4220   int dummy_size = sizeof(dummy);
4221   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4222                           dummy, &dummy_size) == 0) {
4223     tid = pinfo.__pi_tid;
4224   } else {
4225     tty->print_cr("pthread_getthrds_np failed.");
4226     error = true;
4227   }
4228 
4229   // retrieve kernel timing info for that kernel thread
4230   if (!error) {
4231     struct thrdentry64 thrdentry;
4232     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
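      // Convert the rusage values (seconds + microseconds) to nanoseconds.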
4233       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4234       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4235     } else {
      tty->print_cr("getthrds64 failed.");
4237       error = true;
4238     }
4239   }
4240 
4241   if (p_sys_time) {
4242     *p_sys_time = sys_time;
4243   }
4244 
4245   if (p_user_time) {
4246     *p_user_time = user_time;
4247   }
4248 
  return !error;
4254 }
4255 
4256 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4257   jlong sys_time;
4258   jlong user_time;
4259 
4260   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4261     return -1;
4262   }
4263 
4264   return user_sys_cpu_time ? sys_time + user_time : user_time;
4265 }
4266 
4267 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4268   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4269   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4270   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4271   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4272 }
4273 
4274 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4275   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4276   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4277   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4278   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4279 }
4280 
4281 bool os::is_thread_cpu_time_supported() {
4282   return true;
4283 }
4284 
4285 // System loadavg support. Returns -1 if load average cannot be obtained.
4286 // For now just return the system wide load average (no processor sets).
4287 int os::loadavg(double values[], int nelem) {
4288 
4289   // Implemented using libperfstat on AIX.
4290 
4291   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4292   guarantee(values, "argument error");
4293 
4294   if (os::Aix::on_pase()) {
4295     Unimplemented();
4296     return -1;
4297   } else {
4298     // AIX: use libperfstat
4299     //
4300     // See also:
4301     // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4302     // /usr/include/libperfstat.h:
4303 
    // Use get_cpuinfo, which is already AIX version independent.
4305     os::Aix::cpuinfo_t ci;
4306     if (os::Aix::get_cpuinfo(&ci)) {
4307       for (int i = 0; i < nelem; i++) {
4308         values[i] = ci.loadavg[i];
4309       }
4310     } else {
4311       return -1;
4312     }
4313     return nelem;
4314   }
4315 }
4316 
4317 void os::pause() {
4318   char filename[MAX_PATH];
4319   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4320     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4321   } else {
4322     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4323   }
4324 
4325   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4326   if (fd != -1) {
4327     struct stat buf;
4328     ::close(fd);
4329     while (::stat(filename, &buf) == 0) {
4330       (void)::poll(NULL, 0, 100);
4331     }
4332   } else {
4333     jio_fprintf(stderr,
4334       "Could not open pause file '%s', continuing immediately.\n", filename);
4335   }
4336 }
4337 
4338 bool os::Aix::is_primordial_thread() {
  return pthread_self() == (pthread_t)1;
4344 }
4345 
// OS recognition (PASE/AIX, OS level). Call this before calling any
// of the static functions Aix::on_pase(), Aix::os_version().
4348 void os::Aix::initialize_os_info() {
4349 
4350   assert(_on_pase == -1 && _os_version == -1, "already called.");
4351 
4352   struct utsname uts;
4353   memset(&uts, 0, sizeof(uts));
4354   strcpy(uts.sysname, "?");
4355   if (::uname(&uts) == -1) {
4356     fprintf(stderr, "uname failed (%d)\n", errno);
4357     guarantee(0, "Could not determine whether we run on AIX or PASE");
4358   } else {
4359     if (Verbose) {
4360       fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4361               "node \"%s\" machine \"%s\"\n",
4362               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4363     }
4364     const int major = atoi(uts.version);
4365     assert(major > 0, "invalid OS version");
4366     const int minor = atoi(uts.release);
4367     assert(minor > 0, "invalid OS release");
4368     _os_version = (major << 8) | minor;
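    // e.g., AIX 6.1 yields _os_version == 0x0601.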
4369     if (strcmp(uts.sysname, "OS400") == 0) {
4370       Unimplemented();
4371     } else if (strcmp(uts.sysname, "AIX") == 0) {
4372       // We run on AIX. We do not support versions older than AIX 5.3.
4373       _on_pase = 0;
4374       if (_os_version < 0x0503) {
4375         fprintf(stderr, "AIX release older than AIX 5.3 not supported.\n");
4376         assert(false, "AIX release too old.");
4377       } else {
4378         if (Verbose) {
4379           fprintf(stderr, "We run on AIX %d.%d\n", major, minor);
4380         }
4381       }
4382     } else {
4383       assert(false, "unknown OS");
4384     }
4385   }
4386 
4387   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4388 
4389 } // end: os::Aix::initialize_os_info()
4390 
// Scan environment for important settings which might affect the VM.
4392 // Trace out settings. Warn about invalid settings and/or correct them.
4393 //
// Must run after os::Aix::initialize_os_info().
4395 void os::Aix::scan_environment() {
4396 
4397   char* p;
4398   int rc;
4399 
  // Warn explicitly if EXTSHM=ON is used. That switch changes how
  // System V shared memory behaves. One effect is that the page size of
  // shared memory cannot be changed dynamically, effectively preventing
  // large pages from working.
4404   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4405   // recommendation is (in OSS notes) to switch it off.
4406   p = ::getenv("EXTSHM");
4407   if (Verbose) {
4408     fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4409   }
4410   if (p && strcmp(p, "ON") == 0) {
4411     fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4412     _extshm = 1;
4413   } else {
4414     _extshm = 0;
4415   }
4416 
4417   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4418   // Not tested, not supported.
4419   //
4420   // Note that it might be worth the trouble to test and to require it, if only to
4421   // get useful return codes for mprotect.
4422   //
4423   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4424   // exec() ? before loading the libjvm ? ....)
4425   p = ::getenv("XPG_SUS_ENV");
4426   if (Verbose) {
4427     fprintf(stderr, "XPG_SUS_ENV=%s.\n", p ? p : "<unset>");
4428   }
4429   if (p && strcmp(p, "ON") == 0) {
4430     _xpg_sus_mode = 1;
4431     fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON\n");
4432     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4433     // clobber address ranges. If we ever want to support that, we have to do some
4434     // testing first.
4435     guarantee(false, "XPG_SUS_ENV=ON not supported");
4436   } else {
4437     _xpg_sus_mode = 0;
4438   }
4439 
4440   // Switch off AIX internal (pthread) guard pages. This has
4441   // immediate effect for any pthread_create calls which follow.
4442   p = ::getenv("AIXTHREAD_GUARDPAGES");
4443   if (Verbose) {
4444     fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.\n", p ? p : "<unset>");
4445     fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.\n");
4446   }
4447   rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4448   guarantee(rc == 0, "");
4449 
4450 } // end: os::Aix::scan_environment()
4451 
4452 // PASE: initialize the libo4 library (AS400 PASE porting library).
4453 void os::Aix::initialize_libo4() {
4454   Unimplemented();
4455 }
4456 
4457 // AIX: initialize the libperfstat library (we load this dynamically
// because it is only available on AIX).
4459 void os::Aix::initialize_libperfstat() {
4460 
4461   assert(os::Aix::on_aix(), "AIX only");
4462 
4463   if (!libperfstat::init()) {
4464     fprintf(stderr, "libperfstat initialization failed.\n");
4465     assert(false, "libperfstat initialization failed");
4466   } else {
4467     if (Verbose) {
4468       fprintf(stderr, "libperfstat initialized.\n");
4469     }
4470   }
4471 } // end: os::Aix::initialize_libperfstat
4472 
4473 /////////////////////////////////////////////////////////////////////////////
4474 // thread stack
4475 
4476 // function to query the current stack size using pthread_getthrds_np
4477 //
4478 // ! do not change anything here unless you know what you are doing !
4479 static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4480 
  // This only works when invoked on a pthread. As we agreed not to use
  // the primordial thread anyway, I assert that here:
4483   guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4484 
4485   // information about this api can be found (a) in the pthread.h header and
4486   // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4487   //
4488   // The use of this API to find out the current stack is kind of undefined.
4489   // But after a lot of tries and asking IBM about it, I concluded that it is safe
4490   // enough for cases where I let the pthread library create its stacks. For cases
  // where I create my own stack and pass it to pthread_create, it seems not to
  // work (the returned stack size in that case is 0).
4493 
4494   pthread_t tid = pthread_self();
4495   struct __pthrdsinfo pinfo;
  char dummy[1]; // we only need this to satisfy the API, which insists on a buffer
4497   int dummy_size = sizeof(dummy);
4498 
4499   memset(&pinfo, 0, sizeof(pinfo));
4500 
4501   const int rc = pthread_getthrds_np (&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4502                                       sizeof(pinfo), dummy, &dummy_size);
4503 
4504   if (rc != 0) {
4505     fprintf(stderr, "pthread_getthrds_np failed (%d)\n", rc);
4506     guarantee(0, "pthread_getthrds_np failed");
4507   }
4508 
4509   guarantee(pinfo.__pi_stackend, "returned stack base invalid");
4510 
4511   // the following can happen when invoking pthread_getthrds_np on a pthread running on a user provided stack
4512   // (when handing down a stack to pthread create, see pthread_attr_setstackaddr).
4513   // Not sure what to do here - I feel inclined to forbid this use case completely.
4514   guarantee(pinfo.__pi_stacksize, "returned stack size invalid");
4515 
4516   // On AIX, stacks are not necessarily page aligned so round the base and size accordingly
4517   if (p_stack_base) {
4518     (*p_stack_base) = (address) align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size());
4519   }
4520 
4521   if (p_stack_size) {
4522     (*p_stack_size) = pinfo.__pi_stacksize - os::Aix::stack_page_size();
4523   }
4524 
4525 #ifndef PRODUCT
4526   if (Verbose) {
4527     fprintf(stderr,
4528             "query_stack_dimensions() -> real stack_base=" INTPTR_FORMAT ", real stack_addr=" INTPTR_FORMAT
4529             ", real stack_size=" INTPTR_FORMAT
4530             ", stack_base=" INTPTR_FORMAT ", stack_size=" INTPTR_FORMAT "\n",
4531             (intptr_t)pinfo.__pi_stackend, (intptr_t)pinfo.__pi_stackaddr, pinfo.__pi_stacksize,
4532             (intptr_t)align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size()),
4533             pinfo.__pi_stacksize - os::Aix::stack_page_size());
4534   }
4535 #endif
4536 
4537 } // end query_stack_dimensions
4538 
4539 // get the current stack base from the OS (actually, the pthread library)
4540 address os::current_stack_base() {
4541   address p;
4542   query_stack_dimensions(&p, 0);
4543   return p;
4544 }
4545 
4546 // get the current stack size from the OS (actually, the pthread library)
4547 size_t os::current_stack_size() {
4548   size_t s;
4549   query_stack_dimensions(0, &s);
4550   return s;
4551 }
4552 
4553 // Refer to the comments in os_solaris.cpp park-unpark.
4554 //
4555 // Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4556 // hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4557 // For specifics regarding the bug see GLIBC BUGID 261237 :
4558 //    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4559 // Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4560 // will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4561 // is used. (The simple C test-case provided in the GLIBC bug report manifests the
// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4563 // and monitorenter when we're using 1-0 locking. All those operations may result in
4564 // calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4565 // of libpthread avoids the problem, but isn't practical.
4566 //
4567 // Possible remedies:
4568 //
4569 // 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
4570 //      This is palliative and probabilistic, however. If the thread is preempted
4571 //      between the call to compute_abstime() and pthread_cond_timedwait(), more
4572 //      than the minimum period may have passed, and the abstime may be stale (in the
//      past) resulting in a hang. Using this technique reduces the odds of a hang
4574 //      but the JVM is still vulnerable, particularly on heavily loaded systems.
4575 //
4576 // 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4577 //      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
4578 //      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4579 //      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
4580 //      thread.
4581 //
4582 // 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
4583 //      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
4584 //      a timeout request to the chron thread and then blocking via pthread_cond_wait().
4585 //      This also works well. In fact it avoids kernel-level scalability impediments
4586 //      on certain platforms that don't handle lots of active pthread_cond_timedwait()
4587 //      timers in a graceful fashion.
4588 //
4589 // 4.   When the abstime value is in the past it appears that control returns
4590 //      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4591 //      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
4592 //      can avoid the problem by reinitializing the condvar -- by cond_destroy()
4593 //      followed by cond_init() -- after all calls to pthread_cond_timedwait().
4594 //      It may be possible to avoid reinitialization by checking the return
4595 //      value from pthread_cond_timedwait(). In addition to reinitializing the
4596 //      condvar we must establish the invariant that cond_signal() is only called
4597 //      within critical sections protected by the adjunct mutex. This prevents
4598 //      cond_signal() from "seeing" a condvar that's in the midst of being
4599 //      reinitialized or that is corrupt. Sadly, this invariant obviates the
4600 //      desirable signal-after-unlock optimization that avoids futile context switching.
4601 //
//      I'm also concerned that some versions of NPTL might allocate an auxiliary
//      structure when a condvar is used or initialized. cond_destroy() would
//      release the helper structure. Our reinitialize-after-timedwait fix
//      could put excessive stress on malloc/free and the locks protecting the C-heap.
4606 //
// We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
// It may be possible to refine (4) by checking the kernel and NPTL versions
4609 // and only enabling the work-around for vulnerable environments.
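// A minimal sketch of the remedy-(4) idiom as used in Parker::park() below
// (illustrative only; _cond, _mutex and the flag are the real names):
//
//   status = pthread_cond_timedwait(_cond, _mutex, &absTime);
//   if (status != 0 && WorkAroundNPTLTimedWaitHang) {
//     pthread_cond_destroy(_cond);    // condvar may be corrupt after a
//     pthread_cond_init(_cond, NULL); // wait with a stale abstime - rebuild
//   }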
4610 
4611 // utility to compute the abstime argument to timedwait:
4612 // millis is the relative timeout time
4613 // abstime will be the absolute timeout time
4614 // TODO: replace compute_abstime() with unpackTime()
4615 
4616 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4617   if (millis < 0) millis = 0;
4618   struct timeval now;
4619   int status = gettimeofday(&now, NULL);
4620   assert(status == 0, "gettimeofday");
4621   jlong seconds = millis / 1000;
4622   millis %= 1000;
4623   if (seconds > 50000000) { // see man cond_timedwait(3T)
4624     seconds = 50000000;
4625   }
4626   abstime->tv_sec = now.tv_sec  + seconds;
4627   long       usec = now.tv_usec + millis * 1000;
4628   if (usec >= 1000000) {
4629     abstime->tv_sec += 1;
4630     usec -= 1000000;
4631   }
4632   abstime->tv_nsec = usec * 1000;
4633   return abstime;
4634 }
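// Usage sketch (illustrative; this mirrors os::PlatformEvent::park(millis)
// below):
//
//   struct timespec abst;
//   compute_abstime(&abst, millis); // relative millis -> absolute deadline
//   ...
//   status = pthread_cond_timedwait(_cond, _mutex, &abst);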
4635 
4636 
4637 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4638 // Conceptually TryPark() should be equivalent to park(0).
4639 
4640 int os::PlatformEvent::TryPark() {
4641   for (;;) {
4642     const int v = _Event;
4643     guarantee ((v == 0) || (v == 1), "invariant");
4644     if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4645   }
4646 }
4647 
4648 void os::PlatformEvent::park() {       // AKA "down()"
4649   // Invariant: Only the thread associated with the Event/PlatformEvent
4650   // may call park().
4651   // TODO: assert that _Assoc != NULL or _Assoc == Self
4652   int v;
4653   for (;;) {
4654     v = _Event;
4655     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4656   }
4657   guarantee (v >= 0, "invariant");
4658   if (v == 0) {
4659     // Do this the hard way by blocking ...
4660     int status = pthread_mutex_lock(_mutex);
4661     assert_status(status == 0, status, "mutex_lock");
4662     guarantee (_nParked == 0, "invariant");
4663     ++ _nParked;
4664     while (_Event < 0) {
4665       status = pthread_cond_wait(_cond, _mutex);
      assert_status(status == 0, status, "cond_wait");
4667     }
4668     -- _nParked;
4669 
4670     // In theory we could move the ST of 0 into _Event past the unlock(),
4671     // but then we'd need a MEMBAR after the ST.
4672     _Event = 0;
4673     status = pthread_mutex_unlock(_mutex);
4674     assert_status(status == 0, status, "mutex_unlock");
4675   }
4676   guarantee (_Event >= 0, "invariant");
4677 }
4678 
4679 int os::PlatformEvent::park(jlong millis) {
4680   guarantee (_nParked == 0, "invariant");
4681 
4682   int v;
4683   for (;;) {
4684     v = _Event;
4685     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4686   }
4687   guarantee (v >= 0, "invariant");
4688   if (v != 0) return OS_OK;
4689 
4690   // We do this the hard way, by blocking the thread.
4691   // Consider enforcing a minimum timeout value.
4692   struct timespec abst;
4693   compute_abstime(&abst, millis);
4694 
4695   int ret = OS_TIMEOUT;
4696   int status = pthread_mutex_lock(_mutex);
4697   assert_status(status == 0, status, "mutex_lock");
4698   guarantee (_nParked == 0, "invariant");
4699   ++_nParked;
4700 
4701   // Object.wait(timo) will return because of
4702   // (a) notification
4703   // (b) timeout
4704   // (c) thread.interrupt
4705   //
4706   // Thread.interrupt and object.notify{All} both call Event::set.
4707   // That is, we treat thread.interrupt as a special case of notification.
4708   // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4709   // We assume all ETIME returns are valid.
4710   //
4711   // TODO: properly differentiate simultaneous notify+interrupt.
4712   // In that case, we should propagate the notify to another waiter.
4713 
4714   while (_Event < 0) {
4715     status = pthread_cond_timedwait(_cond, _mutex, &abst);
4716     assert_status(status == 0 || status == ETIMEDOUT,
4717           status, "cond_timedwait");
4718     if (!FilterSpuriousWakeups) break;         // previous semantics
4719     if (status == ETIMEDOUT) break;
4720     // We consume and ignore EINTR and spurious wakeups.
4721   }
4722   --_nParked;
4723   if (_Event >= 0) {
4724      ret = OS_OK;
4725   }
4726   _Event = 0;
4727   status = pthread_mutex_unlock(_mutex);
4728   assert_status(status == 0, status, "mutex_unlock");
4729   assert (_nParked == 0, "invariant");
4730   return ret;
4731 }
4732 
4733 void os::PlatformEvent::unpark() {
4734   int v, AnyWaiters;
4735   for (;;) {
4736     v = _Event;
4737     if (v > 0) {
      // The LD of _Event could have been reordered or be satisfied
      // by a read-aside from this processor's write buffer.
4740       // To avoid problems execute a barrier and then
4741       // ratify the value.
4742       OrderAccess::fence();
4743       if (_Event == v) return;
4744       continue;
4745     }
4746     if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
4747   }
4748   if (v < 0) {
4749     // Wait for the thread associated with the event to vacate
4750     int status = pthread_mutex_lock(_mutex);
4751     assert_status(status == 0, status, "mutex_lock");
4752     AnyWaiters = _nParked;
4753 
4754     if (AnyWaiters != 0) {
      // Note that we signal *before* dropping the lock: cond_signal() must
      // never observe a condvar that is in the midst of being destroyed or
      // reinitialized (see remedy (4) above).
4757       status = pthread_cond_signal(_cond);
4758       assert_status(status == 0, status, "cond_signal");
4759     }
4760     // Mutex should be locked for pthread_cond_signal(_cond).
4761     status = pthread_mutex_unlock(_mutex);
4762     assert_status(status == 0, status, "mutex_unlock");
4763   }
4764 
  // Note that the signal is issued while the mutex is still held: the usual
  // signal-after-unlock optimization for "immortal" Events is deliberately
  // not used here (see remedy (4) above). In rare circumstances a thread may
  // still return prematurely from cond_{timed}wait(), but the spurious
  // wakeup is benign and the victim will simply re-test the condition and
  // re-park itself.
4770 }
4771 
4772 
4773 // JSR166
4774 // -------------------------------------------------------
4775 
4776 //
4777 // The solaris and linux implementations of park/unpark are fairly
4778 // conservative for now, but can be improved. They currently use a
// mutex/condvar pair, plus a count.
4780 // Park decrements count if > 0, else does a condvar wait. Unpark
4781 // sets count to 1 and signals condvar. Only one thread ever waits
4782 // on the condvar. Contention seen when trying to park implies that someone
4783 // is unparking you, so don't wait. And spurious returns are fine, so there
4784 // is no need to track notifications.
4785 //
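// Sketch of the protocol implemented below (illustrative pseudocode):
//
//   park():   if (_counter > 0) { _counter = 0; return; } // consume permit
//             else block on _cond until unpark() signals
//   unpark(): _counter = 1; signal _cond;                 // at most one permit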
4786 
4787 #define MAX_SECS 100000000
4788 //
4789 // This code is common to linux and solaris and will be moved to a
4790 // common place in dolphin.
4791 //
4792 // The passed in time value is either a relative time in nanoseconds
4793 // or an absolute time in milliseconds. Either way it has to be unpacked
4794 // into suitable seconds and nanoseconds components and stored in the
4795 // given timespec structure.
// Given time is a 64-bit value and the time_t used in the timespec is
// only a signed 32-bit value on 32-bit systems, we have to watch for
// overflow if times far in the future are given. Further, on Solaris versions
4799 // prior to 10 there is a restriction (see cond_timedwait) that the specified
4800 // number of seconds, in abstime, is less than current_time + 100,000,000.
4801 // As it will be 28 years before "now + 100000000" will overflow we can
4802 // ignore overflow and just impose a hard-limit on seconds using the value
4803 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
4804 // years from "now".
4805 //
4806 
4807 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "unpackTime");
4809 
4810   struct timeval now;
4811   int status = gettimeofday(&now, NULL);
4812   assert(status == 0, "gettimeofday");
4813 
4814   time_t max_secs = now.tv_sec + MAX_SECS;
4815 
4816   if (isAbsolute) {
4817     jlong secs = time / 1000;
4818     if (secs > max_secs) {
4819       absTime->tv_sec = max_secs;
    } else {
4822       absTime->tv_sec = secs;
4823     }
4824     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  } else {
4827     jlong secs = time / NANOSECS_PER_SEC;
4828     if (secs >= MAX_SECS) {
4829       absTime->tv_sec = max_secs;
4830       absTime->tv_nsec = 0;
    } else {
4833       absTime->tv_sec = now.tv_sec + secs;
4834       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4835       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4836         absTime->tv_nsec -= NANOSECS_PER_SEC;
4837         ++absTime->tv_sec; // note: this must be <= max_secs
4838       }
4839     }
4840   }
4841   assert(absTime->tv_sec >= 0, "tv_sec < 0");
4842   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4843   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4844   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4845 }
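// For example (illustrative): LockSupport.parkNanos(ns) reaches
// Parker::park(false, ns) with a relative time in nanoseconds, while
// LockSupport.parkUntil(deadline) reaches Parker::park(true, deadline)
// with an absolute time in milliseconds since the epoch; unpackTime()
// normalizes both into a single absolute timespec for
// pthread_cond_timedwait().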
4846 
4847 void Parker::park(bool isAbsolute, jlong time) {
4848   // Optional fast-path check:
4849   // Return immediately if a permit is available.
  if (_counter > 0) {
    _counter = 0;
    OrderAccess::fence();
    return;
  }
4855 
4856   Thread* thread = Thread::current();
4857   assert(thread->is_Java_thread(), "Must be JavaThread");
4858   JavaThread *jt = (JavaThread *)thread;
4859 
4860   // Optional optimization -- avoid state transitions if there's an interrupt pending.
4861   // Check interrupt before trying to wait
4862   if (Thread::is_interrupted(thread, false)) {
4863     return;
4864   }
4865 
4866   // Next, demultiplex/decode time arguments
4867   timespec absTime;
4868   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
4869     return;
4870   }
4871   if (time > 0) {
4872     unpackTime(&absTime, isAbsolute, time);
4873   }
4874 
4875 
4876   // Enter safepoint region
4877   // Beware of deadlocks such as 6317397.
4878   // The per-thread Parker:: mutex is a classic leaf-lock.
4879   // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
4882   ThreadBlockInVM tbivm(jt);
4883 
  // Don't wait if we cannot get the lock, since interference arises from
  // unblocking. Also, check for a pending interrupt before trying to wait.
4886   if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4887     return;
4888   }
4889 
4890   int status;
4891   if (_counter > 0) { // no wait needed
4892     _counter = 0;
4893     status = pthread_mutex_unlock(_mutex);
4894     assert (status == 0, "invariant");
4895     OrderAccess::fence();
4896     return;
4897   }
4898 
4899 #ifdef ASSERT
4900   // Don't catch signals while blocked; let the running threads have the signals.
4901   // (This allows a debugger to break into the running thread.)
4902   sigset_t oldsigs;
4903   sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
4904   pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4905 #endif
4906 
4907   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4908   jt->set_suspend_equivalent();
4909   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4910 
4911   if (time == 0) {
4912     status = pthread_cond_wait (_cond, _mutex);
4913   } else {
4914     status = pthread_cond_timedwait (_cond, _mutex, &absTime);
4915     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4916       pthread_cond_destroy (_cond);
4917       pthread_cond_init    (_cond, NULL);
4918     }
4919   }
4920   assert_status(status == 0 || status == EINTR ||
4921                 status == ETIME || status == ETIMEDOUT,
4922                 status, "cond_timedwait");
4923 
4924 #ifdef ASSERT
4925   pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4926 #endif
4927 
4928   _counter = 0;
4929   status = pthread_mutex_unlock(_mutex);
4930   assert_status(status == 0, status, "invariant");
4931   // If externally suspended while waiting, re-suspend
4932   if (jt->handle_special_suspend_equivalent_condition()) {
4933     jt->java_suspend_self();
4934   }
4935 
4936   OrderAccess::fence();
4937 }
4938 
4939 void Parker::unpark() {
4940   int s, status;
4941   status = pthread_mutex_lock(_mutex);
4942   assert (status == 0, "invariant");
4943   s = _counter;
4944   _counter = 1;
4945   if (s < 1) {
4946     if (WorkAroundNPTLTimedWaitHang) {
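      // Workaround enabled: signal while still holding the mutex so that
      // cond_signal() can never observe a condvar that park() is
      // concurrently destroying and reinitializing (see remedy (4) above).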
4947       status = pthread_cond_signal (_cond);
4948       assert (status == 0, "invariant");
4949       status = pthread_mutex_unlock(_mutex);
4950       assert (status == 0, "invariant");
4951     } else {
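      // Workaround disabled: unlock first, then signal, preserving the
      // signal-after-unlock optimization that avoids futile context
      // switching (a thread woken while we still hold the mutex would
      // block on it immediately).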
4952       status = pthread_mutex_unlock(_mutex);
4953       assert (status == 0, "invariant");
4954       status = pthread_cond_signal (_cond);
4955       assert (status == 0, "invariant");
4956     }
4957   } else {
    status = pthread_mutex_unlock(_mutex);
    assert (status == 0, "invariant");
4960   }
4961 }
4962 
4963 
4964 extern char** environ;
4965 
4966 // Run the specified command in a separate process. Return its exit value,
4967 // or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from a signal handler. It
4969 // doesn't block SIGINT et al.
4970 int os::fork_and_exec(char* cmd) {
  char* argv[4] = { (char*) "sh", (char*) "-c", cmd, NULL };
4972 
4973   pid_t pid = fork();
4974 
4975   if (pid < 0) {
4976     // fork failed
4977     return -1;
4978 
4979   } else if (pid == 0) {
4980     // child process
4981 
4982     // try to be consistent with system(), which uses "/usr/bin/sh" on AIX
4983     execve("/usr/bin/sh", argv, environ);
4984 
4985     // execve failed
4986     _exit(-1);
4987 
4988   } else  {
4989     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4990     // care about the actual exit code, for now.
4991 
4992     int status;
4993 
    // Wait for the child process to exit. This returns immediately if
    // the child has already exited.
    while (waitpid(pid, &status, 0) < 0) {
      switch (errno) {
      case ECHILD: return 0;
      case EINTR: break;
      default: return -1;
      }
    }
5003 
    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal.
      // The best value to return is 0x80 + signal number, because that is
      // what all Unix shells do, and because it allows callers to
      // distinguish between process exit and process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through.
      return status;
    }
5018   }
5019   // Remove warning.
5020   return -1;
5021 }
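// (The VM uses fork_and_exec() e.g. to run -XX:OnError and
// -XX:OnOutOfMemoryError commands from contexts, such as the error
// handler, where calling system(3) would be unsafe.)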
5022 
5023 // is_headless_jre()
5024 //
5025 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
5026 // in order to report if we are running in a headless jre.
5027 //
// Since JDK 8, xawt/libmawt.so has been moved into the same directory
// as libawt.so and renamed libawt_xawt.so.
5030 bool os::is_headless_jre() {
5031   struct stat statbuf;
5032   char buf[MAXPATHLEN];
5033   char libmawtpath[MAXPATHLEN];
5034   const char *xawtstr  = "/xawt/libmawt.so";
5035   const char *new_xawtstr = "/libawt_xawt.so";
5036 
5037   char *p;
5038 
5039   // Get path to libjvm.so
5040   os::jvm_path(buf, sizeof(buf));
5041 
5042   // Get rid of libjvm.so
5043   p = strrchr(buf, '/');
5044   if (p == NULL) return false;
5045   else *p = '\0';
5046 
5047   // Get rid of client or server
5048   p = strrchr(buf, '/');
5049   if (p == NULL) return false;
5050   else *p = '\0';
5051 
5052   // check xawt/libmawt.so
5053   strcpy(libmawtpath, buf);
5054   strcat(libmawtpath, xawtstr);
5055   if (::stat(libmawtpath, &statbuf) == 0) return false;
5056 
5057   // check libawt_xawt.so
5058   strcpy(libmawtpath, buf);
5059   strcat(libmawtpath, new_xawtstr);
5060   if (::stat(libmawtpath, &statbuf) == 0) return false;
5061 
5062   return true;
5063 }
5064 
5065 // Get the default path to the core file
5066 // Returns the length of the string
5067 int os::get_core_path(char* buffer, size_t bufferSize) {
5068   const char* p = get_current_directory(buffer, bufferSize);
5069 
5070   if (p == NULL) {
    assert(false, "failed to get current directory");
5072     return 0;
5073   }
5074 
  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
               p, current_process_id());
5077 
5078   return strlen(buffer);
5079 }
5080 
5081 #ifndef PRODUCT
5082 void TestReserveMemorySpecial_test() {
5083   // No tests available for this platform
5084 }
5085 #endif