/*
 * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// According to the AIX OS documentation, #pragma alloca must be used
// with the C++ compiler before referencing alloca().
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "libperfstat_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// Add a missing declaration (getthrds64 should be declared in procinfo.h but isn't until AIX 6.1).
#if !defined(_AIXVERSION_610)
extern "C" {
  int getthrds64(pid_t ProcessIdentifier,
                 struct thrdentry64* ThreadBuffer,
                 int ThreadSize,
                 tid64_t* IndexPointer,
                 int Count);
}
#endif

// Excerpts from systemcfg.h definitions newer than AIX 5.3
#ifndef PV_7
# define PV_7 0x200000          // Power PC 7
# define PV_7_Compat 0x208000   // Power PC 7
#endif

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// The semantics in this file are thus: codeptr_t is a *real code ptr*.
// This means that any function taking codeptr_t as argument will assume
// a real code pointer and won't handle function descriptors (e.g. getFuncName),
// whereas functions taking address as argument will deal with function
// descriptors (e.g. os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;

// typedefs for stackslots, stack pointers, pointers to op codes
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;

// query dimensions of the stack of the calling thread
static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

// function to check a given stack pointer against given stack limits
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
  if (((uintptr_t)sp) & 0x7) {
    return false;
  }
  if (sp > stack_base) {
    return false;
  }
  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
    return false;
  }
  return true;
}

// returns true if p is a valid code pointer
inline bool is_valid_codepointer(codeptr_t p) {
  if (!p) {
    return false;
  }
  if (((uintptr_t)p) & 0x3) {
    return false;
  }
  if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
    return false;
  }
  return true;
}

// macro to check a given stack pointer against given stack limits and to die if test fails
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// macro to check the current stack pointer against given stack limits
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
  address sp; \
  sp = os::current_stack_pointer(); \
  CHECK_STACK_PTR(sp, stack_base, stack_size); \
}
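
// Usage note (example): both macros guarantee() that the checked stack pointer
// is 8-byte aligned and lies within [stack_base - stack_size, stack_base].
// Thread entry code below does, for instance:
//
//   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
//
// (see java_start() and os::create_attached_thread()).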

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;
pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;
int       os::Aix::_on_pase = -1;
int       os::Aix::_os_version = -1;
int       os::Aix::_stack_page_size = -1;
size_t    os::Aix::_shm_default_page_size = -1;
int       os::Aix::_can_use_64K_pages = -1;
int       os::Aix::_can_use_16M_pages = -1;
int       os::Aix::_xpg_sus_mode = -1;
int       os::Aix::_extshm = -1;
int       os::Aix::_logical_cpus = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static int      g_multipage_error  = -1;   // error analysis for multipage initialization
static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static pid_t    _initial_pid       = 0;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;
static pthread_mutex_t dl_mutex;           // Used to protect dlsym() calls

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return 0xFFFFFFFFFFFFFFFFLL;
  }
}
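
// Note: if memory information cannot be retrieved, the maximum representable
// julong value is returned as an "amount unknown" sentinel.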

julong os::physical_memory() {
  return Aix::physical_memory();
}

////////////////////////////////////////////////////////////////////////////////
// environment support

bool os::getenv(const char* name, char* buf, int len) {
  const char* val = ::getenv(name);
  if (val != NULL && strlen(val) < (size_t)len) {
    strcpy(buf, val);
    return true;
  }
  if (len > 0) buf[0] = 0;  // return a null string
  return false;
}


// Return true if this process runs with extra privileges
// (effective uid/gid differ from real uid/gid).

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function, emulates disclaim64 using multiple 32-bit disclaims
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size a 32-bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
  const unsigned int maxDisclaimSize = 0x80000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);

  char* p = addr;

  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i ++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      //if (Verbose)
      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      //if (Verbose)
      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}
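
// Example usage sketch (addr/size stand for a previously committed range):
//
//   if (!my_disclaim64(addr, size)) {
//     // paging space was not released; the range stays committed as-is
//   }
//
// On success the range reads back as zeros (DISCLAIM_ZEROMEM).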

// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif


// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
    assert(false, "vmgetinfo failed to retrieve page size");
    return SIZE_4K;
  }

}
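
// Example usage (as done in query_multipage_support() below): the page size
// backing the current thread's stack can be queried via the address of a
// local variable:
//
//   int dummy = 0;
//   size_t stack_page_size = os::Aix::query_pagesize(&dummy);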

// Returns the kernel thread id of the currently running thread.
pid_t os::Aix::gettid() {
  return (pid_t) thread_self();
}

void os::Aix::initialize_system_info() {

  // get the number of online (logical) cpus instead of configured
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // retrieve total physical storage
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case SIZE_4K : return "4K";
    case SIZE_64K: return "64K";
    case SIZE_16M: return "16M";
    case SIZE_16G: return "16G";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Retrieve information about multipage size support. Will initialize
// Aix::_page_size, Aix::_stack_page_size, Aix::_can_use_64K_pages,
// Aix::_can_use_16M_pages.
// Must be called before calling os::large_page_init().
void os::Aix::query_multipage_support() {

  guarantee(_page_size == -1 &&
            _stack_page_size == -1 &&
            _can_use_64K_pages == -1 &&
            _can_use_16M_pages == -1 &&
            g_multipage_error == -1,
            "do not call twice");

  _page_size = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(_page_size == SIZE_4K, "surprise!");


  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // Default data page size is influenced either by linker options (-bdatapsize)
  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // default should be 4K.
  size_t data_page_size = SIZE_4K;
  {
    void* p = os::malloc(SIZE_16M, mtInternal);
    guarantee(p != NULL, "malloc failed");
    data_page_size = os::Aix::query_pagesize(p);
    os::free(p);
  }

  // query default shm page size (LDR_CNTRL SHMPSIZE)
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    _shm_default_page_size = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // before querying the stack page size, make sure we are not running as primordial
  // thread (because primordial thread's stack may have different page size than
  // pthread thread stacks). Running a VM on the primordial thread won't work for a
  // number of reasons so we may just as well guarantee it here
  guarantee(!os::Aix::is_primordial_thread(), "Must not be called for primordial thread");

  // query stack page size
  {
    int dummy = 0;
    _stack_page_size = os::Aix::query_pagesize(&dummy);
    // everything else would surprise me and should be looked into
    guarantee(_stack_page_size == SIZE_4K || _stack_page_size == SIZE_64K, "Wrong page size");
    // also, just for completeness: pthread stacks are allocated from C heap, so
    // stack page size should be the same as data page size
    guarantee(_stack_page_size == data_page_size, "stack page size should be the same as data page size");
  }

  // EXTSHM is bad: among other things, it prevents setting pagesize dynamically
  // for system V shm.
  if (Aix::extshm()) {
    if (Verbose) {
      fprintf(stderr, "EXTSHM is active - will disable large page support.\n"
                      "Please make sure EXTSHM is OFF for large page support.\n");
    }
    g_multipage_error = ERROR_MP_EXTSHM_ACTIVE;
    _can_use_64K_pages = _can_use_16M_pages = 0;
    goto query_multipage_support_end;
  }

  // now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      if (Verbose) {
        fprintf(stderr, "vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
        fprintf(stderr, "disabling multipage support.\n");
      }
      g_multipage_error = ERROR_MP_VMGETINFO_FAILED;
      _can_use_64K_pages = _can_use_16M_pages = 0;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    if (Verbose) {
      fprintf(stderr, "vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
      for (int i = 0; i < num_psizes; i ++) {
        fprintf(stderr, " %s ", describe_pagesize(sizes[i]));
      }
      fprintf(stderr, " .\n");
    }

    // Can we use 64K, 16M pages?
    _can_use_64K_pages = 0;
    _can_use_16M_pages = 0;
    for (int i = 0; i < num_psizes; i ++) {
      if (sizes[i] == SIZE_64K) {
        _can_use_64K_pages = 1;
      } else if (sizes[i] == SIZE_16M) {
        _can_use_16M_pages = 1;
      }
    }

    if (!_can_use_64K_pages) {
      g_multipage_error = ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K;
    }

    // Double-check for 16M pages: Even if AIX claims to be able to use 16M pages,
    // there must be an actual 16M page pool, and we must run with enough rights.
    if (_can_use_16M_pages) {
      const int shmid = ::shmget(IPC_PRIVATE, SIZE_16M, IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee(shmid != -1, "shmget failed");
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = SIZE_16M;
      const bool can_set_pagesize = ::shmctl(shmid, SHM_PAGESIZE, &shm_buf) == 0 ? true : false;
      const int en = errno;
      ::shmctl(shmid, IPC_RMID, NULL);
      if (!can_set_pagesize) {
        if (Verbose) {
          fprintf(stderr, "Failed to allocate even one measly 16M page. shmctl failed with %d (%s).\n"
                          "Will deactivate 16M support.\n", en, strerror(en));
        }
        _can_use_16M_pages = 0;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  guarantee(_page_size != -1 &&
            _stack_page_size != -1 &&
            _can_use_64K_pages != -1 &&
            _can_use_16M_pages != -1, "Page sizes not properly initialized");

  if (_can_use_64K_pages) {
    g_multipage_error = 0;
  }

  if (Verbose) {
    fprintf(stderr, "Data page size (C-Heap, bss, etc): %s\n", describe_pagesize(data_page_size));
    fprintf(stderr, "Thread stack page size (pthread): %s\n", describe_pagesize(_stack_page_size));
    fprintf(stderr, "Default shared memory page size: %s\n", describe_pagesize(_shm_default_page_size));
    fprintf(stderr, "Can use 64K pages dynamically with shared memory: %s\n", (_can_use_64K_pages ? "yes" :"no"));
    fprintf(stderr, "Can use 16M pages dynamically with shared memory: %s\n", (_can_use_16M_pages ? "yes" :"no"));
    fprintf(stderr, "Multipage error details: %d\n", g_multipage_error);
  }

} // end os::Aix::query_multipage_support()

// The code for this method was initially derived from the version in os_linux.cpp.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR  "/lib/ext"
#define ENDORSED_DIR    "/lib/endorsed"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR), // extensions dir
         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On AIX we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  // Endorsed standards default directory.
  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
  Arguments::set_endorsed_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf, mtInternal);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
}
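
// Worked example (hypothetical install path): if os::jvm_path() returns
//   /opt/java/jre/lib/ppc64/server/libjvm.so
// the successive strrchr() cuts above yield
//   dll_dir   = /opt/java/jre/lib/ppc64   (strip /libjvm.so and /server)
//   java_home = /opt/java/jre             (additionally strip /ppc64 and /lib)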

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
    return true;
  else
    return false;
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs.)
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
    }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow debugger in
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Retrieve memory information.
// Returns false if something went wrong;
// the content of pmi is undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {

    Unimplemented();
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics.
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo
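
// Example: on a machine with 4 GB of real memory, perfstat reports
// psmt.real_total = 1048576 (4 KB pages); 1048576 * 4096 = 4294967296 bytes.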

// Retrieve global cpu information.
// Returns false if something went wrong;
// the content of pci is undefined in this case.
bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
  assert(pci, "get_cpuinfo: invalid parameter");
  memset(pci, 0, sizeof(cpuinfo_t));

  perfstat_cpu_total_t psct;
  memset (&psct, '\0', sizeof(psct));

  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
    assert(0, "perfstat_cpu_total() failed");
    return false;
  }

  // global cpu information
  strcpy (pci->description, psct.description);
  pci->processorHZ = psct.processorHZ;
  pci->ncpus = psct.ncpus;
  os::Aix::_logical_cpus = psct.ncpus;
  for (int i = 0; i < 3; i++) {
    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
  }

  // get the processor version from _system_configuration
  switch (_system_configuration.version) {
  case PV_7:
    strcpy(pci->version, "Power PC 7");
    break;
  case PV_6_1:
    strcpy(pci->version, "Power PC 6 DD1.x");
    break;
  case PV_6:
    strcpy(pci->version, "Power PC 6");
    break;
  case PV_5:
    strcpy(pci->version, "Power PC 5");
    break;
  case PV_5_2:
    strcpy(pci->version, "Power PC 5_2");
    break;
  case PV_5_3:
    strcpy(pci->version, "Power PC 5_3");
    break;
  case PV_5_Compat:
    strcpy(pci->version, "PV_5_Compat");
    break;
  case PV_6_Compat:
    strcpy(pci->version, "PV_6_Compat");
    break;
  case PV_7_Compat:
    strcpy(pci->version, "PV_7_Compat");
    break;
  default:
    strcpy(pci->version, "unknown");
  }

  return true;

} // end os::Aix::get_cpuinfo

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Aix::libpthread_init() {
  return;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is kernel thread id (similar to Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level start routine
  thread->run();

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (os::Aix::supports_variable_stack_size()) {
    if (stack_size == 0) {
      stack_size = os::Aix::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } // else let thread_create() pick the default value (96 K on AIX)

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  pthread_attr_destroy(&attr);

  if (ret != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "pthread_continue_np failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// thread local storage

int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, NULL);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}

// Note: This is currently not used by VM, as we don't destroy TLS key
// on VM exit.
void os::free_thread_local_storage(int index) {
  int rslt = pthread_key_delete((pthread_key_t)index);
  assert(rslt == 0, "invalid index");
}

void os::thread_local_storage_at_put(int index, void* value) {
  int rslt = pthread_setspecific((pthread_key_t)index, value);
  assert(rslt == 0, "pthread_setspecific failed");
}

extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}
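
// Usage sketch for the TLS wrappers above:
//
//   int index = os::allocate_thread_local_storage();
//   os::thread_local_storage_at_put(index, value);
//   ...
//   os::free_thread_local_storage(index);  // currently unused by the VM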

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}
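
// Note: the elapsed counter above is gettimeofday()-based and counts
// microseconds, which is why elapsed_frequency() is 10^6 and elapsedTime()
// scales the counter by 0.000001.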

// For now, we say that AIX does not support vtime. I have no idea
// whether it can actually be made to (DLD, 9/13/05).

bool os::supports_vtime() { return false; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }
double os::elapsedVTime() {
  // better than nothing, but not much
  return elapsedTime();
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

// We need to manually declare mread_real_time,
// because IBM didn't provide a prototype in time.h.
// (they probably only ever tested in C, not C++)
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {
    Unimplemented();
    return 0;
  }
  else {
    // On AIX use the precision of the processor's real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We'd better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonically increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}


char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }

}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {

  if (errno == 0)  return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() { return (intx)pthread_self(); }
int os::current_process_id() {

  // This implementation returns a unique pid, the pid of the
  // launcher thread that starts the vm 'process'.

  // Under POSIX, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre NPTL behaviour.

  // if you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in osThread_linux.hpp file

  return (int)(_initial_pid ? _initial_pid : getpid());
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
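
// Example: dll_build_name(buf, buflen, "/usr/lib:/opt/lib", "jvm") probes
// /usr/lib/libjvm.so, then /opt/lib/libjvm.so, and leaves the first existing
// candidate in buf; with an empty pname it simply produces "libjvm.so".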

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
  if (lib) {
    if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
      return true;
    } else {
      return false;
    }
  } else {
    lib = LoadedLibraries::find_for_data_address(addr);
    if (lib) {
      if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
        return true;
      } else {
        return false;
      }
    } else {
      return false;
    }
  }
}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
//   it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
//   code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
//   NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
  if (lib) {
    // it's a real code pointer
    return p;
  } else {
    lib = LoadedLibraries::find_for_data_address(p);
    if (lib) {
      // pointer to data segment, potential function descriptor
      address code_entry = (address)(((FunctionDescriptor*)p)->entry());
      if (LoadedLibraries::find_for_text_address(code_entry)) {
        // it's a function descriptor
        return code_entry;
      }
    }
  }
  return NULL;
}
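
// Background: in the AIX 64-bit ABI a function descriptor is a data-segment
// triple { code entry point, TOC anchor, environment pointer };
// ((FunctionDescriptor*)p)->entry() above reads the first slot.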

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  if (offset) {
    *offset = -1;
  }
  if (buf) {
    buf[0] = '\0';
  }

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
  return Decoder::decode(addr, buf, buflen, offset);
}

static int getModuleName(codeptr_t pc,                    // [in] program counter
                         char* p_name, size_t namelen,    // [out] optional: module short name
                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
                         ) {

  // initialize output parameters
  if (p_name && namelen > 0) {
    *p_name = '\0';
  }
  if (p_errmsg && errmsglen > 0) {
    *p_errmsg = '\0';
  }

  const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);
  if (lib) {
    if (p_name && namelen > 0) {
      snprintf(p_name, namelen, "%s", lib->get_shortname());
    }
    return 0;
  }

  if (Verbose) {
    fprintf(stderr, "pc outside any module\n");
  }

  return -1;

}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  if (offset) {
    *offset = -1;
  }
  if (buf) {
    buf[0] = '\0';
  }

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
    return true;
  }
  return false;
}

// Loads .dll/.so and, in case of error, checks whether the .dll/.so was built
// for the same architecture as HotSpot is running on.
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {

  if (ebuf && ebuflen > 0) {
    ebuf[0] = '\0';
    ebuf[ebuflen - 1] = '\0';
  }

  if (!filename || strlen(filename) == 0) {
    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
    return NULL;
  }

  // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependents.
  void* result = ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Reload dll cache. Don't do this in signal handling.
    LoadedLibraries::reload();
    return result;
  } else {
    // error analysis when dlopen fails
    const char* const error_report = ::dlerror();
    if (error_report && ebuf && ebuflen > 0) {
      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
    }
  }
  return NULL;
}

// Glibc-2.0 libdl is not MT safe. If you are building with any glibc,
// chances are you might want to run the generated bits against glibc-2.0
// libdl.so, so always use locking for any version of glibc.
void* os::dll_lookup(void* handle, const char* name) {
  pthread_mutex_lock(&dl_mutex);
  void* res = dlsym(handle, name);
  pthread_mutex_unlock(&dl_mutex);
  return res;
}

void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  st->print("uname:");
  struct utsname name;
  uname(&name);
  st->print(name.sysname); st->print(" ");
  st->print(name.nodename); st->print(" ");
  st->print(name.release); st->print(" ");
  st->print(name.version); st->print(" ");
  st->print(name.machine);
  st->cr();

  // rlimit
  st->print("rlimit:");
  struct rlimit rlim;

  st->print(" STACK ");
  getrlimit(RLIMIT_STACK, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", CORE ");
  getrlimit(RLIMIT_CORE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", NPROC ");
  st->print("%d", sysconf(_SC_CHILD_MAX));

  st->print(", NOFILE ");
  getrlimit(RLIMIT_NOFILE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%d", rlim.rlim_cur);

  st->print(", AS ");
  getrlimit(RLIMIT_AS, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  // Print limits on DATA, because it limits the C-heap.
  st->print(", DATA ");
  getrlimit(RLIMIT_DATA, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);
  st->cr();

  // load average
  st->print("load average:");
  double loadavg[3] = {-1.L, -1.L, -1.L};
  os::loadavg(loadavg, 3);
  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  st->cr();
}
1540 
1541 void os::print_memory_info(outputStream* st) {
1542 
1543   st->print_cr("Memory:");
1544 
1545   st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
1546   st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
1547   st->print_cr("  default shm page size: %s", describe_pagesize(os::Aix::shm_default_page_size()));
1548   st->print_cr("  can use 64K pages dynamically: %s", (os::Aix::can_use_64K_pages() ? "yes" :"no"));
1549   st->print_cr("  can use 16M pages dynamically: %s", (os::Aix::can_use_16M_pages() ? "yes" :"no"));
1550   if (g_multipage_error != 0) {
1551     st->print_cr("  multipage error: %d", g_multipage_error);
1552   }
1553 
1554   // print out LDR_CNTRL because it affects the default page sizes
1555   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1556   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1557 
1558   const char* const extshm = ::getenv("EXTSHM");
1559   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1560 
1561   // Call os::Aix::get_meminfo() to retrieve memory statistics.
1562   os::Aix::meminfo_t mi;
1563   if (os::Aix::get_meminfo(&mi)) {
1564     char buffer[256];
1565     if (os::Aix::on_aix()) {
1566       jio_snprintf(buffer, sizeof(buffer),
1567                    "  physical total : %llu\n"
1568                    "  physical free  : %llu\n"
1569                    "  swap total     : %llu\n"
1570                    "  swap free      : %llu\n",
1571                    mi.real_total,
1572                    mi.real_free,
1573                    mi.pgsp_total,
1574                    mi.pgsp_free);
1575     } else {
1576       Unimplemented();
1577     }
1578     st->print_raw(buffer);
1579   } else {
1580     st->print_cr("  (no more information available)");
1581   }
1582 }
1583 
1584 void os::pd_print_cpu_info(outputStream* st) {
1585   // cpu
1586   st->print("CPU:");
1587   st->print("total %d", os::processor_count());
1588   // It's not safe to query number of active processors after crash
1589   // st->print("(active %d)", os::active_processor_count());
1590   st->print(" %s", VM_Version::cpu_features());
1591   st->cr();
1592 }
1593 
1594 void os::print_siginfo(outputStream* st, void* siginfo) {
1595   // Use common posix version.
1596   os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
1597   st->cr();
1598 }
1599 
1600 
1601 static void print_signal_handler(outputStream* st, int sig,
1602                                  char* buf, size_t buflen);
1603 
1604 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1605   st->print_cr("Signal Handlers:");
1606   print_signal_handler(st, SIGSEGV, buf, buflen);
1607   print_signal_handler(st, SIGBUS , buf, buflen);
1608   print_signal_handler(st, SIGFPE , buf, buflen);
1609   print_signal_handler(st, SIGPIPE, buf, buflen);
1610   print_signal_handler(st, SIGXFSZ, buf, buflen);
1611   print_signal_handler(st, SIGILL , buf, buflen);
1612   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
1613   print_signal_handler(st, SR_signum, buf, buflen);
1614   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1615   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1616   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1617   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1618   print_signal_handler(st, SIGTRAP, buf, buflen);
1619   print_signal_handler(st, SIGDANGER, buf, buflen);
1620 }
1621 
1622 static char saved_jvm_path[MAXPATHLEN] = {0};
1623 
1624 // Find the full path to the current module, libjvm.so or libjvm_g.so
1625 void os::jvm_path(char *buf, jint buflen) {
1626   // Error checking.
1627   if (buflen < MAXPATHLEN) {
1628     assert(false, "must use a large-enough buffer");
1629     buf[0] = '\0';
1630     return;
1631   }
1632   // Lazy resolve the path to current module.
1633   if (saved_jvm_path[0] != 0) {
1634     strcpy(buf, saved_jvm_path);
1635     return;
1636   }
1637 
1638   Dl_info dlinfo;
1639   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1640   assert(ret != 0, "cannot locate libjvm");
1641   char* rp = realpath((char *)dlinfo.dli_fname, buf);
1642   assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1643 
1644   strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
1645   saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
1646 }
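
// Usage sketch (illustrative): callers must pass a buffer of at least
// MAXPATHLEN bytes, otherwise jvm_path() asserts and returns "".
//
//   char path[MAXPATHLEN];
//   os::jvm_path(path, sizeof(path));
//   // path now holds the resolved location of libjvm.so (or libjvm_g.so).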
1647 
1648 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1649   // no prefix required, not even "_"
1650 }
1651 
1652 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1653   // no suffix required
1654 }
1655 
1656 ////////////////////////////////////////////////////////////////////////////////
1657 // sun.misc.Signal support
1658 
1659 static volatile jint sigint_count = 0;
1660 
1661 static void
1662 UserHandler(int sig, void *siginfo, void *context) {
1663   // 4511530 - sem_post is serialized and handled by the manager thread. When
1664   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1665   // don't want to flood the manager thread with sem_post requests.
1666   if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1667     return;
1668 
1669   // Ctrl-C is pressed during error reporting, likely because the error
1670   // handler fails to abort. Let VM die immediately.
1671   if (sig == SIGINT && is_error_reported()) {
1672     os::die();
1673   }
1674 
1675   os::signal_notify(sig);
1676 }
1677 
1678 void* os::user_handler() {
1679   return CAST_FROM_FN_PTR(void*, UserHandler);
1680 }
1681 
1682 extern "C" {
1683   typedef void (*sa_handler_t)(int);
1684   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1685 }
1686 
1687 void* os::signal(int signal_number, void* handler) {
1688   struct sigaction sigAct, oldSigAct;
1689 
1690   sigfillset(&(sigAct.sa_mask));
1691 
1692   // Do not block out synchronous signals in the signal handler.
1693   // Blocking synchronous signals only makes sense if you can really
1694   // be sure that those signals won't happen during signal handling,
1695   // when the blocking applies.  Normal signal handlers are lean and
1696   // do not cause signals. But our signal handlers tend to be "risky"
1697   // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
1698   // On AIX, PASE there was a case where a SIGSEGV happened, followed
1699   // by a SIGILL, which was blocked due to the signal mask. The process
1700   // just hung forever. Better to crash from a secondary signal than to hang.
1701   sigdelset(&(sigAct.sa_mask), SIGSEGV);
1702   sigdelset(&(sigAct.sa_mask), SIGBUS);
1703   sigdelset(&(sigAct.sa_mask), SIGILL);
1704   sigdelset(&(sigAct.sa_mask), SIGFPE);
1705   sigdelset(&(sigAct.sa_mask), SIGTRAP);
1706 
1707   sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1708 
1709   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1710 
1711   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1712     // -1 means registration failed
1713     return (void *)-1;
1714   }
1715 
1716   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1717 }
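
// Usage sketch (illustrative; my_handler is a made-up name): install a
// handler and restore the previous one. Note that (void*)-1 signals a
// registration failure.
//
//   void* old = os::signal(SIGUSR1, CAST_FROM_FN_PTR(void*, my_handler));
//   if (old != (void*)-1) {
//     // ... later:
//     os::signal(SIGUSR1, old);
//   }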
1718 
1719 void os::signal_raise(int signal_number) {
1720   ::raise(signal_number);
1721 }
1722 
1723 //
1724 // The following code is moved from os.cpp for making this
1725 // code platform specific, which it is by its very nature.
1726 //
1727 
1728 // Will be modified when max signal is changed to be dynamic
1729 int os::sigexitnum_pd() {
1730   return NSIG;
1731 }
1732 
1733 // a counter for each possible signal value
1734 static volatile jint pending_signals[NSIG+1] = { 0 };
1735 
1736 // POSIX handshake semaphore (code inherited from the Linux port).
1737 static sem_t sig_sem;
1738 
1739 void os::signal_init_pd() {
1740   // Initialize signal structures
1741   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1742 
1743   // Initialize signal semaphore
1744   int rc = ::sem_init(&sig_sem, 0, 0);
1745   guarantee(rc != -1, "sem_init failed");
1746 }
1747 
1748 void os::signal_notify(int sig) {
1749   Atomic::inc(&pending_signals[sig]);
1750   ::sem_post(&sig_sem);
1751 }
1752 
1753 static int check_pending_signals(bool wait) {
1754   Atomic::store(0, &sigint_count);
1755   for (;;) {
1756     for (int i = 0; i < NSIG + 1; i++) {
1757       jint n = pending_signals[i];
1758       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1759         return i;
1760       }
1761     }
1762     if (!wait) {
1763       return -1;
1764     }
1765     JavaThread *thread = JavaThread::current();
1766     ThreadBlockInVM tbivm(thread);
1767 
1768     bool threadIsSuspended;
1769     do {
1770       thread->set_suspend_equivalent();
1771       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1772 
1773       ::sem_wait(&sig_sem);
1774 
1775       // were we externally suspended while we were waiting?
1776       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1777       if (threadIsSuspended) {
1778         //
1779         // The semaphore has been incremented, but while we were waiting
1780         // another thread suspended us. We don't want to continue running
1781         // while suspended because that would surprise the thread that
1782         // suspended us.
1783         //
1784         ::sem_post(&sig_sem);
1785 
1786         thread->java_suspend_self();
1787       }
1788     } while (threadIsSuspended);
1789   }
1790 }
1791 
1792 int os::signal_lookup() {
1793   return check_pending_signals(false);
1794 }
1795 
1796 int os::signal_wait() {
1797   return check_pending_signals(true);
1798 }
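
// Intended pairing (illustrative sketch): handlers call os::signal_notify()
// to bump the per-signal counter and post sig_sem, while the VM's signal
// dispatcher thread loops on os::signal_wait() and dispatches:
//
//   while (true) {
//     int sig = os::signal_wait();   // blocks on sig_sem
//     if (sig > 0) {
//       // dispatch sig to the registered Java-level handler
//     }
//   }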
1799 
1800 ////////////////////////////////////////////////////////////////////////////////
1801 // Virtual Memory
1802 
1803 // AddrRange describes an immutable address range
1804 //
1805 // This is a helper class for the 'shared memory bookkeeping' below.
1806 class AddrRange {
1807   friend class ShmBkBlock;
1808 
1809   char* _start;
1810   size_t _size;
1811 
1812 public:
1813 
1814   AddrRange(char* start, size_t size)
1815     : _start(start), _size(size)
1816   {}
1817 
1818   AddrRange(const AddrRange& r)
1819     : _start(r.start()), _size(r.size())
1820   {}
1821 
1822   char* start() const { return _start; }
1823   size_t size() const { return _size; }
1824   char* end() const { return _start + _size; }
1825   bool is_empty() const { return _size == 0; }
1826 
1827   static AddrRange empty_range() { return AddrRange(NULL, 0); }
1828 
1829   bool contains(const char* p) const {
1830     return start() <= p && end() > p;
1831   }
1832 
1833   bool contains(const AddrRange& range) const {
1834     return start() <= range.start() && end() >= range.end();
1835   }
1836 
1837   bool intersects(const AddrRange& range) const {
1838     return (range.start() <= start() && range.end() > start()) ||
1839            (range.start() < end() && range.end() >= end()) ||
1840            contains(range);
1841   }
1842 
1843   bool is_same_range(const AddrRange& range) const {
1844     return start() == range.start() && size() == range.size();
1845   }
1846 
1847   // return the closest inside range consisting of whole pages
1848   AddrRange find_closest_aligned_range(size_t pagesize) const {
1849     if (pagesize == 0 || is_empty()) {
1850       return empty_range();
1851     }
1852     char* const from = (char*)align_size_up((intptr_t)_start, pagesize);
1853     char* const to = (char*)align_size_down((intptr_t)end(), pagesize);
1854     if (from > to) {
1855       return empty_range();
1856     }
1857     return AddrRange(from, to - from);
1858   }
1859 };
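
// Worked example for find_closest_aligned_range() (illustrative numbers):
// for the range [0x1100, 0x5300) and pagesize 0x1000 (4K), the start is
// aligned up to 0x2000 and the end aligned down to 0x5000, so the result
// is the inner whole-page range [0x2000, 0x5000) of size 0x3000.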
1860 
1861 ////////////////////////////////////////////////////////////////////////////
1862 // shared memory bookkeeping
1863 //
1864 // the os::reserve_memory() API and friends hand out different kind of memory, depending
1865 // on need and circumstances. Memory may be allocated with mmap() or with shmget/shmat.
1866 //
1867 // But these memory types have to be treated differently. For example, to uncommit
1868 // mmap-based memory, msync(MS_INVALIDATE) is needed, to uncommit shmat-based memory,
1869 // disclaim64() is needed.
1870 //
1871 // Therefore we need to keep track of the allocated memory segments and their
1872 // properties.
1873 
1874 // ShmBkBlock: base class for all blocks in the shared memory bookkeeping
1875 class ShmBkBlock : public CHeapObj<mtInternal> {
1876 
1877   ShmBkBlock* _next;
1878 
1879 protected:
1880 
1881   AddrRange _range;
1882   const size_t _pagesize;
1883   const bool _pinned;
1884 
1885 public:
1886 
1887   ShmBkBlock(AddrRange range, size_t pagesize, bool pinned)
1888     : _range(range), _pagesize(pagesize), _pinned(pinned) , _next(NULL) {
1889 
1890     assert(_pagesize == SIZE_4K || _pagesize == SIZE_64K || _pagesize == SIZE_16M, "invalid page size");
1891     assert(!_range.is_empty(), "invalid range");
1892   }
1893 
1894   virtual void print(outputStream* st) const {
1895     st->print("0x%p ... 0x%p (%llu) - %d %s pages - %s",
1896               _range.start(), _range.end(), _range.size(),
1897               _range.size() / _pagesize, describe_pagesize(_pagesize),
1898               _pinned ? "pinned" : "");
1899   }
1900 
1901   enum Type { MMAP, SHMAT };
1902   virtual Type getType() = 0;
1903 
1904   char* base() const { return _range.start(); }
1905   size_t size() const { return _range.size(); }
1906 
1907   void setAddrRange(AddrRange range) {
1908     _range = range;
1909   }
1910 
1911   bool containsAddress(const char* p) const {
1912     return _range.contains(p);
1913   }
1914 
1915   bool containsRange(const char* p, size_t size) const {
1916     return _range.contains(AddrRange((char*)p, size));
1917   }
1918 
1919   bool isSameRange(const char* p, size_t size) const {
1920     return _range.is_same_range(AddrRange((char*)p, size));
1921   }
1922 
1923   virtual bool disclaim(char* p, size_t size) = 0;
1924   virtual bool release() = 0;
1925 
1926   // blocks live in a list.
1927   ShmBkBlock* next() const { return _next; }
1928   void set_next(ShmBkBlock* blk) { _next = blk; }
1929 
1930 }; // end: ShmBkBlock
1931 
1932 
1933 // ShmBkMappedBlock: describes a block allocated with mmap()
1934 class ShmBkMappedBlock : public ShmBkBlock {
1935 public:
1936 
1937   ShmBkMappedBlock(AddrRange range)
1938     : ShmBkBlock(range, SIZE_4K, false) {} // mmap: always 4K, never pinned
1939 
1940   void print(outputStream* st) const {
1941     ShmBkBlock::print(st);
1942     st->print_cr(" - mmap'ed");
1943   }
1944 
1945   Type getType() {
1946     return MMAP;
1947   }
1948 
1949   bool disclaim(char* p, size_t size) {
1950 
1951     AddrRange r(p, size);
1952 
1953     guarantee(_range.contains(r), "invalid disclaim");
1954 
1955     // only disclaim whole ranges.
1956     const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
1957     if (r2.is_empty()) {
1958       return true;
1959     }
1960 
1961     const int rc = ::msync(r2.start(), r2.size(), MS_INVALIDATE);
1962 
1963     if (rc != 0) {
1964       warning("msync(0x%p, %llu, MS_INVALIDATE) failed (%d)\n", r2.start(), r2.size(), errno);
1965     }
1966 
1967     return rc == 0;
1968   }
1969 
1970   bool release() {
1971     // mmap'ed blocks are released using munmap
1972     if (::munmap(_range.start(), _range.size()) != 0) {
1973       warning("munmap(0x%p, %llu) failed (%d)\n", _range.start(), _range.size(), errno);
1974       return false;
1975     }
1976     return true;
1977   }
1978 }; // end: ShmBkMappedBlock
1979 
1980 // ShmBkShmatedBlock: describes a block allocated with shmget()/shmat()
1981 class ShmBkShmatedBlock : public ShmBkBlock {
1982 public:
1983 
1984   ShmBkShmatedBlock(AddrRange range, size_t pagesize, bool pinned)
1985     : ShmBkBlock(range, pagesize, pinned) {}
1986 
1987   void print(outputStream* st) const {
1988     ShmBkBlock::print(st);
1989     st->print_cr(" - shmat'ed");
1990   }
1991 
1992   Type getType() {
1993     return SHMAT;
1994   }
1995 
1996   bool disclaim(char* p, size_t size) {
1997 
1998     AddrRange r(p, size);
1999 
2000     if (_pinned) {
2001       return true;
2002     }
2003 
2004     // shmat'ed blocks are disclaimed using disclaim64
2005     guarantee(_range.contains(r), "invalid disclaim");
2006 
2007     // only disclaim whole ranges.
2008     const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
2009     if (r2.is_empty()) {
2010       return true;
2011     }
2012 
2013     const bool rc = my_disclaim64(r2.start(), r2.size());
2014 
2015     if (Verbose && !rc) {
2016       warning("failed to disclaim shm %p-%p\n", r2.start(), r2.end());
2017     }
2018 
2019     return rc;
2020   }
2021 
2022   bool release() {
2023     bool rc = false;
2024     if (::shmdt(_range.start()) != 0) {
2025       warning("shmdt(0x%p) failed (%d)\n", _range.start(), errno);
2026     } else {
2027       rc = true;
2028     }
2029     return rc;
2030   }
2031 
2032 }; // end: ShmBkShmatedBlock
2033 
2034 static ShmBkBlock* g_shmbk_list = NULL;
2035 static volatile jint g_shmbk_table_lock = 0;
2036 
2037 // keep some usage statistics
2038 static struct {
2039   int nodes;    // number of nodes in list
2040   size_t bytes; // reserved - not committed - bytes.
2041   int reserves; // how often reserve was called
2042   int lookups;  // how often a lookup was made
2043 } g_shmbk_stats = { 0, 0, 0, 0 };
2044 
2045 // add information about a shared memory segment to the bookkeeping
2046 static void shmbk_register(ShmBkBlock* p_block) {
2047   guarantee(p_block, "logic error");
2048   p_block->set_next(g_shmbk_list);
2049   g_shmbk_list = p_block;
2050   g_shmbk_stats.reserves ++;
2051   g_shmbk_stats.bytes += p_block->size();
2052   g_shmbk_stats.nodes ++;
2053 }
2054 
2055 // remove information about a shared memory segment by its starting address
2056 static void shmbk_unregister(ShmBkBlock* p_block) {
2057   ShmBkBlock* p = g_shmbk_list;
2058   ShmBkBlock* prev = NULL;
2059   while (p) {
2060     if (p == p_block) {
2061       if (prev) {
2062         prev->set_next(p->next());
2063       } else {
2064         g_shmbk_list = p->next();
2065       }
2066       g_shmbk_stats.nodes --;
2067       g_shmbk_stats.bytes -= p->size();
2068       return;
2069     }
2070     prev = p;
2071     p = p->next();
2072   }
2073   assert(false, "should not happen");
2074 }
2075 
2076 // given a pointer, return shared memory bookkeeping record for the segment it points into
2077 // using the returned block info must happen under lock protection
2078 static ShmBkBlock* shmbk_find_by_containing_address(const char* addr) {
2079   g_shmbk_stats.lookups ++;
2080   ShmBkBlock* p = g_shmbk_list;
2081   while (p) {
2082     if (p->containsAddress(addr)) {
2083       return p;
2084     }
2085     p = p->next();
2086   }
2087   return NULL;
2088 }
2089 
2090 // dump all information about all memory segments allocated with os::reserve_memory()
2091 void shmbk_dump_info() {
2092   tty->print_cr("-- shared mem bookkeeping (alive: %d segments, %llu bytes, "
2093     "total reserves: %d total lookups: %d)",
2094     g_shmbk_stats.nodes, g_shmbk_stats.bytes, g_shmbk_stats.reserves, g_shmbk_stats.lookups);
2095   const ShmBkBlock* p = g_shmbk_list;
2096   while (p) {
2097     p->print(tty);
2098     p = p->next();
2099   }
2102 }
2103 
2104 #define LOCK_SHMBK     { ThreadCritical _LOCK_SHMBK;
2105 #define UNLOCK_SHMBK   }
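
// Note on the macros above: LOCK_SHMBK opens a block scope containing a
// ThreadCritical, so the lock is released by the destructor at the brace
// supplied by UNLOCK_SHMBK. Sketch of the expansion:
//
//   LOCK_SHMBK               // -> { ThreadCritical _LOCK_SHMBK;
//     ... bookkeeping ...    //    (lock held here)
//   UNLOCK_SHMBK             // -> }  (lock released)
//
// Returning from inside the scope is safe (the destructor still runs),
// which some debug paths below rely on.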
2106 
2107 // End: shared memory bookkeeping
2108 ////////////////////////////////////////////////////////////////////////////////////////////////////
2109 
2110 int os::vm_page_size() {
2111   // Simply returns the cached AIX page size (set up in os::init).
2112   assert(os::Aix::page_size() != -1, "must call os::init");
2113   return os::Aix::page_size();
2114 }
2115 
2116 // Aix allocates memory by pages.
2117 int os::vm_allocation_granularity() {
2118   assert(os::Aix::page_size() != -1, "must call os::init");
2119   return os::Aix::page_size();
2120 }
2121 
2122 int os::Aix::commit_memory_impl(char* addr, size_t size, bool exec) {
2123 
2124   // Commit is a no-op. There is no explicit commit
2125   // needed on AIX. Memory is committed when touched.
2126   //
2127   // Debug : check address range for validity
2128 #ifdef ASSERT
2129   LOCK_SHMBK
2130     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2131     if (!block) {
2132       fprintf(stderr, "invalid pointer: " INTPTR_FORMAT "\n", (intptr_t)addr);
2133       shmbk_dump_info();
2134       assert(false, "invalid pointer");
2135       return false;
2136     } else if (!block->containsRange(addr, size)) {
2137       fprintf(stderr, "invalid range: " INTPTR_FORMAT " .. " INTPTR_FORMAT "\n", (intptr_t)addr, (intptr_t)(addr + size));
2138       shmbk_dump_info();
2139       assert(false, "invalid range");
2140       return false;
2141     }
2142   UNLOCK_SHMBK
2143 #endif // ASSERT
2144 
2145   return 0;
2146 }
2147 
2148 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2149   return os::Aix::commit_memory_impl(addr, size, exec) == 0;
2150 }
2151 
2152 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2153                                   const char* mesg) {
2154   assert(mesg != NULL, "mesg must be specified");
2155   os::Aix::commit_memory_impl(addr, size, exec);
2156 }
2157 
2158 int os::Aix::commit_memory_impl(char* addr, size_t size,
2159                                 size_t alignment_hint, bool exec) {
2160   return os::Aix::commit_memory_impl(addr, size, exec);
2161 }
2162 
2163 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
2164                           bool exec) {
2165   return os::Aix::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
2166 }
2167 
2168 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2169                                   size_t alignment_hint, bool exec,
2170                                   const char* mesg) {
2171   os::Aix::commit_memory_impl(addr, size, alignment_hint, exec);
2172 }
2173 
2174 bool os::pd_uncommit_memory(char* addr, size_t size) {
2175 
2176   // Delegate to ShmBkBlock class which knows how to uncommit its memory.
2177 
2178   bool rc = false;
2179   LOCK_SHMBK
2180     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2181     if (!block) {
2182       fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
2183       shmbk_dump_info();
2184       assert(false, "invalid pointer");
2185       return false;
2186     } else if (!block->containsRange(addr, size)) {
2187       fprintf(stderr, "invalid range: 0x%p .. 0x%p.\n", addr, addr + size);
2188       shmbk_dump_info();
2189       assert(false, "invalid range");
2190       return false;
2191     }
2192     rc = block->disclaim(addr, size);
2193   UNLOCK_SHMBK
2194 
2195   if (Verbose && !rc) {
2196     warning("failed to disclaim 0x%p .. 0x%p (0x%llX bytes).", addr, addr + size, size);
2197   }
2198   return rc;
2199 }
2200 
2201 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2202   return os::guard_memory(addr, size);
2203 }
2204 
2205 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2206   return os::unguard_memory(addr, size);
2207 }
2208 
2209 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2210 }
2211 
2212 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2213 }
2214 
2215 void os::numa_make_global(char *addr, size_t bytes) {
2216 }
2217 
2218 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2219 }
2220 
2221 bool os::numa_topology_changed() {
2222   return false;
2223 }
2224 
2225 size_t os::numa_get_groups_num() {
2226   return 1;
2227 }
2228 
2229 int os::numa_get_group_id() {
2230   return 0;
2231 }
2232 
2233 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2234   if (size > 0) {
2235     ids[0] = 0;
2236     return 1;
2237   }
2238   return 0;
2239 }
2240 
2241 bool os::get_page_info(char *start, page_info* info) {
2242   return false;
2243 }
2244 
2245 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2246   return end;
2247 }
2248 
2249 // Flags for reserve_shmatted_memory:
2250 #define RESSHM_WISHADDR_OR_FAIL                     1
2251 #define RESSHM_TRY_16M_PAGES                        2
2252 #define RESSHM_16M_PAGES_OR_FAIL                    4
2253 
2254 // Result of reserve_shmatted_memory:
2255 struct shmatted_memory_info_t {
2256   char* addr;
2257   size_t pagesize;
2258   bool pinned;
2259 };
2260 
2261 // Reserve a section of shmatted memory.
2262 // params:
2263 // bytes [in]: size of memory, in bytes
2264 // requested_addr [in]: wish address.
2265 //                      NULL = no wish.
2266 //                      If RESSHM_WISHADDR_OR_FAIL is set in flags and wish address cannot
2267 //                      be obtained, function will fail. Otherwise wish address is treated as hint and
2268 //                      another pointer is returned.
2269 // flags [in]:          some flags. Valid flags are:
2270 //                      RESSHM_WISHADDR_OR_FAIL - fail if wish address is given and cannot be obtained.
2271 //                      RESSHM_TRY_16M_PAGES - try to allocate from 16M page pool
2272 //                          (requires UseLargePages and Use16MPages)
2273 //                      RESSHM_16M_PAGES_OR_FAIL - if you cannot allocate from 16M page pool, fail.
2274 //                          Otherwise any other page size will do.
2275 // p_info [out] :       holds information about the created shared memory segment.
2276 static bool reserve_shmatted_memory(size_t bytes, char* requested_addr, int flags, shmatted_memory_info_t* p_info) {
2277 
2278   assert(p_info, "parameter error");
2279 
2280   // init output struct.
2281   p_info->addr = NULL;
2282 
2283   // neither should we be here for EXTSHM=ON.
2284   if (os::Aix::extshm()) {
2285     ShouldNotReachHere();
2286   }
2287 
2288   // extract flags. sanity checks.
2289   const bool wishaddr_or_fail =
2290     flags & RESSHM_WISHADDR_OR_FAIL;
2291   const bool try_16M_pages =
2292     flags & RESSHM_TRY_16M_PAGES;
2293   const bool f16M_pages_or_fail =
2294     flags & RESSHM_16M_PAGES_OR_FAIL;
2295 
2296   // first check: if a wish address is given and it is mandatory, but not aligned to segment boundary,
2297   // shmat will fail anyway, so save some cycles by failing right away
2298   if (requested_addr && ((uintptr_t)requested_addr % SIZE_256M != 0)) {
2299     if (wishaddr_or_fail) {
2300       return false;
2301     } else {
2302       requested_addr = NULL;
2303     }
2304   }
2305 
2306   char* addr = NULL;
2307 
2308   // Align size of shm up to the largest possible page size, to avoid errors later on when we try to change
2309   // pagesize dynamically.
2310   const size_t size = align_size_up(bytes, SIZE_16M);
2311 
2312   // reserve the shared segment
2313   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
2314   if (shmid == -1) {
2315     warning("shmget(.., " UINT64_FORMAT ", ..) failed (errno: %d).", (uint64_t)size, errno);
2316     return false;
2317   }
2318 
2319   // Important note:
2320   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
2321   // Right after attaching it, we must remove it from the system. System V shm segments are global and
2322   // survive the process.
2323   // So, from here on: Do not assert. Do not return. Always do a "goto cleanup_shm".
2324 
2325   // try forcing the page size
2326   size_t pagesize = -1; // unknown so far
2327 
2328   if (UseLargePages) {
2329 
2330     struct shmid_ds shmbuf;
2331     memset(&shmbuf, 0, sizeof(shmbuf));
2332 
2333     // First, try to take from 16M page pool if...
2334     if (os::Aix::can_use_16M_pages()  // we can ...
2335         && Use16MPages                // we are not explicitly forbidden to do so (-XX:-Use16MPages)..
2336         && try_16M_pages) {           // caller wants us to.
2337       shmbuf.shm_pagesize = SIZE_16M;
2338       if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
2339         pagesize = SIZE_16M;
2340       } else {
2341         warning("Failed to allocate " UINT64_FORMAT " 16M pages. 16M page pool might be exhausted. (shmctl failed with %d)",
2342                 (uint64_t)(size / SIZE_16M), errno);
2343         if (f16M_pages_or_fail) {
2344           goto cleanup_shm;
2345         }
2346       }
2347     }
2348 
2349     // Nothing yet? Try setting 64K pages. Note that I never saw this fail, but in theory it might,
2350     // because the 64K page pool may also be exhausted.
2351     if (pagesize == -1) {
2352       shmbuf.shm_pagesize = SIZE_64K;
2353       if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
2354         pagesize = SIZE_64K;
2355       } else {
2356         warning("Failed to allocate " UINT64_FORMAT " 64K pages. (shmctl failed with %d)",
2357                 (uint64_t)(size / SIZE_64K), errno);
2358         // Here we give up and leave pagesize at -1; later, after attaching, we will query
2359         // the real page size of the attached memory. (In theory it may be something other
2360         // than 4K if LDR_CNTRL SHM_PSIZE is set.)
2361       }
2362     }
2363   }
2364 
2365   // sanity point
2366   assert(pagesize == -1 || pagesize == SIZE_16M || pagesize == SIZE_64K, "wrong page size");
2367 
2368   // Now attach the shared segment.
2369   addr = (char*) shmat(shmid, requested_addr, 0);
2370   if (addr == (char*)-1) {
2371     // How to handle attach failure:
2372     // If it failed for a specific wish address, tolerate this: in that case, if wish address was
2373     // mandatory, fail, if not, retry anywhere.
2374     // If it failed for any other reason, treat that as fatal error.
2375     addr = NULL;
2376     if (requested_addr) {
2377       if (wishaddr_or_fail) {
2378         goto cleanup_shm;
2379       } else {
2380         addr = (char*) shmat(shmid, NULL, 0);
2381         if (addr == (char*)-1) { // fatal
2382           addr = NULL;
2383           warning("shmat failed (errno: %d)", errno);
2384           goto cleanup_shm;
2385         }
2386       }
2387     } else { // fatal
2388       addr = NULL;
2389       warning("shmat failed (errno: %d)", errno);
2390       goto cleanup_shm;
2391     }
2392   }
2393 
2394   // sanity point
2395   assert(addr && addr != (char*) -1, "wrong address");
2396 
2397   // after successful Attach remove the segment - right away.
2398   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2399     warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2400     guarantee(false, "failed to remove shared memory segment!");
2401   }
2402   shmid = -1;
2403 
2404   // query the real page size. In case setting the page size did not work (see above), the system
2405   // may have given us something other than 4K (LDR_CNTRL)
2406   {
2407     const size_t real_pagesize = os::Aix::query_pagesize(addr);
2408     if (pagesize != -1) {
2409       assert(pagesize == real_pagesize, "unexpected pagesize after shmat");
2410     } else {
2411       pagesize = real_pagesize;
2412     }
2413   }
2414 
2415   // Now register the reserved block with internal book keeping.
2416   LOCK_SHMBK
2417     const bool pinned = (pagesize >= SIZE_16M);
2418     ShmBkShmatedBlock* const p_block = new ShmBkShmatedBlock(AddrRange(addr, size), pagesize, pinned);
2419     assert(p_block, "");
2420     shmbk_register(p_block);
2421   UNLOCK_SHMBK
2422 
2423 cleanup_shm:
2424 
2425   // if we have not done so yet, remove the shared memory segment. This is very important.
2426   if (shmid != -1) {
2427     if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2428       warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2429       guarantee(false, "failed to remove shared memory segment!");
2430     }
2431     shmid = -1;
2432   }
2433 
2434   // trace
2435   if (Verbose && !addr) {
2436     if (requested_addr != NULL) {
2437       warning("failed to shm-allocate 0x%llX bytes at wish address 0x%p.", size, requested_addr);
2438     } else {
2439       warning("failed to shm-allocate 0x%llX bytes at any address.", size);
2440     }
2441   }
2442 
2443   // hand info to caller
2444   if (addr) {
2445     p_info->addr = addr;
2446     p_info->pagesize = pagesize;
2447     p_info->pinned = (pagesize == SIZE_16M);
2448   }
2449 
2450   // sanity test:
2451   if (requested_addr && addr && wishaddr_or_fail) {
2452     guarantee(addr == requested_addr, "shmat error");
2453   }
2454 
2455   // just one more test to really make sure we have no dangling shm segments.
2456   guarantee(shmid == -1, "dangling shm segments");
2457 
2458   return addr != NULL;
2459 
2460 } // end: reserve_shmatted_memory
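
// Usage sketch for reserve_shmatted_memory() (illustrative):
//
//   shmatted_memory_info_t info;
//   if (reserve_shmatted_memory(bytes, NULL /* no wish address */,
//                               RESSHM_TRY_16M_PAGES, &info)) {
//     // info.addr, info.pagesize and info.pinned describe the new segment.
//   }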
2461 
2462 // Reserve memory using mmap. Treats the wish address the same way as
2463 // reserve_shmatted_memory() does; returns NULL in case of an error.
2464 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
2465 
2466   // if a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2467   if (requested_addr && ((uintptr_t)requested_addr % os::vm_page_size() != 0)) {
2468     warning("Wish address 0x%p not aligned to page boundary.", requested_addr);
2469     return NULL;
2470   }
2471 
2472   const size_t size = align_size_up(bytes, SIZE_4K);
2473 
2474   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2475   // msync(MS_INVALIDATE) (see os::uncommit_memory)
2476   int flags = MAP_ANONYMOUS | MAP_SHARED;
2477 
2478   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2479   // it means if wishaddress is given but MAP_FIXED is not set.
2480   //
2481   // Note however that this changes semantics in SPEC1170 mode insofar as MAP_FIXED
2482   // clobbers the address range, which is probably not what the caller wants. That's
2483   // why I assert here (again) that the SPEC1170 compat mode is off.
2484   // If we want to be able to run under SPEC1170, we have to do some porting and
2485   // testing.
2486   if (requested_addr != NULL) {
2487     assert(!os::Aix::xpg_sus_mode(), "SPEC1170 mode not allowed.");
2488     flags |= MAP_FIXED;
2489   }
2490 
2491   char* addr = (char*)::mmap(requested_addr, size, PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2492 
2493   if (addr == MAP_FAILED) {
2494     // attach failed: tolerate for specific wish addresses. Not being able to attach
2495     // anywhere is a fatal error.
2496     if (requested_addr == NULL) {
2497       // It's ok to fail here if the machine has not enough memory.
2498       warning("mmap(NULL, 0x%llX, ..) failed (%d)", size, errno);
2499     }
2500     addr = NULL;
2501     goto cleanup_mmap;
2502   }
2503 
2504   // If we did request a specific address and that address was not available, fail.
2505   if (addr && requested_addr) {
2506     guarantee(addr == requested_addr, "unexpected");
2507   }
2508 
2509   // register this mmap'ed segment with book keeping
2510   LOCK_SHMBK
2511     ShmBkMappedBlock* const p_block = new ShmBkMappedBlock(AddrRange(addr, size));
2512     assert(p_block, "");
2513     shmbk_register(p_block);
2514   UNLOCK_SHMBK
2515 
2516 cleanup_mmap:
2517 
2518   // trace
2519   if (Verbose) {
2520     if (addr) {
2521       fprintf(stderr, "mmap-allocated 0x%p .. 0x%p (0x%llX bytes)\n", addr, addr + bytes, bytes);
2522     }
2523     else {
2524       if (requested_addr != NULL) {
2525         warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.", bytes, requested_addr);
2526       } else {
2527         warning("failed to mmap-allocate 0x%llX bytes at any address.", bytes);
2528       }
2529     }
2530   }
2531 
2532   return addr;
2533 
2534 } // end: reserve_mmaped_memory
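
// Usage sketch (illustrative): unlike reserve_shmatted_memory(), the result
// is handed back directly; NULL means failure.
//
//   char* p = reserve_mmaped_memory(bytes, NULL /* attach anywhere */);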
2535 
2536 // Reserves memory; delegates to os::attempt_reserve_memory_at().
2537 // Returns NULL if a wish address is given and could not be obtained.
2538 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2539   return os::attempt_reserve_memory_at(bytes, requested_addr);
2540 }
2541 
2542 bool os::pd_release_memory(char* addr, size_t size) {
2543 
2544   // delegate to ShmBkBlock class which knows how to uncommit its memory.
2545 
2546   bool rc = false;
2547   LOCK_SHMBK
2548     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2549     if (!block) {
2550       fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
2551       shmbk_dump_info();
2552       assert(false, "invalid pointer");
2553       return false;
2554     }
2555     else if (!block->isSameRange(addr, size)) {
2556       if (block->getType() == ShmBkBlock::MMAP) {
2557         // Release either the same range, or a part at the beginning or the end of a range.
2558         if (block->base() == addr && size < block->size()) {
2559           ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base() + size, block->size() - size));
2560           assert(b, "");
2561           shmbk_register(b);
2562           block->setAddrRange(AddrRange(addr, size));
2563         }
2564         else if (addr > block->base() && addr + size == block->base() + block->size()) {
2565           ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base(), block->size() - size));
2566           assert(b, "");
2567           shmbk_register(b);
2568           block->setAddrRange(AddrRange(addr, size));
2569         }
2570         else {
2571           fprintf(stderr, "invalid mmap range: 0x%p .. 0x%p.\n", addr, addr + size);
2572           shmbk_dump_info();
2573           assert(false, "invalid mmap range");
2574           return false;
2575         }
2576       }
2577       else {
2578         // Release only the same range. No partial release allowed.
2579         // Soften the requirement a bit, because the caller may assume it owns a smaller
2580         // size than the block actually has, due to alignment etc.
2581         if (block->base() != addr || block->size() < size) {
2582           fprintf(stderr, "invalid shmget range: 0x%p .. 0x%p.\n", addr, addr + size);
2583           shmbk_dump_info();
2584           assert(false, "invalid shmget range");
2585           return false;
2586         }
2587       }
2588     }
2589     rc = block->release();
2590     assert(rc, "release failed");
2591     // remove block from bookkeeping
2592     shmbk_unregister(block);
2593     delete block;
2594   UNLOCK_SHMBK
2595 
2596   if (!rc) {
2597     warning("failed to release %lu bytes at 0x%p", size, addr);
2598   }
2599 
2600   return rc;
2601 }
2602 
2603 static bool checked_mprotect(char* addr, size_t size, int prot) {
2604 
2605   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2606   // not tell me if protection failed when trying to protect an un-protectable range.
2607   //
2608   // This means if the memory was allocated using shmget/shmat, protection won't work
2609   // but mprotect will still return 0:
2610   //
2611   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2612 
2613   bool rc = (::mprotect(addr, size, prot) == 0);
2614 
2615   if (!rc) {
2616     const char* const s_errno = strerror(errno);
2617     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", (intptr_t)addr, (intptr_t)(addr + size), prot, s_errno);
2618     return false;
2619   }
2620 
2621   // mprotect success check
2622   //
2623   // Mprotect said it changed the protection but can I believe it?
2624   //
2625   // To be sure I need to check the protection afterwards. Try to
2626   // read from protected memory and check whether that causes a segfault.
2627   //
2628   if (!os::Aix::xpg_sus_mode()) {
2629 
2630     if (StubRoutines::SafeFetch32_stub()) {
2631 
2632       const bool read_protected =
2633         (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2634          SafeFetch32((int*)addr, 0x76543210) == 0x76543210);
2635 
2636       if (prot & PROT_READ) {
2637         rc = !read_protected;
2638       } else {
2639         rc = read_protected;
2640       }
2641     }
2642   }
2643   if (!rc) {
2644     assert(false, "mprotect failed.");
2645   }
2646   return rc;
2647 }
2648 
2649 // Set protections specified
2650 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2651   unsigned int p = 0;
2652   switch (prot) {
2653   case MEM_PROT_NONE: p = PROT_NONE; break;
2654   case MEM_PROT_READ: p = PROT_READ; break;
2655   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2656   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2657   default:
2658     ShouldNotReachHere();
2659   }
2660   // is_committed is unused.
2661   return checked_mprotect(addr, size, p);
2662 }
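
// Usage sketch (illustrative): write-protect a committed page and later
// restore read-write access. checked_mprotect() verifies the result with
// SafeFetch32 where possible (see above).
//
//   os::protect_memory(page, os::vm_page_size(), os::MEM_PROT_READ, true);
//   // ...
//   os::protect_memory(page, os::vm_page_size(), os::MEM_PROT_RW, true);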
2663 
2664 bool os::guard_memory(char* addr, size_t size) {
2665   return checked_mprotect(addr, size, PROT_NONE);
2666 }
2667 
2668 bool os::unguard_memory(char* addr, size_t size) {
2669   return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2670 }
2671 
2672 // Large page support
2673 
2674 static size_t _large_page_size = 0;
2675 
2676 // Enable large page support if OS allows that.
2677 void os::large_page_init() {
2678 
2679   // Note: os::Aix::query_multipage_support must run first.
2680 
2681   if (!UseLargePages) {
2682     return;
2683   }
2684 
2685   if (!Aix::can_use_64K_pages()) {
2686     assert(!Aix::can_use_16M_pages(), "64K is a precondition for 16M.");
2687     UseLargePages = false;
2688     return;
2689   }
2690 
2691   if (!Aix::can_use_16M_pages() && Use16MPages) {
2692     fprintf(stderr, "Cannot use 16M pages. Please ensure that there is a 16M page pool"
2693             " and that the VM runs with CAP_BYPASS_RAC_VMM and CAP_PROPAGATE capabilities.\n");
2694   }
2695 
2696   // Do not report 16M page alignment as part of os::_page_sizes if we are
2697   // explicitly forbidden from using 16M pages. Doing so would increase the
2698   // alignment the garbage collector calculates with, slightly increasing
2699   // heap usage. We should only pay for 16M alignment if we really want to
2700   // use 16M pages.
2701   if (Use16MPages && Aix::can_use_16M_pages()) {
2702     _large_page_size = SIZE_16M;
2703     _page_sizes[0] = SIZE_16M;
2704     _page_sizes[1] = SIZE_64K;
2705     _page_sizes[2] = SIZE_4K;
2706     _page_sizes[3] = 0;
2707   } else if (Aix::can_use_64K_pages()) {
2708     _large_page_size = SIZE_64K;
2709     _page_sizes[0] = SIZE_64K;
2710     _page_sizes[1] = SIZE_4K;
2711     _page_sizes[2] = 0;
2712   }
2713 
2714   if (Verbose) {
2715     fprintf(stderr, "Default large page size is 0x%llX.\n", (unsigned long long)_large_page_size);
2716   }
2717 } // end: os::large_page_init()
2718 
2719 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2720   // "exec" is passed in but not used. Creating the shared image for
2721   // the code cache doesn't have an SHM_X executable permission to check.
2722   Unimplemented();
2723   return 0;
2724 }
2725 
2726 bool os::release_memory_special(char* base, size_t bytes) {
2727   // detaching the SHM segment will also delete it, see reserve_memory_special()
2728   Unimplemented();
2729   return false;
2730 }
2731 
2732 size_t os::large_page_size() {
2733   return _large_page_size;
2734 }
2735 
2736 bool os::can_commit_large_page_memory() {
2737   // Well, sadly we cannot commit anything at all (see comment in
2738   // os::commit_memory), but we claim to, so we can make use of large pages.
2739   return true;
2740 }
2741 
2742 bool os::can_execute_large_page_memory() {
2743   // We can do that
2744   return true;
2745 }
2746 
2747 // Reserve memory at an arbitrary address, only if that area is
2748 // available (and not reserved for something else).
2749 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2750 
2751   bool use_mmap = false;
2752 
2753   // mmap: smaller graining, no large page support
2754   // shm: large graining (256M), large page support, limited number of shm segments
2755   //
2756   // Prefer mmap wherever we either do not need large page support or have OS limits
2757 
2758   if (!UseLargePages || bytes < SIZE_16M) {
2759     use_mmap = true;
2760   }
2761 
2762   char* addr = NULL;
2763   if (use_mmap) {
2764     addr = reserve_mmaped_memory(bytes, requested_addr);
2765   } else {
2766     // shmat: wish address is mandatory, and do not try 16M pages here.
2767     shmatted_memory_info_t info;
2768     const int flags = RESSHM_WISHADDR_OR_FAIL;
2769     if (reserve_shmatted_memory(bytes, requested_addr, flags, &info)) {
2770       addr = info.addr;
2771     }
2772   }
2773 
2774   return addr;
2775 }
2776 
2777 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2778   return ::read(fd, buf, nBytes);
2779 }
2780 
2781 void os::naked_short_sleep(jlong ms) {
2782   struct timespec req;
2783 
2784   assert(ms < 1000, "Uninterruptible sleep; short time use only");
2785   req.tv_sec = 0;
2786   if (ms > 0) {
2787     req.tv_nsec = (ms % 1000) * 1000000;
2788   }
2789   else {
2790     req.tv_nsec = 1;
2791   }
2792 
2793   nanosleep(&req, NULL);
2794 
2795   return;
2796 }
2797 
2798 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2799 void os::infinite_sleep() {
2800   while (true) {    // sleep forever ...
2801     ::sleep(100);   // ... 100 seconds at a time
2802   }
2803 }
2804 
2805 // Used to convert frequent JVM_Yield() to nops
2806 bool os::dont_yield() {
2807   return DontYieldALot;
2808 }
2809 
2810 void os::naked_yield() {
2811   sched_yield();
2812 }
2813 
2814 ////////////////////////////////////////////////////////////////////////////////
2815 // thread priority support
2816 
2817 // From AIX manpage to pthread_setschedparam
2818 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2819 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2820 //
2821 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2822 // range from 40 to 80, where 40 is the least favored priority and 80
2823 // is the most favored."
2824 //
2825 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2826 // scheduling there; however, this still leaves iSeries.)
2827 //
2828 // We use the same values for AIX and PASE.
2829 int os::java_to_os_priority[CriticalPriority + 1] = {
2830   54,             // 0 Entry should never be used
2831 
2832   55,             // 1 MinPriority
2833   55,             // 2
2834   56,             // 3
2835 
2836   56,             // 4
2837   57,             // 5 NormPriority
2838   57,             // 6
2839 
2840   58,             // 7
2841   58,             // 8
2842   59,             // 9 NearMaxPriority
2843 
2844   60,             // 10 MaxPriority
2845 
2846   60              // 11 CriticalPriority
2847 };
2848 
2849 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2850   if (!UseThreadPriorities) return OS_OK;
2851   pthread_t thr = thread->osthread()->pthread_id();
2852   int policy = SCHED_OTHER;
2853   struct sched_param param;
2854   param.sched_priority = newpri;
2855   int ret = pthread_setschedparam(thr, policy, &param);
2856 
2857   if (Verbose) {
2858     if (ret == 0) {
2859       fprintf(stderr, "changed priority of thread %d to %d\n", (int)thr, newpri);
2860     } else {
2861       fprintf(stderr, "Could not change priority for thread %d to %d (error %d, %s)\n",
2862               (int)thr, newpri, ret, strerror(ret));
2863     }
2864   }
2865   return (ret == 0) ? OS_OK : OS_ERR;
2866 }
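
// Usage sketch (illustrative): with the table above, NormPriority (5) maps
// to OS priority 57 under SCHED_OTHER:
//
//   OSReturn rc = os::set_native_priority(thread,
//                     os::java_to_os_priority[NormPriority]);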
2867 
2868 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2869   if (!UseThreadPriorities) {
2870     *priority_ptr = java_to_os_priority[NormPriority];
2871     return OS_OK;
2872   }
2873   pthread_t thr = thread->osthread()->pthread_id();
2874   int policy = SCHED_OTHER;
2875   struct sched_param param;
2876   int ret = pthread_getschedparam(thr, &policy, &param);
2877   if (ret == 0) *priority_ptr = param.sched_priority; // param is only valid on success
2878 
2879   return (ret == 0) ? OS_OK : OS_ERR;
2880 }
2881 
2882 // Hint to the underlying OS that a task switch would not be good.
2883 // Void return because it's a hint and can fail.
2884 void os::hint_no_preempt() {}
2885 
2886 ////////////////////////////////////////////////////////////////////////////////
2887 // suspend/resume support
2888 
2889 //  The low-level signal-based suspend/resume support is a remnant of the
2890 //  old VM suspension mechanism that was once used for Java suspension,
2891 //  safepoints etc. within HotSpot. Now there is a single use case for it:
2892 //    - calling get_thread_pc() on the VMThread by the flat-profiler task
2893 //      that runs in the watcher thread.
2894 //  The remaining code is greatly simplified from the more general suspension
2895 //  code that used to be used.
2896 //
2897 //  The protocol is quite simple:
2898 //  - suspend:
2899 //      - sends a signal to the target thread
2900 //      - polls the suspend state of the osthread using a yield loop
2901 //      - target thread signal handler (SR_handler) sets suspend state
2902 //        and blocks in sigsuspend until continued
2903 //  - resume:
2904 //      - sets target osthread state to continue
2905 //      - sends signal to end the sigsuspend loop in the SR_handler
2906 //
2907 //  Note that the SR_lock plays no role in this suspend/resume protocol.
2908 //
2909 
2910 static void resume_clear_context(OSThread *osthread) {
2911   osthread->set_ucontext(NULL);
2912   osthread->set_siginfo(NULL);
2913 }
2914 
2915 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2916   osthread->set_ucontext(context);
2917   osthread->set_siginfo(siginfo);
2918 }
2919 
2920 //
2921 // Handler function invoked when a thread's execution is suspended or
2922 // resumed. We have to be careful that only async-safe functions are
2923 // called here (Note: most pthread functions are not async safe and
2924 // should be avoided.)
2925 //
2926 // Note: sigwait() is a more natural fit than sigsuspend() from an
2927 // interface point of view, but sigwait() prevents the signal handler
2928 // from being run. libpthread would get very confused by not having
2929 // its signal handlers run, which prevents sigwait()'s use with the
2930 // mutex granting signal.
2931 //
2932 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
2933 //
2934 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2935   // Save and restore errno to avoid confusing native code with EINTR
2936   // after sigsuspend.
2937   int old_errno = errno;
2938 
2939   Thread* thread = Thread::current();
2940   OSThread* osthread = thread->osthread();
2941   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2942 
2943   os::SuspendResume::State current = osthread->sr.state();
2944   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2945     suspend_save_context(osthread, siginfo, context);
2946 
2947     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2948     os::SuspendResume::State state = osthread->sr.suspended();
2949     if (state == os::SuspendResume::SR_SUSPENDED) {
2950       sigset_t suspend_set;  // signals for sigsuspend()
2951 
2952       // get current set of blocked signals and unblock resume signal
2953       pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2954       sigdelset(&suspend_set, SR_signum);
2955 
2956       // wait here until we are resumed
2957       while (1) {
2958         sigsuspend(&suspend_set);
2959 
2960         os::SuspendResume::State result = osthread->sr.running();
2961         if (result == os::SuspendResume::SR_RUNNING) {
2962           break;
2963         }
2964       }
2965 
2966     } else if (state == os::SuspendResume::SR_RUNNING) {
2967       // request was cancelled, continue
2968     } else {
2969       ShouldNotReachHere();
2970     }
2971 
2972     resume_clear_context(osthread);
2973   } else if (current == os::SuspendResume::SR_RUNNING) {
2974     // request was cancelled, continue
2975   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2976     // ignore
2977   } else {
2978     ShouldNotReachHere();
2979   }
2980 
2981   errno = old_errno;
2982 }
2983 
2984 
2985 static int SR_initialize() {
2986   struct sigaction act;
2987   char *s;
2988   // Get signal number to use for suspend/resume
2989   if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
2990     int sig = ::strtol(s, 0, 10);
2991     if (sig > 0 && sig < NSIG) {
2992       SR_signum = sig;
2993     }
2994   }
2995 
2996   assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
2997         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
2998 
2999   sigemptyset(&SR_sigset);
3000   sigaddset(&SR_sigset, SR_signum);
3001 
3002   // Set up signal handler for suspend/resume.
3003   act.sa_flags = SA_RESTART|SA_SIGINFO;
3004   act.sa_handler = (void (*)(int)) SR_handler;
3005 
3006   // SR_signum is blocked by default.
3007   // 4528190 - We also need to block the pthread restart signal (32 on all
3008   // supported Linux platforms; note inherited from the Linux port, where
3009   // LinuxThreads needs to block this signal for all threads). So we don't have
3010   // to use hard-coded signal number when setting up the mask.
3011   pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
3012 
3013   if (sigaction(SR_signum, &act, 0) == -1) {
3014     return -1;
3015   }
3016 
3017   // Save signal flag
3018   os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
3019   return 0;
3020 }
3021 
3022 static int SR_finalize() {
3023   return 0;
3024 }
3025 
3026 static int sr_notify(OSThread* osthread) {
3027   int status = pthread_kill(osthread->pthread_id(), SR_signum);
3028   assert_status(status == 0, status, "pthread_kill");
3029   return status;
3030 }
3031 
3032 // "Randomly" selected value for how long we want to spin
3033 // before bailing out on suspending a thread, also how often
3034 // we send a signal to a thread we want to resume
3035 static const int RANDOMLY_LARGE_INTEGER = 1000000;
3036 static const int RANDOMLY_LARGE_INTEGER2 = 100;
3037 
3038 // Returns true on success and false on error - really, an error is fatal,
3039 // but this seems to be the normal response to library errors.
3040 static bool do_suspend(OSThread* osthread) {
3041   assert(osthread->sr.is_running(), "thread should be running");
3042   // mark as suspended and send signal
3043 
3044   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3045     // failed to switch, state wasn't running?
3046     ShouldNotReachHere();
3047     return false;
3048   }
3049 
3050   if (sr_notify(osthread) != 0) {
3051     // try to cancel, switch to running
3052 
3053     os::SuspendResume::State result = osthread->sr.cancel_suspend();
3054     if (result == os::SuspendResume::SR_RUNNING) {
3055       // cancelled
3056       return false;
3057     } else if (result == os::SuspendResume::SR_SUSPENDED) {
3058       // somehow managed to suspend
3059       return true;
3060     } else {
3061       ShouldNotReachHere();
3062       return false;
3063     }
3064   }
3065 
3066   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3067 
3068   for (int n = 0; !osthread->sr.is_suspended(); n++) {
3069     for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
3070       os::naked_yield();
3071     }
3072 
3073     // timeout, try to cancel the request
3074     if (n >= RANDOMLY_LARGE_INTEGER) {
3075       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3076       if (cancelled == os::SuspendResume::SR_RUNNING) {
3077         return false;
3078       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3079         return true;
3080       } else {
3081         ShouldNotReachHere();
3082         return false;
3083       }
3084     }
3085   }
3086 
3087   guarantee(osthread->sr.is_suspended(), "Must be suspended");
3088   return true;
3089 }
3090 
3091 static void do_resume(OSThread* osthread) {
3092   //assert(osthread->sr.is_suspended(), "thread should be suspended");
3093 
3094   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3095     // failed to switch to WAKEUP_REQUEST
3096     ShouldNotReachHere();
3097     return;
3098   }
3099 
3100   while (!osthread->sr.is_running()) {
3101     if (sr_notify(osthread) == 0) {
3102       for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
3103         for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
3104           os::naked_yield();
3105         }
3106       }
3107     } else {
3108       ShouldNotReachHere();
3109     }
3110   }
3111 
3112   guarantee(osthread->sr.is_running(), "Must be running!");
3113 }
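
// Usage pattern (illustrative sketch): the flat profiler uses this pair to
// sample a suspended thread's state, roughly:
//
//   if (do_suspend(thread->osthread())) {
//     // Thread is stopped; SR_handler saved its siginfo/ucontext, so the
//     // PC can be extracted from osthread->ucontext() (platform-specific).
//     do_resume(thread->osthread());
//   }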
3114 
3115 ///////////////////////////////////////////////////////////////////////////////////
3116 // signal handling (except suspend/resume)
3117 
3118 // This routine may be used by user applications as a "hook" to catch signals.
3119 // The user-defined signal handler must pass unrecognized signals to this
3120 // routine, and if it returns true (non-zero), then the signal handler must
3121 // return immediately. If the flag "abort_if_unrecognized" is true, then this
3122 // routine will never return false (zero), but instead will execute a VM panic
3123 // routine to kill the process.
3124 //
3125 // If this routine returns false, it is OK to call it again. This allows
3126 // the user-defined signal handler to perform checks either before or after
3127 // the VM performs its own checks. Naturally, the user code would be making
3128 // a serious error if it tried to handle an exception (such as a null check
3129 // or breakpoint) that the VM was generating for its own correct operation.
3130 //
3131 // This routine may recognize any of the following kinds of signals:
3132 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
3133 // It should be consulted by handlers for any of those signals.
3134 //
3135 // The caller of this routine must pass in the three arguments supplied
3136 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
3137 // field of the structure passed to sigaction(). This routine assumes that
3138 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3139 //
3140 // Note that the VM will print warnings if it detects conflicting signal
3141 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3142 //
3143 extern "C" JNIEXPORT int
3144 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
3145 
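// Illustrative sketch only (not part of the VM): a user-defined handler that
// forwards unrecognized signals to JVM_handle_aix_signal as described above.
// The handler name is hypothetical; it would be installed via sigaction() with
// sa_flags including SA_SIGINFO and SA_RESTART, as the contract above requires.
//
//   static void my_signal_handler(int sig, siginfo_t* info, void* uc) {
//     // Give the VM the first shot; do not abort if it is unrecognized.
//     if (JVM_handle_aix_signal(sig, info, uc, 0 /* abort_if_unrecognized */)) {
//       return;  // the VM handled the signal
//     }
//     // ... application-specific handling ...
//   }
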
3146 // Set thread signal mask. (For some reason, on AIX sigthreadmask() seems
3147 // to be the thing to call; the documentation is not terribly clear about whether
3148 // pthread_sigmask also works, and if it does, whether it does the same.)
3149 bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
3150   const int rc = ::pthread_sigmask(how, set, oset);
3151   // Return value semantics differ slightly in the error case:
3152   // pthread_sigmask returns the error number, sigthreadmask returns -1 and
3153   // sets the global errno (so pthread_sigmask is more thread-safe for error
3154   // handling). But success is always 0.
3155   return rc == 0;
3156 }
3157 
3158 // Function to unblock all signals which are, according to POSIX, typical
3159 // program error signals. If they are raised while blocked, they typically
3160 // will bring down the process immediately.
3161 bool unblock_program_error_signals() {
3162   sigset_t set;
3163   ::sigemptyset(&set);
3164   ::sigaddset(&set, SIGILL);
3165   ::sigaddset(&set, SIGBUS);
3166   ::sigaddset(&set, SIGFPE);
3167   ::sigaddset(&set, SIGSEGV);
3168   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
3169 }
3170 
3171 // Renamed from 'signalHandler' to avoid collision with other shared libs.
3172 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
3173   assert(info != NULL && uc != NULL, "it must be old kernel");
3174 
3175   // Never leave program error signals blocked;
3176   // on all our platforms they would bring down the process immediately when
3177   // getting raised while being blocked.
3178   unblock_program_error_signals();
3179 
3180   JVM_handle_aix_signal(sig, info, uc, true);
3181 }
3182 
3183 
3184 // This boolean allows users to forward their own non-matching signals
3185 // to JVM_handle_aix_signal, harmlessly.
3186 bool os::Aix::signal_handlers_are_installed = false;
3187 
3188 // For signal-chaining
3189 struct sigaction os::Aix::sigact[MAXSIGNUM];
3190 unsigned int os::Aix::sigs = 0;
3191 bool os::Aix::libjsig_is_loaded = false;
3192 typedef struct sigaction *(*get_signal_t)(int);
3193 get_signal_t os::Aix::get_signal_action = NULL;
3194 
3195 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
3196   struct sigaction *actp = NULL;
3197 
3198   if (libjsig_is_loaded) {
3199     // Retrieve the old signal handler from libjsig
3200     actp = (*get_signal_action)(sig);
3201   }
3202   if (actp == NULL) {
3203     // Retrieve the preinstalled signal handler from jvm
3204     actp = get_preinstalled_handler(sig);
3205   }
3206 
3207   return actp;
3208 }
3209 
3210 static bool call_chained_handler(struct sigaction *actp, int sig,
3211                                  siginfo_t *siginfo, void *context) {
3212   // Call the old signal handler
3213   if (actp->sa_handler == SIG_DFL) {
3214     // It's more reasonable to let jvm treat it as an unexpected exception
3215     // instead of taking the default action.
3216     return false;
3217   } else if (actp->sa_handler != SIG_IGN) {
3218     if ((actp->sa_flags & SA_NODEFER) == 0) {
3219       // automatically block the signal
3220       sigaddset(&(actp->sa_mask), sig);
3221     }
3222 
3223     sa_handler_t hand = NULL;
3224     sa_sigaction_t sa = NULL;
3225     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3226     // retrieve the chained handler
3227     if (siginfo_flag_set) {
3228       sa = actp->sa_sigaction;
3229     } else {
3230       hand = actp->sa_handler;
3231     }
3232 
3233     if ((actp->sa_flags & SA_RESETHAND) != 0) {
3234       actp->sa_handler = SIG_DFL;
3235     }
3236 
3237     // try to honor the signal mask
3238     sigset_t oset;
3239     pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3240 
3241     // call into the chained handler
3242     if (siginfo_flag_set) {
3243       (*sa)(sig, siginfo, context);
3244     } else {
3245       (*hand)(sig);
3246     }
3247 
3248     // restore the signal mask
3249     pthread_sigmask(SIG_SETMASK, &oset, 0);
3250   }
3251   // Tell jvm's signal handler the signal is taken care of.
3252   return true;
3253 }
3254 
3255 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3256   bool chained = false;
3257   // signal-chaining
3258   if (UseSignalChaining) {
3259     struct sigaction *actp = get_chained_signal_action(sig);
3260     if (actp != NULL) {
3261       chained = call_chained_handler(actp, sig, siginfo, context);
3262     }
3263   }
3264   return chained;
3265 }
3266 
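// Illustrative flow (a sketch, not additional VM code): suppose user code
// installed its own SIGSEGV handler before the VM set up javaSignalHandler.
// With -XX:+UseSignalChaining, a SIGSEGV that the VM does not want to handle
// itself is then forwarded roughly like this:
//
//   javaSignalHandler(SIGSEGV, info, uc)
//     -> JVM_handle_aix_signal(...)               // VM does not claim the signal
//        -> os::Aix::chained_handler(SIGSEGV, info, uc)
//           -> get_chained_signal_action(SIGSEGV) // libjsig or sigact[] lookup
//              -> call_chained_handler(...)       // invokes the saved user handler
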
3267 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3268   if ((((unsigned int)1 << sig) & sigs) != 0) {
3269     return &sigact[sig];
3270   }
3271   return NULL;
3272 }
3273 
3274 void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3275   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3276   sigact[sig] = oldAct;
3277   sigs |= (unsigned int)1 << sig;
3278 }
3279 
3280 // for diagnostics
3281 int os::Aix::sigflags[MAXSIGNUM];
3282 
3283 int os::Aix::get_our_sigflags(int sig) {
3284   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3285   return sigflags[sig];
3286 }
3287 
3288 void os::Aix::set_our_sigflags(int sig, int flags) {
3289   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3290   sigflags[sig] = flags;
3291 }
3292 
3293 void os::Aix::set_signal_handler(int sig, bool set_installed) {
3294   // Check for overwrite.
3295   struct sigaction oldAct;
3296   sigaction(sig, (struct sigaction*)NULL, &oldAct);
3297 
3298   void* oldhand = oldAct.sa_sigaction
3299     ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3300     : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3301   // Renamed 'signalHandler' to avoid collision with other shared libs.
3302   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3303       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3304       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3305     if (AllowUserSignalHandlers || !set_installed) {
3306       // Do not overwrite; user takes responsibility to forward to us.
3307       return;
3308     } else if (UseSignalChaining) {
3309       // save the old handler in jvm
3310       save_preinstalled_handler(sig, oldAct);
3311       // libjsig also interposes the sigaction() call below and saves the
3312       // old sigaction on its own.
3313     } else {
3314       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
3315                     "%#lx for signal %d.", (long)oldhand, sig));
3316     }
3317   }
3318 
3319   struct sigaction sigAct;
3320   sigfillset(&(sigAct.sa_mask));
3321   if (!set_installed) {
3322     sigAct.sa_handler = SIG_DFL;
3323     sigAct.sa_flags = SA_RESTART;
3324   } else {
3325     // Renamed 'signalHandler' to avoid collision with other shared libs.
3326     sigAct.sa_sigaction = javaSignalHandler;
3327     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3328   }
3329   // Save the flags that we are about to set.
3330   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3331   sigflags[sig] = sigAct.sa_flags;
3332 
3333   int ret = sigaction(sig, &sigAct, &oldAct);
3334   assert(ret == 0, "check");
3335 
3336   void* oldhand2 = oldAct.sa_sigaction
3337                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3338                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3339   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3340 }
3341 
3342 // Install signal handlers for signals that HotSpot needs to
3343 // handle in order to support Java-level exception handling.
3344 void os::Aix::install_signal_handlers() {
3345   if (!signal_handlers_are_installed) {
3346     signal_handlers_are_installed = true;
3347 
3348     // signal-chaining
3349     typedef void (*signal_setting_t)();
3350     signal_setting_t begin_signal_setting = NULL;
3351     signal_setting_t end_signal_setting = NULL;
3352     begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3353                              dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3354     if (begin_signal_setting != NULL) {
3355       end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3356                              dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3357       get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3358                             dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3359       libjsig_is_loaded = true;
3360       assert(UseSignalChaining, "should enable signal-chaining");
3361     }
3362     if (libjsig_is_loaded) {
3363       // Tell libjsig jvm is setting signal handlers
3364       (*begin_signal_setting)();
3365     }
3366 
3367     set_signal_handler(SIGSEGV, true);
3368     set_signal_handler(SIGPIPE, true);
3369     set_signal_handler(SIGBUS, true);
3370     set_signal_handler(SIGILL, true);
3371     set_signal_handler(SIGFPE, true);
3372     set_signal_handler(SIGTRAP, true);
3373     set_signal_handler(SIGXFSZ, true);
3374     set_signal_handler(SIGDANGER, true);
3375 
3376     if (libjsig_is_loaded) {
3377       // Tell libjsig jvm finishes setting signal handlers
3378       (*end_signal_setting)();
3379     }
3380 
3381     // We don't activate the signal checker if libjsig is in place; we trust ourselves,
3382     // and if AllowUserSignalHandlers is set, all bets are off anyway.
3383     // Log that signal checking is off only if -verbose:jni is specified.
3384     if (CheckJNICalls) {
3385       if (libjsig_is_loaded) {
3386         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3387         check_signals = false;
3388       }
3389       if (AllowUserSignalHandlers) {
3390         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3391         check_signals = false;
3392       }
3393       // need to initialize check_signal_done
3394       ::sigemptyset(&check_signal_done);
3395     }
3396   }
3397 }
3398 
3399 static const char* get_signal_handler_name(address handler,
3400                                            char* buf, int buflen) {
3401   int offset;
3402   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3403   if (found) {
3404     // skip directory names
3405     const char *p1, *p2;
3406     p1 = buf;
3407     size_t len = strlen(os::file_separator());
3408     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3409     // The way os::dll_address_to_library_name is implemented on Aix
3410     // right now, it always returns -1 for the offset, which is not
3411     // terribly informative.
3412     // Will fix that. For now, omit the offset.
3413     jio_snprintf(buf, buflen, "%s", p1);
3414   } else {
3415     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3416   }
3417   return buf;
3418 }
3419 
3420 static void print_signal_handler(outputStream* st, int sig,
3421                                  char* buf, size_t buflen) {
3422   struct sigaction sa;
3423   sigaction(sig, NULL, &sa);
3424 
3425   st->print("%s: ", os::exception_name(sig, buf, buflen));
3426 
3427   address handler = (sa.sa_flags & SA_SIGINFO)
3428     ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3429     : CAST_FROM_FN_PTR(address, sa.sa_handler);
3430 
3431   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3432     st->print("SIG_DFL");
3433   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3434     st->print("SIG_IGN");
3435   } else {
3436     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3437   }
3438 
3439   // Print readable mask.
3440   st->print(", sa_mask[0]=");
3441   os::Posix::print_signal_set_short(st, &sa.sa_mask);
3442 
3443   address rh = VMError::get_resetted_sighandler(sig);
3444   // Maybe the handler was reset by VMError?
3445   if (rh != NULL) {
3446     handler = rh;
3447     sa.sa_flags = VMError::get_resetted_sigflags(sig);
3448   }
3449 
3450   // Print textual representation of sa_flags.
3451   st->print(", sa_flags=");
3452   os::Posix::print_sa_flags(st, sa.sa_flags);
3453 
3454   // Check: is it our handler?
3455   if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3456       handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
3457     // It is our signal handler.
3458     // Check the flags; warn if they no longer match the ones we set.
3459     if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
3460       st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
3461                 os::Aix::get_our_sigflags(sig));
3462     }
3463   }
3464   st->cr();
3465 }
3466 
3467 
3468 #define DO_SIGNAL_CHECK(sig) \
3469   if (!sigismember(&check_signal_done, sig)) \
3470     os::Aix::check_signal_handler(sig)
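
// For example, DO_SIGNAL_CHECK(SIGSEGV); expands to:
//
//   if (!sigismember(&check_signal_done, SIGSEGV))
//     os::Aix::check_signal_handler(SIGSEGV);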
3471 
3472 // This method is a periodic task to check for misbehaving JNI applications
3473 // under CheckJNI; we can add any other periodic checks here.
3474 
3475 void os::run_periodic_checks() {
3476 
3477   if (!check_signals) return;
3478 
3479   // SEGV and BUS, if overridden, could potentially prevent
3480   // the generation of hs*.log in the event of a crash, and debugging
3481   // such a case can be very challenging, so for good measure we
3482   // absolutely check the following:
3483   DO_SIGNAL_CHECK(SIGSEGV);
3484   DO_SIGNAL_CHECK(SIGILL);
3485   DO_SIGNAL_CHECK(SIGFPE);
3486   DO_SIGNAL_CHECK(SIGBUS);
3487   DO_SIGNAL_CHECK(SIGPIPE);
3488   DO_SIGNAL_CHECK(SIGXFSZ);
3489   if (UseSIGTRAP) {
3490     DO_SIGNAL_CHECK(SIGTRAP);
3491   }
3492   DO_SIGNAL_CHECK(SIGDANGER);
3493 
3494   // ReduceSignalUsage allows the user to override these handlers;
3495   // see comments at the very top of this file and in jvm_aix.h.
3496   if (!ReduceSignalUsage) {
3497     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3498     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3499     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3500     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3501   }
3502 
3503   DO_SIGNAL_CHECK(SR_signum);
3504   DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
3505 }
3506 
3507 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3508 
3509 static os_sigaction_t os_sigaction = NULL;
3510 
3511 void os::Aix::check_signal_handler(int sig) {
3512   char buf[O_BUFLEN];
3513   address jvmHandler = NULL;
3514 
3515   struct sigaction act;
3516   if (os_sigaction == NULL) {
3517     // only trust the default sigaction, in case it has been interposed
3518     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3519     if (os_sigaction == NULL) return;
3520   }
3521 
3522   os_sigaction(sig, (struct sigaction*)NULL, &act);
3523 
3524   address thisHandler = (act.sa_flags & SA_SIGINFO)
3525     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3526     : CAST_FROM_FN_PTR(address, act.sa_handler);
3527 
3528 
3529   switch(sig) {
3530   case SIGSEGV:
3531   case SIGBUS:
3532   case SIGFPE:
3533   case SIGPIPE:
3534   case SIGILL:
3535   case SIGXFSZ:
3536     // Renamed 'signalHandler' to avoid collision with other shared libs.
3537     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3538     break;
3539 
3540   case SHUTDOWN1_SIGNAL:
3541   case SHUTDOWN2_SIGNAL:
3542   case SHUTDOWN3_SIGNAL:
3543   case BREAK_SIGNAL:
3544     jvmHandler = (address)user_handler();
3545     break;
3546 
3547   case INTERRUPT_SIGNAL:
3548     jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
3549     break;
3550 
3551   default:
3552     if (sig == SR_signum) {
3553       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3554     } else {
3555       return;
3556     }
3557     break;
3558   }
3559 
3560   if (thisHandler != jvmHandler) {
3561     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3562     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3563     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3564     // No need to check this sig any longer
3565     sigaddset(&check_signal_done, sig);
3566     // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned to SIG_IGN.
3567     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3568       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3569                     exception_name(sig, buf, O_BUFLEN));
3570     }
3571   } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3572     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3573     tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
3574     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
3575     // No need to check this sig any longer
3576     sigaddset(&check_signal_done, sig);
3577   }
3578 
3579   // Dump all the signal handlers.
3580   if (sigismember(&check_signal_done, sig)) {
3581     print_signal_handlers(tty, buf, O_BUFLEN);
3582   }
3583 }
3584 
3585 extern bool signal_name(int signo, char* buf, size_t len);
3586 
3587 const char* os::exception_name(int exception_code, char* buf, size_t size) {
3588   if (0 < exception_code && exception_code <= SIGRTMAX) {
3589     // signal
3590     if (!signal_name(exception_code, buf, size)) {
3591       jio_snprintf(buf, size, "SIG%d", exception_code);
3592     }
3593     return buf;
3594   } else {
3595     return NULL;
3596   }
3597 }
3598 
3599 // To install functions for the atexit call.
3600 extern "C" {
3601   static void perfMemory_exit_helper() {
3602     perfMemory_exit();
3603   }
3604 }
3605 
3606 // This is called _before_ most of the global arguments have been parsed.
3607 void os::init(void) {
3608   // This is basic, we want to know if that ever changes.
3609   // (The shared memory boundary is supposed to be 256M aligned.)
3610   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3611 
3612   // First off, we need to know whether we run on AIX or PASE, and
3613   // the OS level we run on.
3614   os::Aix::initialize_os_info();
3615 
3616   // Scan environment (SPEC1170 behaviour, etc)
3617   os::Aix::scan_environment();
3618 
3619   // Check which pages are supported by AIX.
3620   os::Aix::query_multipage_support();
3621 
3622   // Next, we need to initialize libo4 and libperfstat libraries.
3623   if (os::Aix::on_pase()) {
3624     os::Aix::initialize_libo4();
3625   } else {
3626     os::Aix::initialize_libperfstat();
3627   }
3628 
3629   // Reset the perfstat information provided by ODM.
3630   if (os::Aix::on_aix()) {
3631     libperfstat::perfstat_reset();
3632   }
3633 
3634   // Now initialize basic system properties. Note that for some of the values we
3635   // need libperfstat etc.
3636   os::Aix::initialize_system_info();
3637 
3638   // Initialize large page support.
3639   if (UseLargePages) {
3640     os::large_page_init();
3641     if (!UseLargePages) {
3642       // initialize os::_page_sizes
3643       _page_sizes[0] = Aix::page_size();
3644       _page_sizes[1] = 0;
3645       if (Verbose) {
3646         fprintf(stderr, "Large Page initialization failed: setting UseLargePages=0.\n");
3647       }
3648     }
3649   } else {
3650     // initialize os::_page_sizes
3651     _page_sizes[0] = Aix::page_size();
3652     _page_sizes[1] = 0;
3653   }
3654 
3655   // debug trace
3656   if (Verbose) {
3657     fprintf(stderr, "os::vm_page_size 0x%llX\n", os::vm_page_size());
3658     fprintf(stderr, "os::large_page_size 0x%llX\n", os::large_page_size());
3659     fprintf(stderr, "os::_page_sizes = ( ");
3660     for (int i = 0; _page_sizes[i]; i++) {
3661       fprintf(stderr, " %s ", describe_pagesize(_page_sizes[i]));
3662     }
3663     fprintf(stderr, ")\n");
3664   }
3665 
3666   _initial_pid = getpid();
3667 
3668   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3669 
3670   init_random(1234567);
3671 
3672   ThreadCritical::initialize();
3673 
3674   // Main_thread points to the aboriginal thread.
3675   Aix::_main_thread = pthread_self();
3676 
3677   initial_time_count = os::elapsed_counter();
3678   pthread_mutex_init(&dl_mutex, NULL);
3679 }
3680 
3681 // this is called _after_ the global arguments have been parsed
3682 jint os::init_2(void) {
3683 
3684   if (Verbose) {
3685     fprintf(stderr, "processor count: %d\n", os::_processor_count);
3686     fprintf(stderr, "physical memory: %lu\n", Aix::_physical_memory);
3687   }
3688 
3689   // initially build up the loaded dll map
3690   LoadedLibraries::reload();
3691 
3692   const int page_size = Aix::page_size();
3693   const int map_size = page_size;
3694 
3695   address map_address = (address) MAP_FAILED;
3696   const int prot  = PROT_READ;
3697   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3698 
3699   // use optimized addresses for the polling page,
3700   // e.g. map it to a special 32-bit address.
3701   if (OptimizePollingPageLocation) {
3702     // architecture-specific list of address wishes:
3703     address address_wishes[] = {
3704       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3705       // PPC64: all address wishes are non-negative 32 bit values where
3706       // the lower 16 bits are all zero. We can load these addresses
3707       // with a single ppc_lis instruction.
3708       (address) 0x30000000, (address) 0x31000000,
3709       (address) 0x32000000, (address) 0x33000000,
3710       (address) 0x40000000, (address) 0x41000000,
3711       (address) 0x42000000, (address) 0x43000000,
3712       (address) 0x50000000, (address) 0x51000000,
3713       (address) 0x52000000, (address) 0x53000000,
3714       (address) 0x60000000, (address) 0x61000000,
3715       (address) 0x62000000, (address) 0x63000000
3716     };
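    // (For example, the wish 0x30000000 can be materialized with a single
    // "lis rX, 0x3000" instruction, since lis loads a 16-bit immediate
    // shifted left by 16 bits.)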
3717     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3718 
3719     // iterate over the list of address wishes:
3720     for (int i=0; i<address_wishes_length; i++) {
3721       // try to map with current address wish.
3722       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3723       // fail if the address is already mapped.
3724       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3725                                      map_size, prot,
3726                                      flags | MAP_FIXED,
3727                                      -1, 0);
3728       if (Verbose) {
3729         fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3730                 address_wishes[i], map_address + (ssize_t)page_size);
3731       }
3732 
3733       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3734         // map succeeded and map_address is at wished address, exit loop.
3735         break;
3736       }
3737 
3738       if (map_address != (address) MAP_FAILED) {
3739         // map succeeded, but polling_page is not at wished address, unmap and continue.
3740         ::munmap(map_address, map_size);
3741         map_address = (address) MAP_FAILED;
3742       }
3743       // map failed, continue loop.
3744     }
3745   } // end OptimizePollingPageLocation
3746 
3747   if (map_address == (address) MAP_FAILED) {
3748     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3749   }
3750   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3751   os::set_polling_page(map_address);
3752 
3753   if (!UseMembar) {
3754     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3755     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3756     os::set_memory_serialize_page(mem_serialize_page);
3757 
3758 #ifndef PRODUCT
3759     if (Verbose && PrintMiscellaneous)
3760       tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3761 #endif
3762   }
3763 
3764   // initialize suspend/resume support - must do this before signal_sets_init()
3765   if (SR_initialize() != 0) {
3766     perror("SR_initialize failed");
3767     return JNI_ERR;
3768   }
3769 
3770   Aix::signal_sets_init();
3771   Aix::install_signal_handlers();
3772 
3773   // Check minimum allowable stack size for thread creation and to initialize
3774   // the java system classes, including StackOverflowError - depends on page
3775   // size. Add a page for compiler2 recursion in main thread.
3776   // Add in 2*BytesPerWord times page size to account for VM stack during
3777   // class initialization depending on 32 or 64 bit VM.
3778   os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3779             (size_t)(StackYellowPages+StackRedPages+StackShadowPages +
3780                      2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::page_size());
3781 
3782   size_t threadStackSizeInBytes = ThreadStackSize * K;
3783   if (threadStackSizeInBytes != 0 &&
3784       threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3785         tty->print_cr("\nThe stack size specified is too small. "
3786                       "Specify at least %dk",
3787                       os::Aix::min_stack_allowed / K);
3788         return JNI_ERR;
3789   }
3790 
3791   // Make the stack size a multiple of the page size so that
3792   // the yellow/red zones can be guarded.
3793   // note that this can be 0, if no default stacksize was set
3794   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3795 
3796   Aix::libpthread_init();
3797 
3798   if (MaxFDLimit) {
3799     // set the number of file descriptors to max. print out error
3800     // if getrlimit/setrlimit fails but continue regardless.
3801     struct rlimit nbr_files;
3802     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3803     if (status != 0) {
3804       if (PrintMiscellaneous && (Verbose || WizardMode))
3805         perror("os::init_2 getrlimit failed");
3806     } else {
3807       nbr_files.rlim_cur = nbr_files.rlim_max;
3808       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3809       if (status != 0) {
3810         if (PrintMiscellaneous && (Verbose || WizardMode))
3811           perror("os::init_2 setrlimit failed");
3812       }
3813     }
3814   }
3815 
3816   if (PerfAllowAtExitRegistration) {
3817     // only register atexit functions if PerfAllowAtExitRegistration is set.
3818     // atexit functions can be delayed until process exit time, which
3819     // can be problematic for embedded VM situations. Embedded VMs should
3820     // call DestroyJavaVM() to assure that VM resources are released.
3821 
3822     // note: perfMemory_exit_helper atexit function may be removed in
3823     // the future if the appropriate cleanup code can be added to the
3824     // VM_Exit VMOperation's doit method.
3825     if (atexit(perfMemory_exit_helper) != 0) {
3826       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3827     }
3828   }
3829 
3830   return JNI_OK;
3831 }
3832 
3833 // Mark the polling page as unreadable
3834 void os::make_polling_page_unreadable(void) {
3835   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3836     fatal("Could not disable polling page");
3837   }
3838 };
3839 
3840 // Mark the polling page as readable
3841 void os::make_polling_page_readable(void) {
3842   // Changed according to os_linux.cpp.
3843   if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3844     fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
3845   }
3846 };
3847 
3848 int os::active_processor_count() {
3849   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3850   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3851   return online_cpus;
3852 }
3853 
3854 void os::set_native_thread_name(const char *name) {
3855   // Not yet implemented.
3856   return;
3857 }
3858 
3859 bool os::distribute_processes(uint length, uint* distribution) {
3860   // Not yet implemented.
3861   return false;
3862 }
3863 
3864 bool os::bind_to_processor(uint processor_id) {
3865   // Not yet implemented.
3866   return false;
3867 }
3868 
3869 void os::SuspendedThreadTask::internal_do_task() {
3870   if (do_suspend(_thread->osthread())) {
3871     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3872     do_task(context);
3873     do_resume(_thread->osthread());
3874   }
3875 }
3876 
3877 class PcFetcher : public os::SuspendedThreadTask {
3878 public:
3879   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3880   ExtendedPC result();
3881 protected:
3882   void do_task(const os::SuspendedThreadTaskContext& context);
3883 private:
3884   ExtendedPC _epc;
3885 };
3886 
3887 ExtendedPC PcFetcher::result() {
3888   guarantee(is_done(), "task is not done yet.");
3889   return _epc;
3890 }
3891 
3892 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3893   Thread* thread = context.thread();
3894   OSThread* osthread = thread->osthread();
3895   if (osthread->ucontext() != NULL) {
3896     _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
3897   } else {
3898     // NULL context is unexpected, double-check this is the VMThread.
3899     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3900   }
3901 }
3902 
3903 // Suspends the target using the signal mechanism and then grabs the PC before
3904 // resuming the target. Used by the flat-profiler only.
3905 ExtendedPC os::get_thread_pc(Thread* thread) {
3906   // Make sure that it is called by the watcher for the VMThread.
3907   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3908   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3909 
3910   PcFetcher fetcher(thread);
3911   fetcher.run();
3912   return fetcher.result();
3913 }
3914 
3915 // Not needed on AIX.
3916 // int os::Aix::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) {
3917 // }
3918 
3919 ////////////////////////////////////////////////////////////////////////////////
3920 // debug support
3921 
3922 static address same_page(address x, address y) {
3923   intptr_t page_bits = -os::vm_page_size();
3924   if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
3925     return x;
3926   else if (x > y)
3927     return (address)(intptr_t(y) | ~page_bits) + 1;
3928   else
3929     return (address)(intptr_t(y) & page_bits);
3930 }
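
// For example, assuming a 4K page size:
//   same_page((address)0x1080, (address)0x10F0) == (address)0x1080  // same page: return x
//   same_page((address)0x3000, (address)0x1234) == (address)0x2000  // x above y: round y up
//   same_page((address)0x1000, (address)0x3456) == (address)0x3000  // x below y: round y down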
3931 
3932 bool os::find(address addr, outputStream* st) {
3933 
3934   st->print(PTR_FORMAT ": ", addr);
3935 
3936   const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
3937   if (lib) {
3938     lib->print(st);
3939     return true;
3940   } else {
3941     lib = LoadedLibraries::find_for_data_address(addr);
3942     if (lib) {
3943       lib->print(st);
3944       return true;
3945     } else {
3946       st->print_cr("(outside any module)");
3947     }
3948   }
3949 
3950   return false;
3951 }
3952 
3953 ////////////////////////////////////////////////////////////////////////////////
3954 // misc
3955 
3956 // This does not do anything on Aix. This is basically a hook for being
3957 // able to use structured exception handling (thread-local exception filters)
3958 // on, e.g., Win32.
3959 void
3960 os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
3961                          JavaCallArguments* args, Thread* thread) {
3962   f(value, method, args, thread);
3963 }
3964 
3965 void os::print_statistics() {
3966 }
3967 
3968 int os::message_box(const char* title, const char* message) {
3969   int i;
3970   fdStream err(defaultStream::error_fd());
3971   for (i = 0; i < 78; i++) err.print_raw("=");
3972   err.cr();
3973   err.print_raw_cr(title);
3974   for (i = 0; i < 78; i++) err.print_raw("-");
3975   err.cr();
3976   err.print_raw_cr(message);
3977   for (i = 0; i < 78; i++) err.print_raw("=");
3978   err.cr();
3979 
3980   char buf[16];
3981   // Prevent process from exiting upon "read error" without consuming all CPU
3982   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3983 
3984   return buf[0] == 'y' || buf[0] == 'Y';
3985 }
3986 
3987 int os::stat(const char *path, struct stat *sbuf) {
3988   char pathbuf[MAX_PATH];
3989   if (strlen(path) > MAX_PATH - 1) {
3990     errno = ENAMETOOLONG;
3991     return -1;
3992   }
3993   os::native_path(strcpy(pathbuf, path));
3994   return ::stat(pathbuf, sbuf);
3995 }
3996 
3997 bool os::check_heap(bool force) {
3998   return true;
3999 }
4000 
4001 // Is a (classpath) directory empty?
4002 bool os::dir_is_empty(const char* path) {
4003   DIR *dir = NULL;
4004   struct dirent *ptr;
4005 
4006   dir = opendir(path);
4007   if (dir == NULL) return true;
4008 
4009   /* Scan the directory */
4010   bool result = true;
4012   while (result && (ptr = ::readdir(dir)) != NULL) {
4013     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4014       result = false;
4015     }
4016   }
4017   closedir(dir);
4018   return result;
4019 }
4020 
4021 // This code originates from JDK's sysOpen and open64_w
4022 // from src/solaris/hpi/src/system_md.c
4023 
4024 int os::open(const char *path, int oflag, int mode) {
4025 
4026   if (strlen(path) > MAX_PATH - 1) {
4027     errno = ENAMETOOLONG;
4028     return -1;
4029   }
4030   int fd;
4031 
4032   fd = ::open64(path, oflag, mode);
4033   if (fd == -1) return -1;
4034 
4035   // If the open succeeded, the file might still be a directory.
4036   {
4037     struct stat64 buf64;
4038     int ret = ::fstat64(fd, &buf64);
4039     int st_mode = buf64.st_mode;
4040 
4041     if (ret != -1) {
4042       if ((st_mode & S_IFMT) == S_IFDIR) {
4043         errno = EISDIR;
4044         ::close(fd);
4045         return -1;
4046       }
4047     } else {
4048       ::close(fd);
4049       return -1;
4050     }
4051   }
4052 
4053   // All file descriptors that are opened in the JVM and not
4054   // specifically destined for a subprocess should have the
4055   // close-on-exec flag set. If we don't set it, then careless 3rd
4056   // party native code might fork and exec without closing all
4057   // appropriate file descriptors (e.g. as we do in closeDescriptors in
4058   // UNIXProcess.c), and this in turn might:
4059   //
4060   // - cause end-of-file to fail to be detected on some file
4061   //   descriptors, resulting in mysterious hangs, or
4062   //
4063   // - might cause an fopen in the subprocess to fail on a system
4064   //   suffering from bug 1085341.
4065   //
4066   // (Yes, the default setting of the close-on-exec flag is a Unix
4067   // design flaw.)
4068   //
4069   // See:
4070   // 1085341: 32-bit stdio routines should support file descriptors >255
4071   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4072   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4073 #ifdef FD_CLOEXEC
4074   {
4075     int flags = ::fcntl(fd, F_GETFD);
4076     if (flags != -1)
4077       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4078   }
4079 #endif
4080 
4081   return fd;
4082 }
4083 
4084 
4085 // create binary file, rewriting existing file if required
4086 int os::create_binary_file(const char* path, bool rewrite_existing) {
4087   int oflags = O_WRONLY | O_CREAT;
4088   if (!rewrite_existing) {
4089     oflags |= O_EXCL;
4090   }
4091   return ::open64(path, oflags, S_IREAD | S_IWRITE);
4092 }
4093 
4094 // return current position of file pointer
4095 jlong os::current_file_offset(int fd) {
4096   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4097 }
4098 
4099 // move file pointer to the specified offset
4100 jlong os::seek_to_file_offset(int fd, jlong offset) {
4101   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4102 }
4103 
4104 // This code originates from JDK's sysAvailable
4105 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
4106 
4107 int os::available(int fd, jlong *bytes) {
4108   jlong cur, end;
4109   int mode;
4110   struct stat64 buf64;
4111 
4112   if (::fstat64(fd, &buf64) >= 0) {
4113     mode = buf64.st_mode;
4114     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4115       // XXX: is the following call interruptible? If so, this might
4116       // need to go through the INTERRUPT_IO() wrapper as for other
4117       // blocking, interruptible calls in this file.
4118       int n;
4119       if (::ioctl(fd, FIONREAD, &n) >= 0) {
4120         *bytes = n;
4121         return 1;
4122       }
4123     }
4124   }
4125   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4126     return 0;
4127   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4128     return 0;
4129   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4130     return 0;
4131   }
4132   *bytes = end - cur;
4133   return 1;
4134 }
4135 
4136 // Map a block of memory.
4137 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4138                         char *addr, size_t bytes, bool read_only,
4139                         bool allow_exec) {
4140   Unimplemented();
4141   return NULL;
4142 }
4143 
4144 
4145 // Remap a block of memory.
4146 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4147                           char *addr, size_t bytes, bool read_only,
4148                           bool allow_exec) {
4149   // same as map_memory() on this OS
4150   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4151                         allow_exec);
4152 }
4153 
4154 // Unmap a block of memory.
4155 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4156   return munmap(addr, bytes) == 0;
4157 }
4158 
4159 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4160 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4161 // of a thread.
4162 //
4163 // current_thread_cpu_time() and thread_cpu_time(Thread*) return
4164 // the fast estimate available on the platform.
4165 
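// Illustrative use (a sketch): measuring the CPU time consumed by the current
// thread. Times are reported in nanoseconds (see thread_cpu_time_unchecked()
// below for the conversion).
//
//   jlong t0 = os::current_thread_cpu_time();   // user + sys
//   // ... do some work ...
//   jlong spent_ns = os::current_thread_cpu_time() - t0;
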
4166 jlong os::current_thread_cpu_time() {
4167   // return user + sys since the cost is the same
4168   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
4169   assert(n >= 0, "negative CPU time");
4170   return n;
4171 }
4172 
4173 jlong os::thread_cpu_time(Thread* thread) {
4174   // consistent with what current_thread_cpu_time() returns
4175   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4176   assert(n >= 0, "negative CPU time");
4177   return n;
4178 }
4179 
4180 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4181   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4182   assert(n >= 0, "negative CPU time");
4183   return n;
4184 }
4185 
4186 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4187   bool error = false;
4188 
4189   jlong sys_time = 0;
4190   jlong user_time = 0;
4191 
4192   // Reimplemented using getthrds64().
4193   //
4194   // It goes like this:
4195   // For the thread in question, get the kernel thread id. Then get the
4196   // kernel thread statistics using that id.
4197   //
4198   // Of course this only works when no pthread scheduling is used,
4199   // i.e. there is a 1:1 relationship between user threads and kernel threads.
4200   // On AIX, see the AIXTHREAD_SCOPE variable.
4201 
4202   pthread_t pthtid = thread->osthread()->pthread_id();
4203 
4204   // retrieve kernel thread id for the pthread:
4205   tid64_t tid = 0;
4206   struct __pthrdsinfo pinfo;
4207   // I just love those otherworldly IBM APIs which force me to hand down
4208   // dummy buffers for stuff I don't care about...
4209   char dummy[1];
4210   int dummy_size = sizeof(dummy);
4211   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4212                           dummy, &dummy_size) == 0) {
4213     tid = pinfo.__pi_tid;
4214   } else {
4215     tty->print_cr("pthread_getthrds_np failed.");
4216     error = true;
4217   }
4218 
4219   // retrieve kernel timing info for that kernel thread
4220   if (!error) {
4221     struct thrdentry64 thrdentry;
4222     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4223       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4224       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4225     } else {
4226       tty->print_cr("getthrds64 failed.");
4227       error = true;
4228     }
4229   }
4230 
4231   if (p_sys_time) {
4232     *p_sys_time = sys_time;
4233   }
4234 
4235   if (p_user_time) {
4236     *p_user_time = user_time;
4237   }
4238 
4239   if (error) {
4240     return false;
4241   }
4242 
4243   return true;
4244 }
4245 
4246 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4247   jlong sys_time;
4248   jlong user_time;
4249 
4250   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4251     return -1;
4252   }
4253 
4254   return user_sys_cpu_time ? sys_time + user_time : user_time;
4255 }
4256 
4257 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4258   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4259   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4260   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4261   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4262 }
4263 
4264 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4265   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4266   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4267   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4268   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4269 }
4270 
4271 bool os::is_thread_cpu_time_supported() {
4272   return true;
4273 }
4274 
4275 // System loadavg support. Returns -1 if load average cannot be obtained.
4276 // For now just return the system wide load average (no processor sets).
4277 int os::loadavg(double values[], int nelem) {
4278 
4279   // Implemented using libperfstat on AIX.
4280 
4281   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4282   guarantee(values, "argument error");
4283 
4284   if (os::Aix::on_pase()) {
4285     Unimplemented();
4286     return -1;
4287   } else {
4288     // AIX: use libperfstat
4289     //
4290     // See also:
4291     // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4292     // /usr/include/libperfstat.h:
4293 
4294     // Use get_cpuinfo, which is already AIX-version independent.
4295     os::Aix::cpuinfo_t ci;
4296     if (os::Aix::get_cpuinfo(&ci)) {
4297       for (int i = 0; i < nelem; i++) {
4298         values[i] = ci.loadavg[i];
4299       }
4300     } else {
4301       return -1;
4302     }
4303     return nelem;
4304   }
4305 }
4306 
4307 void os::pause() {
4308   char filename[MAX_PATH];
4309   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4310     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4311   } else {
4312     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4313   }
4314 
4315   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4316   if (fd != -1) {
4317     struct stat buf;
4318     ::close(fd);
4319     while (::stat(filename, &buf) == 0) {
4320       (void)::poll(NULL, 0, 100);
4321     }
4322   } else {
4323     jio_fprintf(stderr,
4324       "Could not open pause file '%s', continuing immediately.\n", filename);
4325   }
4326 }
4327 
4328 bool os::Aix::is_primordial_thread() {
4329   if (pthread_self() == (pthread_t)1) {
4330     return true;
4331   } else {
4332     return false;
4333   }
4334 }
4335 
4336 // OS recognition (PASE/AIX, OS level). Call this before calling any
4337 // of the static functions Aix::on_pase() or Aix::os_version().
4338 void os::Aix::initialize_os_info() {
4339 
4340   assert(_on_pase == -1 && _os_version == -1, "already called.");
4341 
4342   struct utsname uts;
4343   memset(&uts, 0, sizeof(uts));
4344   strcpy(uts.sysname, "?");
4345   if (::uname(&uts) == -1) {
4346     fprintf(stderr, "uname failed (%d)\n", errno);
4347     guarantee(0, "Could not determine whether we run on AIX or PASE");
4348   } else {
4349     if (Verbose) {
4350       fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4351               "node \"%s\" machine \"%s\"\n",
4352               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4353     }
4354     const int major = atoi(uts.version);
4355     assert(major > 0, "invalid OS version");
4356     const int minor = atoi(uts.release);
4357     assert(minor > 0, "invalid OS release");
4358     _os_version = (major << 8) | minor;
4359     if (strcmp(uts.sysname, "OS400") == 0) {
4360       Unimplemented();
4361     } else if (strcmp(uts.sysname, "AIX") == 0) {
4362       // We run on AIX. We do not support versions older than AIX 5.3.
4363       _on_pase = 0;
4364       if (_os_version < 0x0503) {
4365         fprintf(stderr, "AIX release older than AIX 5.3 not supported.\n");
4366         assert(false, "AIX release too old.");
4367       } else {
4368         if (Verbose) {
4369           fprintf(stderr, "We run on AIX %d.%d\n", major, minor);
4370         }
4371       }
4372     } else {
4373       assert(false, "unknown OS");
4374     }
4375   }
4376 
4377   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4378 
4379 } // end: os::Aix::initialize_os_info()
4380 
4381 // Scan environment for important settings which might affect the VM.
4382 // Trace out settings. Warn about invalid settings and/or correct them.
4383 //
4384 // Must run after os::Aix::initialize_os_info().
4385 void os::Aix::scan_environment() {
4386 
4387   char* p;
4388   int rc;
4389 
4390   // Warn explicitly if EXTSHM=ON is used. That switch changes how
4391   // System V shared memory behaves. One effect is that the page size of
4392   // shared memory cannot be changed dynamically, effectively preventing
4393   // large pages from working.
4394   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4395   // recommendation is (in OSS notes) to switch it off.
4396   p = ::getenv("EXTSHM");
4397   if (Verbose) {
4398     fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4399   }
4400   if (p && strcmp(p, "ON") == 0) {
4401     fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4402     _extshm = 1;
4403   } else {
4404     _extshm = 0;
4405   }
4406 
4407   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4408   // Not tested, not supported.
4409   //
4410   // Note that it might be worth the trouble to test and to require it, if only to
4411   // get useful return codes for mprotect.
4412   //
4413   // Note: Setting XPG_SUS_ENV in the process is too late. It must be set earlier
4414   // (before exec()? before loading libjvm? ...).
4415   p = ::getenv("XPG_SUS_ENV");
4416   if (Verbose) {
4417     fprintf(stderr, "XPG_SUS_ENV=%s.\n", p ? p : "<unset>");
4418   }
4419   if (p && strcmp(p, "ON") == 0) {
4420     _xpg_sus_mode = 1;
4421     fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON\n");
4422     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4423     // clobber address ranges. If we ever want to support that, we have to do some
4424     // testing first.
4425     guarantee(false, "XPG_SUS_ENV=ON not supported");
4426   } else {
4427     _xpg_sus_mode = 0;
4428   }
4429 
4430   // Switch off AIX internal (pthread) guard pages. This has
4431   // immediate effect for any pthread_create calls which follow.
4432   p = ::getenv("AIXTHREAD_GUARDPAGES");
4433   if (Verbose) {
4434     fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.\n", p ? p : "<unset>");
4435     fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.\n");
4436   }
4437   rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4438   guarantee(rc == 0, "");
4439 
4440 } // end: os::Aix::scan_environment()
4441 
4442 // PASE: initialize the libo4 library (AS400 PASE porting library).
4443 void os::Aix::initialize_libo4() {
4444   Unimplemented();
4445 }
4446 
4447 // AIX: initialize the libperfstat library (we load this dynamically
4448 // because it is only available on AIX).
4449 void os::Aix::initialize_libperfstat() {
4450 
4451   assert(os::Aix::on_aix(), "AIX only");
4452 
4453   if (!libperfstat::init()) {
4454     fprintf(stderr, "libperfstat initialization failed.\n");
4455     assert(false, "libperfstat initialization failed");
4456   } else {
4457     if (Verbose) {
4458       fprintf(stderr, "libperfstat initialized.\n");
4459     }
4460   }
4461 } // end: os::Aix::initialize_libperfstat
4462 
4463 /////////////////////////////////////////////////////////////////////////////
4464 // thread stack
4465 
4466 // function to query the current stack size using pthread_getthrds_np
4467 //
4468 // ! do not change anything here unless you know what you are doing !
4469 static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4470 
4471   // This only works when invoked on a pthread. As we agreed not to use
4472   // primordial threads anyway, I assert this here.
4473   guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4474 
4475   // information about this api can be found (a) in the pthread.h header and
4476   // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4477   //
4478   // The use of this API to find out the current stack is kind of undefined.
4479   // But after a lot of trying and asking IBM about it, I concluded that it is safe
4480   // enough for cases where I let the pthread library create its stacks. For cases
4481   // where I create my own stack and pass it to pthread_create, it seems not to
4482   // work (the returned stack size in that case is 0).
4483 
4484   pthread_t tid = pthread_self();
4485   struct __pthrdsinfo pinfo;
4486   char dummy[1]; // we only need this to satisfy the api and to not get E
4487   int dummy_size = sizeof(dummy);
4488 
4489   memset(&pinfo, 0, sizeof(pinfo));
4490 
4491   const int rc = pthread_getthrds_np (&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4492                                       sizeof(pinfo), dummy, &dummy_size);
4493 
4494   if (rc != 0) {
4495     fprintf(stderr, "pthread_getthrds_np failed (%d)\n", rc);
4496     guarantee(0, "pthread_getthrds_np failed");
4497   }
4498 
4499   guarantee(pinfo.__pi_stackend, "returned stack base invalid");
4500 
4501   // The following can happen when invoking pthread_getthrds_np on a pthread running on a user-provided stack
4502   // (when handing down a stack to pthread_create, see pthread_attr_setstackaddr).
4503   // Not sure what to do here - I feel inclined to forbid this use case completely.
4504   guarantee(pinfo.__pi_stacksize, "returned stack size invalid");
4505 
4506   // On AIX, stacks are not necessarily page aligned so round the base and size accordingly
4507   if (p_stack_base) {
4508     (*p_stack_base) = (address) align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size());
4509   }
4510 
4511   if (p_stack_size) {
4512     (*p_stack_size) = pinfo.__pi_stacksize - os::Aix::stack_page_size();
4513   }
4514 
4515 #ifndef PRODUCT
4516   if (Verbose) {
4517     fprintf(stderr,
4518             "query_stack_dimensions() -> real stack_base=" INTPTR_FORMAT ", real stack_addr=" INTPTR_FORMAT
4519             ", real stack_size=" INTPTR_FORMAT
4520             ", stack_base=" INTPTR_FORMAT ", stack_size=" INTPTR_FORMAT "\n",
4521             (intptr_t)pinfo.__pi_stackend, (intptr_t)pinfo.__pi_stackaddr, pinfo.__pi_stacksize,
4522             (intptr_t)align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size()),
4523             pinfo.__pi_stacksize - os::Aix::stack_page_size());
4524   }
4525 #endif
4526 
4527 } // end query_stack_dimensions
4528 
4529 // get the current stack base from the OS (actually, the pthread library)
4530 address os::current_stack_base() {
4531   address p;
4532   query_stack_dimensions(&p, 0);
4533   return p;
4534 }
4535 
4536 // get the current stack size from the OS (actually, the pthread library)
4537 size_t os::current_stack_size() {
4538   size_t s;
4539   query_stack_dimensions(0, &s);
4540   return s;
4541 }
4542 
4543 // Refer to the comments in os_solaris.cpp park-unpark.
4544 //
4545 // Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4546 // hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4547 // For specifics regarding the bug see GLIBC BUGID 261237 :
4548 //    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4549 // Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4550 // will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4551 // is used. (The simple C test-case provided in the GLIBC bug report manifests the
4552 // hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4553 // and monitorenter when we're using 1-0 locking. All those operations may result in
4554 // calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4555 // of libpthread avoids the problem, but isn't practical.
4556 //
4557 // Possible remedies:
4558 //
4559 // 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
4560 //      This is palliative and probabilistic, however. If the thread is preempted
4561 //      between the call to compute_abstime() and pthread_cond_timedwait(), more
4562 //      than the minimum period may have passed, and the abstime may be stale (in the
4563 //      past), resulting in a hang. Using this technique reduces the odds of a hang
4564 //      but the JVM is still vulnerable, particularly on heavily loaded systems.
4565 //
4566 // 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4567 //      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
4568 //      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4569 //      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
4570 //      thread.
4571 //
4572 // 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
4573 //      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
4574 //      a timeout request to the chron thread and then blocking via pthread_cond_wait().
4575 //      This also works well. In fact it avoids kernel-level scalability impediments
4576 //      on certain platforms that don't handle lots of active pthread_cond_timedwait()
4577 //      timers in a graceful fashion.
4578 //
4579 // 4.   When the abstime value is in the past it appears that control returns
4580 //      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4581 //      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
4582 //      can avoid the problem by reinitializing the condvar -- by cond_destroy()
4583 //      followed by cond_init() -- after all calls to pthread_cond_timedwait().
4584 //      It may be possible to avoid reinitialization by checking the return
4585 //      value from pthread_cond_timedwait(). In addition to reinitializing the
4586 //      condvar we must establish the invariant that cond_signal() is only called
4587 //      within critical sections protected by the adjunct mutex. This prevents
4588 //      cond_signal() from "seeing" a condvar that's in the midst of being
4589 //      reinitialized or that is corrupt. Sadly, this invariant obviates the
4590 //      desirable signal-after-unlock optimization that avoids futile context switching.
4591 //
4592 //      I'm also concerned that some versions of NPTL might allocate an auxiliary
4593 //      structure when a condvar is used or initialized. cond_destroy() would
4594 //      release the helper structure. Our reinitialize-after-timedwait fix
4595 //      would put excessive stress on malloc/free and the locks protecting the C-heap.
4596 //
4597 // We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
4598 // It may be possible to refine (4) by checking the kernel and NPTL versions
4599 // and only enabling the work-around for vulnerable environments.
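//
// The essence of work-around (4), sketched here for illustration only -- the
// real logic appears below in Parker::park(); 'cond', 'mutex' and 'abst' are
// hypothetical, already-initialized objects:
//
//   int status = pthread_cond_timedwait(&cond, &mutex, &abst);
//   if (status != 0 && WorkAroundNPTLTimedWaitHang) {
//     // The condvar may be corrupt after a timedwait with a stale abstime;
//     // re-create it while still holding the adjunct mutex.
//     pthread_cond_destroy(&cond);
//     pthread_cond_init(&cond, NULL);
//   }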
4600 
4601 // utility to compute the abstime argument to timedwait:
4602 // millis is the relative timeout time
4603 // abstime will be the absolute timeout time
4604 // TODO: replace compute_abstime() with unpackTime()
4605 
4606 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4607   if (millis < 0) millis = 0;
4608   struct timeval now;
4609   int status = gettimeofday(&now, NULL);
4610   assert(status == 0, "gettimeofday");
4611   jlong seconds = millis / 1000;
4612   millis %= 1000;
4613   if (seconds > 50000000) { // see man cond_timedwait(3T)
4614     seconds = 50000000;
4615   }
4616   abstime->tv_sec = now.tv_sec  + seconds;
4617   long       usec = now.tv_usec + millis * 1000;
4618   if (usec >= 1000000) {
4619     abstime->tv_sec += 1;
4620     usec -= 1000000;
4621   }
4622   abstime->tv_nsec = usec * 1000;
4623   return abstime;
4624 }
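
// A minimal usage sketch of compute_abstime() (illustrative only; 'cond' and
// 'mutex' are hypothetical, already-initialized pthread objects):
//
//   struct timespec abst;
//   compute_abstime(&abst, 250);   // absolute deadline ~250 ms from now
//   pthread_mutex_lock(&mutex);
//   int status = pthread_cond_timedwait(&cond, &mutex, &abst);
//   pthread_mutex_unlock(&mutex);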
4625 
4626 
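// For orientation -- a summary of the state encoding used by the code below
// (the usual HotSpot convention): _Event acts as a restricted-range semaphore.
//   -1 : a thread is blocked waiting on the event
//    0 : neutral - no waiter, no pending signal
//    1 : signaled - a permit is available; the next park() returns at once
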
4627 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4628 // Conceptually TryPark() should be equivalent to park(0).
4629 
4630 int os::PlatformEvent::TryPark() {
4631   for (;;) {
4632     const int v = _Event;
4633     guarantee ((v == 0) || (v == 1), "invariant");
4634     if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4635   }
4636 }
4637 
4638 void os::PlatformEvent::park() {       // AKA "down()"
4639   // Invariant: Only the thread associated with the Event/PlatformEvent
4640   // may call park().
4641   // TODO: assert that _Assoc != NULL or _Assoc == Self
4642   int v;
4643   for (;;) {
4644     v = _Event;
4645     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4646   }
4647   guarantee (v >= 0, "invariant");
4648   if (v == 0) {
4649     // Do this the hard way by blocking ...
4650     int status = pthread_mutex_lock(_mutex);
4651     assert_status(status == 0, status, "mutex_lock");
4652     guarantee (_nParked == 0, "invariant");
4653     ++ _nParked;
4654     while (_Event < 0) {
4655       status = pthread_cond_wait(_cond, _mutex);
4656       assert_status(status == 0 || status == ETIMEDOUT, status, "cond_wait");
4657     }
4658     -- _nParked;
4659 
4660     // In theory we could move the ST of 0 into _Event past the unlock(),
4661     // but then we'd need a MEMBAR after the ST.
4662     _Event = 0;
4663     status = pthread_mutex_unlock(_mutex);
4664     assert_status(status == 0, status, "mutex_unlock");
4665   }
4666   guarantee (_Event >= 0, "invariant");
4667 }
4668 
4669 int os::PlatformEvent::park(jlong millis) {
4670   guarantee (_nParked == 0, "invariant");
4671 
4672   int v;
4673   for (;;) {
4674     v = _Event;
4675     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4676   }
4677   guarantee (v >= 0, "invariant");
4678   if (v != 0) return OS_OK;
4679 
4680   // We do this the hard way, by blocking the thread.
4681   // Consider enforcing a minimum timeout value.
4682   struct timespec abst;
4683   compute_abstime(&abst, millis);
4684 
4685   int ret = OS_TIMEOUT;
4686   int status = pthread_mutex_lock(_mutex);
4687   assert_status(status == 0, status, "mutex_lock");
4688   guarantee (_nParked == 0, "invariant");
4689   ++_nParked;
4690 
4691   // Object.wait(timo) will return because of
4692   // (a) notification
4693   // (b) timeout
4694   // (c) thread.interrupt
4695   //
4696   // Thread.interrupt and object.notify{All} both call Event::set.
4697   // That is, we treat thread.interrupt as a special case of notification.
4698   // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4699   // We assume all ETIME returns are valid.
4700   //
4701   // TODO: properly differentiate simultaneous notify+interrupt.
4702   // In that case, we should propagate the notify to another waiter.
4703 
4704   while (_Event < 0) {
4705     status = pthread_cond_timedwait(_cond, _mutex, &abst);
4706     assert_status(status == 0 || status == ETIMEDOUT,
4707           status, "cond_timedwait");
4708     if (!FilterSpuriousWakeups) break;         // previous semantics
4709     if (status == ETIMEDOUT) break;
4710     // We consume and ignore EINTR and spurious wakeups.
4711   }
4712   --_nParked;
4713   if (_Event >= 0) {
4714      ret = OS_OK;
4715   }
4716   _Event = 0;
4717   status = pthread_mutex_unlock(_mutex);
4718   assert_status(status == 0, status, "mutex_unlock");
4719   assert (_nParked == 0, "invariant");
4720   return ret;
4721 }
4722 
4723 void os::PlatformEvent::unpark() {
4724   int v, AnyWaiters;
4725   for (;;) {
4726     v = _Event;
4727     if (v > 0) {
4728       // The LD of _Event could have been reordered or satisfied
4729       // by a read-aside from this processor's write buffer.
4730       // To avoid problems execute a barrier and then
4731       // ratify the value.
4732       OrderAccess::fence();
4733       if (_Event == v) return;
4734       continue;
4735     }
4736     if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
4737   }
4738   if (v < 0) {
4739     // Wait for the thread associated with the event to vacate
4740     int status = pthread_mutex_lock(_mutex);
4741     assert_status(status == 0, status, "mutex_lock");
4742     AnyWaiters = _nParked;
4743 
4744     if (AnyWaiters != 0) {
4745       // We intentionally signal *before* dropping the lock: on this
4746       // path the mutex should be held for pthread_cond_signal(_cond).
4747       status = pthread_cond_signal(_cond);
4748       assert_status(status == 0, status, "cond_signal");
4749     }
4751     status = pthread_mutex_unlock(_mutex);
4752     assert_status(status == 0, status, "mutex_unlock");
4753   }
4754 
4755   // Note: signalling *after* dropping the lock (as is done for "immortal"
4756   // events on some platforms) is also safe and avoids a common class of
4757   // futile wakeups. In rare circumstances it can cause a thread to return
4758   // prematurely from cond_{timed}wait(), but the spurious wakeup is benign:
4759   // the victim simply re-tests the condition and re-parks itself.
4760 }
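
// Typical pairing (an illustrative sketch; 'ev' is a hypothetical
// PlatformEvent owned by the blocking thread and 'ready' a flag the two
// threads agree on):
//
//   // blocking thread                 // waking thread
//   while (!ready) {                   ready = true;
//     ev->park();                      ev->unpark();
//   }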
4761 
4762 
4763 // JSR166
4764 // -------------------------------------------------------
4765 
4766 //
4767 // The Solaris and Linux implementations of park/unpark are fairly
4768 // conservative for now, but can be improved. They currently use a
4769 // mutex/condvar pair, plus a count.
4770 // Park decrements count if > 0, else does a condvar wait. Unpark
4771 // sets count to 1 and signals condvar. Only one thread ever waits
4772 // on the condvar. Contention seen when trying to park implies that someone
4773 // is unparking you, so don't wait. And spurious returns are fine, so there
4774 // is no need to track notifications.
4775 //
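// In pseudocode, the protocol described above (a sketch only, not the exact
// implementation that follows):
//
//   park():                            unpark():
//     if (_counter > 0) {                lock(_mutex);
//       _counter = 0; return;            _counter = 1;
//     }                                  signal(_cond);
//     lock(_mutex);                      unlock(_mutex);
//     while (_counter == 0)
//       wait(_cond, _mutex);   // spurious returns are tolerated
//     _counter = 0;
//     unlock(_mutex);
//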
4776 
4777 #define MAX_SECS 100000000
4778 //
4779 // This code is common to Linux and Solaris and will be moved to a
4780 // common place in Dolphin.
4781 //
4782 // The passed in time value is either a relative time in nanoseconds
4783 // or an absolute time in milliseconds. Either way it has to be unpacked
4784 // into suitable seconds and nanoseconds components and stored in the
4785 // given timespec structure.
4786 // Since the given time is a 64-bit value and the time_t used in the timespec
4787 // is only a signed 32-bit value (except on 64-bit Linux), we have to watch for
4788 // overflow if timeouts far in the future are given. Furthermore, on Solaris
4789 // versions prior to 10 there is a restriction (see cond_timedwait) that the
4790 // specified number of seconds, in abstime, is less than current_time +
4791 // 100,000,000. As it will be 28 years before "now + 100000000" overflows, we
4792 // can ignore overflow and just impose a hard limit on seconds using the value
4793 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
4794 // years from "now".
4795 //
4796 
4797 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
4798   assert (time > 0, "unpackTime");
4799 
4800   struct timeval now;
4801   int status = gettimeofday(&now, NULL);
4802   assert(status == 0, "gettimeofday");
4803 
4804   time_t max_secs = now.tv_sec + MAX_SECS;
4805 
4806   if (isAbsolute) {
4807     jlong secs = time / 1000;
4808     if (secs > max_secs) {
4809       absTime->tv_sec = max_secs;
4810     }
4811     else {
4812       absTime->tv_sec = secs;
4813     }
4814     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4815   }
4816   else {
4817     jlong secs = time / NANOSECS_PER_SEC;
4818     if (secs >= MAX_SECS) {
4819       absTime->tv_sec = max_secs;
4820       absTime->tv_nsec = 0;
4821     }
4822     else {
4823       absTime->tv_sec = now.tv_sec + secs;
4824       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4825       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4826         absTime->tv_nsec -= NANOSECS_PER_SEC;
4827         ++absTime->tv_sec; // note: this must be <= max_secs
4828       }
4829     }
4830   }
4831   assert(absTime->tv_sec >= 0, "tv_sec < 0");
4832   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4833   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4834   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4835 }
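
// Worked example (illustrative): a relative timeout of 2.5 s arrives as
// time == 2,500,000,000 ns with isAbsolute == false. Then secs == 2, so
// tv_sec becomes now.tv_sec + 2 and tv_nsec becomes 500,000,000 +
// now.tv_usec * 1000, normalized back below NANOSECS_PER_SEC by carrying
// one second if necessary:
//
//   struct timespec deadline;
//   unpackTime(&deadline, false, 2500000000LL);   // relative, in nanoseconds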
4836 
4837 void Parker::park(bool isAbsolute, jlong time) {
4838   // Optional fast-path check:
4839   // Return immediately if a permit is available.
4840   if (_counter > 0) {
4841     _counter = 0;
4842     OrderAccess::fence();
4843     return;
4844   }
4845 
4846   Thread* thread = Thread::current();
4847   assert(thread->is_Java_thread(), "Must be JavaThread");
4848   JavaThread *jt = (JavaThread *)thread;
4849 
4850   // Optional optimization -- avoid state transitions if there's an interrupt pending.
4851   // Check interrupt before trying to wait
4852   if (Thread::is_interrupted(thread, false)) {
4853     return;
4854   }
4855 
4856   // Next, demultiplex/decode time arguments
4857   timespec absTime;
4858   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
4859     return;
4860   }
4861   if (time > 0) {
4862     unpackTime(&absTime, isAbsolute, time);
4863   }
4864 
4865 
4866   // Enter safepoint region
4867   // Beware of deadlocks such as 6317397.
4868   // The per-thread Parker:: mutex is a classic leaf-lock.
4869   // In particular a thread must never block on the Threads_lock while
4870 // holding the Parker:: mutex. If safepoints are pending, both the
4871 // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
4872   ThreadBlockInVM tbivm(jt);
4873 
4874   // Don't wait if we cannot get the lock, since interference arises from
4875   // unblocking. Also, check for a pending interrupt before trying to wait.
4876   if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4877     return;
4878   }
4879 
4880   int status;
4881   if (_counter > 0) { // no wait needed
4882     _counter = 0;
4883     status = pthread_mutex_unlock(_mutex);
4884     assert (status == 0, "invariant");
4885     OrderAccess::fence();
4886     return;
4887   }
4888 
4889 #ifdef ASSERT
4890   // Don't catch signals while blocked; let the running threads have the signals.
4891   // (This allows a debugger to break into the running thread.)
4892   sigset_t oldsigs;
4893   sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
4894   pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4895 #endif
4896 
4897   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4898   jt->set_suspend_equivalent();
4899   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4900 
4901   if (time == 0) {
4902     status = pthread_cond_wait (_cond, _mutex);
4903   } else {
4904     status = pthread_cond_timedwait (_cond, _mutex, &absTime);
4905     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4906       pthread_cond_destroy (_cond);
4907       pthread_cond_init    (_cond, NULL);
4908     }
4909   }
4910   assert_status(status == 0 || status == EINTR ||
4911                 status == ETIME || status == ETIMEDOUT,
4912                 status, "cond_timedwait");
4913 
4914 #ifdef ASSERT
4915   pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4916 #endif
4917 
4918   _counter = 0;
4919   status = pthread_mutex_unlock(_mutex);
4920   assert_status(status == 0, status, "invariant");
4921   // If externally suspended while waiting, re-suspend
4922   if (jt->handle_special_suspend_equivalent_condition()) {
4923     jt->java_suspend_self();
4924   }
4925 
4926   OrderAccess::fence();
4927 }
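
// For reference (a rough mapping, not a definitive one):
// java.util.concurrent's LockSupport.parkNanos(n) reaches this code as
//
//   thread->parker()->park(false /* relative */, n);
//
// LockSupport.parkUntil(deadlineMillis) uses park(true, deadlineMillis), and
// LockSupport.unpark(t) ends up in Parker::unpark() below.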
4928 
4929 void Parker::unpark() {
4930   int s, status;
4931   status = pthread_mutex_lock(_mutex);
4932   assert (status == 0, "invariant");
4933   s = _counter;
4934   _counter = 1;
4935   if (s < 1) {
4936     if (WorkAroundNPTLTimedWaitHang) {
4937       status = pthread_cond_signal (_cond);
4938       assert (status == 0, "invariant");
4939       status = pthread_mutex_unlock(_mutex);
4940       assert (status == 0, "invariant");
4941     } else {
4942       status = pthread_mutex_unlock(_mutex);
4943       assert (status == 0, "invariant");
4944       status = pthread_cond_signal (_cond);
4945       assert (status == 0, "invariant");
4946     }
4947   } else {
4948     status = pthread_mutex_unlock(_mutex);
4949     assert (status == 0, "invariant");
4950   }
4951 }
4952 
4953 
4954 extern char** environ;
4955 
4956 // Run the specified command in a separate process. Return its exit value,
4957 // or -1 on failure (e.g. can't fork a new process).
4958 // Unlike system(), this function can be called from a signal handler. It
4959 // doesn't block SIGINT et al.
4960 int os::fork_and_exec(char* cmd) {
4961   char * argv[4] = {"sh", "-c", cmd, NULL};
4962 
4963   pid_t pid = fork();
4964 
4965   if (pid < 0) {
4966     // fork failed
4967     return -1;
4968 
4969   } else if (pid == 0) {
4970     // child process
4971 
4972     // try to be consistent with system(), which uses "/usr/bin/sh" on AIX
4973     execve("/usr/bin/sh", argv, environ);
4974 
4975     // execve failed
4976     _exit(-1);
4977 
4978   } else  {
4979     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4980     // care about the actual exit code, for now.
4981 
4982     int status;
4983 
4984     // Wait for the child process to exit.  This returns immediately if
4985     // the child has already exited.
4986     while (waitpid(pid, &status, 0) < 0) {
4987         switch (errno) {
4988         case ECHILD: return 0;
4989         case EINTR: break;
4990         default: return -1;
4991         }
4992     }
4993 
4994     if (WIFEXITED(status)) {
4995        // The child exited normally; get its exit code.
4996        return WEXITSTATUS(status);
4997     } else if (WIFSIGNALED(status)) {
4998        // The child exited because of a signal
4999        // The best value to return is 0x80 + signal number,
5000        // because that is what all Unix shells do, and because
5001        // it allows callers to distinguish between process exit and
5002        // process death by signal.
5003        return 0x80 + WTERMSIG(status);
5004     } else {
5005        // Unknown exit code; pass it through
5006        return status;
5007     }
5008   }
5009   // Not reached, but keeps some compilers from warning about a missing return.
5010   return -1;
5011 }
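
// A minimal usage sketch (illustrative; the command string is hypothetical):
//
//   int rc = os::fork_and_exec((char*) "ls /tmp");
//   if (rc == -1) {
//     // fork() or execve() failed
//   } else if (rc >= 0x80) {
//     // child most likely died from signal (rc - 0x80)
//   } else {
//     // rc is the child's normal exit code
//   }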
5012 
5013 // is_headless_jre()
5014 //
5015 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
5016 // in order to report whether we are running in a headless JRE.
5017 //
5018 // Since JDK8, xawt/libmawt.so has moved into the same directory
5019 // as libawt.so and been renamed libawt_xawt.so.
5020 bool os::is_headless_jre() {
5021   struct stat statbuf;
5022   char buf[MAXPATHLEN];
5023   char libmawtpath[MAXPATHLEN];
5024   const char *xawtstr  = "/xawt/libmawt.so";
5025   const char *new_xawtstr = "/libawt_xawt.so";
5026 
5027   char *p;
5028 
5029   // Get path to libjvm.so
5030   os::jvm_path(buf, sizeof(buf));
5031 
5032   // Get rid of libjvm.so
5033   p = strrchr(buf, '/');
5034   if (p == NULL) return false;
5035   else *p = '\0';
5036 
5037   // Get rid of client or server
5038   p = strrchr(buf, '/');
5039   if (p == NULL) return false;
5040   else *p = '\0';
5041 
5042   // check xawt/libmawt.so
5043   strcpy(libmawtpath, buf);
5044   strcat(libmawtpath, xawtstr);
5045   if (::stat(libmawtpath, &statbuf) == 0) return false;
5046 
5047   // check libawt_xawt.so
5048   strcpy(libmawtpath, buf);
5049   strcat(libmawtpath, new_xawtstr);
5050   if (::stat(libmawtpath, &statbuf) == 0) return false;
5051 
5052   return true;
5053 }
5054 
5055 // Get the default path to the core file
5056 // Returns the length of the string
5057 int os::get_core_path(char* buffer, size_t bufferSize) {
5058   const char* p = get_current_directory(buffer, bufferSize);
5059 
5060   if (p == NULL) {
5061     assert(false, "failed to get current directory");
5062     return 0;
5063   }
5064 
5065   return strlen(buffer);
5066 }
5067 
5068 #ifndef PRODUCT
5069 void TestReserveMemorySpecial_test() {
5070   // No tests available for this platform
5071 }
5072 #endif
--- EOF ---