
src/os/aix/vm/os_aix.cpp

rev 9449 : 8143125-Further Developments for AIX


  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 // According to the AIX OS doc #pragma alloca must be used
  27 // with C++ compiler before referencing the function alloca()
  28 #pragma alloca
  29 
  30 // no precompiled headers
  31 #include "classfile/classLoader.hpp"
  32 #include "classfile/systemDictionary.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "jvm_aix.h"

  39 #include "libperfstat_aix.hpp"
  40 #include "loadlib_aix.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "memory/filemap.hpp"
  43 #include "misc_aix.hpp"
  44 #include "mutex_aix.inline.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "os_aix.inline.hpp"
  47 #include "os_share_aix.hpp"
  48 #include "porting_aix.hpp"
  49 #include "prims/jniFastGetField.hpp"
  50 #include "prims/jvm.h"
  51 #include "prims/jvm_misc.hpp"
  52 #include "runtime/arguments.hpp"
  53 #include "runtime/atomic.inline.hpp"
  54 #include "runtime/extendedPC.hpp"
  55 #include "runtime/globals.hpp"
  56 #include "runtime/interfaceSupport.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/javaCalls.hpp"
  59 #include "runtime/mutexLocker.hpp"
  60 #include "runtime/objectMonitor.hpp"
  61 #include "runtime/orderAccess.inline.hpp"
  62 #include "runtime/os.hpp"
  63 #include "runtime/osThread.hpp"
  64 #include "runtime/perfMemory.hpp"
  65 #include "runtime/sharedRuntime.hpp"
  66 #include "runtime/statSampler.hpp"
  67 #include "runtime/stubRoutines.hpp"
  68 #include "runtime/thread.inline.hpp"
  69 #include "runtime/threadCritical.hpp"
  70 #include "runtime/timer.hpp"
  71 #include "runtime/vm_version.hpp"
  72 #include "services/attachListener.hpp"
  73 #include "services/runtimeService.hpp"
  74 #include "utilities/decoder.hpp"
  75 #include "utilities/defaultStream.hpp"

  76 #include "utilities/events.hpp"
  77 #include "utilities/growableArray.hpp"
  78 #include "utilities/vmError.hpp"
  79 
  80 // put OS-includes here (sorted alphabetically)
  81 #include <errno.h>
  82 #include <fcntl.h>
  83 #include <inttypes.h>
  84 #include <poll.h>
  85 #include <procinfo.h>
  86 #include <pthread.h>
  87 #include <pwd.h>
  88 #include <semaphore.h>
  89 #include <signal.h>
  90 #include <stdint.h>
  91 #include <stdio.h>
  92 #include <string.h>
  93 #include <unistd.h>
  94 #include <sys/ioctl.h>
  95 #include <sys/ipc.h>
  96 #include <sys/mman.h>
  97 #include <sys/resource.h>
  98 #include <sys/select.h>
  99 #include <sys/shm.h>
 100 #include <sys/socket.h>
 101 #include <sys/stat.h>
 102 #include <sys/sysinfo.h>
 103 #include <sys/systemcfg.h>
 104 #include <sys/time.h>
 105 #include <sys/times.h>
 106 #include <sys/types.h>
 107 #include <sys/utsname.h>
 108 #include <sys/vminfo.h>
 109 #include <sys/wait.h>
 110 
 111 // If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
 112 // getrusage() is prepared to handle the associated failure.
 113 #ifndef RUSAGE_THREAD
 114 #define RUSAGE_THREAD   (1)               /* only the calling thread */
 115 #endif
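     // Illustrative sketch only: one way a caller could use RUSAGE_THREAD and cope with the
     // failure anticipated above. The helper name and the fallback to RUSAGE_SELF are
     // assumptions made for this example, not HotSpot policy.
     static jlong example_thread_user_time_ns() {
       struct rusage usage;
       if (::getrusage(RUSAGE_THREAD, &usage) != 0) {
         // The OS level may not support per-thread accounting; fall back to process totals.
         if (::getrusage(RUSAGE_SELF, &usage) != 0) {
           return -1;
         }
       }
       // Convert user cpu time (seconds + microseconds) to nanoseconds.
       return jlong(usage.ru_utime.tv_sec) * (1000 * 1000 * 1000) +
              jlong(usage.ru_utime.tv_usec) * 1000;
     }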
 116 
 117 // PPC port
 118 static const uintx Use64KPagesThreshold       = 1*M;
 119 static const uintx MaxExpectedDataSegmentSize = SIZE_4G*2;
 120 
 121 // Add missing declarations (should be in procinfo.h but aren't until AIX 6.1).
 122 #if !defined(_AIXVERSION_610)
 123 extern "C" {
 124   int getthrds64(pid_t ProcessIdentifier,
 125                  struct thrdentry64* ThreadBuffer,
 126                  int ThreadSize,
 127                  tid64_t* IndexPointer,
 128                  int Count);
 129 }
 130 #endif
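     // Illustrative sketch only: the canonical getthrds64() iteration pattern, walking the
     // kernel thread table of the current process one entry at a time. The function name and
     // the trace output are made up for this example.
     static void example_walk_kernel_threads() {
       struct thrdentry64 entry;
       tid64_t cursor = 0;
       // getthrds64 returns the number of entries filled in and advances the cursor.
       while (getthrds64(getpid(), &entry, sizeof(entry), &cursor, 1) == 1) {
         trcVerbose("kernel thread id: %llu", (unsigned long long) entry.ti_tid);
       }
     }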
 131 
 132 #define MAX_PATH (2 * K)
 133 
 134 // for timer info max values which include all bits
 135 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 136 // for multipage initialization error analysis (in 'g_multipage_error')
 137 #define ERROR_MP_OS_TOO_OLD                          100
 138 #define ERROR_MP_EXTSHM_ACTIVE                       101
 139 #define ERROR_MP_VMGETINFO_FAILED                    102
 140 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 141 
 142 // The semantics in this file are thus that codeptr_t is a *real code ptr*.
 143 // This means that any function taking codeptr_t as arguments will assume
 144 // a real codeptr and won't handle function descriptors (eg getFuncName),
 145 // whereas functions taking address as args will deal with function
 146 // descriptors (eg os::dll_address_to_library_name).
 147 typedef unsigned int* codeptr_t;
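     // Illustrative sketch only: on AIX, a C/C++ function pointer designates a function
     // descriptor whose first slot holds the real entry point; a codeptr_t in the sense above
     // is that entry point. The helper name is made up; elsewhere this file refers to
     // resolve_function_descriptor_to_code_pointer() from the porting layer for the same conversion.
     static codeptr_t example_code_pointer_of(void (*fn)()) {
       // The first pointer-sized slot of the function descriptor is the code address.
       return *(codeptr_t*) fn;
     }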
 148 
 149 // Typedefs for stackslots, stack pointers, pointers to op codes.
 150 typedef unsigned long stackslot_t;
 151 typedef stackslot_t* stackptr_t;
 152 
 153 // Excerpts from systemcfg.h definitions newer than AIX 5.3.
 154 #ifndef PV_7
 155 #define PV_7 0x200000          /* Power PC 7 */
 156 #define PV_7_Compat 0x208000   /* Power PC 7 */
 157 #endif
 158 #ifndef PV_8
 159 #define PV_8 0x300000          /* Power PC 8 */
 160 #define PV_8_Compat 0x308000   /* Power PC 8 */
 161 #endif
 162 
 163 // Query dimensions of the stack of the calling thread.
 164 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

 165 
 166 // Function to check a given stack pointer against given stack limits.
 167 inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
 168   if (((uintptr_t)sp) & 0x7) {
 169     return false;
 170   }
 171   if (sp > stack_base) {
 172     return false;
 173   }
 174   if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
 175     return false;
 176   }
 177   return true;
 178 }
 179 
 180 // Returns true if function is a valid codepointer.
 181 inline bool is_valid_codepointer(codeptr_t p) {
 182   if (!p) {
 183     return false;
 184   }
 185   if (((uintptr_t)p) & 0x3) {
 186     return false;
 187   }
 188   if (!LoadedLibraries::find_for_text_address(p, NULL)) {
 189     return false;
 190   }
 191   return true;
 192 }
 193 
 194 // Macro to check a given stack pointer against given stack limits and to die if test fails.
 195 #define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
 196     guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
 197 }
 198 
 199 // Macro to check the current stack pointer against given stack limits.
 200 #define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
 201   address sp; \
 202   sp = os::current_stack_pointer(); \
 203   CHECK_STACK_PTR(sp, stack_base, stack_size); \
 204 }
 205 


 206 ////////////////////////////////////////////////////////////////////////////////
 207 // global variables (for a description see os_aix.hpp)
 208 
 209 julong    os::Aix::_physical_memory = 0;

 210 pthread_t os::Aix::_main_thread = ((pthread_t)0);
 211 int       os::Aix::_page_size = -1;


 212 int       os::Aix::_on_pase = -1;



 213 int       os::Aix::_os_version = -1;

 214 int       os::Aix::_stack_page_size = -1;


 215 int       os::Aix::_xpg_sus_mode = -1;


 216 int       os::Aix::_extshm = -1;
 217 int       os::Aix::_logical_cpus = -1;
 218 
 219 ////////////////////////////////////////////////////////////////////////////////
 220 // local variables
 221 
 222 static int      g_multipage_error  = -1;   // error analysis for multipage initialization
 223 static jlong    initial_time_count = 0;
 224 static int      clock_tics_per_sec = 100;
 225 static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
 226 static bool     check_signals      = true;
 227 static pid_t    _initial_pid       = 0;
 228 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 229 static sigset_t SR_sigset;
 230 



 231 // This describes the state of multipage support of the underlying
 232 // OS. Note that this is of no interest to the outside world and
 233 // therefore should not be defined in the AIX class.
 234 //
 235 // AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
 236 // latter two (16M "large" and 16G "huge" pages) require special
 237 // setup and are normally not available.
 238 //
 239 // AIX supports multiple page sizes per process, for:
 240 //  - Stack (of the primordial thread, so not relevant for us)
 241 //  - Data - data, bss, heap, for us also pthread stacks
 242 //  - Text - text code
 243 //  - shared memory
 244 //
 245 // Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
 246 // and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
 247 //
 248 // For shared memory, page size can be set dynamically via
 249 // shmctl(). Different shared memory regions can have different page
 250 // sizes.


 261   bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
 262   bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
 263   int error;                  // Error describing if something went wrong at multipage init.
 264 } g_multipage_support = {
 265   (size_t) -1,
 266   (size_t) -1,
 267   (size_t) -1,
 268   (size_t) -1,
 269   (size_t) -1,
 270   false, false,
 271   0
 272 };
 273 
 274 // We must not accidentally allocate memory close to the BRK - even if
 275 // that would work - because then we prevent the BRK segment from
 276 // growing which may result in a malloc OOM even though there is
 277 // enough memory. The problem only arises if we shmat() or mmap() at
 278 // a specific wish address, e.g. to place the heap in a
 279 // compressed-oops-friendly way.
 280 static bool is_close_to_brk(address a) {
 281   address a1 = (address) sbrk(0);
 282   if (a >= a1 && a < (a1 + MaxExpectedDataSegmentSize)) {

 283     return true;
 284   }
 285   return false;
 286 }
 287 
 288 julong os::available_memory() {
 289   return Aix::available_memory();
 290 }
 291 
 292 julong os::Aix::available_memory() {




 293   os::Aix::meminfo_t mi;
 294   if (os::Aix::get_meminfo(&mi)) {
 295     return mi.real_free;
 296   } else {
 297     return 0xFFFFFFFFFFFFFFFFLL;
 298   }
 299 }
 300 
 301 julong os::physical_memory() {
 302   return Aix::physical_memory();
 303 }
 304 
 305 // Return true if the process runs with special privileges (real and effective ids differ, e.g. setuid).
 306 
 307 bool os::have_special_privileges() {
 308   static bool init = false;
 309   static bool privileges = false;
 310   if (!init) {
 311     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 312     init = true;
 313   }
 314   return privileges;
 315 }
 316 
 317 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 318 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 319 static bool my_disclaim64(char* addr, size_t size) {
 320 
 321   if (size == 0) {
 322     return true;
 323   }
 324 
 325   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 326   const unsigned int maxDisclaimSize = 0x40000000;
 327 
 328   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 329   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 330 
 331   char* p = addr;
 332 
 333   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
 334     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 335       trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 336       return false;
 337     }
 338     p += maxDisclaimSize;
 339   }
 340 
 341   if (lastDisclaimSize > 0) {
 342     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 343       trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 344       return false;
 345     }
 346   }
 347 
 348   return true;
 349 }
 350 
 351 // Cpu architecture string
 352 #if defined(PPC32)
 353 static char cpu_arch[] = "ppc";
 354 #elif defined(PPC64)
 355 static char cpu_arch[] = "ppc64";
 356 #else
 357 #error Add appropriate cpu_arch setting
 358 #endif
 359 







 360 
 361 // Given an address, returns the size of the page backing that address.
 362 size_t os::Aix::query_pagesize(void* addr) {
 363 





 364   vm_page_info pi;
 365   pi.addr = (uint64_t)addr;
 366   if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
 367     return pi.pagesize;
 368   } else {
 369     fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
 370     assert(false, "vmgetinfo failed to retrieve page size");
 371     return SIZE_4K;
 372   }
 373 
 374 }
 375 
 376 // Returns the kernel thread id of the currently running thread.
 377 pid_t os::Aix::gettid() {
 378   return (pid_t) thread_self();
 379 }
 380 
 381 void os::Aix::initialize_system_info() {
 382 
 383   // Get the number of online (logical) cpus instead of configured.
 384   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
 385   assert(_processor_count > 0, "_processor_count must be > 0");
 386 
 387   // Retrieve total physical storage.
 388   os::Aix::meminfo_t mi;
 389   if (!os::Aix::get_meminfo(&mi)) {
 390     fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
 391     assert(false, "os::Aix::get_meminfo failed.");
 392   }
 393   _physical_memory = (julong) mi.real_total;
 394 }
 395 
 396 // Helper function for tracing page sizes.
 397 static const char* describe_pagesize(size_t pagesize) {
 398   switch (pagesize) {
 399     case SIZE_4K : return "4K";
 400     case SIZE_64K: return "64K";
 401     case SIZE_16M: return "16M";
 402     case SIZE_16G: return "16G";
 403     case -1:       return "not set";
 404     default:
 405       assert(false, "surprise");
 406       return "??";
 407   }
 408 }
 409 
 410 // Probe OS for multipage support.
 411 // Will fill the global g_multipage_support structure.
 412 // Must be called before calling os::large_page_init().
 413 static void query_multipage_support() {
 414 
 415   guarantee(g_multipage_support.pagesize == -1,
 416             "do not call twice");
 417 
 418   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
 419 
 420   // This really would surprise me.
 421   assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");
 422 
 423   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 424   // Default data page size is defined either by linker options (-bdatapsize)
 425   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 426   // default should be 4K.
 427   {
 428     void* p = ::malloc(SIZE_16M);
 429     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
 430     ::free(p);
 431   }
 432 
 433   // Query default shm page size (LDR_CNTRL SHMPSIZE).


 434   {
 435     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 436     guarantee(shmid != -1, "shmget failed");
 437     void* p = ::shmat(shmid, NULL, 0);
 438     ::shmctl(shmid, IPC_RMID, NULL);
 439     guarantee(p != (void*) -1, "shmat failed");
 440     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
 441     ::shmdt(p);
 442   }
 443 
 444   // Before querying the stack page size, make sure we are not running as primordial
 445   // thread (because primordial thread's stack may have different page size than
 446   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 447   // number of reasons so we may just as well guarantee it here.
 448   guarantee0(!os::Aix::is_primordial_thread());
 449 
 450   // Query pthread stack page size.

 451   {
 452     int dummy = 0;
 453     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
 454   }
 455 
 456   // Query default text page size (LDR_CNTRL TEXTPSIZE).
 457   /* PPC port: so far unused.
 458   {
 459     address any_function =
 460       (address) resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
 461     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
 462   }
 463   */
 464 
 465   // Now probe for support of 64K pages and 16M pages.
 466 
 467   // Before OS/400 V6R1, there is no support for pages other than 4K.
 468   if (os::Aix::on_pase_V5R4_or_older()) {
 469     Unimplemented();

 470     goto query_multipage_support_end;
 471   }
 472 
 473   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
 474   {
 475     const int MAX_PAGE_SIZES = 4;
 476     psize_t sizes[MAX_PAGE_SIZES];
 477     const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 478     if (num_psizes == -1) {
 479       trc("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
 480       trc("disabling multipage support.\n");
 481       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
 482       goto query_multipage_support_end;
 483     }
 484     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 485     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 486     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
 487     for (int i = 0; i < num_psizes; i ++) {
 488       trcVerbose(" %s ", describe_pagesize(sizes[i]));
 489     }
 490 
 491     // Can we use 64K, 16M pages?
 492     for (int i = 0; i < num_psizes; i ++) {
 493       const size_t pagesize = sizes[i];
 494       if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
 495         continue;
 496       }
 497       bool can_use = false;
 498       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
 499       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
 500         IPC_CREAT | S_IRUSR | S_IWUSR);
 501       guarantee0(shmid != -1); // Should always work.
 502       // Try to set pagesize.
 503       struct shmid_ds shm_buf = { 0 };
 504       shm_buf.shm_pagesize = pagesize;
 505       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
 506         const int en = errno;
 507         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 508         // PPC port trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
 509         // PPC port  MiscUtils::describe_errno(en));
 510       } else {
 511         // Attach and double-check the page size.
 512         void* p = ::shmat(shmid, NULL, 0);
 513         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 514         guarantee0(p != (void*) -1); // Should always work.
 515         const size_t real_pagesize = os::Aix::query_pagesize(p);
 516         if (real_pagesize != pagesize) {
 517           trcVerbose("real page size (0x%llX) differs.", real_pagesize);
 518         } else {
 519           can_use = true;
 520         }
 521         ::shmdt(p);
 522       }
 523       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
 524       if (pagesize == SIZE_64K) {
 525         g_multipage_support.can_use_64K_pages = can_use;
 526       } else if (pagesize == SIZE_16M) {
 527         g_multipage_support.can_use_16M_pages = can_use;
 528       }
 529     }
 530 
 531   } // end: check which pages can be used for shared memory
 532 
 533 query_multipage_support_end:
 534 
 535   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s\n",
 536       describe_pagesize(g_multipage_support.pagesize));
 537   trcVerbose("Data page size (C-Heap, bss, etc): %s\n",
 538       describe_pagesize(g_multipage_support.datapsize));
 539   trcVerbose("Text page size: %s\n",
 540       describe_pagesize(g_multipage_support.textpsize));
 541   trcVerbose("Thread stack page size (pthread): %s\n",
 542       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
 543   trcVerbose("Default shared memory page size: %s\n",
 544       describe_pagesize(g_multipage_support.shmpsize));
 545   trcVerbose("Can use 64K pages dynamically with shared memory: %s\n",
 546       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
 547   trcVerbose("Can use 16M pages dynamically with shared memory: %s\n",
 548       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
 549   trcVerbose("Multipage error details: %d\n",
 550       g_multipage_support.error);
 551 
 552   // sanity checks
 553   assert0(g_multipage_support.pagesize == SIZE_4K);
 554   assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
 555   // PPC port: so far unused.assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
 556   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
 557   assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);
 558 
 559 } // end os::Aix::query_multipage_support()
 560 
 561 void os::init_system_properties_values() {
 562 
 563 #define DEFAULT_LIBPATH "/usr/lib:/lib"
 564 #define EXTENSIONS_DIR  "/lib/ext"
 565 
 566   // Buffer that fits several sprintfs.
 567   // Note that the space for the trailing null is provided
 568   // by the nulls included by the sizeof operator.
 569   const size_t bufsize =
 570     MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
 571          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
 572   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 573 
 574   // sysclasspath, java_home, dll_dir
 575   {
 576     char *pslash;
 577     os::jvm_path(buf, bufsize);
 578 
 579     // Found the full path to libjvm.so.
 580     // Now cut the path to <java_home>/jre if we can.
 581     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.



 582     pslash = strrchr(buf, '/');
 583     if (pslash != NULL) {
 584       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 585     }
 586     Arguments::set_dll_dir(buf);
 587 
 588     if (pslash != NULL) {
 589       pslash = strrchr(buf, '/');
 590       if (pslash != NULL) {
 591         *pslash = '\0';          // Get rid of /<arch>.
 592         pslash = strrchr(buf, '/');
 593         if (pslash != NULL) {
 594           *pslash = '\0';        // Get rid of /lib.
 595         }
 596       }
 597     }
 598     Arguments::set_java_home(buf);
 599     set_boot_path('/', ':');
 600   }
 601 


 736     if (thread->is_VM_thread()) {
 737       // Only the VM thread handles BREAK_SIGNAL ...
 738       pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
 739     } else {
 740       // ... all other threads block BREAK_SIGNAL
 741       pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
 742     }
 743   }
 744 }
 745 
 746 // retrieve memory information.
 747 // Returns false if something went wrong;
 748 // content of pmi undefined in this case.
 749 bool os::Aix::get_meminfo(meminfo_t* pmi) {
 750 
 751   assert(pmi, "get_meminfo: invalid parameter");
 752 
 753   memset(pmi, 0, sizeof(meminfo_t));
 754 
 755   if (os::Aix::on_pase()) {

 756 
 757     Unimplemented();












 758     return false;
 759 
 760   } else {
 761 
 762     // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
 763     // See:
 764     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 765     //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
 766     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 767     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 768 
 769     perfstat_memory_total_t psmt;
 770     memset (&psmt, '\0', sizeof(psmt));
 771     const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
 772     if (rc == -1) {
 773       fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
 774       assert(0, "perfstat_memory_total() failed");
 775       return false;
 776     }
 777 


 781     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 782     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 783     // The fields of perfstat_memory_total_t:
 784     // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
 785     // u_longlong_t real_total         Total real memory (in 4 KB pages).
 786     // u_longlong_t real_free          Free real memory (in 4 KB pages).
 787     // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
 788     // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
 789 
 790     pmi->virt_total = psmt.virt_total * 4096;
 791     pmi->real_total = psmt.real_total * 4096;
 792     pmi->real_free = psmt.real_free * 4096;
 793     pmi->pgsp_total = psmt.pgsp_total * 4096;
 794     pmi->pgsp_free = psmt.pgsp_free * 4096;
 795 
 796     return true;
 797 
 798   }
 799 } // end os::Aix::get_meminfo
 800 
 801 // Retrieve global cpu information.
 802 // Returns false if something went wrong;
 803 // the content of pci is undefined in this case.
 804 bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
 805   assert(pci, "get_cpuinfo: invalid parameter");
 806   memset(pci, 0, sizeof(cpuinfo_t));
 807 
 808   perfstat_cpu_total_t psct;
 809   memset (&psct, '\0', sizeof(psct));
 810 
 811   if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
 812     fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
 813     assert(0, "perfstat_cpu_total() failed");
 814     return false;
 815   }
 816 
 817   // global cpu information
 818   strcpy (pci->description, psct.description);
 819   pci->processorHZ = psct.processorHZ;
 820   pci->ncpus = psct.ncpus;
 821   os::Aix::_logical_cpus = psct.ncpus;
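       // psct.loadavg[] is a fixed-point value with SBITS fractional bits (see <sys/proc.h>); dividing by (1 << SBITS) yields the usual floating point load average.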
 822   for (int i = 0; i < 3; i++) {
 823     pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
 824   }
 825 
 826   // get the processor version from _system_configuration
 827   switch (_system_configuration.version) {
 828   case PV_8:
 829     strcpy(pci->version, "Power PC 8");
 830     break;
 831   case PV_7:
 832     strcpy(pci->version, "Power PC 7");
 833     break;
 834   case PV_6_1:
 835     strcpy(pci->version, "Power PC 6 DD1.x");
 836     break;
 837   case PV_6:
 838     strcpy(pci->version, "Power PC 6");
 839     break;
 840   case PV_5:
 841     strcpy(pci->version, "Power PC 5");
 842     break;
 843   case PV_5_2:
 844     strcpy(pci->version, "Power PC 5_2");
 845     break;
 846   case PV_5_3:
 847     strcpy(pci->version, "Power PC 5_3");
 848     break;
 849   case PV_5_Compat:
 850     strcpy(pci->version, "PV_5_Compat");
 851     break;
 852   case PV_6_Compat:
 853     strcpy(pci->version, "PV_6_Compat");
 854     break;
 855   case PV_7_Compat:
 856     strcpy(pci->version, "PV_7_Compat");
 857     break;
 858   case PV_8_Compat:
 859     strcpy(pci->version, "PV_8_Compat");
 860     break;
 861   default:
 862     strcpy(pci->version, "unknown");
 863   }
 864 
 865   return true;
 866 
 867 } //end os::Aix::get_cpuinfo
 868 
 869 //////////////////////////////////////////////////////////////////////////////
 870 // detecting pthread library
 871 
 872 void os::Aix::libpthread_init() {
 873   return;
 874 }
 875 
 876 //////////////////////////////////////////////////////////////////////////////
 877 // create new thread
 878 
 879 // Thread start routine for all newly created threads
 880 static void *java_start(Thread *thread) {
 881 
 882   // find out my own stack dimensions
 883   {
 884     // actually, this should do exactly the same as thread->record_stack_base_and_size...
 885     address base = 0;
 886     size_t size = 0;
 887     query_stack_dimensions(&base, &size);
 888     thread->set_stack_base(base);
 889     thread->set_stack_size(size);
 890   }
 891 




















 892   // Do some sanity checks.
 893   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
 894 
 895   // Try to randomize the cache line index of hot stack frames.
 896   // This helps when threads of the same stack traces evict each other's
 897   // cache lines. The threads can be either from the same JVM instance, or
 898   // from different JVM instances. The benefit is especially true for
 899   // processors with hyperthreading technology.
 900 
 901   static int counter = 0;
 902   int pid = os::current_process_id();
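       // Burn a small, per-thread-varying amount of stack (0..7 * 128 bytes) so that hot frames of different threads land at different cache line offsets.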
 903   alloca(((pid ^ counter++) & 7) * 128);
 904 
 905   ThreadLocalStorage::set_thread(thread);
 906 
 907   OSThread* osthread = thread->osthread();
 908 
 909   // thread_id is kernel thread id (similar to Solaris LWP id)
 910   osthread->set_thread_id(os::Aix::gettid());
 911 
 912   // initialize signal mask for this thread
 913   os::Aix::hotspot_sigmask(thread);
 914 
 915   // initialize floating point control register
 916   os::Aix::init_thread_fpu_state();
 917 
 918   assert(osthread->get_state() == RUNNABLE, "invalid os thread state");
 919 
 920   // call one more level start routine
 921   thread->run();
 922 



 923   return 0;
 924 }
 925 
 926 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 927 
 928   // We want the whole function to be synchronized.
 929   ThreadCritical cs;
 930 
 931   assert(thread->osthread() == NULL, "caller responsible");
 932 
 933   // Allocate the OSThread object
 934   OSThread* osthread = new OSThread(NULL, NULL);
 935   if (osthread == NULL) {
 936     return false;
 937   }
 938 
 939   // set the correct thread state
 940   osthread->set_thread_type(thr_type);
 941 
 942   // Initial state is ALLOCATED but not INITIALIZED
 943   osthread->set_state(ALLOCATED);
 944 
 945   thread->set_osthread(osthread);
 946 
 947   // init thread attributes
 948   pthread_attr_t attr;
 949   pthread_attr_init(&attr);
 950   guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");


 975       } // else fall through:
 976         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 977     case os::vm_thread:
 978     case os::pgc_thread:
 979     case os::cgc_thread:
 980     case os::watcher_thread:
 981       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 982       break;
 983     }
 984   }
 985 
 986   stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
 987   pthread_attr_setstacksize(&attr, stack_size);
 988 
 989   pthread_t tid;
 990   int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
 991 
 992   pthread_attr_destroy(&attr);
 993 
 994   if (ret == 0) {
 995     // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
 996   } else {






 997     if (PrintMiscellaneous && (Verbose || WizardMode)) {
 998       perror("pthread_create()");
 999     }
1000     // Need to clean up stuff we've allocated so far
1001     thread->set_osthread(NULL);
1002     delete osthread;
1003     return false;
1004   }
1005 
1006   // Store pthread info into the OSThread
1007   osthread->set_pthread_id(tid);
1008 
1009   return true;
1010 }
1011 
1012 /////////////////////////////////////////////////////////////////////////////
1013 // attach existing thread
1014 
1015 // bootstrap the main thread
1016 bool os::create_main_thread(JavaThread* thread) {
1017   assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
1018   return create_attached_thread(thread);
1019 }
1020 
1021 bool os::create_attached_thread(JavaThread* thread) {
1022 #ifdef ASSERT
1023     thread->verify_not_published();
1024 #endif
1025 
1026   // Allocate the OSThread object
1027   OSThread* osthread = new OSThread(NULL, NULL);
1028 
1029   if (osthread == NULL) {
1030     return false;
1031   }
1032 
1033   // Store pthread info into the OSThread
1034   osthread->set_thread_id(os::Aix::gettid());
1035   osthread->set_pthread_id(::pthread_self());









1036 
1037   // initialize floating point control register
1038   os::Aix::init_thread_fpu_state();
1039 
1040   // some sanity checks
1041   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
1042 
1043   // Initial thread state is RUNNABLE
1044   osthread->set_state(RUNNABLE);
1045 
1046   thread->set_osthread(osthread);
1047 
1048   if (UseNUMA) {
1049     int lgrp_id = os::numa_get_group_id();
1050     if (lgrp_id != -1) {
1051       thread->set_lgrp_id(lgrp_id);
1052     }
1053   }
1054 
1055   // initialize signal mask for this thread


1135     // better than nothing, but not much
1136     return elapsedTime();
1137   }
1138 }
1139 
1140 jlong os::javaTimeMillis() {
1141   timeval time;
1142   int status = gettimeofday(&time, NULL);
1143   assert(status != -1, "aix error at gettimeofday()");
1144   return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1145 }
1146 
1147 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1148   timeval time;
1149   int status = gettimeofday(&time, NULL);
1150   assert(status != -1, "aix error at gettimeofday()");
1151   seconds = jlong(time.tv_sec);
1152   nanos = jlong(time.tv_usec) * 1000;
1153 }
1154 
1155 
1156 // We need to manually declare mread_real_time,
1157 // because IBM didn't provide a prototype in time.h.
1158 // (they probably only ever tested in C, not C++)
1159 extern "C"
1160 int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
1161 
1162 jlong os::javaTimeNanos() {
1163   if (os::Aix::on_pase()) {
1164     Unimplemented();
1165     return 0;





1166   } else {
1167     // On AIX use the precision of the processor's real time clock
1168     // or time base registers.
1169     timebasestruct_t time;
1170     int rc;
1171 
1172     // If the CPU has a time register, it will be used and
1173     // we have to convert to real time first. After conversion we have the following data:
1174     // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
1175     // time.tb_low  [nanoseconds after the last full second above]
1176     // We'd better use mread_real_time here instead of read_real_time
1177     // to ensure that we will get a monotonically increasing time.
1178     if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
1179       rc = time_base_to_time(&time, TIMEBASE_SZ);
1180       assert(rc != -1, "aix error at time_base_to_time()");
1181     }
1182     return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
1183   }
1184 }
1185 


1274 void os::die() {
1275   ::abort();
1276 }
1277 
1278 // This method is a copy of JDK's sysGetLastErrorString
1279 // from src/solaris/hpi/src/system_md.c
1280 
1281 size_t os::lasterror(char *buf, size_t len) {
1282   if (errno == 0) return 0;
1283 
1284   const char *s = ::strerror(errno);
1285   size_t n = ::strlen(s);
1286   if (n >= len) {
1287     n = len - 1;
1288   }
1289   ::strncpy(buf, s, n);
1290   buf[n] = '\0';
1291   return n;
1292 }
1293 
1294 intx os::current_thread_id() { return (intx)pthread_self(); }


1295 
1296 int os::current_process_id() {
1297 
1298   // This implementation returns a unique pid, the pid of the
1299   // launcher thread that starts the vm 'process'.
1300 
1301   // Under POSIX, getpid() returns the same pid as the
1302   // launcher thread rather than a unique pid per thread.
1303   // Use gettid() if you want the old pre NPTL behaviour.
1304 
1305   // if you are looking for the result of a call to getpid() that
1306   // returns a unique pid for the calling thread, then look at the
1307   // OSThread::thread_id() method in osThread_linux.hpp file
1308 
1309   return (int)(_initial_pid ? _initial_pid : getpid());
1310 }
1311 
1312 // DLL functions
1313 
1314 const char* os::dll_file_extension() { return ".so"; }
1315 
1316 // This must be hard coded because it's the system's temporary
1317 // directory, not the java application's temp directory, a la java.io.tmpdir.
1318 const char* os::get_temp_directory() { return "/tmp"; }
1319 
1320 static bool file_exists(const char* filename) {
1321   struct stat statbuf;
1322   if (filename == NULL || strlen(filename) == 0) {
1323     return false;
1324   }
1325   return os::stat(filename, &statbuf) == 0;
1326 }
1327 
1328 bool os::dll_build_name(char* buffer, size_t buflen,
1329                         const char* pname, const char* fname) {
1330   bool retval = false;
1331   // Copied from libhpi
1332   const size_t pnamelen = pname ? strlen(pname) : 0;
1333 
1334   // Return error on buffer overflow.
1335   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1336     *buffer = '\0';
1337     return retval;
1338   }
1339 
1340   if (pnamelen == 0) {
1341     snprintf(buffer, buflen, "lib%s.so", fname);
1342     retval = true;
1343   } else if (strchr(pname, *os::path_separator()) != NULL) {
1344     int n;
1345     char** pelements = split_path(pname, &n);



1346     for (int i = 0; i < n; i++) {
1347       // Really shouldn't be NULL, but check can't hurt
1348       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1349         continue; // skip the empty path values
1350       }
1351       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1352       if (file_exists(buffer)) {
1353         retval = true;
1354         break;
1355       }
1356     }
1357     // release the storage
1358     for (int i = 0; i < n; i++) {
1359       if (pelements[i] != NULL) {
1360         FREE_C_HEAP_ARRAY(char, pelements[i]);
1361       }
1362     }
1363     if (pelements != NULL) {
1364       FREE_C_HEAP_ARRAY(char*, pelements);
1365     }


1563   else st->print("%d", rlim.rlim_cur);
1564 
1565   st->print(", AS ");
1566   getrlimit(RLIMIT_AS, &rlim);
1567   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1568   else st->print("%uk", rlim.rlim_cur >> 10);
1569 
1570   // Print limits on DATA, because it limits the C-heap.
1571   st->print(", DATA ");
1572   getrlimit(RLIMIT_DATA, &rlim);
1573   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1574   else st->print("%uk", rlim.rlim_cur >> 10);
1575   st->cr();
1576 
1577   // load average
1578   st->print("load average:");
1579   double loadavg[3] = {-1.L, -1.L, -1.L};
1580   os::loadavg(loadavg, 3);
1581   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1582   st->cr();

















1583 }
1584 
1585 void os::print_memory_info(outputStream* st) {
1586 
1587   st->print_cr("Memory:");
1588 
1589   st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
1590   st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));






1591   st->print_cr("  Default shared memory page size:        %s",
1592     describe_pagesize(g_multipage_support.shmpsize));
1593   st->print_cr("  Can use 64K pages dynamically with shared memory: %s",
1594     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1595   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1596     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1597   if (g_multipage_error != 0) {
1598     st->print_cr("  multipage error: %d", g_multipage_error);
1599   }


1600 
1601   // print out LDR_CNTRL because it affects the default page sizes
1602   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1603   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1604 

1605   const char* const extshm = ::getenv("EXTSHM");
1606   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1607   if (extshm != NULL && ((strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0))) {
1608     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1609   }
1610 
1611   // Call os::Aix::get_meminfo() to retrieve memory statistics.




1612   os::Aix::meminfo_t mi;
1613   if (os::Aix::get_meminfo(&mi)) {
1614     char buffer[256];
1615     if (os::Aix::on_aix()) {
1616       jio_snprintf(buffer, sizeof(buffer),
1617                    "  physical total : %llu\n"
1618                    "  physical free  : %llu\n"
1619                    "  swap total     : %llu\n"
1620                    "  swap free      : %llu\n",
1621                    mi.real_total,
1622                    mi.real_free,
1623                    mi.pgsp_total,
1624                    mi.pgsp_free);
1625     } else {
1626       Unimplemented();



1627     }
1628     st->print_raw(buffer);
1629   } else {
1630     st->print_cr("  (no more information available)");
1631   }





1632 }
1633 
1634 // Get a string for the cpuinfo that is a summary of the cpu type
1635 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1636   // This looks good
1637   os::Aix::cpuinfo_t ci;
1638   if (os::Aix::get_cpuinfo(&ci)) {
1639     strncpy(buf, ci.version, buflen);
1640   } else {
1641     strncpy(buf, "AIX", buflen);
1642   }
1643 }
1644 
1645 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {






1646 }
1647 
1648 void os::print_siginfo(outputStream* st, void* siginfo) {
1649   // Use common posix version.
1650   os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
1651   st->cr();
1652 }
1653 
1654 static void print_signal_handler(outputStream* st, int sig,
1655                                  char* buf, size_t buflen);
1656 
1657 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1658   st->print_cr("Signal Handlers:");
1659   print_signal_handler(st, SIGSEGV, buf, buflen);
1660   print_signal_handler(st, SIGBUS , buf, buflen);
1661   print_signal_handler(st, SIGFPE , buf, buflen);
1662   print_signal_handler(st, SIGPIPE, buf, buflen);
1663   print_signal_handler(st, SIGXFSZ, buf, buflen);
1664   print_signal_handler(st, SIGILL , buf, buflen);
1665   print_signal_handler(st, SR_signum, buf, buflen);
1666   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1667   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1668   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1669   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);


1768   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1769 }
1770 
1771 void os::signal_raise(int signal_number) {
1772   ::raise(signal_number);
1773 }
1774 
1775 //
1776 // The following code was moved from os.cpp to make this
1777 // code platform specific, which it is by its very nature.
1778 //
1779 
1780 // Will be modified when max signal is changed to be dynamic
1781 int os::sigexitnum_pd() {
1782   return NSIG;
1783 }
1784 
1785 // a counter for each possible signal value
1786 static volatile jint pending_signals[NSIG+1] = { 0 };
1787 
1788 // POSIX hand-shaking semaphore.





1789 static sem_t sig_sem;


















































1790 
1791 void os::signal_init_pd() {
1792   // Initialize signal structures
1793   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1794 
1795   // Initialize signal semaphore
1796   int rc = ::sem_init(&sig_sem, 0, 0);
1797   guarantee(rc != -1, "sem_init failed");
1798 }
1799 
1800 void os::signal_notify(int sig) {
1801   Atomic::inc(&pending_signals[sig]);
1802   ::sem_post(&sig_sem);
1803 }
1804 
1805 static int check_pending_signals(bool wait) {
1806   Atomic::store(0, &sigint_count);
1807   for (;;) {
1808     for (int i = 0; i < NSIG + 1; i++) {
1809       jint n = pending_signals[i];
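           // Atomically claim one pending occurrence of signal i: the decrement only takes effect if the counter still equals n.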
1810       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1811         return i;
1812       }
1813     }
1814     if (!wait) {
1815       return -1;
1816     }
1817     JavaThread *thread = JavaThread::current();
1818     ThreadBlockInVM tbivm(thread);
1819 
1820     bool threadIsSuspended;
1821     do {
1822       thread->set_suspend_equivalent();
1823       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1824 
1825       ::sem_wait(&sig_sem);
1826 
1827       // were we externally suspended while we were waiting?
1828       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1829       if (threadIsSuspended) {
1830         //
1831         // The semaphore has been incremented, but while we were waiting
1832         // another thread suspended us. We don't want to continue running
1833         // while suspended because that would surprise the thread that
1834         // suspended us.
1835         //
1836         ::sem_post(&sig_sem);

1837 
1838         thread->java_suspend_self();
1839       }
1840     } while (threadIsSuspended);
1841   }
1842 }
1843 
1844 int os::signal_lookup() {
1845   return check_pending_signals(false);
1846 }
1847 
1848 int os::signal_wait() {
1849   return check_pending_signals(true);
1850 }
1851 
1852 ////////////////////////////////////////////////////////////////////////////////
1853 // Virtual Memory
1854 
1855 // We need to keep small simple bookkeeping for os::reserve_memory and friends.
1856 


1869   }
1870 
1871   bool contains_range(char* p, size_t s) const {
1872     return contains_addr(p) && contains_addr(p + s - 1);
1873   }
1874 
1875   void print_on(outputStream* os) const {
1876     os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1877       " bytes, %d %s pages), %s",
1878       addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1879       (type == VMEM_SHMATED ? "shmat" : "mmap")
1880     );
1881   }
1882 
1883   // Check that range is a sub range of memory block (or equal to memory block);
1884   // also check that range is fully page aligned to the page size of the block.
1885   void assert_is_valid_subrange(char* p, size_t s) const {
1886     if (!contains_range(p, s)) {
1887       fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1888               "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
1889               p, p + s - 1, addr, addr + size - 1);
1890       guarantee0(false);
1891     }
1892     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1893       fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1894               " aligned to pagesize (%s)\n", p, p + s, describe_pagesize(pagesize));
1895       guarantee0(false);
1896     }
1897   }
1898 };
1899 
1900 static struct {
1901   vmembk_t* first;
1902   MiscUtils::CritSect cs;
1903 } vmem;
1904 
1905 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1906   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1907   assert0(p);
1908   if (p) {
1909     MiscUtils::AutoCritSect lck(&vmem.cs);
1910     p->addr = addr; p->size = size;
1911     p->pagesize = pagesize;
1912     p->type = type;
1913     p->next = vmem.first;
1914     vmem.first = p;


1971   // BRK because that may cause malloc OOM.
1972   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1973     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1974       "Will attach anywhere.", requested_addr);
1975     // Act like the OS refused to attach there.
1976     requested_addr = NULL;
1977   }
1978 
1979   // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1980   // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1981   if (os::Aix::on_pase_V5R4_or_older()) {
1982     ShouldNotReachHere();
1983   }
1984 
1985   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1986   const size_t size = align_size_up(bytes, SIZE_64K);
1987 
1988   // Reserve the shared segment.
1989   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1990   if (shmid == -1) {
1991     trc("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1992     return NULL;
1993   }
1994 
1995   // Important note:
1996   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
1997 // We must remove it from the system right after attaching it. System V shm segments are global and
1998   // survive the process.
1999   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
2000 
2001   struct shmid_ds shmbuf;
2002   memset(&shmbuf, 0, sizeof(shmbuf));
2003   shmbuf.shm_pagesize = SIZE_64K;
2004   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
2005     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
2006                size / SIZE_64K, errno);
2007     // I want to know if this ever happens.
2008     assert(false, "failed to set page size for shmat");
2009   }
2010 
2011   // Now attach the shared segment.
2012   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
2013   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
2014   // were not a segment boundary.
2015   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
2016   const int errno_shmat = errno;
2017 
2018   // (A) Right after shmat and before handling shmat errors, delete the shm segment.
2019   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2020     trc("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2021     assert(false, "failed to remove shared memory segment!");
2022   }
2023 
2024   // Handle shmat error. If we failed to attach, just return.
2025   if (addr == (char*)-1) {
2026     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
2027     return NULL;
2028   }
2029 
2030   // Just for info: query the real page size. In case setting the page size did not
2031   // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
2032   const size_t real_pagesize = os::Aix::query_pagesize(addr);
2033   if (real_pagesize != shmbuf.shm_pagesize) {
2034     trcVerbose("pagesize is, surprisingly, " SIZE_FORMAT ".", real_pagesize);
2035   }
2036 
2037   if (addr) {
2038     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
2039       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
2040   } else {


2065   } else {
2066     trcVerbose("ok.");
2067     rc = true;
2068   }
2069   return rc;
2070 }
2071 
2072 static bool uncommit_shmated_memory(char* addr, size_t size) {
2073   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2074     addr, addr + size - 1);
2075 
2076   const bool rc = my_disclaim64(addr, size);
2077 
2078   if (!rc) {
2079     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2080     return false;
2081   }
2082   return true;
2083 }
2084 


2085 // Reserve memory via mmap.
2086 // If <requested_addr> is given, an attempt is made to attach at the given address.
2087 // Failing that, memory is allocated at any address.
2088 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2089 // allocate at an address aligned with the given alignment. Failing that, memory
2090 // is aligned anywhere.
2091 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2092   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2093     "alignment_hint " UINTX_FORMAT "...",
2094     bytes, requested_addr, alignment_hint);
2095 
2096   // If a wish address is given but not aligned to a 4K page boundary, mmap will fail.
2097   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size())) {
2098     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2099     return NULL;
2100   }
2101 
2102   // We must prevent anyone from attaching too close to the
2103   // BRK because that may cause malloc OOM.
2104   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {


2210 
2211   assert0(is_aligned_to(addr, os::vm_page_size()));
2212   assert0(is_aligned_to(size, os::vm_page_size()));
2213 
2214   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2215     addr, addr + size - 1);
2216   bool rc = false;
2217 
2218   // Uncommit mmap memory with msync MS_INVALIDATE.
2219   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2220     trcVerbose("failed (%d)\n", errno);
2221     rc = false;
2222   } else {
2223     trcVerbose("ok.");
2224     rc = true;
2225   }
2226 
2227   return rc;
2228 }
2229 
2230 // End: shared memory bookkeeping
2231 ////////////////////////////////////////////////////////////////////////////////////////////////////
2232 
2233 int os::vm_page_size() {
2234   // Seems redundant as all get out.
2235   assert(os::Aix::page_size() != -1, "must call os::init");
2236   return os::Aix::page_size();
2237 }
2238 
2239 // Aix allocates memory by pages.
2240 int os::vm_allocation_granularity() {
2241   assert(os::Aix::page_size() != -1, "must call os::init");
2242   return os::Aix::page_size();
2243 }
2244 
2245 #ifdef PRODUCT
2246 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2247                                     int err) {
2248   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2249           ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2250           strerror(err), err);
2251 }
2252 #endif
2253 
2254 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2255                                   const char* mesg) {
2256   assert(mesg != NULL, "mesg must be specified");
2257   if (!pd_commit_memory(addr, size, exec)) {
2258     // Add extra info in product mode for vm_exit_out_of_memory():
2259     PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2260     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2261   }
2262 }
2263 
2264 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2265 
2266   assert0(is_aligned_to(addr, os::vm_page_size()));
2267   assert0(is_aligned_to(size, os::vm_page_size()));
2268 
2269   vmembk_t* const vmi = vmembk_find(addr);
2270   assert0(vmi);
2271   vmi->assert_is_valid_subrange(addr, size);
2272 
2273   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2274 







2275   return true;
2276 }
2277 
2278 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2279   return pd_commit_memory(addr, size, exec);
2280 }
2281 
2282 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2283                                   size_t alignment_hint, bool exec,
2284                                   const char* mesg) {
2285   // Alignment_hint is ignored on this OS.
2286   pd_commit_memory_or_exit(addr, size, exec, mesg);
2287 }
2288 
2289 bool os::pd_uncommit_memory(char* addr, size_t size) {
2290   assert0(is_aligned_to(addr, os::vm_page_size()));
2291   assert0(is_aligned_to(size, os::vm_page_size()));
2292 
2293   // Dynamically do different things for mmap/shmat.
2294   const vmembk_t* const vmi = vmembk_find(addr);
2295   assert0(vmi);
2296   vmi->assert_is_valid_subrange(addr, size);
2297 
2298   if (vmi->type == VMEM_SHMATED) {
2299     return uncommit_shmated_memory(addr, size);
2300   } else {
2301     return uncommit_mmaped_memory(addr, size);
2302   }
2303 }
2304 
2305 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2306   // Do not call this; no need to commit stack pages on AIX.
2307   ShouldNotReachHere();
2308   return true;
2309 }
2310 
2311 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2312   // Do not call this; no need to commit stack pages on AIX.
2313   ShouldNotReachHere();
2314   return true;
2315 }


2373   const size_t alignment_hint0 =
2374     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2375 
2376   // In 4K mode always use mmap.
2377   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2378   if (os::vm_page_size() == SIZE_4K) {
2379     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2380   } else {
2381     if (bytes >= Use64KPagesThreshold) {
2382       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2383     } else {
2384       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2385     }
2386   }
2387 }
2388 
2389 bool os::pd_release_memory(char* addr, size_t size) {
2390 
2391   // Dynamically do different things for mmap/shmat.
2392   vmembk_t* const vmi = vmembk_find(addr);
2393   assert0(vmi);
2394 
2395   // Always round to os::vm_page_size(), which may be larger than 4K.
2396   size = align_size_up(size, os::vm_page_size());
2397   addr = (char *)align_ptr_up(addr, os::vm_page_size());
2398 
2399   bool rc = false;
2400   bool remove_bookkeeping = false;
2401   if (vmi->type == VMEM_SHMATED) {
2402     // For shmatted memory, we do:
2403     // - If user wants to release the whole range, release the memory (shmdt).
2404     // - If user only wants to release a partial range, uncommit (disclaim) that
2405     //   range. That way, at least, we do not use memory anymore (but still use
2406     //   page table space).
2407     vmi->assert_is_valid_subrange(addr, size);
2408     if (addr == vmi->addr && size == vmi->size) {
2409       rc = release_shmated_memory(addr, size);
2410       remove_bookkeeping = true;
2411     } else {
2412       rc = uncommit_shmated_memory(addr, size);
2413     }


2449   // mprotect success check
2450   //
2451   // Mprotect said it changed the protection but can I believe it?
2452   //
2453   // To be sure I need to check the protection afterwards. Try to
2454   // read from protected memory and check whether that causes a segfault.
2455   //
2456   if (!os::Aix::xpg_sus_mode()) {
2457 
2458     if (CanUseSafeFetch32()) {
2459 
2460       const bool read_protected =
2461         (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2462          SafeFetch32((int*)addr, 0x76543210) == 0x76543210);
2463 
2464       if (prot & PROT_READ) {
2465         rc = !read_protected;
2466       } else {
2467         rc = read_protected;
2468       }


















2469     }
2470   }
2471   if (!rc) {
2472     assert(false, "mprotect failed.");
2473   }




2474   return rc;
2475 }
2476 
2477 // Set protections specified
2478 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2479   unsigned int p = 0;
2480   switch (prot) {
2481   case MEM_PROT_NONE: p = PROT_NONE; break;
2482   case MEM_PROT_READ: p = PROT_READ; break;
2483   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2484   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2485   default:
2486     ShouldNotReachHere();
2487   }
2488   // is_committed is unused.
2489   return checked_mprotect(addr, size, p);
2490 }
2491 
2492 bool os::guard_memory(char* addr, size_t size) {
2493   return checked_mprotect(addr, size, PROT_NONE);
2494 }
2495 
2496 bool os::unguard_memory(char* addr, size_t size) {
2497   return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2498 }
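// Editor's note (illustrative sketch, not part of this change): typical use of the
// guard/unguard pair above on a committed, page-aligned page. 'guard_page' is a
// hypothetical name, not a variable from this file.
//
//   if (os::guard_memory(guard_page, os::vm_page_size())) {
//     // Any access to guard_page now faults (PROT_NONE); the VM's signal handler
//     // can map such a fault back to, e.g., a stack guard zone hit.
//   }
//   os::unguard_memory(guard_page, os::vm_page_size());   // back to PROT_READ|WRITE|EXEC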
2499 
2500 // Large page support
2501 
2502 static size_t _large_page_size = 0;
2503 
2504 // Enable large page support if OS allows that.
2505 void os::large_page_init() {
2506   return; // Nothing to do. See query_multipage_support and friends.
2507 }
2508 
2509 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2510   // "exec" is passed in but not used. Creating the shared image for
2511   // the code cache does not require an SHM_X executable permission check.
2512   Unimplemented();
2513   return 0;

2514 }
2515 
2516 bool os::release_memory_special(char* base, size_t bytes) {
2517   // Detaching the SHM segment will also delete it, see reserve_memory_special().
2518   Unimplemented();
2519   return false;
2520 }
2521 
2522 size_t os::large_page_size() {
2523   return _large_page_size;
2524 }
2525 
2526 bool os::can_commit_large_page_memory() {
2527   // Does not matter; we do not support huge pages.
2528   return false;
2529 }
2530 
2531 bool os::can_execute_large_page_memory() {
2532   // Does not matter; we do not support huge pages.
2533   return false;


2945 // they typically will bring down the process immediately.
2946 bool unblock_program_error_signals() {
2947   sigset_t set;
2948   ::sigemptyset(&set);
2949   ::sigaddset(&set, SIGILL);
2950   ::sigaddset(&set, SIGBUS);
2951   ::sigaddset(&set, SIGFPE);
2952   ::sigaddset(&set, SIGSEGV);
2953   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2954 }
2955 
2956 // Renamed from 'signalHandler' to avoid collision with other shared libs.
2957 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2958   assert(info != NULL && uc != NULL, "it must be old kernel");
2959 
2960   // Never leave program error signals blocked;
2961   // on all our platforms they would bring down the process immediately when
2962   // getting raised while being blocked.
2963   unblock_program_error_signals();
2964 

2965   JVM_handle_aix_signal(sig, info, uc, true);

2966 }
2967 
2968 // This boolean allows users to forward their own non-matching signals
2969 // to JVM_handle_aix_signal, harmlessly.
2970 bool os::Aix::signal_handlers_are_installed = false;
2971 
2972 // For signal-chaining
2973 struct sigaction sigact[NSIG];
2974 sigset_t sigs;
2975 bool os::Aix::libjsig_is_loaded = false;
2976 typedef struct sigaction *(*get_signal_t)(int);
2977 get_signal_t os::Aix::get_signal_action = NULL;
2978 
2979 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2980   struct sigaction *actp = NULL;
2981 
2982   if (libjsig_is_loaded) {
2983     // Retrieve the old signal handler from libjsig
2984     actp = (*get_signal_action)(sig);
2985   }


3067 int os::Aix::get_our_sigflags(int sig) {
3068   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3069   return sigflags[sig];
3070 }
3071 
3072 void os::Aix::set_our_sigflags(int sig, int flags) {
3073   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3074   if (sig > 0 && sig < NSIG) {
3075     sigflags[sig] = flags;
3076   }
3077 }
3078 
3079 void os::Aix::set_signal_handler(int sig, bool set_installed) {
3080   // Check for overwrite.
3081   struct sigaction oldAct;
3082   sigaction(sig, (struct sigaction*)NULL, &oldAct);
3083 
3084   void* oldhand = oldAct.sa_sigaction
3085     ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3086     : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3087   // Renamed 'signalHandler' to avoid collision with other shared libs.
3088   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3089       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3090       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3091     if (AllowUserSignalHandlers || !set_installed) {
3092       // Do not overwrite; user takes responsibility to forward to us.
3093       return;
3094     } else if (UseSignalChaining) {
3095       // save the old handler in jvm
3096       save_preinstalled_handler(sig, oldAct);
3097       // libjsig also interposes the sigaction() call below and saves the
3098       // old sigaction on its own.
3099     } else {
3100       fatal("Encountered unexpected pre-existing sigaction handler "
3101             "%#lx for signal %d.", (long)oldhand, sig);
3102     }
3103   }
3104 
3105   struct sigaction sigAct;
3106   sigfillset(&(sigAct.sa_mask));
3107   if (!set_installed) {
3108     sigAct.sa_handler = SIG_DFL;
3109     sigAct.sa_flags = SA_RESTART;
3110   } else {
3111     // Renamed 'signalHandler' to avoid collision with other shared libs.
3112     sigAct.sa_sigaction = javaSignalHandler;
3113     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3114   }
3115   // Save the flags we set for this signal.
3116   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3117   sigflags[sig] = sigAct.sa_flags;
3118 
3119   int ret = sigaction(sig, &sigAct, &oldAct);
3120   assert(ret == 0, "check");
3121 
3122   void* oldhand2 = oldAct.sa_sigaction
3123                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3124                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3125   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3126 }
3127 
3128 // install signal handlers for signals that HotSpot needs to
3129 // handle in order to support Java-level exception handling.
3130 void os::Aix::install_signal_handlers() {
3131   if (!signal_handlers_are_installed) {


3283     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3284     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3285     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3286     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3287   }
3288 
3289   DO_SIGNAL_CHECK(SR_signum);
3290 }
3291 
3292 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3293 
3294 static os_sigaction_t os_sigaction = NULL;
3295 
3296 void os::Aix::check_signal_handler(int sig) {
3297   char buf[O_BUFLEN];
3298   address jvmHandler = NULL;
3299 
3300   struct sigaction act;
3301   if (os_sigaction == NULL) {
3302     // only trust the default sigaction, in case it has been interposed
3303     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3304     if (os_sigaction == NULL) return;
3305   }
3306 
3307   os_sigaction(sig, (struct sigaction*)NULL, &act);
3308 
3309   address thisHandler = (act.sa_flags & SA_SIGINFO)
3310     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3311     : CAST_FROM_FN_PTR(address, act.sa_handler);
3312 
3313   switch(sig) {
3314   case SIGSEGV:
3315   case SIGBUS:
3316   case SIGFPE:
3317   case SIGPIPE:
3318   case SIGILL:
3319   case SIGXFSZ:
3320     // Renamed 'signalHandler' to avoid collision with other shared libs.
3321     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3322     break;
3323 
3324   case SHUTDOWN1_SIGNAL:
3325   case SHUTDOWN2_SIGNAL:
3326   case SHUTDOWN3_SIGNAL:
3327   case BREAK_SIGNAL:
3328     jvmHandler = (address)user_handler();
3329     break;
3330 
3331   default:
3332     if (sig == SR_signum) {
3333       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3334     } else {
3335       return;
3336     }
3337     break;
3338   }
3339 
3340   if (thisHandler != jvmHandler) {


3372     }
3373     return buf;
3374   } else {
3375     return NULL;
3376   }
3377 }
3378 
3379 // To install functions for atexit system call
3380 extern "C" {
3381   static void perfMemory_exit_helper() {
3382     perfMemory_exit();
3383   }
3384 }
3385 
3386 // This is called _before_ most of the global arguments have been parsed.
3387 void os::init(void) {
3388   // This is basic; we want to know if it ever changes.
3389   // (The shared memory attach boundary, SHMLBA, is expected to be 256M.)
3390   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3391 




3392   // First off, we need to know whether we run on AIX or PASE, and
3393   // the OS level we run on.
3394   os::Aix::initialize_os_info();
3395 
3396   // Scan environment (SPEC1170 behaviour, etc).
3397   os::Aix::scan_environment();
3398 
3399   // Check which pages are supported by AIX.
3400   query_multipage_support();
3401 
3402   // Act like we only have one page size by eliminating corner cases which
3403   // we did not support very well anyway.
3404   // We have two input conditions:
3405   // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3406   //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3407   //    setting.
3408   //    Data segment page size is important for us because it defines the thread stack page
3409   //    size, which is needed for guard page handling, stack banging etc.
3410   // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3411   //    and should be allocated with 64k pages.
3412   //
3413   // So, we do the following:
3414   // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
3415   // 4K           no                                  4K                              old systems (AIX 5.2, AS/400 V5R4) or new systems with AME activated
3416   // 4K           yes                                 64K (treat 4K stacks as 64K)    different loader than java and standard settings
3417   // 64K          no                                  --- AIX 5.2 ? ---
3418   // 64K          yes                                 64K                             new systems and standard java loader (we set datapsize=64k when linking)
3419 


3448       FLAG_SET_ERGO(bool, Use64KPages, false);
3449     }
3450   } else {
3451     // datapsize = 64k. Data segment, thread stacks are 64k paged.
3452     //   This normally means that we can allocate 64k pages dynamically.
3453     //   (There is one special case where this may be false: EXTSHM=on,
3454     //    but we decided not to support that mode.)
3455     assert0(g_multipage_support.can_use_64K_pages);
3456     Aix::_page_size = SIZE_64K;
3457     trcVerbose("64K page mode");
3458     FLAG_SET_ERGO(bool, Use64KPages, true);
3459   }
3460 
3461   // Short-wire stack page size to base page size; if that works, we just remove
3462   // that stack page size altogether.
3463   Aix::_stack_page_size = Aix::_page_size;
3464 
3465   // For now UseLargePages is just ignored.
3466   FLAG_SET_ERGO(bool, UseLargePages, false);
3467   _page_sizes[0] = 0;

3468 
3469   // debug trace
3470   trcVerbose("os::vm_page_size %s\n", describe_pagesize(os::vm_page_size()));
3471 
3472   // Next, we need to initialize libo4 and libperfstat libraries.
3473   if (os::Aix::on_pase()) {
3474     os::Aix::initialize_libo4();
3475   } else {
3476     os::Aix::initialize_libperfstat();
3477   }
3478 
3479   // Reset the perfstat information provided by ODM.
3480   if (os::Aix::on_aix()) {
3481     libperfstat::perfstat_reset();
3482   }
3483 
3484   // Now initialize basic system properties. Note that for some of the values we
3485   // need libperfstat etc.
3486   os::Aix::initialize_system_info();
3487 
3488   _initial_pid = getpid();
3489 
3490   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3491 
3492   init_random(1234567);
3493 
3494   ThreadCritical::initialize();
3495 
3496   // Main_thread points to the aboriginal thread.
3497   Aix::_main_thread = pthread_self();
3498 
3499   initial_time_count = os::elapsed_counter();
3500 
3501   // If the pagesize of the VM is greater than 8K, determine the appropriate
3502   // number of initial guard pages. The user can change this with the
3503   // command line arguments, if needed.
3504   if (vm_page_size() > (int)Aix::vm_default_page_size()) {
3505     StackYellowPages = 1;
3506     StackRedPages = 1;
3507     StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
3508   }
3509 }
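// Editor's note (illustrative, not part of this change): a worked example of the
// guard page rescaling above, with hypothetical but typical 64K-mode values:
// vm_default_page_size() = 4K, vm_page_size() = 64K, StackShadowPages initially 20.
// Then 20 * 4K = 80K of shadow zone is rounded up to the 64K page size (128K) and
// divided by 64K, so StackShadowPages becomes 2.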
3510 
3511 // This is called _after_ the global arguments have been parsed.
3512 jint os::init_2(void) {
3513 






3514   trcVerbose("processor count: %d", os::_processor_count);
3515   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3516 
3517   // Initially build up the loaded dll map.
3518   LoadedLibraries::reload();




3519 
3520   const int page_size = Aix::page_size();
3521   const int map_size = page_size;
3522 
3523   address map_address = (address) MAP_FAILED;
3524   const int prot  = PROT_READ;
3525   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3526 
3527   // Use optimized addresses for the polling page,
3528   // e.g. map it to a special 32-bit address.
3529   if (OptimizePollingPageLocation) {
3530     // architecture-specific list of address wishes:
3531     address address_wishes[] = {
3532       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3533       // PPC64: all address wishes are non-negative 32 bit values where
3534       // the lower 16 bits are all zero. We can load these addresses
3535       // with a single ppc_lis instruction.
3536       (address) 0x30000000, (address) 0x31000000,
3537       (address) 0x32000000, (address) 0x33000000,
3538       (address) 0x40000000, (address) 0x41000000,
3539       (address) 0x42000000, (address) 0x43000000,
3540       (address) 0x50000000, (address) 0x51000000,
3541       (address) 0x52000000, (address) 0x53000000,
3542       (address) 0x60000000, (address) 0x61000000,
3543       (address) 0x62000000, (address) 0x63000000
3544     };
3545     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3546 
3547     // iterate over the list of address wishes:
3548     for (int i=0; i<address_wishes_length; i++) {
3549       // Try to map with current address wish.
3550       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3551       // fail if the address is already mapped.
3552       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3553                                      map_size, prot,
3554                                      flags | MAP_FIXED,
3555                                      -1, 0);
3556       if (Verbose) {
3557         fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3558                 address_wishes[i], map_address + (ssize_t)page_size);
3559       }
3560 
3561       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3562         // Map succeeded and map_address is at wished address, exit loop.
3563         break;
3564       }
3565 
3566       if (map_address != (address) MAP_FAILED) {
3567         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3568         ::munmap(map_address, map_size);
3569         map_address = (address) MAP_FAILED;
3570       }
3571       // Map failed, continue loop.
3572     }
3573   } // end OptimizePollingPageLocation
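  // Editor's note (illustrative, not part of this change): the "single ppc_lis
  // instruction" remark above holds because every wish address has its low 16 bits
  // clear. For example, 0x31000000 == 0x3100 << 16, so it can be materialized as
  //
  //     lis rX, 0x3100      # rX = 0x3100 << 16 = 0x31000000
  //
  // with no second instruction needed to OR in low-order bits.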
3574 
3575   if (map_address == (address) MAP_FAILED) {
3576     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3577   }
3578   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3579   os::set_polling_page(map_address);
3580 
3581   if (!UseMembar) {
3582     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3583     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3584     os::set_memory_serialize_page(mem_serialize_page);
3585 
3586 #ifndef PRODUCT
3587     if (Verbose && PrintMiscellaneous) {
3588       tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3589     }
3590 #endif
3591   }
3592 
3593   // initialize suspend/resume support - must do this before signal_sets_init()
3594   if (SR_initialize() != 0) {
3595     perror("SR_initialize failed");
3596     return JNI_ERR;
3597   }
3598 
3599   Aix::signal_sets_init();
3600   Aix::install_signal_handlers();
3601 
3602   // Check minimum allowable stack size for thread creation and to initialize
3603   // the java system classes, including StackOverflowError - depends on page
3604   // size. Add a page for compiler2 recursion in main thread.
3605   // Add in 2*BytesPerWord times page size to account for VM stack during
3606   // class initialization depending on 32 or 64 bit VM.
3607   os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3608             (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
3609                      (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
3610 
3611   os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
3612 
3613   size_t threadStackSizeInBytes = ThreadStackSize * K;
3614   if (threadStackSizeInBytes != 0 &&
3615       threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3616     tty->print_cr("\nThe stack size specified is too small. "
3617                   "Specify at least %dk",
3618                   os::Aix::min_stack_allowed / K);
3619     return JNI_ERR;
3620   }
3621 
3622   // Make the stack size a multiple of the page size so that
3623   // the yellow/red zones can be guarded.
3624   // Note that this can be 0, if no default stacksize was set.
3625   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3626 
3627   Aix::libpthread_init();



3628 
3629   if (MaxFDLimit) {
3630     // Set the number of file descriptors to max. Print out an error
3631     // if getrlimit/setrlimit fails but continue regardless.
3632     struct rlimit nbr_files;
3633     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3634     if (status != 0) {
3635       if (PrintMiscellaneous && (Verbose || WizardMode))
3636         perror("os::init_2 getrlimit failed");
3637     } else {
3638       nbr_files.rlim_cur = nbr_files.rlim_max;
3639       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3640       if (status != 0) {
3641         if (PrintMiscellaneous && (Verbose || WizardMode))
3642           perror("os::init_2 setrlimit failed");
3643       }
3644     }
3645   }
3646 
3647   if (PerfAllowAtExitRegistration) {
3648     // Only register atexit functions if PerfAllowAtExitRegistration is set.
3649     // Atexit functions can be delayed until process exit time, which
3650     // can be problematic for embedded VM situations. Embedded VMs should
3651     // call DestroyJavaVM() to assure that VM resources are released.
3652 
3653     // Note: perfMemory_exit_helper atexit function may be removed in
3654     // the future if the appropriate cleanup code can be added to the
3655     // VM_Exit VMOperation's doit method.
3656     if (atexit(perfMemory_exit_helper) != 0) {
3657       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3658     }
3659   }
3660 
3661   return JNI_OK;
3662 }
3663 
3664 // Mark the polling page as unreadable
3665 void os::make_polling_page_unreadable(void) {
3666   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3667     fatal("Could not disable polling page");
3668   }
3669 };


3729     // NULL context is unexpected, double-check this is the VMThread.
3730     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3731   }
3732 }
3733 
3734 // Suspends the target using the signal mechanism and then grabs the PC before
3735 // resuming the target. Used by the flat-profiler only
3736 ExtendedPC os::get_thread_pc(Thread* thread) {
3737   // Make sure that it is called by the watcher for the VMThread.
3738   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3739   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3740 
3741   PcFetcher fetcher(thread);
3742   fetcher.run();
3743   return fetcher.result();
3744 }
3745 
3746 ////////////////////////////////////////////////////////////////////////////////
3747 // debug support
3748 
3749 static address same_page(address x, address y) {
3750   intptr_t page_bits = -os::vm_page_size();
3751   if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
3752     return x;
3753   else if (x > y)
3754     return (address)(intptr_t(y) | ~page_bits) + 1;
3755   else
3756     return (address)(intptr_t(y) & page_bits);
3757 }
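// Editor's note (illustrative, not part of this change): worked examples of
// same_page(), assuming a 4K page size:
//
//   same_page((address)0x1004, (address)0x1FF8) -> 0x1004 (same page: x is returned)
//   same_page((address)0x3000, (address)0x1800) -> 0x2000 (x above y: first address
//                                                          after y's page)
//   same_page((address)0x1000, (address)0x3800) -> 0x3000 (x below y: start of y's page)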
3758 
3759 bool os::find(address addr, outputStream* st) {
3760 
3761   st->print(PTR_FORMAT ": ", addr);
3762 
3763   loaded_module_t lm;
3764   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3765       LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3766     st->print("%s", lm.path);
3767     return true;
3768   }
3769 
3770   return false;
3771 }
3772 
3773 ////////////////////////////////////////////////////////////////////////////////
3774 // misc
3775 
3776 // This does not do anything on Aix. This is basically a hook for being
3777 // able to use structured exception handling (thread-local exception filters)
3778 // on, e.g., Win32.


4102   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4103   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4104   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4105 }
4106 
4107 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4108   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4109   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4110   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4111   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4112 }
4113 
4114 bool os::is_thread_cpu_time_supported() {
4115   return true;
4116 }
4117 
4118 // System loadavg support. Returns -1 if load average cannot be obtained.
4119 // For now just return the system wide load average (no processor sets).
4120 int os::loadavg(double values[], int nelem) {
4121 
4122   // Implemented using libperfstat on AIX.
4123 
4124   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4125   guarantee(values, "argument error");
4126 
4127   if (os::Aix::on_pase()) {
4128     Unimplemented();









4129     return -1;


4130   } else {

4131     // AIX: use libperfstat
4132     //
4133     // See also:
4134     // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4135     // /usr/include/libperfstat.h:
4136 
4137     // Use the already AIX version independent get_cpuinfo.
4138     os::Aix::cpuinfo_t ci;
4139     if (os::Aix::get_cpuinfo(&ci)) {
4140       for (int i = 0; i < nelem; i++) {
4141         values[i] = ci.loadavg[i];
4142       }
4143     } else {
4144       return -1;
4145     }
4146     return nelem;
4147   }
4148 }
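// Editor's note: illustrative usage sketch, not part of this change. The helper
// name trace_load_averages is hypothetical.
static void trace_load_averages() {
  double loads[3];
  if (os::loadavg(loads, 3) == 3) {
    // loads[] holds the 1-, 5- and 15-minute system load averages.
    trcVerbose("loadavg: %.2f %.2f %.2f", loads[0], loads[1], loads[2]);
  }
  // A return value of -1 means the load average could not be obtained (e.g. on PASE).
}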
4149 
4150 void os::pause() {
4151   char filename[MAX_PATH];
4152   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4153     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4154   } else {
4155     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4156   }
4157 
4158   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4159   if (fd != -1) {


4169 }
4170 
4171 bool os::Aix::is_primordial_thread() {
4172   if (pthread_self() == (pthread_t)1) {
4173     return true;
4174   } else {
4175     return false;
4176   }
4177 }
4178 
4179 // OS recognition (PASE/AIX, OS level). Call this before calling any of
4180 // the static accessors Aix::on_pase() and Aix::os_version().
4181 void os::Aix::initialize_os_info() {
4182 
4183   assert(_on_pase == -1 && _os_version == -1, "already called.");
4184 
4185   struct utsname uts;
4186   memset(&uts, 0, sizeof(uts));
4187   strcpy(uts.sysname, "?");
4188   if (::uname(&uts) == -1) {
4189     trc("uname failed (%d)", errno);
4190     guarantee(0, "Could not determine whether we run on AIX or PASE");
4191   } else {
4192     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4193                "node \"%s\" machine \"%s\"\n",
4194                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4195     const int major = atoi(uts.version);
4196     assert(major > 0, "invalid OS version");
4197     const int minor = atoi(uts.release);
4198     assert(minor > 0, "invalid OS release");
4199     _os_version = (major << 8) | minor;
4200     if (strcmp(uts.sysname, "OS400") == 0) {
4201       Unimplemented();







4202     } else if (strcmp(uts.sysname, "AIX") == 0) {
4203       // We run on AIX. We do not support versions older than AIX 5.3.
4204       _on_pase = 0;
4205       if (_os_version < 0x0503) {
4206         trc("AIX release older than AIX 5.3 not supported.");
4207         assert(false, "AIX release too old.");
4208       } else {
4209         trcVerbose("We run on AIX %d.%d\n", major, minor);
4210       }
4211     } else {
4212       assert(false, "unknown OS");
4213     }
4214   }
4215 
4216   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4217 } // end: os::Aix::initialize_os_info()
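// Editor's note (illustrative, not part of this change): _os_version packs major and
// minor as (major << 8) | minor, which is what comparisons against constants such as
// 0x0503 and 0x0601 elsewhere in this file rely on:
//
//   AIX 5.3 -> (5 << 8) | 3 = 0x0503
//   AIX 6.1 -> (6 << 8) | 1 = 0x0601
//   AIX 7.1 -> (7 << 8) | 1 = 0x0701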
4218 
4219 // Scan environment for important settings which might affect the VM.
4220 // Trace out settings. Warn about invalid settings and/or correct them.
4221 //
4222 // Must run after os::Aix::initialize_os_info().
4223 void os::Aix::scan_environment() {
4224 
4225   char* p;
4226   int rc;
4227 
4228   // Warn explicitly if EXTSHM=ON is used. That switch changes how
4229   // System V shared memory behaves. One effect is that the page size of
4230   // shared memory cannot be changed dynamically, effectively preventing
4231   // large pages from working.
4232   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4233   // recommendation is (in OSS notes) to switch it off.
4234   p = ::getenv("EXTSHM");
4235   if (Verbose) {
4236     fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4237   }
4238   if (p && strcasecmp(p, "ON") == 0) {
4239     fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4240     _extshm = 1;








4241   } else {
4242     _extshm = 0;
4243   }
4244 
4245   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4246   // Not tested, not supported.
4247   //
4248   // Note that it might be worth the trouble to test and to require it, if only to
4249   // get useful return codes for mprotect.
4250   //
4251   // Note: Setting XPG_SUS_ENV in this process is too late. It must be set earlier
4252   // (before exec()? before loading libjvm?).
4253   p = ::getenv("XPG_SUS_ENV");
4254   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4255   if (p && strcmp(p, "ON") == 0) {
4256     _xpg_sus_mode = 1;
4257     trc("Unsupported setting: XPG_SUS_ENV=ON");
4258     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4259     // clobber address ranges. If we ever want to support that, we have to do some
4260     // testing first.
4261     guarantee(false, "XPG_SUS_ENV=ON not supported");
4262   } else {
4263     _xpg_sus_mode = 0;
4264   }
4265 
4266   // Switch off AIX internal (pthread) guard pages. This has
4267   // immediate effect for any pthread_create calls which follow.












4268   p = ::getenv("AIXTHREAD_GUARDPAGES");
4269   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4270   rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4271   guarantee(rc == 0, "");
4272 
4273 } // end: os::Aix::scan_environment()
4274 
4275 // PASE: initialize the libo4 library (AS400 PASE porting library).
4276 void os::Aix::initialize_libo4() {
4277   Unimplemented();






4278 }
4279 
4280 // AIX: initialize the libperfstat library (we load this dynamically
4281 // because it is only available on AIX).
4282 void os::Aix::initialize_libperfstat() {
4283 
4284   assert(os::Aix::on_aix(), "AIX only");
4285 
4286   if (!libperfstat::init()) {
4287     trc("libperfstat initialization failed.");
4288     assert(false, "libperfstat initialization failed");
4289   } else {
4290     if (Verbose) {
4291       fprintf(stderr, "libperfstat initialized.\n");
4292     }
4293   }
4294 } // end: os::Aix::initialize_libperfstat
4295 
4296 /////////////////////////////////////////////////////////////////////////////
4297 // thread stack
4298 
4299 // Function to query the current stack size using pthread_getthrds_np.
4300 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4301   // This only works when invoked on a pthread. As we agreed not to use
4302   // primordial threads anyway, I assert here.
4303   guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4304 
4305   // Information about this api can be found (a) in the pthread.h header and
4306   // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4307   //
4308   // The use of this API to find out the current stack is kind of undefined.
4309   // But after a lot of tries and asking IBM about it, I concluded that it is safe
4310   // enough for cases where I let the pthread library create its stacks. For cases
4311   // where I create my own stack and pass it to pthread_create, it seems not to
4312   // work (the returned stack size in that case is 0).
4313 
4314   pthread_t tid = pthread_self();




  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 // According to the AIX OS doc #pragma alloca must be used
  27 // with C++ compiler before referencing the function alloca()
  28 #pragma alloca
  29 
  30 // no precompiled headers
  31 #include "classfile/classLoader.hpp"
  32 #include "classfile/systemDictionary.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "jvm_aix.h"
  39 #include "libo4.hpp"
  40 #include "libperfstat_aix.hpp"
  41 #include "loadlib_aix.hpp"
  42 #include "memory/allocation.inline.hpp"
  43 #include "memory/filemap.hpp"
  44 #include "misc_aix.hpp"
  45 #include "mutex_aix.inline.hpp"
  46 #include "oops/oop.inline.hpp"
  47 #include "os_aix.inline.hpp"
  48 #include "os_share_aix.hpp"
  49 #include "porting_aix.hpp"
  50 #include "prims/jniFastGetField.hpp"
  51 #include "prims/jvm.h"
  52 #include "prims/jvm_misc.hpp"
  53 #include "runtime/arguments.hpp"
  54 #include "runtime/atomic.inline.hpp"
  55 #include "runtime/extendedPC.hpp"
  56 #include "runtime/globals.hpp"
  57 #include "runtime/interfaceSupport.hpp"
  58 #include "runtime/java.hpp"
  59 #include "runtime/javaCalls.hpp"
  60 #include "runtime/mutexLocker.hpp"
  61 #include "runtime/objectMonitor.hpp"
  62 #include "runtime/orderAccess.inline.hpp"
  63 #include "runtime/os.hpp"
  64 #include "runtime/osThread.hpp"
  65 #include "runtime/perfMemory.hpp"
  66 #include "runtime/sharedRuntime.hpp"
  67 #include "runtime/statSampler.hpp"
  68 #include "runtime/stubRoutines.hpp"
  69 #include "runtime/thread.inline.hpp"
  70 #include "runtime/threadCritical.hpp"
  71 #include "runtime/timer.hpp"
  72 #include "runtime/vm_version.hpp"
  73 #include "services/attachListener.hpp"
  74 #include "services/runtimeService.hpp"
  75 #include "utilities/decoder.hpp"
  76 #include "utilities/defaultStream.hpp"
  77 #define PV_8_Compat 0x308000   /* Power PC 8 */
  78 #include "utilities/events.hpp"
  79 #include "utilities/growableArray.hpp"
  80 #include "utilities/vmError.hpp"
  81 
  82 // put OS-includes here (sorted alphabetically)
  83 #include <errno.h>
  84 #include <fcntl.h>
  85 #include <inttypes.h>
  86 #include <poll.h>
  87 #include <procinfo.h>
  88 #include <pthread.h>
  89 #include <pwd.h>
  90 #include <semaphore.h>
  91 #include <signal.h>
  92 #include <stdint.h>
  93 #include <stdio.h>
  94 #include <string.h>
  95 #include <unistd.h>
  96 #include <sys/ioctl.h>
  97 #include <sys/ipc.h>
  98 #include <sys/mman.h>
  99 #include <sys/resource.h>
 100 #include <sys/select.h>
 101 #include <sys/shm.h>
 102 #include <sys/socket.h>
 103 #include <sys/stat.h>
 104 #include <sys/sysinfo.h>
 105 #include <sys/systemcfg.h>
 106 #include <sys/time.h>
 107 #include <sys/times.h>
 108 #include <sys/types.h>
 109 #include <sys/utsname.h>
 110 #include <sys/vminfo.h>
 111 #include <sys/wait.h>
 112 
 113 // Missing prototypes for various system APIs.
 114 extern "C"
 115 int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);






 116 

 117 #if !defined(_AIXVERSION_610)
 118 extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
 119 extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
 120 extern "C" int getargs   (procsinfo*, int, char*, int);




 121 #endif
 122 
 123 #define MAX_PATH (2 * K)
 124 
 125 // for timer info max values which include all bits
 126 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 127 // for multipage initialization error analysis (in 'g_multipage_error')
 128 #define ERROR_MP_OS_TOO_OLD                          100
 129 #define ERROR_MP_EXTSHM_ACTIVE                       101
 130 #define ERROR_MP_VMGETINFO_FAILED                    102
 131 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 132 
 133 // The semantics in this file are thus that codeptr_t is a *real code ptr*.
 134 // This means that any function taking codeptr_t as arguments will assume
 135 // a real codeptr and won't handle function descriptors (eg getFuncName),
 136 // whereas functions taking address as args will deal with function
 137 // descriptors (eg os::dll_address_to_library_name).
 138 typedef unsigned int* codeptr_t;
 139 
 140 // Typedefs for stackslots, stack pointers, pointers to op codes.
 141 typedef unsigned long stackslot_t;
 142 typedef stackslot_t* stackptr_t;
 143 










 144 // Query dimensions of the stack of the calling thread.
 145 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
 146 static address resolve_function_descriptor_to_code_pointer(address p);
 147 
 148 // Function to check a given stack pointer against given stack limits.
 149 inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
 150   if (((uintptr_t)sp) & 0x7) {
 151     return false;
 152   }
 153   if (sp > stack_base) {
 154     return false;
 155   }
 156   if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
 157     return false;
 158   }
 159   return true;
 160 }
 161 
 162 // Returns true if function is a valid codepointer.
 163 inline bool is_valid_codepointer(codeptr_t p) {
 164   if (!p) {
 165     return false;
 166   }
 167   if (((uintptr_t)p) & 0x3) {
 168     return false;
 169   }
 170   if (LoadedLibraries::find_for_text_address(p, NULL) == NULL) {
 171     return false;
 172   }
 173   return true;
 174 }
 175 
 176 // Macro to check a given stack pointer against given stack limits and to die if test fails.
 177 #define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
 178     guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
 179 }
 180 
 181 // Macro to check the current stack pointer against given stacklimits.
 182 #define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
 183   address sp; \
 184   sp = os::current_stack_pointer(); \
 185   CHECK_STACK_PTR(sp, stack_base, stack_size); \
 186 }
 187 
 188 static void vmembk_print_on(outputStream* os);
 189 
 190 ////////////////////////////////////////////////////////////////////////////////
 191 // global variables (for a description see os_aix.hpp)
 192 
 193 julong    os::Aix::_physical_memory = 0;
 194 
 195 pthread_t os::Aix::_main_thread = ((pthread_t)0);
 196 int       os::Aix::_page_size = -1;
 197 
 198 // -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
 199 int       os::Aix::_on_pase = -1;
 200 
 201 // -1 = uninitialized, otherwise os version in the form 0xMMmm - MM:major, mm:minor
 202 //  E.g. 0x0601 for  AIX 6.1 or 0x0504 for OS/400 V5R4
 203 int       os::Aix::_os_version = -1;
 204 
 205 int       os::Aix::_stack_page_size = -1;
 206 
 207 // -1 = uninitialized, 0 - no, 1 - yes
 208 int       os::Aix::_xpg_sus_mode = -1;
 209 
 210 // -1 = uninitialized, 0 - no, 1 - yes
 211 int       os::Aix::_extshm = -1;

 212 
 213 ////////////////////////////////////////////////////////////////////////////////
 214 // local variables
 215 

 216 static jlong    initial_time_count = 0;
 217 static int      clock_tics_per_sec = 100;
 218 static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
 219 static bool     check_signals      = true;

 220 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 221 static sigset_t SR_sigset;
 222 
 223 // Process break recorded at startup.
 224 static address g_brk_at_startup = NULL;
 225 
 226 // This describes the state of multipage support of the underlying
 227 // OS. Note that this is of no interest to the outside world and
 228 // therefore should not be defined in the AIX class.
 229 //
 230 // AIX supports four different page sizes - 4K, 64K, 16M, 16G. The
 231 // latter two (16M "large" and 16G "huge" pages) require special
 232 // setup and are normally not available.
 233 //
 234 // AIX supports multiple page sizes per process, for:
 235 //  - Stack (of the primordial thread, so not relevant for us)
 236 //  - Data - data, bss, heap, for us also pthread stacks
 237 //  - Text - text code
 238 //  - shared memory
 239 //
 240 // Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
 241 // and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
 242 //
 243 // For shared memory, page size can be set dynamically via
 244 // shmctl(). Different shared memory regions can have different page
 245 // sizes.


 256   bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
 257   bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
 258   int error;                  // Error describing if something went wrong at multipage init.
 259 } g_multipage_support = {
 260   (size_t) -1,
 261   (size_t) -1,
 262   (size_t) -1,
 263   (size_t) -1,
 264   (size_t) -1,
 265   false, false,
 266   0
 267 };
 268 
 269 // We must not accidentally allocate memory close to the BRK - even if
 270 // that would work - because then we prevent the BRK segment from
 271 // growing which may result in a malloc OOM even though there is
 272 // enough memory. The problem only arises if we shmat() or mmap() at
 273 // a specific wish address, e.g. to place the heap in a
 274 // compressed-oops-friendly way.
 275 static bool is_close_to_brk(address a) {
 276   assert0(g_brk_at_startup != NULL);
 277   if (a >= g_brk_at_startup &&
 278       a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
 279     return true;
 280   }
 281   return false;
 282 }
 283 
 284 julong os::available_memory() {
 285   return Aix::available_memory();
 286 }
 287 
 288 julong os::Aix::available_memory() {
 289   // Avoid an expensive API call here, as the returned value will always be 0.
 290   if (os::Aix::on_pase()) {
 291     return 0x0LL;
 292   }
 293   os::Aix::meminfo_t mi;
 294   if (os::Aix::get_meminfo(&mi)) {
 295     return mi.real_free;
 296   } else {
 297     return ULONG_MAX;
 298   }
 299 }
 300 
 301 julong os::physical_memory() {
 302   return Aix::physical_memory();
 303 }
 304 
 305 // Return true if user is running as root.
 306 
 307 bool os::have_special_privileges() {
 308   static bool init = false;
 309   static bool privileges = false;
 310   if (!init) {
 311     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 312     init = true;
 313   }
 314   return privileges;
 315 }
 316 
 317 // Helper function that emulates disclaim64 using multiple 32bit disclaims
 318 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 319 static bool my_disclaim64(char* addr, size_t size) {
 320 
 321   if (size == 0) {
 322     return true;
 323   }
 324 
 325   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 326   const unsigned int maxDisclaimSize = 0x40000000;
 327 
 328   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 329   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 330 
 331   char* p = addr;
 332 
 333   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
 334     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 335       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 336       return false;
 337     }
 338     p += maxDisclaimSize;
 339   }
 340 
 341   if (lastDisclaimSize > 0) {
 342     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 343       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 344       return false;
 345     }
 346   }
 347 
 348   return true;
 349 }
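// Editor's note (illustrative, not part of this change): a worked example of the
// chunking above. With maxDisclaimSize = 0x40000000 (1 GB), a 2.5 GB range is
// released as two full 1 GB disclaim() calls plus one final 0.5 GB call; a range
// that is an exact multiple of 1 GB produces no trailing call (lastDisclaimSize == 0).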
 350 
 351 // Cpu architecture string
 352 #if defined(PPC32)
 353 static char cpu_arch[] = "ppc";
 354 #elif defined(PPC64)
 355 static char cpu_arch[] = "ppc64";
 356 #else
 357 #error Add appropriate cpu_arch setting
 358 #endif
 359 
 360 // Wrap the function "vmgetinfo" which is not available on older OS releases.
 361 static int checked_vmgetinfo(void *out, int command, int arg) {
 362   if (os::Aix::on_pase() && os::Aix::os_version() < 0x0601) {
 363     guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
 364   }
 365   return ::vmgetinfo(out, command, arg);
 366 }
 367 
 368 // Given an address, returns the size of the page backing that address.
 369 size_t os::Aix::query_pagesize(void* addr) {
 370 
 371   if (os::Aix::on_pase() && os::Aix::os_version() < 0x0601) {
 372     // AS/400 older than V6R1: no vmgetinfo here, default to 4K
 373     return SIZE_4K;
 374   }
 375 
 376   vm_page_info pi;
 377   pi.addr = (uint64_t)addr;
 378   if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
 379     return pi.pagesize;
 380   } else {

 381     assert(false, "vmgetinfo failed to retrieve page size");
 382     return SIZE_4K;
 383   }






 384 }
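// Editor's note: illustrative usage sketch, not part of this change. The helper name
// trace_stack_pagesize is hypothetical; passing the address of a local variable
// probes the page size backing the current thread stack, the same trick
// query_multipage_support() uses further down.
static void trace_stack_pagesize() {
  int probe = 0;
  const size_t psize = os::Aix::query_pagesize(&probe);
  trcVerbose("thread stack page size: %llu bytes", (unsigned long long) psize);
}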
 385 
 386 void os::Aix::initialize_system_info() {
 387 
 388   // Get the number of online (logical) cpus instead of configured.
 389   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
 390   assert(_processor_count > 0, "_processor_count must be > 0");
 391 
 392   // Retrieve total physical storage.
 393   os::Aix::meminfo_t mi;
 394   if (!os::Aix::get_meminfo(&mi)) {

 395     assert(false, "os::Aix::get_meminfo failed.");
 396   }
 397   _physical_memory = (julong) mi.real_total;
 398 }
 399 
 400 // Helper function for tracing page sizes.
 401 static const char* describe_pagesize(size_t pagesize) {
 402   switch (pagesize) {
 403     case SIZE_4K : return "4K";
 404     case SIZE_64K: return "64K";
 405     case SIZE_16M: return "16M";
 406     case SIZE_16G: return "16G";

 407     default:
 408       assert(false, "surprise");
 409       return "??";
 410   }
 411 }
 412 
 413 // Probe OS for multipage support.
 414 // Will fill the global g_multipage_support structure.
 415 // Must be called before calling os::large_page_init().
 416 static void query_multipage_support() {
 417 
 418   guarantee(g_multipage_support.pagesize == -1,
 419             "do not call twice");
 420 
 421   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
 422 
 423   // This really would surprise me.
 424   assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");
 425 
 426   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 427   // Default data page size is defined either by linker options (-bdatapsize)
 428   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 429   // default should be 4K.
 430   {
 431     void* p = ::malloc(SIZE_16M);
 432     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
 433     ::free(p);
 434   }
 435 
 436   // Query default shm page size (LDR_CNTRL SHMPSIZE).
 437   // Note that this is pure curiosity. We do not rely on default page size but set
 438   // our own page size after allocation.
 439   {
 440     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 441     guarantee(shmid != -1, "shmget failed");
 442     void* p = ::shmat(shmid, NULL, 0);
 443     ::shmctl(shmid, IPC_RMID, NULL);
 444     guarantee(p != (void*) -1, "shmat failed");
 445     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
 446     ::shmdt(p);
 447   }
 448 
 449   // Before querying the stack page size, make sure we are not running as primordial
 450   // thread (because primordial thread's stack may have different page size than
 451   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 452   // number of reasons so we may just as well guarantee it here.
 453   guarantee0(!os::Aix::is_primordial_thread());
 454 
 455   // Query pthread stack page size. Should be the same as data page size because
 456   // pthread stacks are allocated from C-Heap.
 457   {
 458     int dummy = 0;
 459     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
 460   }
 461 
 462   // Query default text page size (LDR_CNTRL TEXTPSIZE).

 463   {
 464     address any_function =
 465       resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
 466     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
 467   }

 468 
 469   // Now probe for support of 64K pages and 16M pages.
 470 
 471   // Before OS/400 V6R1, there is no support for pages other than 4K.
 472   if (os::Aix::on_pase_V5R4_or_older()) {
 473     trcVerbose("OS/400 < V6R1 - no large page support.");
 474     g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
 475     goto query_multipage_support_end;
 476   }
 477 
 478   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
 479   {
 480     const int MAX_PAGE_SIZES = 4;
 481     psize_t sizes[MAX_PAGE_SIZES];
 482     const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 483     if (num_psizes == -1) {
 484       trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
 485       trcVerbose("disabling multipage support.");
 486       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
 487       goto query_multipage_support_end;
 488     }
 489     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 490     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 491     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
 492     for (int i = 0; i < num_psizes; i ++) {
 493       trcVerbose(" %s ", describe_pagesize(sizes[i]));
 494     }
 495 
 496     // Can we use 64K, 16M pages?
 497     for (int i = 0; i < num_psizes; i ++) {
 498       const size_t pagesize = sizes[i];
 499       if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
 500         continue;
 501       }
 502       bool can_use = false;
 503       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
 504       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
 505         IPC_CREAT | S_IRUSR | S_IWUSR);
 506       guarantee0(shmid != -1); // Should always work.
 507       // Try to set pagesize.
 508       struct shmid_ds shm_buf = { 0 };
 509       shm_buf.shm_pagesize = pagesize;
 510       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
 511         const int en = errno;
 512         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 513         trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
 514           MiscUtils::describe_errno(en));
 515       } else {
 516         // Attach and double-check the pagesize.
 517         void* p = ::shmat(shmid, NULL, 0);
 518         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 519         guarantee0(p != (void*) -1); // Should always work.
 520         const size_t real_pagesize = os::Aix::query_pagesize(p);
 521         if (real_pagesize != pagesize) {
 522           trcVerbose("real page size (0x%llX) differs.", real_pagesize);
 523         } else {
 524           can_use = true;
 525         }
 526         ::shmdt(p);
 527       }
 528       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
 529       if (pagesize == SIZE_64K) {
 530         g_multipage_support.can_use_64K_pages = can_use;
 531       } else if (pagesize == SIZE_16M) {
 532         g_multipage_support.can_use_16M_pages = can_use;
 533       }
 534     }
 535 
 536   } // end: check which pages can be used for shared memory
 537 
 538 query_multipage_support_end:
 539 
 540   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
 541       describe_pagesize(g_multipage_support.pagesize));
 542   trcVerbose("Data page size (C-Heap, bss, etc): %s",
 543       describe_pagesize(g_multipage_support.datapsize));
 544   trcVerbose("Text page size: %s",
 545       describe_pagesize(g_multipage_support.textpsize));
 546   trcVerbose("Thread stack page size (pthread): %s",
 547       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
 548   trcVerbose("Default shared memory page size: %s",
 549       describe_pagesize(g_multipage_support.shmpsize));
 550   trcVerbose("Can use 64K pages dynamically with shared memory: %s",
 551       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
 552   trcVerbose("Can use 16M pages dynamically with shared memory: %s",
 553       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
 554   trcVerbose("Multipage error details: %d",
 555       g_multipage_support.error);
 556 
 557   // sanity checks
 558   assert0(g_multipage_support.pagesize == SIZE_4K);
 559   assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
 560   assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
 561   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
 562   assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);
 563 
 564 }
 565 
 566 void os::init_system_properties_values() {
 567 
 568 #define DEFAULT_LIBPATH "/lib:/usr/lib"
 569 #define EXTENSIONS_DIR  "/lib/ext"
 570 
 571   // Buffer that fits several sprintfs.
 572   // Note that the space for the trailing null is provided
 573   // by the nulls included by the sizeof operator.
 574   const size_t bufsize =
 575     MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
 576          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
 577   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 578 
 579   // sysclasspath, java_home, dll_dir
 580   {
 581     char *pslash;
 582     os::jvm_path(buf, bufsize);
 583 
 584     // Found the full path to libjvm.so.
 585     // Now cut the path to <java_home>/jre if we can.
 586     pslash = strrchr(buf, '/');
 587     if (pslash != NULL) {
 588       *pslash = '\0';            // Get rid of /libjvm.so.
 589     }
 590     pslash = strrchr(buf, '/');
 591     if (pslash != NULL) {
 592       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 593     }
 594     Arguments::set_dll_dir(buf);
 595 
 596     if (pslash != NULL) {
 597       pslash = strrchr(buf, '/');
 598       if (pslash != NULL) {
 599         *pslash = '\0';          // Get rid of /<arch>.
 600         pslash = strrchr(buf, '/');
 601         if (pslash != NULL) {
 602           *pslash = '\0';        // Get rid of /lib.
 603         }
 604       }
 605     }
 606     Arguments::set_java_home(buf);
 607     set_boot_path('/', ':');
 608   }
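  // Editor's note (illustrative, not part of this change): a worked example of the
  // path stripping above, for a hypothetical JDK location:
  //
  //   buf after os::jvm_path():  /opt/jdk/jre/lib/ppc64/server/libjvm.so
  //   cut "/libjvm.so"        -> /opt/jdk/jre/lib/ppc64/server
  //   cut "/server"           -> /opt/jdk/jre/lib/ppc64      (Arguments::set_dll_dir)
  //   cut "/ppc64"            -> /opt/jdk/jre/lib
  //   cut "/lib"              -> /opt/jdk/jre                (Arguments::set_java_home)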
 609 


 744     if (thread->is_VM_thread()) {
 745       // Only the VM thread handles BREAK_SIGNAL ...
 746       pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
 747     } else {
 748       // ... all other threads block BREAK_SIGNAL
 749       pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
 750     }
 751   }
 752 }
 753 
 754 // Retrieve memory information.
 755 // Returns false if something went wrong;
 756 // in that case the content of pmi is undefined.
 757 bool os::Aix::get_meminfo(meminfo_t* pmi) {
 758 
 759   assert(pmi, "get_meminfo: invalid parameter");
 760 
 761   memset(pmi, 0, sizeof(meminfo_t));
 762 
 763   if (os::Aix::on_pase()) {
 764     // On PASE, use the libo4 porting library.
 765 
 766     unsigned long long virt_total = 0;
 767     unsigned long long real_total = 0;
 768     unsigned long long real_free = 0;
 769     unsigned long long pgsp_total = 0;
 770     unsigned long long pgsp_free = 0;
 771     if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
 772       pmi->virt_total = virt_total;
 773       pmi->real_total = real_total;
 774       pmi->real_free = real_free;
 775       pmi->pgsp_total = pgsp_total;
 776       pmi->pgsp_free = pgsp_free;
 777       return true;
 778     }
 779     return false;
 780 
 781   } else {
 782 
 783     // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
 784     // See:
 785     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 786     //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
 787     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 788     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 789 
 790     perfstat_memory_total_t psmt;
 791     memset (&psmt, '\0', sizeof(psmt));
 792     const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
 793     if (rc == -1) {
 794       fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
 795       assert(0, "perfstat_memory_total() failed");
 796       return false;
 797     }
 798 


 802     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 803     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 804     // The fields of perfstat_memory_total_t:
 805     // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
 806     // u_longlong_t real_total         Total real memory (in 4 KB pages).
 807     // u_longlong_t real_free          Free real memory (in 4 KB pages).
 808     // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
 809     // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
 810 
 811     pmi->virt_total = psmt.virt_total * 4096;
 812     pmi->real_total = psmt.real_total * 4096;
 813     pmi->real_free = psmt.real_free * 4096;
 814     pmi->pgsp_total = psmt.pgsp_total * 4096;
 815     pmi->pgsp_free = psmt.pgsp_free * 4096;
 816 
 817     return true;
 818 
 819   }
 820 } // end os::Aix::get_meminfo
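     // Illustrative caller of get_meminfo() (documentation sketch only; on AIX the
     // fields are in bytes after the 4K-page scaling above):
     //
     //   os::Aix::meminfo_t mi;
     //   if (os::Aix::get_meminfo(&mi)) {
     //     trcVerbose("real_free: " UINT64_FORMAT " bytes", (uint64_t) mi.real_free);
     //   }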
 821 


 822 //////////////////////////////////////////////////////////////////////////////
 823 // create new thread
 824 
 825 // Thread start routine for all newly created threads
 826 static void *java_start(Thread *thread) {
 827 
 828   // find out my own stack dimensions
 829   {
 830     // actually, this should do exactly the same as thread->record_stack_base_and_size...
 831     address base = 0;
 832     size_t size = 0;
 833     query_stack_dimensions(&base, &size);
 834     thread->set_stack_base(base);
 835     thread->set_stack_size(size);
 836   }
 837 
 838   const pthread_t pthread_id = ::pthread_self();
 839   const tid_t kernel_thread_id = ::thread_self();
 840 
 841   trcVerbose("newborn Thread : pthread-id %u, ktid " UINT64_FORMAT
 842     ", stack %p ... %p, stacksize 0x%IX (%IB)",
 843     pthread_id, kernel_thread_id,
 844     thread->stack_base() - thread->stack_size(),
 845     thread->stack_base(),
 846     thread->stack_size(),
 847     thread->stack_size());
 848 
 849   // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
 850   // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
 851   // tools hook pthread_create(). In this case, we may run into problems establishing
 852   // guard pages on those stacks, because the stacks may reside in memory which is not
 853   // protectable (shmated).
 854   if (thread->stack_base() > ::sbrk(0)) {
 855     fprintf(stderr, "Thread " UINT64_FORMAT ": stack not in data segment.", (uint64_t) pthread_id);
 856   }
 857 
 858   // Do some sanity checks.
 859   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
 860 
 861   // Try to randomize the cache line index of hot stack frames.
 862   // This helps when threads with the same stack traces evict each other's
 863   // cache lines. The threads can be from the same JVM instance or
 864   // from different JVM instances. The benefit is especially pronounced on
 865   // processors with hyperthreading technology.
 866 
 867   static int counter = 0;
 868   int pid = os::current_process_id();
 869   alloca(((pid ^ counter++) & 7) * 128);
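       // The alloca above offsets this thread's initial stack usage by 0..7
       // cache-line-sized steps (0, 128, ..., 896 bytes), so identical hot frames
       // in different threads map to different cache line indexes; the allocated
       // block itself is never referenced.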
 870 
 871   ThreadLocalStorage::set_thread(thread);
 872 
 873   OSThread* osthread = thread->osthread();
 874 
 875   // Thread_id is pthread id.
 876   osthread->set_thread_id(pthread_id);
 877 
 878   // Initialize signal mask for this thread.
 879   os::Aix::hotspot_sigmask(thread);
 880 
 881   // Initialize floating point control register.
 882   os::Aix::init_thread_fpu_state();
 883 
 884   assert(osthread->get_state() == RUNNABLE, "invalid os thread state");
 885 
 886   // Call one more level start routine.
 887   thread->run();
 888 
 889   trcVerbose("Thread finished : pthread-id %u, ktid " UINT64_FORMAT ".",
 890     pthread_id, kernel_thread_id);
 891 
 892   return 0;
 893 }
 894 
 895 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 896 



 897   assert(thread->osthread() == NULL, "caller responsible");
 898 
 899   // Allocate the OSThread object
 900   OSThread* osthread = new OSThread(NULL, NULL);
 901   if (osthread == NULL) {
 902     return false;
 903   }
 904 
 905   // set the correct thread state
 906   osthread->set_thread_type(thr_type);
 907 
 908   // Initial state is ALLOCATED but not INITIALIZED
 909   osthread->set_state(ALLOCATED);
 910 
 911   thread->set_osthread(osthread);
 912 
 913   // init thread attributes
 914   pthread_attr_t attr;
 915   pthread_attr_init(&attr);
 916   guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");


 941       } // else fall through:
 942         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 943     case os::vm_thread:
 944     case os::pgc_thread:
 945     case os::cgc_thread:
 946     case os::watcher_thread:
 947       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 948       break;
 949     }
 950   }
 951 
 952   stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
 953   pthread_attr_setstacksize(&attr, stack_size);
 954 
 955   pthread_t tid;
 956   int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
 957 
 958   pthread_attr_destroy(&attr);
 959 
 960   if (ret == 0) {
 961     trcVerbose("Created New Thread : pthread-id %u", tid);
 962   } else {
 963     if (os::Aix::on_pase()) {
 964       // QIBM_MULTI_THREADED=Y is needed when the launcher is started on iSeries
 965       // using QSH. Otherwise pthread_create fails with errno=11.
 966       trcVerbose("(Please make sure you set the environment variable "
 967               "QIBM_MULTI_THREADED=Y before running this program.)");
 968     }
 969     if (PrintMiscellaneous && (Verbose || WizardMode)) {
 970       perror("pthread_create()");
 971     }
 972     // Need to clean up stuff we've allocated so far
 973     thread->set_osthread(NULL);
 974     delete osthread;
 975     return false;
 976   }
 977 
 978   // OSThread::thread_id is the pthread id.
 979   osthread->set_thread_id(tid);
 980 
 981   return true;
 982 }
 983 
 984 /////////////////////////////////////////////////////////////////////////////
 985 // attach existing thread
 986 
 987 // bootstrap the main thread
 988 bool os::create_main_thread(JavaThread* thread) {
 989   assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
 990   return create_attached_thread(thread);
 991 }
 992 
 993 bool os::create_attached_thread(JavaThread* thread) {
 994 #ifdef ASSERT
 995     thread->verify_not_published();
 996 #endif
 997 
 998   // Allocate the OSThread object
 999   OSThread* osthread = new OSThread(NULL, NULL);
1000 
1001   if (osthread == NULL) {
1002     return false;
1003   }
1004 
1005   const pthread_t pthread_id = ::pthread_self();
1006   const tid_t kernel_thread_id = ::thread_self();
1007 
1008   trcVerbose("attaching Thread : pthread-id %u, ktid " UINT64_FORMAT ", stack %p ... %p, stacksize 0x%IX (%IB)",
1009     pthread_id, kernel_thread_id,
1010     thread->stack_base() - thread->stack_size(),
1011     thread->stack_base(),
1012     thread->stack_size(),
1013     thread->stack_size());
1014 
1015   // OSThread::thread_id is the pthread id.
1016   osthread->set_thread_id(pthread_id);
1017 
1018   // initialize floating point control register
1019   os::Aix::init_thread_fpu_state();
1020 
1021   // some sanity checks
1022   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
1023 
1024   // Initial thread state is RUNNABLE
1025   osthread->set_state(RUNNABLE);
1026 
1027   thread->set_osthread(osthread);
1028 
1029   if (UseNUMA) {
1030     int lgrp_id = os::numa_get_group_id();
1031     if (lgrp_id != -1) {
1032       thread->set_lgrp_id(lgrp_id);
1033     }
1034   }
1035 
1036   // initialize signal mask for this thread


1116     // better than nothing, but not much
1117     return elapsedTime();
1118   }
1119 }
1120 
1121 jlong os::javaTimeMillis() {
1122   timeval time;
1123   int status = gettimeofday(&time, NULL);
1124   assert(status != -1, "aix error at gettimeofday()");
1125   return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1126 }
1127 
1128 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1129   timeval time;
1130   int status = gettimeofday(&time, NULL);
1131   assert(status != -1, "aix error at gettimeofday()");
1132   seconds = jlong(time.tv_sec);
1133   nanos = jlong(time.tv_usec) * 1000;
1134 }
1135 







1136 jlong os::javaTimeNanos() {
1137   if (os::Aix::on_pase()) {
1138 
1139     timeval time;
1140     int status = gettimeofday(&time, NULL);
1141     assert(status != -1, "PASE error at gettimeofday()");
1142     jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
1143     return 1000 * usecs;
1144 
1145   } else {
1146     // On AIX use the precision of the processor's real time clock
1147     // or time base registers.
1148     timebasestruct_t time;
1149     int rc;
1150 
1151     // If the CPU has a time register, it will be used and
1152     // we have to convert to real time first. After conversion we have the following data:
1153     // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
1154     // time.tb_low  [nanoseconds after the last full second above]
1155     // We use mread_real_time here instead of read_real_time
1156     // to ensure that we get a monotonically increasing time.
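         // Per the AIX documentation, mread_real_time() returns RTC_POWER when the
         // structure already holds real time (seconds/nanoseconds); any other return
         // value means raw time base data was read and still has to be converted
         // with time_base_to_time().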
1157     if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
1158       rc = time_base_to_time(&time, TIMEBASE_SZ);
1159       assert(rc != -1, "aix error at time_base_to_time()");
1160     }
1161     return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
1162   }
1163 }
1164 


1253 void os::die() {
1254   ::abort();
1255 }
1256 
1257 // This method is a copy of JDK's sysGetLastErrorString
1258 // from src/solaris/hpi/src/system_md.c
1259 
1260 size_t os::lasterror(char *buf, size_t len) {
1261   if (errno == 0) return 0;
1262 
1263   const char *s = ::strerror(errno);
1264   size_t n = ::strlen(s);
1265   if (n >= len) {
1266     n = len - 1;
1267   }
1268   ::strncpy(buf, s, n);
1269   buf[n] = '\0';
1270   return n;
1271 }
1272 
1273 intx os::current_thread_id() {
1274   return (intx)pthread_self();
1275 }
1276 
1277 int os::current_process_id() {
1278   return getpid();












1279 }
1280 
1281 // DLL functions
1282 
1283 const char* os::dll_file_extension() { return ".so"; }
1284 
1285 // This must be hard coded because it's the system's temporary
1286 // directory, not the java application's temp directory, a la java.io.tmpdir.
1287 const char* os::get_temp_directory() { return "/tmp"; }
1288 
1289 static bool file_exists(const char* filename) {
1290   struct stat statbuf;
1291   if (filename == NULL || strlen(filename) == 0) {
1292     return false;
1293   }
1294   return os::stat(filename, &statbuf) == 0;
1295 }
1296 
1297 bool os::dll_build_name(char* buffer, size_t buflen,
1298                         const char* pname, const char* fname) {
1299   bool retval = false;
1300   // Copied from libhpi
1301   const size_t pnamelen = pname ? strlen(pname) : 0;
1302 
1303   // Return error on buffer overflow.
1304   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1305     *buffer = '\0';
1306     return retval;
1307   }
1308 
1309   if (pnamelen == 0) {
1310     snprintf(buffer, buflen, "lib%s.so", fname);
1311     retval = true;
1312   } else if (strchr(pname, *os::path_separator()) != NULL) {
1313     int n;
1314     char** pelements = split_path(pname, &n);
1315     if (pelements == NULL) {
1316       return false;
1317     }
1318     for (int i = 0; i < n; i++) {
1319       // Really shouldn't be NULL, but check can't hurt
1320       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1321         continue; // skip the empty path values
1322       }
1323       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1324       if (file_exists(buffer)) {
1325         retval = true;
1326         break;
1327       }
1328     }
1329     // release the storage
1330     for (int i = 0; i < n; i++) {
1331       if (pelements[i] != NULL) {
1332         FREE_C_HEAP_ARRAY(char, pelements[i]);
1333       }
1334     }
1335     if (pelements != NULL) {
1336       FREE_C_HEAP_ARRAY(char*, pelements);
1337     }


1535   else st->print("%d", rlim.rlim_cur);
1536 
1537   st->print(", AS ");
1538   getrlimit(RLIMIT_AS, &rlim);
1539   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1540   else st->print("%uk", rlim.rlim_cur >> 10);
1541 
1542   // Print limits on DATA, because it limits the C-heap.
1543   st->print(", DATA ");
1544   getrlimit(RLIMIT_DATA, &rlim);
1545   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1546   else st->print("%uk", rlim.rlim_cur >> 10);
1547   st->cr();
1548 
1549   // load average
1550   st->print("load average:");
1551   double loadavg[3] = {-1.L, -1.L, -1.L};
1552   os::loadavg(loadavg, 3);
1553   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1554   st->cr();
1555 
1556   // print wpar info
1557   libperfstat::wparinfo_t wi;
1558   if (libperfstat::get_wparinfo(&wi)) {
1559     st->print_cr("wpar info");
1560     st->print_cr("name: %s", wi.name);
1561     st->print_cr("id:   %d", wi.wpar_id);
1562     st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1563   }
1564 
1565   // print partition info
1566   libperfstat::partitioninfo_t pi;
1567   if (libperfstat::get_partitioninfo(&pi)) {
1568     st->print_cr("partition info");
1569     st->print_cr(" name: %s", pi.name);
1570   }
1571 
1572 }
1573 
1574 void os::print_memory_info(outputStream* st) {
1575 
1576   st->print_cr("Memory:");
1577 
1578   st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1579     describe_pagesize(g_multipage_support.pagesize));
1580   st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1581     describe_pagesize(g_multipage_support.datapsize));
1582   st->print_cr("  Text page size:                         %s",
1583     describe_pagesize(g_multipage_support.textpsize));
1584   st->print_cr("  Thread stack page size (pthread):       %s",
1585     describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1586   st->print_cr("  Default shared memory page size:        %s",
1587     describe_pagesize(g_multipage_support.shmpsize));
1588   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1589     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1590   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1591     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1592   st->print_cr("  Multipage error: %d",
1593     g_multipage_support.error);
1594   st->cr();
1595   st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1596   // not used in OpenJDK st->print_cr("  os::stack_page_size:    %s", describe_pagesize(os::stack_page_size()));
1597 
1598   // print out LDR_CNTRL because it affects the default page sizes
1599   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1600   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1601 
1602   // Print out EXTSHM because it is an unsupported setting.
1603   const char* const extshm = ::getenv("EXTSHM");
1604   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1605   if (extshm && ((strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0))) {
1606     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1607   }
1608 
1609   // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1610   const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1611   st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1612       aixthread_guardpages ? aixthread_guardpages : "<unset>");
1613 
1614   os::Aix::meminfo_t mi;
1615   if (os::Aix::get_meminfo(&mi)) {
1616     char buffer[256] = "";  // Zero-initialize; nothing below fills it before the print_raw() call.
1617     if (os::Aix::on_aix()) {
1618       st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1619       st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1620       st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1621       st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1622     } else {
1623       // PASE - Numbers are result of QWCRSSTS; they mean:
1624       // real_total: Sum of all system pools
1625       // real_free: always 0
1626       // pgsp_total: we take the size of the system ASP
1627       // pgsp_free: size of system ASP times percentage of system ASP unused
1628       st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1629       st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1630       st->print_cr("%% system asp used : " SIZE_FORMAT,
1631         mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1632     }
1633     st->print_raw(buffer);


1634   }
1635   st->cr();
1636 
1637   // Print segments allocated with os::reserve_memory.
1638   st->print_cr("internal virtual memory regions used by vm:");
1639   vmembk_print_on(st);
1640 }
1641 
1642 // Get a string for the cpuinfo that is a summary of the cpu type
1643 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1644   // This looks good
1645   libperfstat::cpuinfo_t ci;
1646   if (libperfstat::get_cpuinfo(&ci)) {
1647     strncpy(buf, ci.version, buflen);
1648   } else {
1649     strncpy(buf, "AIX", buflen);
1650   }
1651 }
1652 
1653 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1654   st->print("CPU:");
1655   st->print("total %d", os::processor_count());
1656   // It's not safe to query number of active processors after crash.
1657   // st->print("(active %d)", os::active_processor_count());
1658   st->print(" %s", VM_Version::cpu_features());
1659   st->cr();
1660 }
1661 
1662 void os::print_siginfo(outputStream* st, void* siginfo) {

1663   os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
1664   st->cr();
1665 }
1666 
1667 static void print_signal_handler(outputStream* st, int sig,
1668                                  char* buf, size_t buflen);
1669 
1670 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1671   st->print_cr("Signal Handlers:");
1672   print_signal_handler(st, SIGSEGV, buf, buflen);
1673   print_signal_handler(st, SIGBUS , buf, buflen);
1674   print_signal_handler(st, SIGFPE , buf, buflen);
1675   print_signal_handler(st, SIGPIPE, buf, buflen);
1676   print_signal_handler(st, SIGXFSZ, buf, buflen);
1677   print_signal_handler(st, SIGILL , buf, buflen);
1678   print_signal_handler(st, SR_signum, buf, buflen);
1679   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1680   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1681   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1682   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);


1781   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1782 }
1783 
1784 void os::signal_raise(int signal_number) {
1785   ::raise(signal_number);
1786 }
1787 
1788 //
1789 // The following code is moved from os.cpp for making this
1790 // code platform specific, which it is by its very nature.
1791 //
1792 
1793 // Will be modified when max signal is changed to be dynamic
1794 int os::sigexitnum_pd() {
1795   return NSIG;
1796 }
1797 
1798 // a counter for each possible signal value
1799 static volatile jint pending_signals[NSIG+1] = { 0 };
1800 
1801 // Wrapper functions for: sem_init(), sem_post(), sem_wait()
1802 // On AIX, we use sem_init(), sem_post(), sem_wait().
1803 // On PASE, we need to use msem_lock() and msem_unlock(), because POSIX semaphores
1804 // do not seem to work at all on PASE (unimplemented, will cause SIGILL).
1805 // Note that just using the msem_..() APIs for both PASE and AIX is not an option either,
1806 // as on AIX the msem_..() calls are suspected of causing problems.
1807 static sem_t sig_sem;
1808 static msemaphore* p_sig_msem = 0;
1809 
1810 static void local_sem_init() {
1811   if (os::Aix::on_aix()) {
1812     int rc = ::sem_init(&sig_sem, 0, 0);
1813     guarantee(rc != -1, "sem_init failed");
1814   } else {
1815     // Memory semaphores must live in shared mem.
1816     guarantee0(p_sig_msem == NULL);
1817     p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
1818     guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
1819     guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
1820   }
1821 }
1822 
1823 static void local_sem_post() {
1824   static bool warn_only_once = false;
1825   if (os::Aix::on_aix()) {
1826     int rc = ::sem_post(&sig_sem);
1827     if (rc == -1 && !warn_only_once) {
1828       trcVerbose("sem_post failed (errno = %d, %s)", errno, strerror(errno));
1829       warn_only_once = true;
1830     }
1831   } else {
1832     guarantee0(p_sig_msem != NULL);
1833     int rc = ::msem_unlock(p_sig_msem, 0);
1834     if (rc == -1 && !warn_only_once) {
1835       trcVerbose("msem_unlock failed (errno = %d, %s)", errno, strerror(errno));
1836       warn_only_once = true;
1837     }
1838   }
1839 }
1840 
1841 static void local_sem_wait() {
1842   static bool warn_only_once = false;
1843   if (os::Aix::on_aix()) {
1844     int rc = ::sem_wait(&sig_sem);
1845     if (rc == -1 && !warn_only_once) {
1846       trcVerbose("sem_wait failed (errno = %d, %s)", errno, strerror(errno));
1847       warn_only_once = true;
1848     }
1849   } else {
1850     guarantee0(p_sig_msem != NULL); // must init before use
1851     int rc = ::msem_lock(p_sig_msem, 0);
1852     if (rc == -1 && !warn_only_once) {
1853       trcVerbose("msem_lock failed (errno = %d, %s)", errno, strerror(errno));
1854       warn_only_once = true;
1855     }
1856   }
1857 }
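     // Illustrative pairing of the two wrappers above (documentation sketch only;
     // both callers are defined further down in this file):
     //
     //   os::signal_notify(sig);     // bumps pending_signals[sig], then local_sem_post()
     //   int s = os::signal_wait();  // local_sem_wait(), then returns a pending signal
     //
     // The sem_ vs. msem_ distinction stays completely hidden behind these wrappers.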
1858 
1859 void os::signal_init_pd() {
1860   // Initialize signal structures
1861   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1862 
1863   // Initialize signal semaphore
1864   local_sem_init();

1865 }
1866 
1867 void os::signal_notify(int sig) {
1868   Atomic::inc(&pending_signals[sig]);
1869   local_sem_post();
1870 }
1871 
1872 static int check_pending_signals(bool wait) {
1873   Atomic::store(0, &sigint_count);
1874   for (;;) {
1875     for (int i = 0; i < NSIG + 1; i++) {
1876       jint n = pending_signals[i];
1877       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1878         return i;
1879       }
1880     }
1881     if (!wait) {
1882       return -1;
1883     }
1884     JavaThread *thread = JavaThread::current();
1885     ThreadBlockInVM tbivm(thread);
1886 
1887     bool threadIsSuspended;
1888     do {
1889       thread->set_suspend_equivalent();
1890       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1891 
1892       local_sem_wait();
1893 
1894       // were we externally suspended while we were waiting?
1895       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1896       if (threadIsSuspended) {
1897         //
1898         // The semaphore has been incremented, but while we were waiting
1899         // another thread suspended us. We don't want to continue running
1900         // while suspended because that would surprise the thread that
1901         // suspended us.
1902         //
1903 
1904         local_sem_post();
1905 
1906         thread->java_suspend_self();
1907       }
1908     } while (threadIsSuspended);
1909   }
1910 }
1911 
1912 int os::signal_lookup() {
1913   return check_pending_signals(false);
1914 }
1915 
1916 int os::signal_wait() {
1917   return check_pending_signals(true);
1918 }
1919 
1920 ////////////////////////////////////////////////////////////////////////////////
1921 // Virtual Memory
1922 
1923 // We need to keep some small, simple bookkeeping for os::reserve_memory and friends.
1924 


1937   }
1938 
1939   bool contains_range(char* p, size_t s) const {
1940     return contains_addr(p) && contains_addr(p + s - 1);
1941   }
1942 
1943   void print_on(outputStream* os) const {
1944     os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1945       " bytes, %d %s pages), %s",
1946       addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1947       (type == VMEM_SHMATED ? "shmat" : "mmap")
1948     );
1949   }
1950 
1951   // Check that range is a sub range of memory block (or equal to memory block);
1952   // also check that range is fully page aligned to the page size of the block.
1953   void assert_is_valid_subrange(char* p, size_t s) const {
1954     if (!contains_range(p, s)) {
1955       fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1956               "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
1957               p, p + s, addr, addr + size);
1958       guarantee0(false);
1959     }
1960     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1961       fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1962               " aligned to pagesize (%lu)\n", p, p + s, (unsigned long) pagesize);
1963       guarantee0(false);
1964     }
1965   }
1966 };
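     // Illustrative use of this bookkeeping further down in this file (documentation
     // sketch only; the page size and type arguments are just examples):
     //
     //   vmembk_add(addr, size, SIZE_64K, VMEM_SHMATED);  // record a new reservation
     //   ...
     //   vmembk_t* const vmi = vmembk_find(addr);         // e.g. in os::pd_commit_memory
     //   guarantee0(vmi);
     //   vmi->assert_is_valid_subrange(addr, size);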
1967 
1968 static struct {
1969   vmembk_t* first;
1970   MiscUtils::CritSect cs;
1971 } vmem;
1972 
1973 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1974   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1975   assert0(p);
1976   if (p) {
1977     MiscUtils::AutoCritSect lck(&vmem.cs);
1978     p->addr = addr; p->size = size;
1979     p->pagesize = pagesize;
1980     p->type = type;
1981     p->next = vmem.first;
1982     vmem.first = p;


2039   // BRK because that may cause malloc OOM.
2040   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2041     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2042       "Will attach anywhere.", requested_addr);
2043     // Act like the OS refused to attach there.
2044     requested_addr = NULL;
2045   }
2046 
2047   // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
2048   // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
2049   if (os::Aix::on_pase_V5R4_or_older()) {
2050     ShouldNotReachHere();
2051   }
2052 
2053   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
2054   const size_t size = align_size_up(bytes, SIZE_64K);
2055 
2056   // Reserve the shared segment.
2057   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
2058   if (shmid == -1) {
2059     trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
2060     return NULL;
2061   }
2062 
2063   // Important note:
2064   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
2065 // We must remove it from the system right after attaching it. System V shm segments are global and
2066   // survive the process.
2067   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
2068 
2069   struct shmid_ds shmbuf;
2070   memset(&shmbuf, 0, sizeof(shmbuf));
2071   shmbuf.shm_pagesize = SIZE_64K;
2072   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
2073     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
2074                size / SIZE_64K, errno);
2075     // I want to know if this ever happens.
2076     assert(false, "failed to set page size for shmat");
2077   }
2078 
2079   // Now attach the shared segment.
2080   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
2081   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
2082   // were not a segment boundary.
2083   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
2084   const int errno_shmat = errno;
2085 
2086   // (A) Right after shmat and before handing shmat errors delete the shm segment.
2087   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2088     trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2089     assert(false, "failed to remove shared memory segment!");
2090   }
2091 
2092   // Handle shmat error. If we failed to attach, just return.
2093   if (addr == (char*)-1) {
2094     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
2095     return NULL;
2096   }
2097 
2098   // Just for info: query the real page size. In case setting the page size did not
2099   // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
2100   const size_t real_pagesize = os::Aix::query_pagesize(addr);
2101   if (real_pagesize != shmbuf.shm_pagesize) {
2102     trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
2103   }
2104 
2105   if (addr) {
2106     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
2107       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
2108   } else {


2133   } else {
2134     trcVerbose("ok.");
2135     rc = true;
2136   }
2137   return rc;
2138 }
2139 
2140 static bool uncommit_shmated_memory(char* addr, size_t size) {
2141   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2142     addr, addr + size - 1);
2143 
2144   const bool rc = my_disclaim64(addr, size);
2145 
2146   if (!rc) {
2147     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2148     return false;
2149   }
2150   return true;
2151 }
2152 
2153 ////////////////////////////////  mmap-based routines /////////////////////////////////
2154 
2155 // Reserve memory via mmap.
2156 // If <requested_addr> is given, an attempt is made to attach at the given address.
2157 // Failing that, memory is allocated at any address.
2158 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2159 // allocate at an address aligned with the given alignment. Failing that, memory
2160 // is allocated at any address.
2161 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2162   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2163     "alignment_hint " UINTX_FORMAT "...",
2164     bytes, requested_addr, alignment_hint);
2165 
2166   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2167   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size())) {
2168     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2169     return NULL;
2170   }
2171 
2172   // We must prevent anyone from attaching too close to the
2173   // BRK because that may cause malloc OOM.
2174   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {


2280 
2281   assert0(is_aligned_to(addr, os::vm_page_size()));
2282   assert0(is_aligned_to(size, os::vm_page_size()));
2283 
2284   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2285     addr, addr + size - 1);
2286   bool rc = false;
2287 
2288   // Uncommit mmap memory with msync MS_INVALIDATE.
2289   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2290     trcVerbose("failed (%d)\n", errno);
2291     rc = false;
2292   } else {
2293     trcVerbose("ok.");
2294     rc = true;
2295   }
2296 
2297   return rc;
2298 }
2299 



2300 int os::vm_page_size() {
2301   // Seems redundant as all get out.
2302   assert(os::Aix::page_size() != -1, "must call os::init");
2303   return os::Aix::page_size();
2304 }
2305 
2306 // Aix allocates memory by pages.
2307 int os::vm_allocation_granularity() {
2308   assert(os::Aix::page_size() != -1, "must call os::init");
2309   return os::Aix::page_size();
2310 }
2311 
2312 #ifdef PRODUCT
2313 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2314                                     int err) {
2315   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2316           ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2317           strerror(err), err);
2318 }
2319 #endif
2320 
2321 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2322                                   const char* mesg) {
2323   assert(mesg != NULL, "mesg must be specified");
2324   if (!pd_commit_memory(addr, size, exec)) {
2325     // Add extra info in product mode for vm_exit_out_of_memory():
2326     PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2327     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2328   }
2329 }
2330 
2331 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2332 
2333   assert0(is_aligned_to(addr, os::vm_page_size()));
2334   assert0(is_aligned_to(size, os::vm_page_size()));
2335 
2336   vmembk_t* const vmi = vmembk_find(addr);
2337   guarantee0(vmi);
2338   vmi->assert_is_valid_subrange(addr, size);
2339 
2340   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2341 
2342   if (UseExplicitCommit) {
2343     // AIX commits memory on touch. So, touch all pages to be committed.
2344     for (char* p = addr; p < (addr + size); p += SIZE_4K) {
2345       *p = '\0';
2346     }
2347   }
2348 
2349   return true;
2350 }
2351 
2352 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2353   return pd_commit_memory(addr, size, exec);
2354 }
2355 
2356 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2357                                   size_t alignment_hint, bool exec,
2358                                   const char* mesg) {
2359   // Alignment_hint is ignored on this OS.
2360   pd_commit_memory_or_exit(addr, size, exec, mesg);
2361 }
2362 
2363 bool os::pd_uncommit_memory(char* addr, size_t size) {
2364   assert0(is_aligned_to(addr, os::vm_page_size()));
2365   assert0(is_aligned_to(size, os::vm_page_size()));
2366 
2367   // Dynamically do different things for mmap/shmat.
2368   const vmembk_t* const vmi = vmembk_find(addr);
2369   guarantee0(vmi);
2370   vmi->assert_is_valid_subrange(addr, size);
2371 
2372   if (vmi->type == VMEM_SHMATED) {
2373     return uncommit_shmated_memory(addr, size);
2374   } else {
2375     return uncommit_mmaped_memory(addr, size);
2376   }
2377 }
2378 
2379 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2380   // Do not call this; no need to commit stack pages on AIX.
2381   ShouldNotReachHere();
2382   return true;
2383 }
2384 
2385 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2386   // Do not call this; no need to commit stack pages on AIX.
2387   ShouldNotReachHere();
2388   return true;
2389 }


2447   const size_t alignment_hint0 =
2448     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2449 
2450   // In 4K mode always use mmap.
2451   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2452   if (os::vm_page_size() == SIZE_4K) {
2453     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2454   } else {
2455     if (bytes >= Use64KPagesThreshold) {
2456       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2457     } else {
2458       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2459     }
2460   }
2461 }
2462 
2463 bool os::pd_release_memory(char* addr, size_t size) {
2464 
2465   // Dynamically do different things for mmap/shmat.
2466   vmembk_t* const vmi = vmembk_find(addr);
2467   guarantee0(vmi);
2468 
2469   // Always round to os::vm_page_size(), which may be larger than 4K.
2470   size = align_size_up(size, os::vm_page_size());
2471   addr = (char *)align_ptr_up(addr, os::vm_page_size());
2472 
2473   bool rc = false;
2474   bool remove_bookkeeping = false;
2475   if (vmi->type == VMEM_SHMATED) {
2476     // For shmatted memory, we do:
2477     // - If user wants to release the whole range, release the memory (shmdt).
2478     // - If user only wants to release a partial range, uncommit (disclaim) that
2479     //   range. That way, at least, we do not use the memory anymore (but it still
2480     //   occupies page table space).
2481     vmi->assert_is_valid_subrange(addr, size);
2482     if (addr == vmi->addr && size == vmi->size) {
2483       rc = release_shmated_memory(addr, size);
2484       remove_bookkeeping = true;
2485     } else {
2486       rc = uncommit_shmated_memory(addr, size);
2487     }


2523   // mprotect success check
2524   //
2525   // Mprotect said it changed the protection but can I believe it?
2526   //
2527   // To be sure I need to check the protection afterwards. Try to
2528   // read from protected memory and check whether that causes a segfault.
2529   //
2530   if (!os::Aix::xpg_sus_mode()) {
2531 
2532     if (CanUseSafeFetch32()) {
2533 
2534       const bool read_protected =
2535         (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2536          SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2537 
2538       if (prot & PROT_READ) {
2539         rc = !read_protected;
2540       } else {
2541         rc = read_protected;
2542       }
2543 
2544       if (!rc) {
2545         if (os::Aix::on_pase()) {
2546           // There is an issue on older PASE systems where mprotect() will return success but the
2547           // memory will not be protected.
2548           // This has nothing to do with the problem of using mprotect() on SPEC1170 incompatible
2549           // machines; we only see it rarely, when using mprotect() to protect the guard page of
2550           // a stack. It is an OS error.
2551           //
2552           // A valid strategy is just to try again. This usually works. :-/
2553 
2554           MiscUtils::sleep_ms(1);
2555           if (::mprotect(addr, size, prot) == 0) {
2556             const bool read_protected_2 =
2557               (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2558               SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2559             rc = (prot & PROT_READ) ? !read_protected_2 : read_protected_2;
2560           }
2561         }
2562       }


2563     }
2564   }
2565 
2566   assert(rc == true, "mprotect failed.");
2567 
2568   return rc;
2569 }
2570 
2571 // Set protections specified
2572 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2573   unsigned int p = 0;
2574   switch (prot) {
2575   case MEM_PROT_NONE: p = PROT_NONE; break;
2576   case MEM_PROT_READ: p = PROT_READ; break;
2577   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2578   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2579   default:
2580     ShouldNotReachHere();
2581   }
2582   // is_committed is unused.
2583   return checked_mprotect(addr, size, p);
2584 }
2585 
2586 bool os::guard_memory(char* addr, size_t size) {
2587   return checked_mprotect(addr, size, PROT_NONE);
2588 }
2589 
2590 bool os::unguard_memory(char* addr, size_t size) {
2591   return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2592 }
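     // Illustrative pairing of the protection helpers above (documentation sketch
     // only; 'page' stands for any page-aligned address from os::reserve_memory):
     //
     //   os::guard_memory(page, os::vm_page_size());    // PROT_NONE: any access faults
     //   ...
     //   os::unguard_memory(page, os::vm_page_size());  // back to read|write|exec
     //
     // Both calls go through checked_mprotect(), which re-verifies the protection.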
2593 
2594 // Large page support
2595 
2596 static size_t _large_page_size = 0;
2597 
2598 // Enable large page support if OS allows that.
2599 void os::large_page_init() {
2600   return; // Nothing to do. See query_multipage_support and friends.
2601 }
2602 
2603 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2604   // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
2605   // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
2606   // so this is not needed.
2607   assert(false, "should not be called on AIX");
2608   return NULL;
2609 }
2610 
2611 bool os::release_memory_special(char* base, size_t bytes) {
2612   // Detaching the SHM segment will also delete it, see reserve_memory_special().
2613   Unimplemented();
2614   return false;
2615 }
2616 
2617 size_t os::large_page_size() {
2618   return _large_page_size;
2619 }
2620 
2621 bool os::can_commit_large_page_memory() {
2622   // Does not matter, we do not support huge pages.
2623   return false;
2624 }
2625 
2626 bool os::can_execute_large_page_memory() {
2627   // Does not matter, we do not support huge pages.
2628   return false;


3040 // they typically will bring down the process immediately.
3041 bool unblock_program_error_signals() {
3042   sigset_t set;
3043   ::sigemptyset(&set);
3044   ::sigaddset(&set, SIGILL);
3045   ::sigaddset(&set, SIGBUS);
3046   ::sigaddset(&set, SIGFPE);
3047   ::sigaddset(&set, SIGSEGV);
3048   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
3049 }
3050 
3051 // Renamed from 'signalHandler' to avoid collision with other shared libs.
3052 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
3053   assert(info != NULL && uc != NULL, "it must be old kernel");
3054 
3055   // Never leave program error signals blocked;
3056   // on all our platforms they would bring down the process immediately when
3057   // getting raised while being blocked.
3058   unblock_program_error_signals();
3059 
3060   int orig_errno = errno;  // Preserve errno value over signal handler.
3061   JVM_handle_aix_signal(sig, info, uc, true);
3062   errno = orig_errno;
3063 }
3064 
3065 // This boolean allows users to forward their own non-matching signals
3066 // to JVM_handle_aix_signal, harmlessly.
3067 bool os::Aix::signal_handlers_are_installed = false;
3068 
3069 // For signal-chaining
3070 struct sigaction sigact[NSIG];
3071 sigset_t sigs;
3072 bool os::Aix::libjsig_is_loaded = false;
3073 typedef struct sigaction *(*get_signal_t)(int);
3074 get_signal_t os::Aix::get_signal_action = NULL;
3075 
3076 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
3077   struct sigaction *actp = NULL;
3078 
3079   if (libjsig_is_loaded) {
3080     // Retrieve the old signal handler from libjsig
3081     actp = (*get_signal_action)(sig);
3082   }


3164 int os::Aix::get_our_sigflags(int sig) {
3165   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3166   return sigflags[sig];
3167 }
3168 
3169 void os::Aix::set_our_sigflags(int sig, int flags) {
3170   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3171   if (sig > 0 && sig < NSIG) {
3172     sigflags[sig] = flags;
3173   }
3174 }
3175 
3176 void os::Aix::set_signal_handler(int sig, bool set_installed) {
3177   // Check for overwrite.
3178   struct sigaction oldAct;
3179   sigaction(sig, (struct sigaction*)NULL, &oldAct);
3180 
3181   void* oldhand = oldAct.sa_sigaction
3182     ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3183     : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);

3184   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3185       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3186       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3187     if (AllowUserSignalHandlers || !set_installed) {
3188       // Do not overwrite; user takes responsibility to forward to us.
3189       return;
3190     } else if (UseSignalChaining) {
3191       // save the old handler in jvm
3192       save_preinstalled_handler(sig, oldAct);
3193       // libjsig also interposes the sigaction() call below and saves the
3194       // old sigaction on its own.
3195     } else {
3196       fatal("Encountered unexpected pre-existing sigaction handler "
3197             "%#lx for signal %d.", (long)oldhand, sig);
3198     }
3199   }
3200 
3201   struct sigaction sigAct;
3202   sigfillset(&(sigAct.sa_mask));
3203   if (!set_installed) {
3204     sigAct.sa_handler = SIG_DFL;
3205     sigAct.sa_flags = SA_RESTART;
3206   } else {

3207     sigAct.sa_sigaction = javaSignalHandler;
3208     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3209   }
3210   // Save the flags that we set ourselves.
3211   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3212   sigflags[sig] = sigAct.sa_flags;
3213 
3214   int ret = sigaction(sig, &sigAct, &oldAct);
3215   assert(ret == 0, "check");
3216 
3217   void* oldhand2 = oldAct.sa_sigaction
3218                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3219                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3220   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3221 }
3222 
3223 // install signal handlers for signals that HotSpot needs to
3224 // handle in order to support Java-level exception handling.
3225 void os::Aix::install_signal_handlers() {
3226   if (!signal_handlers_are_installed) {


3378     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3379     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3380     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3381     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3382   }
3383 
3384   DO_SIGNAL_CHECK(SR_signum);
3385 }
3386 
3387 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3388 
3389 static os_sigaction_t os_sigaction = NULL;
3390 
3391 void os::Aix::check_signal_handler(int sig) {
3392   char buf[O_BUFLEN];
3393   address jvmHandler = NULL;
3394 
3395   struct sigaction act;
3396   if (os_sigaction == NULL) {
3397     // only trust the default sigaction, in case it has been interposed
3398     os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
3399     if (os_sigaction == NULL) return;
3400   }
3401 
3402   os_sigaction(sig, (struct sigaction*)NULL, &act);
3403 
3404   address thisHandler = (act.sa_flags & SA_SIGINFO)
3405     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3406     : CAST_FROM_FN_PTR(address, act.sa_handler);
3407 
3408   switch(sig) {
3409   case SIGSEGV:
3410   case SIGBUS:
3411   case SIGFPE:
3412   case SIGPIPE:
3413   case SIGILL:
3414   case SIGXFSZ:

3415     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3416     break;
3417 
3418   case SHUTDOWN1_SIGNAL:
3419   case SHUTDOWN2_SIGNAL:
3420   case SHUTDOWN3_SIGNAL:
3421   case BREAK_SIGNAL:
3422     jvmHandler = (address)user_handler();
3423     break;
3424 
3425   default:
3426     if (sig == SR_signum) {
3427       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3428     } else {
3429       return;
3430     }
3431     break;
3432   }
3433 
3434   if (thisHandler != jvmHandler) {


3466     }
3467     return buf;
3468   } else {
3469     return NULL;
3470   }
3471 }
3472 
3473 // To install functions for atexit system call
3474 extern "C" {
3475   static void perfMemory_exit_helper() {
3476     perfMemory_exit();
3477   }
3478 }
3479 
3480 // This is called _before_ the most of global arguments have been parsed.
3481 void os::init(void) {
3482   // This is basic, we want to know if that ever changes.
3483   // (The shared memory boundary is supposed to be 256M aligned.)
3484   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3485 
3486   // Record process break at startup.
3487   g_brk_at_startup = (address) ::sbrk(0);
3488   assert(g_brk_at_startup != (address) -1, "sbrk failed");
3489 
3490   // First off, we need to know whether we run on AIX or PASE, and
3491   // the OS level we run on.
3492   os::Aix::initialize_os_info();
3493 
3494   // Scan environment (SPEC1170 behaviour, etc).
3495   os::Aix::scan_environment();
3496 
3497   // Probe multipage support.
3498   query_multipage_support();
3499 
3500   // Act like we only have one page size by eliminating corner cases which
3501   // we did not support very well anyway.
3502   // We have two input conditions:
3503   // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3504   //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3505   //    setting.
3506   //    Data segment page size is important for us because it defines the thread stack page
3507   //    size, which is needed for guard page handling, stack banging etc.
3508   // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3509   //    and should be allocated with 64k pages.
3510   //
3511   // So, we do the following:
3512   // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
3513   // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
3514   // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
3515   // 64k          no              --- AIX 5.2 ? ---
3516   // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)
3517 


3546       FLAG_SET_ERGO(bool, Use64KPages, false);
3547     }
3548   } else {
3549     // datapsize = 64k. Data segment, thread stacks are 64k paged.
3550     // This normally means that we can allocate 64k pages dynamically.
3551     // (There is one special case where this may be false: EXTSHM=on,
3552     // but we decided not to support that mode.)
3553     assert0(g_multipage_support.can_use_64K_pages);
3554     Aix::_page_size = SIZE_64K;
3555     trcVerbose("64K page mode");
3556     FLAG_SET_ERGO(bool, Use64KPages, true);
3557   }
3558 
3559   // Short-wire stack page size to base page size; if that works, we just remove
3560   // that stack page size altogether.
3561   Aix::_stack_page_size = Aix::_page_size;
3562 
3563   // For now UseLargePages is just ignored.
3564   FLAG_SET_ERGO(bool, UseLargePages, false);
3565   _page_sizes[0] = 0;
3566   _large_page_size = -1;
3567 
3568   // debug trace
3569   trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));
3570 
3571   // Next, we need to initialize libo4 and libperfstat libraries.
3572   if (os::Aix::on_pase()) {
3573     os::Aix::initialize_libo4();
3574   } else {
3575     os::Aix::initialize_libperfstat();
3576   }
3577 
3578   // Reset the perfstat information provided by ODM.
3579   if (os::Aix::on_aix()) {
3580     libperfstat::perfstat_reset();
3581   }
3582 
3583   // Now initialize basic system properties. Note that for some of the values we
3584   // need libperfstat etc.
3585   os::Aix::initialize_system_info();
3586 


3587   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3588 
3589   init_random(1234567);
3590 
3591   ThreadCritical::initialize();
3592 
3593   // Main_thread points to the aboriginal thread.
3594   Aix::_main_thread = pthread_self();
3595 
3596   initial_time_count = os::elapsed_counter();
3597 
3598   // If the pagesize of the VM is greater than 8K determine the appropriate
3599   // number of initial guard pages. The user can change this with the
3600   // command line arguments, if needed.
3601   if (vm_page_size() > (int)Aix::vm_default_page_size()) {
3602     StackYellowPages = 1;
3603     StackRedPages = 1;
3604     StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
3605   }
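       // Worked example for the rescaling above: with a 4K default page size, a 64K
       // vm page size and StackShadowPages == 20, 20 * 4K = 80K rounds up to the next
       // 64K multiple (128K), i.e. StackShadowPages becomes 2.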
3606 }
3607 
3608 // This is called _after_ the global arguments have been parsed.
3609 jint os::init_2(void) {
3610 
3611   if (os::Aix::on_pase()) {
3612     trcVerbose("Running on PASE.");
3613   } else {
3614     trcVerbose("Running on AIX (not PASE).");
3615   }
3616 
3617   trcVerbose("processor count: %d", os::_processor_count);
3618   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3619 
3620   // Initially build up the loaded dll map.
3621   LoadedLibraries::reload();
3622   if (Verbose) {
3623     trcVerbose("Loaded Libraries: ");
3624     LoadedLibraries::print(tty);
3625   }
3626 
3627   const int page_size = Aix::page_size();
3628   const int map_size = page_size;
3629 
3630   address map_address = (address) MAP_FAILED;
3631   const int prot  = PROT_READ;
3632   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3633 
3634   // Use optimized addresses for the polling page,
3635   // e.g. map it to a special 32-bit address.
3636   if (OptimizePollingPageLocation) {
3637     // architecture-specific list of address wishes:
3638     address address_wishes[] = {
3639       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3640       // PPC64: all address wishes are non-negative 32 bit values where
3641       // the lower 16 bits are all zero. We can load these addresses
3642       // with a single ppc_lis instruction.
3643       (address) 0x30000000, (address) 0x31000000,
3644       (address) 0x32000000, (address) 0x33000000,
3645       (address) 0x40000000, (address) 0x41000000,
3646       (address) 0x42000000, (address) 0x43000000,
3647       (address) 0x50000000, (address) 0x51000000,
3648       (address) 0x52000000, (address) 0x53000000,
3649       (address) 0x60000000, (address) 0x61000000,
3650       (address) 0x62000000, (address) 0x63000000
3651     };
3652     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3653 
3654     // iterate over the list of address wishes:
3655     for (int i=0; i<address_wishes_length; i++) {
3656       // Try to map with current address wish.
3657       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3658       // fail if the address is already mapped.
3659       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3660                                      map_size, prot,
3661                                      flags | MAP_FIXED,
3662                                      -1, 0);
3663       trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",

3664                    address_wishes[i], map_address + (ssize_t)page_size);

3665 
3666       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3667         // Map succeeded and map_address is at wished address, exit loop.
3668         break;
3669       }
3670 
3671       if (map_address != (address) MAP_FAILED) {
3672         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3673         ::munmap(map_address, map_size);
3674         map_address = (address) MAP_FAILED;
3675       }
3676       // Map failed, continue loop.
3677     }
3678   } // end OptimizePollingPageLocation
3679 
3680   if (map_address == (address) MAP_FAILED) {
3681     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3682   }
3683   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3684   os::set_polling_page(map_address);
3685 
3686   if (!UseMembar) {
3687     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3688     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3689     os::set_memory_serialize_page(mem_serialize_page);
3690 
3691     trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
3692         mem_serialize_page, mem_serialize_page + Aix::page_size(),
3693         Aix::page_size(), Aix::page_size());


3694   }
3695 
3696   // initialize suspend/resume support - must do this before signal_sets_init()
3697   if (SR_initialize() != 0) {
3698     perror("SR_initialize failed");
3699     return JNI_ERR;
3700   }
3701 
3702   Aix::signal_sets_init();
3703   Aix::install_signal_handlers();
3704 
3705   // Check minimum allowable stack size for thread creation and to initialize
3706   // the java system classes, including StackOverflowError - depends on page
3707   // size. Add a page for compiler2 recursion in main thread.
3708   // Add in 2*BytesPerWord times page size to account for VM stack during
3709   // class initialization depending on 32 or 64 bit VM.
3710   os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3711             (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
3712                      (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
3713 
3714   os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
3715 
3716   size_t threadStackSizeInBytes = ThreadStackSize * K;
3717   if (threadStackSizeInBytes != 0 &&
3718       threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3719     tty->print_cr("\nThe stack size specified is too small, "
3720                   "Specify at least %dk",
3721                   os::Aix::min_stack_allowed / K);
3722     return JNI_ERR;
3723   }
3724 
3725   // Make the stack size a multiple of the page size so that
3726   // the yellow/red zones can be guarded.
3727   // Note that this can be 0, if no default stacksize was set.
3728   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3729 
3730   if (UseNUMA) {
3731     UseNUMA = false;
3732     warning("NUMA optimizations are not available on this OS.");
3733   }
3734 
3735   if (MaxFDLimit) {
3736     // Set the number of file descriptors to max. Print an error
3737     // if getrlimit/setrlimit fails, but continue regardless.
3738     struct rlimit nbr_files;
3739     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3740     if (status != 0) {
3741       if (PrintMiscellaneous && (Verbose || WizardMode))
3742         perror("os::init_2 getrlimit failed");
3743     } else {
3744       nbr_files.rlim_cur = nbr_files.rlim_max;
3745       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3746       if (status != 0) {
3747         if (PrintMiscellaneous && (Verbose || WizardMode))
3748           perror("os::init_2 setrlimit failed");
3749       }
3750     }
3751   }
3752 
3753   if (PerfAllowAtExitRegistration) {
3754     // Only register atexit functions if PerfAllowAtExitRegistration is set.
3755     // At exit functions can be delayed until process exit time, which
3756     // can be problematic for embedded VM situations. Embedded VMs should
3757     // call DestroyJavaVM() to assure that VM resources are released.
3758 
3759     // Note: perfMemory_exit_helper atexit function may be removed in
3760     // the future if the appropriate cleanup code can be added to the
3761     // VM_Exit VMOperation's doit method.
3762     if (atexit(perfMemory_exit_helper) != 0) {
3763       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3764     }
3765   }
3766 
3767   return JNI_OK;
3768 }
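
// Illustrative sketch (hypothetical helper, not part of the original file and
// not referenced anywhere): the RLIMIT_NOFILE handling in os::init_2() above
// is the usual POSIX getrlimit()/setrlimit() pattern, assuming the
// <sys/resource.h> include at the top of this file.
static bool raise_fd_limit_sketch() {
  struct rlimit nbr_files;
  if (::getrlimit(RLIMIT_NOFILE, &nbr_files) != 0) {
    return false;                            // query failed; keep the current limit
  }
  nbr_files.rlim_cur = nbr_files.rlim_max;   // raise the soft limit to the hard limit
  return ::setrlimit(RLIMIT_NOFILE, &nbr_files) == 0;
}
// os::init_2() treats a failure of this step as non-fatal; it only reports it
// when PrintMiscellaneous is on.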
3769 
3770 // Mark the polling page as unreadable
3771 void os::make_polling_page_unreadable(void) {
3772   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3773     fatal("Could not disable polling page");
3774   }
3775 }
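
// Illustrative sketch (hypothetical helper, not part of the original file):
// guarding and unguarding the polling page as done above conceptually reduces,
// on POSIX systems, to an mprotect() call. PROT_NONE makes every access fault,
// which is what the safepoint polling mechanism relies on.
static bool protect_polling_page_sketch(char* addr, size_t size, bool readable) {
  const int prot = readable ? PROT_READ : PROT_NONE;
  return ::mprotect(addr, size, prot) == 0;
}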


3835     // NULL context is unexpected, double-check this is the VMThread.
3836     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3837   }
3838 }
3839 
3840 // Suspends the target using the signal mechanism and then grabs the PC before
3841 // resuming the target. Used by the flat-profiler only
3842 ExtendedPC os::get_thread_pc(Thread* thread) {
3843   // Make sure that it is called by the watcher for the VMThread.
3844   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3845   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3846 
3847   PcFetcher fetcher(thread);
3848   fetcher.run();
3849   return fetcher.result();
3850 }
3851 
3852 ////////////////////////////////////////////////////////////////////////////////
3853 // debug support
3854 










3855 bool os::find(address addr, outputStream* st) {
3856 
3857   st->print(PTR_FORMAT ": ", addr);
3858 
3859   loaded_module_t lm;
3860   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3861       LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3862     st->print("%s", lm.path);
3863     return true;
3864   }
3865 
3866   return false;
3867 }
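
// Usage sketch (illustrative only, not code from this file): os::find() prints
// the module path when the address falls into the text or data segment of a
// loaded module, and returns false otherwise.
static void print_address_info_sketch(address addr, outputStream* st) {
  if (os::find(addr, st)) {
    st->cr();
  } else {
    st->print_cr(" (not found in any loaded module)");
  }
}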
3868 
3869 ////////////////////////////////////////////////////////////////////////////////
3870 // misc
3871 
3872 // This does not do anything on Aix. This is basically a hook for being
3873 // able to use structured exception handling (thread-local exception filters)
3874 // on, e.g., Win32.


4198   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4199   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4200   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4201 }
4202 
4203 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4204   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4205   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4206   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4207   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4208 }
4209 
4210 bool os::is_thread_cpu_time_supported() {
4211   return true;
4212 }
4213 
4214 // System loadavg support. Returns -1 if load average cannot be obtained.
4215 // For now just return the system wide load average (no processor sets).
4216 int os::loadavg(double values[], int nelem) {
4217 


4218   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4219   guarantee(values, "argument error");
4220 
4221   if (os::Aix::on_pase()) {
4222 
4223     // AS/400 PASE: use libo4 porting library
4224     double v[3] = { 0.0, 0.0, 0.0 };
4225 
4226     if (libo4::get_load_avg(v, v + 1, v + 2)) {
4227       for (int i = 0; i < nelem; i ++) {
4228         values[i] = v[i];
4229       }
4230       return nelem;
4231     } else {
4232       return -1;
4233     }
4234 
4235   } else {
4236 
4237     // AIX: use libperfstat
4238     libperfstat::cpuinfo_t ci;
4239     if (libperfstat::get_cpuinfo(&ci)) {






4240       for (int i = 0; i < nelem; i++) {
4241         values[i] = ci.loadavg[i];
4242       }
4243     } else {
4244       return -1;
4245     }
4246     return nelem;
4247   }
4248 }
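
// Usage sketch (illustrative only, not referenced elsewhere in this file): a
// caller asks for up to three samples - the 1, 5 and 15 minute averages - and
// a negative return value means the load average is not available.
static void print_loadavg_sketch(outputStream* st) {
  double avg[3];
  const int n = os::loadavg(avg, 3);
  if (n < 0) {
    st->print_cr("load average not available");
    return;
  }
  for (int i = 0; i < n; i++) {
    st->print("%3.2f ", avg[i]);
  }
  st->cr();
}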
4249 
4250 void os::pause() {
4251   char filename[MAX_PATH];
4252   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4253     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4254   } else {
4255     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4256   }
4257 
4258   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4259   if (fd != -1) {


4269 }
4270 
4271 bool os::Aix::is_primordial_thread() {
4272   if (pthread_self() == (pthread_t)1) {
4273     return true;
4274   } else {
4275     return false;
4276   }
4277 }
4278 
4279 // OS recognition (PASE/AIX, OS level). Call this before calling any of
4280 // the static functions Aix::on_pase() and Aix::os_version().
4281 void os::Aix::initialize_os_info() {
4282 
4283   assert(_on_pase == -1 && _os_version == -1, "already called.");
4284 
4285   struct utsname uts;
4286   memset(&uts, 0, sizeof(uts));
4287   strcpy(uts.sysname, "?");
4288   if (::uname(&uts) == -1) {
4289     trcVerbose("uname failed (%d)", errno);
4290     guarantee(0, "Could not determine whether we run on AIX or PASE");
4291   } else {
4292     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4293                "node \"%s\" machine \"%s\"\n",
4294                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4295     const int major = atoi(uts.version);
4296     assert(major > 0, "invalid OS version");
4297     const int minor = atoi(uts.release);
4298     assert(minor > 0, "invalid OS release");
4299     _os_version = (major << 8) | minor;
4300     if (strcmp(uts.sysname, "OS400") == 0) {
4301       // We run on AS/400 PASE. We do not support versions older than V5R4M0.
4302       _on_pase = 1;
4303       if (_os_version < 0x0504) {
4304         trcVerbose("OS/400 releases older than V5R4M0 not supported.");
4305         assert(false, "OS/400 release too old.");
4306       } else {
4307         trcVerbose("We run on OS/400 (pase) V%dR%d", major, minor);
4308       }
4309     } else if (strcmp(uts.sysname, "AIX") == 0) {
4310       // We run on AIX. We do not support versions older than AIX 5.3.
4311       _on_pase = 0;
4312       if (_os_version < 0x0503) {
4313         trcVerbose("AIX release older than AIX 5.3 not supported.");
4314         assert(false, "AIX release too old.");
4315       } else {
4316         trcVerbose("We run on AIX %d.%d", major, minor);
4317       }
4318     } else {
4319       assert(false, "unknown OS");
4320     }
4321   }
4322 
4323   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4324 } // end: os::Aix::initialize_os_info()
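
// Illustrative helper (not used elsewhere): the packing above encodes a release
// as (major << 8) | minor, so AIX 5.3 becomes 0x0503, OS/400 V5R4 becomes
// 0x0504, and OS/400 V7R1 becomes 0x0701 - the value the LDR_CNTRL/TEXTPSIZE
// check in scan_environment() below compares against.
static int encode_os_version_sketch(int major, int minor) {
  return (major << 8) | minor;
}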
4325 
4326 // Scan environment for important settings which might affect the VM.
4327 // Trace out settings. Warn about invalid settings and/or correct them.
4328 //
4329 // Must run after os::Aix::initialize_os_info().
4330 void os::Aix::scan_environment() {
4331 
4332   char* p;
4333   int rc;
4334 
4335   // Warn explicitly if EXTSHM=ON is used. That switch changes how
4336   // System V shared memory behaves. One effect is that page size of
4337   // shared memory cannot be changed dynamically, effectively preventing
4338   // large pages from working.
4339   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4340   // recommendation is (in OSS notes) to switch it off.
4341   p = ::getenv("EXTSHM");
4342   trcVerbose("EXTSHM=%s.", p ? p : "<unset>");


4343   if (p && strcasecmp(p, "ON") == 0) {

4344     _extshm = 1;
4345     trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
4346     if (!AllowExtshm) {
4347       // We allow under certain conditions the user to continue. However, we want this
4348       // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
4349       // that the VM is not able to allocate 64k pages for the heap.
4350       // We do not want to run with reduced performance.
4351       vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
4352     }
4353   } else {
4354     _extshm = 0;
4355   }
4356 
4357   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4358   // Not tested, not supported.
4359   //
4360   // Note that it might be worth the trouble to test and to require it, if only to
4361   // get useful return codes for mprotect.
4362   //
4363   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4364   // exec() ? before loading the libjvm ? ....)
4365   p = ::getenv("XPG_SUS_ENV");
4366   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4367   if (p && strcmp(p, "ON") == 0) {
4368     _xpg_sus_mode = 1;
4369     trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
4370     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4371     // clobber address ranges. If we ever want to support that, we have to do some
4372     // testing first.
4373     guarantee(false, "XPG_SUS_ENV=ON not supported");
4374   } else {
4375     _xpg_sus_mode = 0;
4376   }
4377 
4378   if (os::Aix::on_pase()) {
4379     p = ::getenv("QIBM_MULTI_THREADED");
4380     trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
4381   }
4382 
4383   p = ::getenv("LDR_CNTRL");
4384   trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
4385   if (os::Aix::on_pase() && os::Aix::os_version() == 0x0701) {
4386     if (p && ::strstr(p, "TEXTPSIZE")) {
4387       trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
4388         "you may experience hangs or crashes on OS/400 V7R1.");
4389     }
4390   }
4391 
4392   p = ::getenv("AIXTHREAD_GUARDPAGES");
4393   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");


4394 
4395 } // end: os::Aix::scan_environment()
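
// Illustrative sketch (hypothetical helper, not part of the original file):
// every check in scan_environment() follows the same read-trace-act pattern;
// for switches that take the value "ON" it condenses to:
static bool env_var_is_on_sketch(const char* name) {
  const char* const p = ::getenv(name);
  trcVerbose("%s=%s.", name, p ? p : "<unset>");
  return p != NULL && ::strcasecmp(p, "ON") == 0;
}
// EXTSHM above uses exactly this case-insensitive comparison, while the
// XPG_SUS_ENV check uses a case-sensitive strcmp().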
4396 
4397 // PASE: initialize the libo4 library (PASE porting library).
4398 void os::Aix::initialize_libo4() {
4399   guarantee(os::Aix::on_pase(), "OS/400 only.");
4400   if (!libo4::init()) {
4401     trcVerbose("libo4 initialization failed.");
4402     assert(false, "libo4 initialization failed");
4403   } else {
4404     trcVerbose("libo4 initialized.");
4405   }
4406 }
4407 
4408 // AIX: initialize the libperfstat library.

4409 void os::Aix::initialize_libperfstat() {

4410   assert(os::Aix::on_aix(), "AIX only");

4411   if (!libperfstat::init()) {
4412     trcVerbose("libperfstat initialization failed.");
4413     assert(false, "libperfstat initialization failed");
4414   } else {
4415     trcVerbose("libperfstat initialized.");


4416   }
4417 }
4418 
4419 /////////////////////////////////////////////////////////////////////////////
4420 // thread stack
4421 
4422 // Function to query the current stack size using pthread_getthrds_np.
4423 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4424   // This only works when invoked on a pthread. As we agreed not to use
4425   // primordial threads anyway, I assert here.
4426   guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4427 
4428   // Information about this api can be found (a) in the pthread.h header and
4429   // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4430   //
4431   // The behavior of this API when used to query the current stack is not well
4432   // defined. But after a lot of experiments and after asking IBM about it, I
4433   // concluded that it is safe enough for cases where the pthread library creates
4434   // the stacks itself. For cases where I create my own stack and pass it to
4435   // pthread_create, it does not seem to work (the returned stack size in that case is 0).
4436 
4437   pthread_t tid = pthread_self();

