
src/os/aix/vm/os_aix.cpp

rev 9422 : 8143125-Further Developments for AIX


  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 // According to the AIX OS doc #pragma alloca must be used
  27 // with the C++ compiler before referencing the function alloca()
  28 #pragma alloca
  29 
  30 // no precompiled headers
  31 #include "classfile/classLoader.hpp"
  32 #include "classfile/systemDictionary.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "jvm_aix.h"

  39 #include "libperfstat_aix.hpp"
  40 #include "loadlib_aix.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "memory/filemap.hpp"
  43 #include "misc_aix.hpp"
  44 #include "mutex_aix.inline.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "os_aix.inline.hpp"
  47 #include "os_share_aix.hpp"
  48 #include "porting_aix.hpp"
  49 #include "prims/jniFastGetField.hpp"
  50 #include "prims/jvm.h"
  51 #include "prims/jvm_misc.hpp"
  52 #include "runtime/arguments.hpp"
  53 #include "runtime/atomic.inline.hpp"
  54 #include "runtime/extendedPC.hpp"
  55 #include "runtime/globals.hpp"
  56 #include "runtime/interfaceSupport.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/javaCalls.hpp"


  91 #include <stdio.h>
  92 #include <string.h>
  93 #include <unistd.h>
  94 #include <sys/ioctl.h>
  95 #include <sys/ipc.h>
  96 #include <sys/mman.h>
  97 #include <sys/resource.h>
  98 #include <sys/select.h>
  99 #include <sys/shm.h>
 100 #include <sys/socket.h>
 101 #include <sys/stat.h>
 102 #include <sys/sysinfo.h>
 103 #include <sys/systemcfg.h>
 104 #include <sys/time.h>
 105 #include <sys/times.h>
 106 #include <sys/types.h>
 107 #include <sys/utsname.h>
 108 #include <sys/vminfo.h>
 109 #include <sys/wait.h>
 110 
 111 // If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
 112 // getrusage() is prepared to handle the associated failure.
 113 #ifndef RUSAGE_THREAD
 114 #define RUSAGE_THREAD   (1)               /* only the calling thread */
 115 #endif
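
// Illustrative sketch, not part of this change: a caller that is prepared for
// getrusage(RUSAGE_THREAD) to fail on older AIX levels might look roughly like
// this. The helper name is hypothetical.
static jlong illustrative_thread_cpu_time_ns() {
  struct rusage usage;
  if (::getrusage(RUSAGE_THREAD, &usage) != 0) {
    return -1; // RUSAGE_THREAD not supported here; caller falls back to another clock.
  }
  return jlong(usage.ru_utime.tv_sec + usage.ru_stime.tv_sec) * (1000 * 1000 * 1000) +
         jlong(usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) * 1000;
}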
 116 
 117 // PPC port
 118 static const uintx Use64KPagesThreshold       = 1*M;
 119 static const uintx MaxExpectedDataSegmentSize = SIZE_4G*2;
 120 
 121 // Add missing declarations (they should be in procinfo.h but are missing before AIX 6.1).
 122 #if !defined(_AIXVERSION_610)
 123 extern "C" {
 124   int getthrds64(pid_t ProcessIdentifier,
 125                  struct thrdentry64* ThreadBuffer,
 126                  int ThreadSize,
 127                  tid64_t* IndexPointer,
 128                  int Count);
 129 }
 130 #endif
 131 
 132 #define MAX_PATH (2 * K)
 133 
 134 // for timer info max values which include all bits
 135 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 136 // for multipage initialization error analysis (in 'g_multipage_error')
 137 #define ERROR_MP_OS_TOO_OLD                          100
 138 #define ERROR_MP_EXTSHM_ACTIVE                       101
 139 #define ERROR_MP_VMGETINFO_FAILED                    102
 140 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 141 
 142 // The semantics in this file are that codeptr_t is a *real code ptr*.
 143 // This means that any function taking codeptr_t as an argument will assume
 144 // a real code pointer and won't handle function descriptors (e.g. getFuncName),
 145 // whereas functions taking an address as an argument will deal with function
 146 // descriptors (e.g. os::dll_address_to_library_name).
 147 typedef unsigned int* codeptr_t;
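
// Illustrative sketch, not part of this change: on AIX a C/C++ function pointer
// refers to a function descriptor rather than to code. The descriptor holds the
// code entry point, the TOC pointer and an environment pointer; its first slot
// is the real code pointer that codeptr_t denotes. The names below are
// hypothetical and for illustration only.
struct illustrative_function_descriptor_t {
  void* entry;  // real code address - this is what codeptr_t points to
  void* toc;    // TOC (r2) value the callee expects
  void* env;    // environment pointer (unused by C/C++)
};
static inline codeptr_t illustrative_descriptor_to_codeptr(void* fd) {
  return (codeptr_t) ((illustrative_function_descriptor_t*) fd)->entry;
}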
 148 
 149 // Typedefs for stackslots, stack pointers, pointers to op codes.
 150 typedef unsigned long stackslot_t;
 151 typedef stackslot_t* stackptr_t;
 152 
 153 // Excerpts from systemcfg.h definitions newer than AIX 5.3.
 154 #ifndef PV_7
 155 #define PV_7 0x200000          /* Power PC 7 */
 156 #define PV_7_Compat 0x208000   /* Power PC 7 */
 157 #endif
 158 #ifndef PV_8
 159 #define PV_8 0x300000          /* Power PC 8 */
 160 #define PV_8_Compat 0x308000   /* Power PC 8 */
 161 #endif
 162 
 163 // Query dimensions of the stack of the calling thread.
 164 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
 165 
 166 // Function to check a given stack pointer against given stack limits.
 167 inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
 168   if (((uintptr_t)sp) & 0x7) {
 169     return false;
 170   }
 171   if (sp > stack_base) {
 172     return false;
 173   }
 174   if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
 175     return false;
 176   }
 177   return true;
 178 }
 179 
 180 // Returns true if function is a valid codepointer.
 181 inline bool is_valid_codepointer(codeptr_t p) {
 182   if (!p) {
 183     return false;
 184   }
 185   if (((uintptr_t)p) & 0x3) {
 186     return false;
 187   }
 188   if (!LoadedLibraries::find_for_text_address(p, NULL)) {
 189     return false;
 190   }
 191   return true;
 192 }
 193 
 194 // Macro to check a given stack pointer against given stack limits and to die if the test fails.
 195 #define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
 196     guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
 197 }
 198 
 199 // Macro to check the current stack pointer against given stack limits.
 200 #define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
 201   address sp; \
 202   sp = os::current_stack_pointer(); \
 203   CHECK_STACK_PTR(sp, stack_base, stack_size); \
 204 }
 205 


 206 ////////////////////////////////////////////////////////////////////////////////
 207 // global variables (for a description see os_aix.hpp)
 208 
 209 julong    os::Aix::_physical_memory = 0;

 210 pthread_t os::Aix::_main_thread = ((pthread_t)0);
 211 int       os::Aix::_page_size = -1;


 212 int       os::Aix::_on_pase = -1;



 213 int       os::Aix::_os_version = -1;

 214 int       os::Aix::_stack_page_size = -1;


 215 int       os::Aix::_xpg_sus_mode = -1;


 216 int       os::Aix::_extshm = -1;
 217 int       os::Aix::_logical_cpus = -1;
 218 
 219 ////////////////////////////////////////////////////////////////////////////////
 220 // local variables
 221 
 222 static int      g_multipage_error  = -1;   // error analysis for multipage initialization
 223 static jlong    initial_time_count = 0;
 224 static int      clock_tics_per_sec = 100;
 225 static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
 226 static bool     check_signals      = true;
 227 static pid_t    _initial_pid       = 0;
 228 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 229 static sigset_t SR_sigset;
 230 



 231 // This describes the state of multipage support of the underlying
 232 // OS. Note that this is of no interest to the outside world and
 233 // therefore should not be defined in the AIX class.
 234 //
 235 // AIX supports four different page sizes - 4K, 64K, 16M, 16G. The
 236 // latter two (16M "large" and 16G "huge" pages) require special
 237 // setup and are normally not available.
 238 //
 239 // AIX supports multiple page sizes per process, for:
 240 //  - Stack (of the primordial thread, so not relevant for us)
 241 //  - Data - data, bss, heap, for us also pthread stacks
 242 //  - Text - text code
 243 //  - shared memory
 244 //
 245 // Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
 246 // and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
 247 //
 248 // For shared memory, page size can be set dynamically via
 249 // shmctl(). Different shared memory regions can have different page
 250 // sizes.
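// For example (illustrative only; LDR_CNTRL suboptions are separated by '@'):
//   LDR_CNTRL=DATAPSIZE=64K@TEXTPSIZE=64K@SHMPSIZE=64K java ...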


 261   bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
 262   bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
 263   int error;                  // Error describing if something went wrong at multipage init.
 264 } g_multipage_support = {
 265   (size_t) -1,
 266   (size_t) -1,
 267   (size_t) -1,
 268   (size_t) -1,
 269   (size_t) -1,
 270   false, false,
 271   0
 272 };
 273 
 274 // We must not accidentally allocate memory close to the BRK - even if
 275 // that would work - because then we prevent the BRK segment from
 276 // growing which may result in a malloc OOM even though there is
 277 // enough memory. The problem only arises if we shmat() or mmap() at
 278 // a specific wish address, e.g. to place the heap in a
 279 // compressed-oops-friendly way.
 280 static bool is_close_to_brk(address a) {
 281   address a1 = (address) sbrk(0);
 282   if (a >= a1 && a < (a1 + MaxExpectedDataSegmentSize)) {

 283     return true;
 284   }
 285   return false;
 286 }
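
// Illustrative usage (mirrors the callers later in this file): when a wish
// address is requested, rule out the BRK neighbourhood first, e.g.
//   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
//     requested_addr = NULL; // act as if the OS refused to attach there
//   }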
 287 
 288 julong os::available_memory() {
 289   return Aix::available_memory();
 290 }
 291 
 292 julong os::Aix::available_memory() {




 293   os::Aix::meminfo_t mi;
 294   if (os::Aix::get_meminfo(&mi)) {
 295     return mi.real_free;
 296   } else {
 297     return 0xFFFFFFFFFFFFFFFFLL;
 298   }
 299 }
 300 
 301 julong os::physical_memory() {
 302   return Aix::physical_memory();
 303 }
 304 
 305 // Return true if the process runs with special (set-uid/set-gid) privileges.
 306 
 307 bool os::have_special_privileges() {
 308   static bool init = false;
 309   static bool privileges = false;
 310   if (!init) {
 311     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 312     init = true;
 313   }
 314   return privileges;
 315 }
 316 
 317 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 318 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 319 static bool my_disclaim64(char* addr, size_t size) {
 320 
 321   if (size == 0) {
 322     return true;
 323   }
 324 
 325   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 326   const unsigned int maxDisclaimSize = 0x40000000;
 327 
 328   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 329   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 330 
 331   char* p = addr;
 332 
 333   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
 334     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 335       trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 336       return false;
 337     }
 338     p += maxDisclaimSize;
 339   }
 340 
 341   if (lastDisclaimSize > 0) {
 342     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 343       trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 344       return false;
 345     }
 346   }
 347 
 348   return true;
 349 }
 350 
 351 // Cpu architecture string
 352 #if defined(PPC32)
 353 static char cpu_arch[] = "ppc";
 354 #elif defined(PPC64)
 355 static char cpu_arch[] = "ppc64";
 356 #else
 357 #error Add appropriate cpu_arch setting
 358 #endif
 359 







 360 
 361 // Given an address, returns the size of the page backing that address.
 362 size_t os::Aix::query_pagesize(void* addr) {
 363 





 364   vm_page_info pi;
 365   pi.addr = (uint64_t)addr;
 366   if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
 367     return pi.pagesize;
 368   } else {
 369     fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
 370     assert(false, "vmgetinfo failed to retrieve page size");
 371     return SIZE_4K;
 372   }
 373 
 374 }
 375 
 376 // Returns the kernel thread id of the currently running thread.
 377 pid_t os::Aix::gettid() {
 378   return (pid_t) thread_self();
 379 }
 380 
 381 void os::Aix::initialize_system_info() {
 382 
 383   // Get the number of online (logical) cpus instead of configured ones.
 384   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
 385   assert(_processor_count > 0, "_processor_count must be > 0");
 386 
 387   // Retrieve total physical storage.
 388   os::Aix::meminfo_t mi;
 389   if (!os::Aix::get_meminfo(&mi)) {
 390     fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
 391     assert(false, "os::Aix::get_meminfo failed.");
 392   }
 393   _physical_memory = (julong) mi.real_total;
 394 }
 395 
 396 // Helper function for tracing page sizes.
 397 static const char* describe_pagesize(size_t pagesize) {
 398   switch (pagesize) {
 399     case SIZE_4K : return "4K";
 400     case SIZE_64K: return "64K";
 401     case SIZE_16M: return "16M";
 402     case SIZE_16G: return "16G";
 403     case -1:       return "not set";
 404     default:
 405       assert(false, "surprise");
 406       return "??";
 407   }
 408 }
 409 
 410 // Probe OS for multipage support.
 411 // Will fill the global g_multipage_support structure.
 412 // Must be called before calling os::large_page_init().
 413 static void query_multipage_support() {
 414 
 415   guarantee(g_multipage_support.pagesize == -1,
 416             "do not call twice");
 417 
 418   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
 419 
 420   // This really would surprise me.
 421   assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");
 422 
 423   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 424   // Default data page size is defined either by linker options (-bdatapsize)
 425   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 426   // default should be 4K.
 427   {
 428     void* p = ::malloc(SIZE_16M);
 429     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
 430     ::free(p);
 431   }
 432 
 433   // Query default shm page size (LDR_CNTRL SHMPSIZE).
 434   {
 435     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 436     guarantee(shmid != -1, "shmget failed");
 437     void* p = ::shmat(shmid, NULL, 0);
 438     ::shmctl(shmid, IPC_RMID, NULL);
 439     guarantee(p != (void*) -1, "shmat failed");
 440     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
 441     ::shmdt(p);
 442   }
 443 
 444   // Before querying the stack page size, make sure we are not running as primordial
 445   // thread (because primordial thread's stack may have different page size than
 446   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 447   // number of reasons so we may just as well guarantee it here.
 448   guarantee0(!os::Aix::is_primordial_thread());
 449 
 450   // Query pthread stack page size.
 451   {
 452     int dummy = 0;
 453     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
 454   }
 455 
 456   // Query default text page size (LDR_CNTRL TEXTPSIZE).
 457   /* PPC port: so far unused.
 458   {
 459     address any_function =
 460       (address) resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
 461     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
 462   }
 463   */
 464 
 465   // Now probe for support of 64K pages and 16M pages.
 466 
 467   // Before OS/400 V6R1, there is no support for pages other than 4K.
 468   if (os::Aix::on_pase_V5R4_or_older()) {
 469     Unimplemented();
 470     goto query_multipage_support_end;
 471   }
 472 
 473   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
 474   {
 475     const int MAX_PAGE_SIZES = 4;
 476     psize_t sizes[MAX_PAGE_SIZES];
 477     const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 478     if (num_psizes == -1) {
 479       trc("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
 480       trc("disabling multipage support.\n");
 481       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
 482       goto query_multipage_support_end;
 483     }
 484     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 485     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 486     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
 487     for (int i = 0; i < num_psizes; i ++) {
 488       trcVerbose(" %s ", describe_pagesize(sizes[i]));
 489     }
 490 
 491     // Can we use 64K, 16M pages?
 492     for (int i = 0; i < num_psizes; i ++) {
 493       const size_t pagesize = sizes[i];
 494       if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
 495         continue;
 496       }
 497       bool can_use = false;
 498       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
 499       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
 500         IPC_CREAT | S_IRUSR | S_IWUSR);
 501       guarantee0(shmid != -1); // Should always work.
 502       // Try to set pagesize.
 503       struct shmid_ds shm_buf = { 0 };
 504       shm_buf.shm_pagesize = pagesize;
 505       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
 506         const int en = errno;
 507         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 508         // PPC port trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
 509         // PPC port  MiscUtils::describe_errno(en));
 510       } else {
 511         // Attach and double-check pagesize.
 512         void* p = ::shmat(shmid, NULL, 0);
 513         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 514         guarantee0(p != (void*) -1); // Should always work.
 515         const size_t real_pagesize = os::Aix::query_pagesize(p);
 516         if (real_pagesize != pagesize) {
 517           trcVerbose("real page size (0x%llX) differs.", real_pagesize);
 518         } else {
 519           can_use = true;
 520         }
 521         ::shmdt(p);
 522       }
 523       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
 524       if (pagesize == SIZE_64K) {
 525         g_multipage_support.can_use_64K_pages = can_use;
 526       } else if (pagesize == SIZE_16M) {
 527         g_multipage_support.can_use_16M_pages = can_use;
 528       }
 529     }
 530 
 531   } // end: check which pages can be used for shared memory
 532 
 533 query_multipage_support_end:
 534 
 535   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s\n",
 536       describe_pagesize(g_multipage_support.pagesize));
 537   trcVerbose("Data page size (C-Heap, bss, etc): %s\n",
 538       describe_pagesize(g_multipage_support.datapsize));
 539   trcVerbose("Text page size: %s\n",
 540       describe_pagesize(g_multipage_support.textpsize));
 541   trcVerbose("Thread stack page size (pthread): %s\n",
 542       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
 543   trcVerbose("Default shared memory page size: %s\n",
 544       describe_pagesize(g_multipage_support.shmpsize));
 545   trcVerbose("Can use 64K pages dynamically with shared meory: %s\n",
 546       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
 547   trcVerbose("Can use 16M pages dynamically with shared memory: %s\n",
 548       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
 549   trcVerbose("Multipage error details: %d\n",
 550       g_multipage_support.error);
 551 
 552   // sanity checks
 553   assert0(g_multipage_support.pagesize == SIZE_4K);
 554   assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
 555   // PPC port: so far unused.assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
 556   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
 557   assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);
 558 
 559 } // end os::Aix::query_multipage_support()
 560 
 561 void os::init_system_properties_values() {
 562 
 563 #define DEFAULT_LIBPATH "/usr/lib:/lib"
 564 #define EXTENSIONS_DIR  "/lib/ext"
 565 
 566   // Buffer that fits several sprintfs.
 567   // Note that the space for the trailing null is provided
 568   // by the nulls included by the sizeof operator.
 569   const size_t bufsize =
 570     MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
 571          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
 572   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 573 
 574   // sysclasspath, java_home, dll_dir
 575   {
 576     char *pslash;
 577     os::jvm_path(buf, bufsize);
 578 
 579     // Found the full path to libjvm.so.
 580     // Now cut the path to <java_home>/jre if we can.
 581     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.



 582     pslash = strrchr(buf, '/');
 583     if (pslash != NULL) {
 584       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 585     }
 586     Arguments::set_dll_dir(buf);
 587 
 588     if (pslash != NULL) {
 589       pslash = strrchr(buf, '/');
 590       if (pslash != NULL) {
 591         *pslash = '\0';          // Get rid of /<arch>.
 592         pslash = strrchr(buf, '/');
 593         if (pslash != NULL) {
 594           *pslash = '\0';        // Get rid of /lib.
 595         }
 596       }
 597     }
 598     Arguments::set_java_home(buf);
 599     set_boot_path('/', ':');
 600   }
 601 


 737       // Only the VM thread handles BREAK_SIGNAL ...
 738       pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
 739     } else {
 740       // ... all other threads block BREAK_SIGNAL
 741       pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
 742     }
 743   }
 744 }
 745 
 746 // Retrieve memory information.
 747 // Returns false if something went wrong;
 748 // the content of pmi is undefined in this case.
 749 bool os::Aix::get_meminfo(meminfo_t* pmi) {
 750 
 751   assert(pmi, "get_meminfo: invalid parameter");
 752 
 753   memset(pmi, 0, sizeof(meminfo_t));
 754 
 755   if (os::Aix::on_pase()) {
 756 
 757     Unimplemented();
















 758     return false;
 759 
 760   } else {
 761 
 762     // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
 763     // See:
 764     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 765     //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
 766     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 767     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 768 
 769     perfstat_memory_total_t psmt;
 770     memset (&psmt, '\0', sizeof(psmt));
 771     const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
 772     if (rc == -1) {
 773       fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
 774       assert(0, "perfstat_memory_total() failed");
 775       return false;
 776     }
 777 


 781     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 782     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 783     // The fields of perfstat_memory_total_t:
 784     // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
 785     // u_longlong_t real_total         Total real memory (in 4 KB pages).
 786     // u_longlong_t real_free          Free real memory (in 4 KB pages).
 787     // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
 788     // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
 789 
 790     pmi->virt_total = psmt.virt_total * 4096;
 791     pmi->real_total = psmt.real_total * 4096;
 792     pmi->real_free = psmt.real_free * 4096;
 793     pmi->pgsp_total = psmt.pgsp_total * 4096;
 794     pmi->pgsp_free = psmt.pgsp_free * 4096;
 795 
 796     return true;
 797 
 798   }
 799 } // end os::Aix::get_meminfo
 800 
 801 // Retrieve global cpu information.
 802 // Returns false if something went wrong;
 803 // the content of pci is undefined in this case.
 804 bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
 805   assert(pci, "get_cpuinfo: invalid parameter");
 806   memset(pci, 0, sizeof(cpuinfo_t));
 807 
 808   perfstat_cpu_total_t psct;
 809   memset (&psct, '\0', sizeof(psct));
 810 
 811   if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
 812     fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
 813     assert(0, "perfstat_cpu_total() failed");
 814     return false;
 815   }
 816 
 817   // global cpu information
 818   strcpy (pci->description, psct.description);
 819   pci->processorHZ = psct.processorHZ;
 820   pci->ncpus = psct.ncpus;
 821   os::Aix::_logical_cpus = psct.ncpus;
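  // psct.loadavg[] holds fixed-point values; SBITS gives the number of
  // fraction bits in that representation, so dividing by (1 << SBITS)
  // yields a floating point load average.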
 822   for (int i = 0; i < 3; i++) {
 823     pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
 824   }
 825 
 826   // get the processor version from _system_configuration
 827   switch (_system_configuration.version) {
 828   case PV_8:
 829     strcpy(pci->version, "Power PC 8");
 830     break;
 831   case PV_7:
 832     strcpy(pci->version, "Power PC 7");
 833     break;
 834   case PV_6_1:
 835     strcpy(pci->version, "Power PC 6 DD1.x");
 836     break;
 837   case PV_6:
 838     strcpy(pci->version, "Power PC 6");
 839     break;
 840   case PV_5:
 841     strcpy(pci->version, "Power PC 5");
 842     break;
 843   case PV_5_2:
 844     strcpy(pci->version, "Power PC 5_2");
 845     break;
 846   case PV_5_3:
 847     strcpy(pci->version, "Power PC 5_3");
 848     break;
 849   case PV_5_Compat:
 850     strcpy(pci->version, "PV_5_Compat");
 851     break;
 852   case PV_6_Compat:
 853     strcpy(pci->version, "PV_6_Compat");
 854     break;
 855   case PV_7_Compat:
 856     strcpy(pci->version, "PV_7_Compat");
 857     break;
 858   case PV_8_Compat:
 859     strcpy(pci->version, "PV_8_Compat");
 860     break;
 861   default:
 862     strcpy(pci->version, "unknown");
 863   }
 864 
 865   return true;
 866 
 867 } //end os::Aix::get_cpuinfo
 868 
 869 //////////////////////////////////////////////////////////////////////////////
 870 // detecting pthread library
 871 
 872 void os::Aix::libpthread_init() {
 873   return;
 874 }
 875 
 876 //////////////////////////////////////////////////////////////////////////////
 877 // create new thread
 878 
 879 // Thread start routine for all newly created threads
 880 static void *java_start(Thread *thread) {
 881 
 882   // find out my own stack dimensions
 883   {
 884     // actually, this should do exactly the same as thread->record_stack_base_and_size...
 885     address base = 0;
 886     size_t size = 0;
 887     query_stack_dimensions(&base, &size);
 888     thread->set_stack_base(base);
 889     thread->set_stack_size(size);
 890   }
 891 




















 892   // Do some sanity checks.
 893   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
 894 
 895   // Try to randomize the cache line index of hot stack frames.
 896   // This helps when threads with the same stack traces evict each other's
 897   // cache lines. The threads can be either from the same JVM instance, or
 898   // from different JVM instances. The benefit is especially significant for
 899   // processors with hyperthreading technology.
 900 
 901   static int counter = 0;
 902   int pid = os::current_process_id();
 903   alloca(((pid ^ counter++) & 7) * 128);
 904 
 905   ThreadLocalStorage::set_thread(thread);
 906 
 907   OSThread* osthread = thread->osthread();
 908 
 909   // thread_id is kernel thread id (similar to Solaris LWP id)
 910   osthread->set_thread_id(os::Aix::gettid());
 911 
 912   // initialize signal mask for this thread
 913   os::Aix::hotspot_sigmask(thread);
 914 
 915   // initialize floating point control register
 916   os::Aix::init_thread_fpu_state();
 917 
 918   assert(osthread->get_state() == RUNNABLE, "invalid os thread state");
 919 
 920   // call one more level start routine
 921   thread->run();
 922 



 923   return 0;
 924 }
 925 
 926 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 927 
 928   // We want the whole function to be synchronized.
 929   ThreadCritical cs;
 930 
 931   assert(thread->osthread() == NULL, "caller responsible");
 932 
 933   // Allocate the OSThread object
 934   OSThread* osthread = new OSThread(NULL, NULL);
 935   if (osthread == NULL) {
 936     return false;
 937   }
 938 
 939   // set the correct thread state
 940   osthread->set_thread_type(thr_type);
 941 
 942   // Initial state is ALLOCATED but not INITIALIZED
 943   osthread->set_state(ALLOCATED);
 944 
 945   thread->set_osthread(osthread);
 946 
 947   // init thread attributes
 948   pthread_attr_t attr;
 949   pthread_attr_init(&attr);
 950   guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "pthread_attr_setdetachstate failed");


 975       } // else fall through:
 976         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 977     case os::vm_thread:
 978     case os::pgc_thread:
 979     case os::cgc_thread:
 980     case os::watcher_thread:
 981       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 982       break;
 983     }
 984   }
 985 
 986   stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
 987   pthread_attr_setstacksize(&attr, stack_size);
 988 
 989   pthread_t tid;
 990   int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
 991 
 992   pthread_attr_destroy(&attr);
 993 
 994   if (ret == 0) {
 995     // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
 996   } else {







 997     if (PrintMiscellaneous && (Verbose || WizardMode)) {
 998       perror("pthread_create()");
 999     }
1000     // Need to clean up stuff we've allocated so far
1001     thread->set_osthread(NULL);
1002     delete osthread;
1003     return false;
1004   }
1005 
1006   // Store pthread info into the OSThread
1007   osthread->set_pthread_id(tid);
1008 
1009   return true;
1010 }
1011 
1012 /////////////////////////////////////////////////////////////////////////////
1013 // attach existing thread
1014 
1015 // bootstrap the main thread
1016 bool os::create_main_thread(JavaThread* thread) {
1017   assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
1018   return create_attached_thread(thread);
1019 }
1020 
1021 bool os::create_attached_thread(JavaThread* thread) {
1022 #ifdef ASSERT
1023     thread->verify_not_published();
1024 #endif
1025 
1026   // Allocate the OSThread object
1027   OSThread* osthread = new OSThread(NULL, NULL);
1028 
1029   if (osthread == NULL) {
1030     return false;
1031   }
1032 
1033   // Store pthread info into the OSThread
1034   osthread->set_thread_id(os::Aix::gettid());
1035   osthread->set_pthread_id(::pthread_self());









1036 
1037   // initialize floating point control register
1038   os::Aix::init_thread_fpu_state();
1039 
1040   // some sanity checks
1041   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
1042 
1043   // Initial thread state is RUNNABLE
1044   osthread->set_state(RUNNABLE);
1045 
1046   thread->set_osthread(osthread);
1047 
1048   if (UseNUMA) {
1049     int lgrp_id = os::numa_get_group_id();
1050     if (lgrp_id != -1) {
1051       thread->set_lgrp_id(lgrp_id);
1052     }
1053   }
1054 
1055   // initialize signal mask for this thread


1135     // better than nothing, but not much
1136     return elapsedTime();
1137   }
1138 }
1139 
1140 jlong os::javaTimeMillis() {
1141   timeval time;
1142   int status = gettimeofday(&time, NULL);
1143   assert(status != -1, "aix error at gettimeofday()");
1144   return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1145 }
1146 
1147 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1148   timeval time;
1149   int status = gettimeofday(&time, NULL);
1150   assert(status != -1, "aix error at gettimeofday()");
1151   seconds = jlong(time.tv_sec);
1152   nanos = jlong(time.tv_usec) * 1000;
1153 }
1154 
1155 
1156 // We need to manually declare mread_real_time,
1157 // because IBM didn't provide a prototype in time.h.
1158 // (they probably only ever tested in C, not C++)
1159 extern "C"
1160 int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
1161 
1162 jlong os::javaTimeNanos() {
1163   if (os::Aix::on_pase()) {
1164     Unimplemented();
1165     return 0;





1166   } else {
1167     // On AIX, use the precision of the processor's real time clock
1168     // or time base registers.
1169     timebasestruct_t time;
1170     int rc;
1171 
1172     // If the CPU has a time register, it will be used and
1173     // we have to convert to real time first. After conversion we have the following data:
1174     // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
1175     // time.tb_low  [nanoseconds after the last full second above]
1176     // It is better to use mread_real_time here instead of read_real_time
1177     // to ensure that we get a monotonically increasing time.
1178     if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
1179       rc = time_base_to_time(&time, TIMEBASE_SZ);
1180       assert(rc != -1, "aix error at time_base_to_time()");
1181     }
1182     return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
1183   }
1184 }
1185 


1274 void os::die() {
1275   ::abort();
1276 }
1277 
1278 // This method is a copy of JDK's sysGetLastErrorString
1279 // from src/solaris/hpi/src/system_md.c
1280 
1281 size_t os::lasterror(char *buf, size_t len) {
1282   if (errno == 0) return 0;
1283 
1284   const char *s = ::strerror(errno);
1285   size_t n = ::strlen(s);
1286   if (n >= len) {
1287     n = len - 1;
1288   }
1289   ::strncpy(buf, s, n);
1290   buf[n] = '\0';
1291   return n;
1292 }
1293 
1294 intx os::current_thread_id() { return (intx)pthread_self(); }


1295 
1296 int os::current_process_id() {
1297 
1298   // This implementation returns a unique pid, the pid of the
1299   // launcher thread that starts the vm 'process'.
1300 
1301   // Under POSIX, getpid() returns the same pid as the
1302   // launcher thread rather than a unique pid per thread.
1303   // Use gettid() if you want the old pre NPTL behaviour.
1304 
1305   // if you are looking for the result of a call to getpid() that
1306   // returns a unique pid for the calling thread, then look at the
1307   // OSThread::thread_id() method in osThread_linux.hpp file
1308 
1309   return (int)(_initial_pid ? _initial_pid : getpid());
1310 }
1311 
1312 // DLL functions
1313 
1314 const char* os::dll_file_extension() { return ".so"; }
1315 
1316 // This must be hard coded because it's the system's temporary
1317 // directory, not the java application's temp directory (a la java.io.tmpdir).
1318 const char* os::get_temp_directory() { return "/tmp"; }
1319 
1320 static bool file_exists(const char* filename) {
1321   struct stat statbuf;
1322   if (filename == NULL || strlen(filename) == 0) {
1323     return false;
1324   }
1325   return os::stat(filename, &statbuf) == 0;
1326 }
1327 
1328 bool os::dll_build_name(char* buffer, size_t buflen,
1329                         const char* pname, const char* fname) {
1330   bool retval = false;
1331   // Copied from libhpi
1332   const size_t pnamelen = pname ? strlen(pname) : 0;
1333 
1334   // Return error on buffer overflow.
1335   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1336     *buffer = '\0';
1337     return retval;
1338   }
1339 
1340   if (pnamelen == 0) {
1341     snprintf(buffer, buflen, "lib%s.so", fname);
1342     retval = true;
1343   } else if (strchr(pname, *os::path_separator()) != NULL) {
1344     int n;
1345     char** pelements = split_path(pname, &n);



1346     for (int i = 0; i < n; i++) {
1347       // Really shouldn't be NULL, but check can't hurt
1348       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1349         continue; // skip the empty path values
1350       }
1351       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1352       if (file_exists(buffer)) {
1353         retval = true;
1354         break;
1355       }
1356     }
1357     // release the storage
1358     for (int i = 0; i < n; i++) {
1359       if (pelements[i] != NULL) {
1360         FREE_C_HEAP_ARRAY(char, pelements[i]);
1361       }
1362     }
1363     if (pelements != NULL) {
1364       FREE_C_HEAP_ARRAY(char*, pelements);
1365     }


1569 
1570   // Print limits on DATA, because it limits the C-heap.
1571   st->print(", DATA ");
1572   getrlimit(RLIMIT_DATA, &rlim);
1573   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1574   else st->print(UINT64_FORMAT "k", (uint64_t)(rlim.rlim_cur >> 10));
1575   st->cr();
1576 
1577   // load average
1578   st->print("load average:");
1579   double loadavg[3] = {-1.L, -1.L, -1.L};
1580   os::loadavg(loadavg, 3);
1581   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1582   st->cr();
1583 }
1584 
1585 void os::print_memory_info(outputStream* st) {
1586 
1587   st->print_cr("Memory:");
1588 
1589   st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
1590   st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));






1591   st->print_cr("  Default shared memory page size:        %s",
1592     describe_pagesize(g_multipage_support.shmpsize));
1593   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1594     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1595   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1596     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1597   if (g_multipage_error != 0) {
1598     st->print_cr("  multipage error: %d", g_multipage_error);
1599   }


1600 
1601   // print out LDR_CNTRL because it affects the default page sizes
1602   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1603   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1604 

1605   const char* const extshm = ::getenv("EXTSHM");
1606   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1607   if (extshm != NULL && ((strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0))) {
1608     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1609   }
1610 
1611   // Call os::Aix::get_meminfo() to retrieve memory statistics.




1612   os::Aix::meminfo_t mi;
1613   if (os::Aix::get_meminfo(&mi)) {
1614     char buffer[256];
1615     if (os::Aix::on_aix()) {
1616       jio_snprintf(buffer, sizeof(buffer),
1617                    "  physical total : %llu\n"
1618                    "  physical free  : %llu\n"
1619                    "  swap total     : %llu\n"
1620                    "  swap free      : %llu\n",
1621                    mi.real_total,
1622                    mi.real_free,
1623                    mi.pgsp_total,
1624                    mi.pgsp_free);
1625     } else {
1626       Unimplemented();



1627     }
1628     st->print_raw(buffer);
1629   } else {
1630     st->print_cr("  (no more information available)");
1631   }





1632 }
1633 
1634 // Get a string for the cpuinfo that is a summary of the cpu type
1635 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1636   // This looks good
1637   os::Aix::cpuinfo_t ci;
1638   if (os::Aix::get_cpuinfo(&ci)) {
1639     strncpy(buf, ci.version, buflen);
1640   } else {
1641     strncpy(buf, "AIX", buflen);
1642   }
1643 }
1644 
1645 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {






1646 }
1647 
1648 void os::print_siginfo(outputStream* st, void* siginfo) {
1649   // Use common posix version.
1650   os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
1651   st->cr();
1652 }
1653 
1654 static void print_signal_handler(outputStream* st, int sig,
1655                                  char* buf, size_t buflen);
1656 
1657 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1658   st->print_cr("Signal Handlers:");
1659   print_signal_handler(st, SIGSEGV, buf, buflen);
1660   print_signal_handler(st, SIGBUS , buf, buflen);
1661   print_signal_handler(st, SIGFPE , buf, buflen);
1662   print_signal_handler(st, SIGPIPE, buf, buflen);
1663   print_signal_handler(st, SIGXFSZ, buf, buflen);
1664   print_signal_handler(st, SIGILL , buf, buflen);
1665   print_signal_handler(st, SR_signum, buf, buflen);
1666   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1667   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1668   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1669   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);


1768   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1769 }
1770 
1771 void os::signal_raise(int signal_number) {
1772   ::raise(signal_number);
1773 }
1774 
1775 //
1776 // The following code was moved from os.cpp to make it
1777 // platform specific, which it is by its very nature.
1778 //
1779 
1780 // Will be modified when max signal is changed to be dynamic
1781 int os::sigexitnum_pd() {
1782   return NSIG;
1783 }
1784 
1785 // a counter for each possible signal value
1786 static volatile jint pending_signals[NSIG+1] = { 0 };
1787 
1788 // POSIX specific handshaking semaphore.





1789 static sem_t sig_sem;


















































1790 
1791 void os::signal_init_pd() {
1792   // Initialize signal structures
1793   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1794 
1795   // Initialize signal semaphore
1796   int rc = ::sem_init(&sig_sem, 0, 0);
1797   guarantee(rc != -1, "sem_init failed");
1798 }
1799 
1800 void os::signal_notify(int sig) {
1801   Atomic::inc(&pending_signals[sig]);
1802   ::sem_post(&sig_sem);
1803 }
1804 
1805 static int check_pending_signals(bool wait) {
1806   Atomic::store(0, &sigint_count);
1807   for (;;) {
1808     for (int i = 0; i < NSIG + 1; i++) {
1809       jint n = pending_signals[i];
1810       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1811         return i;
1812       }
1813     }
1814     if (!wait) {
1815       return -1;
1816     }
1817     JavaThread *thread = JavaThread::current();
1818     ThreadBlockInVM tbivm(thread);
1819 
1820     bool threadIsSuspended;
1821     do {
1822       thread->set_suspend_equivalent();
1823       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1824 
1825       ::sem_wait(&sig_sem);
1826 
1827       // were we externally suspended while we were waiting?
1828       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1829       if (threadIsSuspended) {
1830         //
1831         // The semaphore has been incremented, but while we were waiting
1832         // another thread suspended us. We don't want to continue running
1833         // while suspended because that would surprise the thread that
1834         // suspended us.
1835         //
1836         ::sem_post(&sig_sem);

1837 
1838         thread->java_suspend_self();
1839       }
1840     } while (threadIsSuspended);
1841   }
1842 }
1843 
1844 int os::signal_lookup() {
1845   return check_pending_signals(false);
1846 }
1847 
1848 int os::signal_wait() {
1849   return check_pending_signals(true);
1850 }
1851 
1852 ////////////////////////////////////////////////////////////////////////////////
1853 // Virtual Memory
1854 
1855 // We need to keep small simple bookkeeping for os::reserve_memory and friends.
1856 


1869   }
1870 
1871   bool contains_range(char* p, size_t s) const {
1872     return contains_addr(p) && contains_addr(p + s - 1);
1873   }
1874 
1875   void print_on(outputStream* os) const {
1876     os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1877       " bytes, %d %s pages), %s",
1878       addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1879       (type == VMEM_SHMATED ? "shmat" : "mmap")
1880     );
1881   }
1882 
1883   // Check that range is a sub range of memory block (or equal to memory block);
1884   // also check that range is fully page aligned to the page size of the block.
1885   void assert_is_valid_subrange(char* p, size_t s) const {
1886     if (!contains_range(p, s)) {
1887       fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1888               "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
1889               p, p + s - 1, addr, addr + size - 1);
1890       guarantee0(false);
1891     }
1892     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1893       fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1894               " aligned to pagesize (%s)\n", p, p + s);
1895       guarantee0(false);
1896     }
1897   }
1898 };
1899 
1900 static struct {
1901   vmembk_t* first;
1902   MiscUtils::CritSect cs;
1903 } vmem;
1904 
1905 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1906   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1907   assert0(p);
1908   if (p) {
1909     MiscUtils::AutoCritSect lck(&vmem.cs);
1910     p->addr = addr; p->size = size;
1911     p->pagesize = pagesize;
1912     p->type = type;
1913     p->next = vmem.first;
1914     vmem.first = p;


1930   assert0(p0);
1931   assert0(vmem.first); // List should not be empty.
1932   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1933     if (*pp == p0) {
1934       *pp = p0->next;
1935       ::free(p0);
1936       return;
1937     }
1938   }
1939   assert0(false); // Not found?
1940 }
1941 
1942 static void vmembk_print_on(outputStream* os) {
1943   MiscUtils::AutoCritSect lck(&vmem.cs);
1944   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1945     vmi->print_on(os);
1946     os->cr();
1947   }
1948 }
1949 


1950 // Reserve and attach a section of System V memory.
1951 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1952 // address. Failing that, it will attach the memory anywhere.
1953 // If <requested_addr> is NULL, function will attach the memory anywhere.
1954 //
1955 // <alignment_hint> is being ignored by this function. It is very probable however that the
1956 // alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
1957 // Should this not be enough, we can put more work into it.
1958 static char* reserve_shmated_memory (
1959   size_t bytes,
1960   char* requested_addr,
1961   size_t alignment_hint) {
1962 
1963   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1964     PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1965     bytes, requested_addr, alignment_hint);
1966 
1967   // Either give me a wish address or a wish alignment, but not both.
1968   assert0(!(requested_addr != NULL && alignment_hint != 0));
1969 


1971   // BRK because that may cause malloc OOM.
1972   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1973     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1974       "Will attach anywhere.", requested_addr);
1975     // Act like the OS refused to attach there.
1976     requested_addr = NULL;
1977   }
1978 
1979   // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1980   // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1981   if (os::Aix::on_pase_V5R4_or_older()) {
1982     ShouldNotReachHere();
1983   }
1984 
1985   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1986   const size_t size = align_size_up(bytes, SIZE_64K);
1987 
1988   // Reserve the shared segment.
1989   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1990   if (shmid == -1) {
1991     trc("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1992     return NULL;
1993   }
1994 
1995   // Important note:
1996   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
1997   // We must remove it from the system right after attaching it. System V shm segments are global and
1998   // survive the process.
1999   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
2000 
2001   struct shmid_ds shmbuf;
2002   memset(&shmbuf, 0, sizeof(shmbuf));
2003   shmbuf.shm_pagesize = SIZE_64K;
2004   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
2005     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
2006                size / SIZE_64K, errno);
2007     // I want to know if this ever happens.
2008     assert(false, "failed to set page size for shmat");
2009   }
2010 
2011   // Now attach the shared segment.
2012   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
2013   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
2014   // were not a segment boundary.
2015   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
2016   const int errno_shmat = errno;
2017 
2018   // (A) Right after shmat and before handing shmat errors delete the shm segment.
2019   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2020     trc("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2021     assert(false, "failed to remove shared memory segment!");
2022   }
2023 
2024   // Handle shmat error. If we failed to attach, just return.
2025   if (addr == (char*)-1) {
2026     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
2027     return NULL;
2028   }
2029 
2030   // Just for info: query the real page size. In case setting the page size did not
2031   // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
2032   const size_t real_pagesize = os::Aix::query_pagesize(addr);
2033   if (real_pagesize != shmbuf.shm_pagesize) {
2034     trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
2035   }
2036 
2037   if (addr) {
2038     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
2039       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
2040   } else {


2065   } else {
2066     trcVerbose("ok.");
2067     rc = true;
2068   }
2069   return rc;
2070 }
2071 
2072 static bool uncommit_shmated_memory(char* addr, size_t size) {
2073   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2074     addr, addr + size - 1);
2075 
2076   const bool rc = my_disclaim64(addr, size);
2077 
2078   if (!rc) {
2079     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2080     return false;
2081   }
2082   return true;
2083 }
2084 


2085 // Reserve memory via mmap.
2086 // If <requested_addr> is given, an attempt is made to attach at the given address.
2087 // Failing that, memory is allocated at any address.
2088 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2089 // allocate at an address aligned with the given alignment. Failing that, memory
2090 // is allocated anywhere.
2091 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2092   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2093     "alignment_hint " UINTX_FORMAT "...",
2094     bytes, requested_addr, alignment_hint);
2095 
2096   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2097   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size())) {
2098     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2099     return NULL;
2100   }
2101 
2102   // We must prevent anyone from attaching too close to the
2103   // BRK because that may cause malloc OOM.
2104   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {


2210 
2211   assert0(is_aligned_to(addr, os::vm_page_size()));
2212   assert0(is_aligned_to(size, os::vm_page_size()));
2213 
2214   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2215     addr, addr + size - 1);
2216   bool rc = false;
2217 
2218   // Uncommit mmap memory with msync MS_INVALIDATE.
2219   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2220     trcVerbose("failed (%d)\n", errno);
2221     rc = false;
2222   } else {
2223     trcVerbose("ok.");
2224     rc = true;
2225   }
2226 
2227   return rc;
2228 }
2229 
2230 // End: shared memory bookkeeping
2231 ////////////////////////////////////////////////////////////////////////////////////////////////////
2232 
2233 int os::vm_page_size() {
2234   // Seems redundant as all get out.
2235   assert(os::Aix::page_size() != -1, "must call os::init");
2236   return os::Aix::page_size();
2237 }
2238 
2239 // Aix allocates memory by pages.
2240 int os::vm_allocation_granularity() {
2241   assert(os::Aix::page_size() != -1, "must call os::init");
2242   return os::Aix::page_size();
2243 }
2244 
2245 #ifdef PRODUCT
2246 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2247                                     int err) {
2248   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2249           ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2250           strerror(err), err);
2251 }
2252 #endif
2253 
2254 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2255                                   const char* mesg) {
2256   assert(mesg != NULL, "mesg must be specified");
2257   if (!pd_commit_memory(addr, size, exec)) {
2258     // Add extra info in product mode for vm_exit_out_of_memory():
2259     PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2260     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2261   }
2262 }
2263 
2264 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2265 
2266   assert0(is_aligned_to(addr, os::vm_page_size()));
2267   assert0(is_aligned_to(size, os::vm_page_size()));
2268 
2269   vmembk_t* const vmi = vmembk_find(addr);
2270   assert0(vmi);
2271   vmi->assert_is_valid_subrange(addr, size);
2272 
2273   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2274 







2275   return true;
2276 }
2277 
2278 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2279   return pd_commit_memory(addr, size, exec);
2280 }
2281 
2282 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2283                                   size_t alignment_hint, bool exec,
2284                                   const char* mesg) {
2285   // Alignment_hint is ignored on this OS.
2286   pd_commit_memory_or_exit(addr, size, exec, mesg);
2287 }
2288 
2289 bool os::pd_uncommit_memory(char* addr, size_t size) {
2290   assert0(is_aligned_to(addr, os::vm_page_size()));
2291   assert0(is_aligned_to(size, os::vm_page_size()));
2292 
2293   // Dynamically do different things for mmap/shmat.
2294   const vmembk_t* const vmi = vmembk_find(addr);
2295   assert0(vmi);
2296   vmi->assert_is_valid_subrange(addr, size);
2297 
2298   if (vmi->type == VMEM_SHMATED) {
2299     return uncommit_shmated_memory(addr, size);
2300   } else {
2301     return uncommit_mmaped_memory(addr, size);
2302   }
2303 }
2304 
2305 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2306   // Do not call this; no need to commit stack pages on AIX.
2307   ShouldNotReachHere();
2308   return true;
2309 }
2310 
2311 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2312   // Do not call this; no need to commit stack pages on AIX.
2313   ShouldNotReachHere();
2314   return true;
2315 }


2373   const size_t alignment_hint0 =
2374     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2375 
2376   // In 4K mode always use mmap.
2377   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2378   if (os::vm_page_size() == SIZE_4K) {
2379     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2380   } else {
2381     if (bytes >= Use64KPagesThreshold) {
2382       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2383     } else {
2384       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2385     }
2386   }
2387 }
2388 
2389 bool os::pd_release_memory(char* addr, size_t size) {
2390 
2391   // Dynamically do different things for mmap/shmat.
2392   vmembk_t* const vmi = vmembk_find(addr);
2393   assert0(vmi);
2394 
2395   // Always round to os::vm_page_size(), which may be larger than 4K.
2396   size = align_size_up(size, os::vm_page_size());
2397   addr = (char *)align_ptr_up(addr, os::vm_page_size());
2398 
2399   bool rc = false;
2400   bool remove_bookkeeping = false;
2401   if (vmi->type == VMEM_SHMATED) {
2402     // For shmatted memory, we do:
2403     // - If the user wants to release the whole range, release the memory (shmdt).
2404     // - If the user only wants to release a partial range, uncommit (disclaim) that
2405     //   range. That way, at least, we do not use the memory anymore (but still
2406     //   occupy page table space).
2407     vmi->assert_is_valid_subrange(addr, size);
2408     if (addr == vmi->addr && size == vmi->size) {
2409       rc = release_shmated_memory(addr, size);
2410       remove_bookkeeping = true;
2411     } else {
2412       rc = uncommit_shmated_memory(addr, size);
2413     }


2449   // mprotect success check
2450   //
2451   // Mprotect said it changed the protection but can I believe it?
2452   //
2453   // To be sure I need to check the protection afterwards. Try to
2454   // read from protected memory and check whether that causes a segfault.
2455   //
2456   if (!os::Aix::xpg_sus_mode()) {
2457 
2458     if (CanUseSafeFetch32()) {
2459 
2460       const bool read_protected =
2461         (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2462          SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
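      // Note: two probes with different magic values. Only if both probes come
      // back as their respective defaults do we conclude the read faulted; this
      // guards against the (unlikely) case that the word at 'addr' happens to
      // equal one of the magic values.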
2463 
2464       if (prot & PROT_READ) {
2465         rc = !read_protected;
2466       } else {
2467         rc = read_protected;
2468       }


















2469     }
2470   }
2471   if (!rc) {
2472     assert(false, "mprotect failed.");
2473   }




2474   return rc;
2475 }
2476 
2477 // Set protections specified
2478 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2479   unsigned int p = 0;
2480   switch (prot) {
2481   case MEM_PROT_NONE: p = PROT_NONE; break;
2482   case MEM_PROT_READ: p = PROT_READ; break;
2483   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2484   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2485   default:
2486     ShouldNotReachHere();
2487   }
2488   // is_committed is unused.
2489   return checked_mprotect(addr, size, p);
2490 }
2491 
2492 bool os::guard_memory(char* addr, size_t size) {
2493   return checked_mprotect(addr, size, PROT_NONE);
2494 }
2495 
2496 bool os::unguard_memory(char* addr, size_t size) {
2497   return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2498 }
2499 
2500 // Large page support
2501 
2502 static size_t _large_page_size = 0;
2503 
2504 // Enable large page support if OS allows that.
2505 void os::large_page_init() {
2506   return; // Nothing to do. See query_multipage_support and friends.
2507 }
2508 
2509 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2510   // "exec" is passed in but not used. Creating the shared image for
2511   // the code cache doesn't have an SHM_X executable permission to check.
2512   Unimplemented();
2513   return 0;

2514 }
2515 
2516 bool os::release_memory_special(char* base, size_t bytes) {
2517   // Detaching the SHM segment will also delete it, see reserve_memory_special().
2518   Unimplemented();
2519   return false;
2520 }
2521 
2522 size_t os::large_page_size() {
2523   return _large_page_size;
2524 }
2525 
2526 bool os::can_commit_large_page_memory() {
2527   // Does not matter, we do not support huge pages.
2528   return false;
2529 }
2530 
2531 bool os::can_execute_large_page_memory() {
2532   // Does not matter, we do not support huge pages.
2533   return false;


2945 // they typically will bring down the process immediately.
2946 bool unblock_program_error_signals() {
2947   sigset_t set;
2948   ::sigemptyset(&set);
2949   ::sigaddset(&set, SIGILL);
2950   ::sigaddset(&set, SIGBUS);
2951   ::sigaddset(&set, SIGFPE);
2952   ::sigaddset(&set, SIGSEGV);
2953   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2954 }
2955 
2956 // Renamed from 'signalHandler' to avoid collision with other shared libs.
2957 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2958   assert(info != NULL && uc != NULL, "it must be old kernel");
2959 
2960   // Never leave program error signals blocked;
2961   // on all our platforms they would bring down the process immediately when
2962   // getting raised while being blocked.
2963   unblock_program_error_signals();
2964 

2965   JVM_handle_aix_signal(sig, info, uc, true);

2966 }
2967 
2968 // This boolean allows users to forward their own non-matching signals
2969 // to JVM_handle_aix_signal, harmlessly.
2970 bool os::Aix::signal_handlers_are_installed = false;
2971 
2972 // For signal-chaining
2973 struct sigaction sigact[NSIG];
2974 sigset_t sigs;
2975 bool os::Aix::libjsig_is_loaded = false;
2976 typedef struct sigaction *(*get_signal_t)(int);
2977 get_signal_t os::Aix::get_signal_action = NULL;
2978 
2979 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2980   struct sigaction *actp = NULL;
2981 
2982   if (libjsig_is_loaded) {
2983     // Retrieve the old signal handler from libjsig
2984     actp = (*get_signal_action)(sig);
2985   }


3067 int os::Aix::get_our_sigflags(int sig) {
3068   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3069   return sigflags[sig];
3070 }
3071 
3072 void os::Aix::set_our_sigflags(int sig, int flags) {
3073   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3074   if (sig > 0 && sig < NSIG) {
3075     sigflags[sig] = flags;
3076   }
3077 }
3078 
3079 void os::Aix::set_signal_handler(int sig, bool set_installed) {
3080   // Check for overwrite.
3081   struct sigaction oldAct;
3082   sigaction(sig, (struct sigaction*)NULL, &oldAct);
3083 
3084   void* oldhand = oldAct.sa_sigaction
3085     ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3086     : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3087   // Renamed 'signalHandler' to avoid collision with other shared libs.
3088   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3089       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3090       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3091     if (AllowUserSignalHandlers || !set_installed) {
3092       // Do not overwrite; user takes responsibility to forward to us.
3093       return;
3094     } else if (UseSignalChaining) {
3095       // save the old handler in jvm
3096       save_preinstalled_handler(sig, oldAct);
3097       // libjsig also interposes the sigaction() call below and saves the
3098       // old sigaction on its own.
3099     } else {
3100       fatal("Encountered unexpected pre-existing sigaction handler "
3101             "%#lx for signal %d.", (long)oldhand, sig);
3102     }
3103   }
3104 
3105   struct sigaction sigAct;
3106   sigfillset(&(sigAct.sa_mask));
3107   if (!set_installed) {
3108     sigAct.sa_handler = SIG_DFL;
3109     sigAct.sa_flags = SA_RESTART;
3110   } else {
3111     // Renamed 'signalHandler' to avoid collision with other shared libs.
3112     sigAct.sa_sigaction = javaSignalHandler;
3113     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3114   }
3115   // Save the flags we set ourselves.
3116   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3117   sigflags[sig] = sigAct.sa_flags;
3118 
3119   int ret = sigaction(sig, &sigAct, &oldAct);
3120   assert(ret == 0, "check");
3121 
3122   void* oldhand2 = oldAct.sa_sigaction
3123                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3124                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3125   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3126 }
3127 
3128 // install signal handlers for signals that HotSpot needs to
3129 // handle in order to support Java-level exception handling.
3130 void os::Aix::install_signal_handlers() {
3131   if (!signal_handlers_are_installed) {


3283     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3284     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3285     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3286     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3287   }
3288 
3289   DO_SIGNAL_CHECK(SR_signum);
3290 }
3291 
3292 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3293 
3294 static os_sigaction_t os_sigaction = NULL;
3295 
3296 void os::Aix::check_signal_handler(int sig) {
3297   char buf[O_BUFLEN];
3298   address jvmHandler = NULL;
3299 
3300   struct sigaction act;
3301   if (os_sigaction == NULL) {
3302     // only trust the default sigaction, in case it has been interposed
3303     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3304     if (os_sigaction == NULL) return;
3305   }
3306 
3307   os_sigaction(sig, (struct sigaction*)NULL, &act);
3308 
3309   address thisHandler = (act.sa_flags & SA_SIGINFO)
3310     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3311     : CAST_FROM_FN_PTR(address, act.sa_handler);
3312 
3313   switch(sig) {
3314   case SIGSEGV:
3315   case SIGBUS:
3316   case SIGFPE:
3317   case SIGPIPE:
3318   case SIGILL:
3319   case SIGXFSZ:
3320     // Renamed 'signalHandler' to avoid collision with other shared libs.
3321     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3322     break;
3323 
3324   case SHUTDOWN1_SIGNAL:
3325   case SHUTDOWN2_SIGNAL:
3326   case SHUTDOWN3_SIGNAL:
3327   case BREAK_SIGNAL:
3328     jvmHandler = (address)user_handler();
3329     break;
3330 
3331   default:
3332     if (sig == SR_signum) {
3333       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3334     } else {
3335       return;
3336     }
3337     break;
3338   }
3339 
3340   if (thisHandler != jvmHandler) {


3366 
3367 const char* os::exception_name(int exception_code, char* buf, size_t size) {
3368   if (0 < exception_code && exception_code <= SIGRTMAX) {
3369     // signal
3370     if (!signal_name(exception_code, buf, size)) {
3371       jio_snprintf(buf, size, "SIG%d", exception_code);
3372     }
3373     return buf;
3374   } else {
3375     return NULL;
3376   }
3377 }
3378 
3379 // To install functions for atexit system call
3380 extern "C" {
3381   static void perfMemory_exit_helper() {
3382     perfMemory_exit();
3383   }
3384 }
3385 

























































































































































3386 // This is called _before_ the most of global arguments have been parsed.
3387 void os::init(void) {

3388   // This is basic; we want to know if that ever changes.
3389   // (Shared memory boundary is supposed to be 256M aligned.)
3390   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3391 




3392   // First off, we need to know whether we run on AIX or PASE, and
3393   // the OS level we run on.
3394   os::Aix::initialize_os_info();
3395 
3396   // Scan environment (SPEC1170 behaviour, etc).
3397   os::Aix::scan_environment();
3398 
3399   // Check which pages are supported by AIX.
3400   query_multipage_support();
3401 
3402   // Act like we only have one page size by eliminating corner cases which
3403   // we did not support very well anyway.
3404   // We have two input conditions:
3405   // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3406   //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3407   //    setting.
3408   //    Data segment page size is important for us because it defines the thread stack page
3409   //    size, which is needed for guard page handling, stack banging etc.
3410   // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3411   //    and should be allocated with 64k pages.
3412   //
3413   // So, we do the following:
3414   // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
3415   // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
3416   // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
3417   // 64k          no              --- AIX 5.2 ? ---
3418   // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)
3419 
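  // Illustrative sketch only (the actual code follows below): the table boils
  // down to a branch on the data segment page size, roughly
  //
  //   if (data segment page size == SIZE_4K) {
  //     // 4K stacks; use 64K pages for the heap only if they can be
  //     // switched on dynamically (g_multipage_support.can_use_64K_pages).
  //   } else {
  //     // 64K data segment: 64K stacks and 64K pages (see the else branch below).
  //   }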


3448       FLAG_SET_ERGO(bool, Use64KPages, false);
3449     }
3450   } else {
3451     // datapsize = 64k. Data segment, thread stacks are 64k paged.
3452     //   This normally means that we can allocate 64k pages dynamically.
3453     //   (There is one special case where this may be false: EXTSHM=on.
3454     //    but we decided to not support that mode).
3455     assert0(g_multipage_support.can_use_64K_pages);
3456     Aix::_page_size = SIZE_64K;
3457     trcVerbose("64K page mode");
3458     FLAG_SET_ERGO(bool, Use64KPages, true);
3459   }
3460 
3461   // Short-wire stack page size to base page size; if that works, we just remove
3462   // that stack page size altogether.
3463   Aix::_stack_page_size = Aix::_page_size;
3464 
3465   // For now UseLargePages is just ignored.
3466   FLAG_SET_ERGO(bool, UseLargePages, false);
3467   _page_sizes[0] = 0;

3468 
3469   // debug trace
3470   trcVerbose("os::vm_page_size %s\n", describe_pagesize(os::vm_page_size()));
3471 
3472   // Next, we need to initialize libo4 and libperfstat libraries.
3473   if (os::Aix::on_pase()) {
3474     os::Aix::initialize_libo4();
3475   } else {
3476     os::Aix::initialize_libperfstat();
3477   }
3478 
3479   // Reset the perfstat information provided by ODM.
3480   if (os::Aix::on_aix()) {
3481     libperfstat::perfstat_reset();
3482   }
3483 
3484   // Now initialize basic system properties. Note that for some of the values we
3485   // need libperfstat etc.
3486   os::Aix::initialize_system_info();
3487 
3488   _initial_pid = getpid();
3489 
3490   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3491 
3492   init_random(1234567);
3493 
3494   ThreadCritical::initialize();
3495 
3496   // Main_thread points to the aboriginal thread.
3497   Aix::_main_thread = pthread_self();
3498 
3499   initial_time_count = os::elapsed_counter();
3500 
3501   // If the pagesize of the VM is greater than 8K, determine the appropriate
3502   // number of initial guard pages. The user can change this with the
3503   // command line arguments, if needed.
3504   if (vm_page_size() > (int)Aix::vm_default_page_size()) {
3505     StackYellowPages = 1;
3506     StackRedPages = 1;
3507     StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
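    // Worked example (illustrative numbers only): with a 4K default page size,
    // a 64K vm page size and a shadow zone of 20 default pages (80K), the zone
    // is rounded up to 128K, i.e. StackShadowPages becomes 2 (64K pages).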
3508   }
3509 }
3510 
3511 // This is called _after_ the global arguments have been parsed.
3512 jint os::init_2(void) {
3513 






3514   trcVerbose("processor count: %d", os::_processor_count);
3515   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3516 
3517   // Initially build up the loaded dll map.
3518   LoadedLibraries::reload();




3519 
3520   const int page_size = Aix::page_size();
3521   const int map_size = page_size;
3522 
3523   address map_address = (address) MAP_FAILED;
3524   const int prot  = PROT_READ;
3525   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3526 
3527   // Use optimized addresses for the polling page,
3528   // e.g. map it to a special 32-bit address.
3529   if (OptimizePollingPageLocation) {
3530     // architecture-specific list of address wishes:
3531     address address_wishes[] = {
3532       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3533       // PPC64: all address wishes are non-negative 32 bit values where
3534       // the lower 16 bits are all zero. we can load these addresses
3535       // with a single ppc_lis instruction.
3536       (address) 0x30000000, (address) 0x31000000,
3537       (address) 0x32000000, (address) 0x33000000,
3538       (address) 0x40000000, (address) 0x41000000,
3539       (address) 0x42000000, (address) 0x43000000,
3540       (address) 0x50000000, (address) 0x51000000,
3541       (address) 0x52000000, (address) 0x53000000,
3542       (address) 0x60000000, (address) 0x61000000,
3543       (address) 0x62000000, (address) 0x63000000
3544     };
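    // For example, 0x31000000 can be materialized with a single "lis Rx, 0x3100"
    // (upper halfword 0x3100, lower halfword zero).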
3545     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3546 
3547     // iterate over the list of address wishes:
3548     for (int i=0; i<address_wishes_length; i++) {
3549       // Try to map with current address wish.
3550       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3551       // fail if the address is already mapped.
3552       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3553                                      map_size, prot,
3554                                      flags | MAP_FIXED,
3555                                      -1, 0);
3556       if (Verbose) {
3557         fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3558                 address_wishes[i], map_address + (ssize_t)page_size);
3559       }
3560 
3561       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3562         // Map succeeded and map_address is at wished address, exit loop.
3563         break;
3564       }
3565 
3566       if (map_address != (address) MAP_FAILED) {
3567         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3568         ::munmap(map_address, map_size);
3569         map_address = (address) MAP_FAILED;
3570       }
3571       // Map failed, continue loop.
3572     }
3573   } // end OptimizePollingPageLocation
3574 
3575   if (map_address == (address) MAP_FAILED) {
3576     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3577   }
3578   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3579   os::set_polling_page(map_address);
3580 
3581   if (!UseMembar) {
3582     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3583     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3584     os::set_memory_serialize_page(mem_serialize_page);
3585 
3586 #ifndef PRODUCT
3587     if (Verbose && PrintMiscellaneous) {
3588       tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3589     }
3590 #endif
3591   }
3592 
3593   // initialize suspend/resume support - must do this before signal_sets_init()
3594   if (SR_initialize() != 0) {
3595     perror("SR_initialize failed");
3596     return JNI_ERR;
3597   }
3598 
3599   Aix::signal_sets_init();
3600   Aix::install_signal_handlers();
3601 
3602   // Check minimum allowable stack size for thread creation and to initialize
3603   // the java system classes, including StackOverflowError - depends on page
3604   // size. Add a page for compiler2 recursion in main thread.
3605   // Add in 2*BytesPerWord times page size to account for VM stack during
3606   // class initialization depending on 32 or 64 bit VM.
3607   os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3608             (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
3609                      (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
3610 
3611   os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
3612 
3613   size_t threadStackSizeInBytes = ThreadStackSize * K;
3614   if (threadStackSizeInBytes != 0 &&
3615       threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3616     tty->print_cr("\nThe stack size specified is too small. "
3617                   "Specify at least %dk",
3618                   os::Aix::min_stack_allowed / K);
3619     return JNI_ERR;
3620   }
3621 
3622   // Make the stack size a multiple of the page size so that
3623   // the yellow/red zones can be guarded.
3624   // Note that this can be 0, if no default stacksize was set.
3625   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3626 
3627   Aix::libpthread_init();



3628 
3629   if (MaxFDLimit) {
3630     // Set the number of file descriptors to max. Print out an error
3631     // if getrlimit/setrlimit fails but continue regardless.
3632     struct rlimit nbr_files;
3633     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3634     if (status != 0) {
3635       if (PrintMiscellaneous && (Verbose || WizardMode))
3636         perror("os::init_2 getrlimit failed");
3637     } else {
3638       nbr_files.rlim_cur = nbr_files.rlim_max;
3639       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3640       if (status != 0) {
3641         if (PrintMiscellaneous && (Verbose || WizardMode))
3642           perror("os::init_2 setrlimit failed");
3643       }
3644     }
3645   }
3646 
3647   if (PerfAllowAtExitRegistration) {
3648     // Only register atexit functions if PerfAllowAtExitRegistration is set.
3649     // Atexit functions can be delayed until process exit time, which
3650     // can be problematic for embedded VM situations. Embedded VMs should
3651     // call DestroyJavaVM() to assure that VM resources are released.
3652 
3653     // Note: perfMemory_exit_helper atexit function may be removed in
3654     // the future if the appropriate cleanup code can be added to the
3655     // VM_Exit VMOperation's doit method.
3656     if (atexit(perfMemory_exit_helper) != 0) {
3657       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3658     }
3659   }
3660 
3661   return JNI_OK;
3662 }
3663 
3664 // Mark the polling page as unreadable
3665 void os::make_polling_page_unreadable(void) {
3666   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3667     fatal("Could not disable polling page");
3668   }
3669 };


3729     // NULL context is unexpected, double-check this is the VMThread.
3730     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3731   }
3732 }
3733 
3734 // Suspends the target using the signal mechanism and then grabs the PC before
3735 // resuming the target. Used by the flat-profiler only
3736 ExtendedPC os::get_thread_pc(Thread* thread) {
3737   // Make sure that it is called by the watcher for the VMThread.
3738   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3739   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3740 
3741   PcFetcher fetcher(thread);
3742   fetcher.run();
3743   return fetcher.result();
3744 }
3745 
3746 ////////////////////////////////////////////////////////////////////////////////
3747 // debug support
3748 
3749 static address same_page(address x, address y) {
3750   intptr_t page_bits = -os::vm_page_size();
3751   if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
3752     return x;
3753   else if (x > y)
3754     return (address)(intptr_t(y) | ~page_bits) + 1;
3755   else
3756     return (address)(intptr_t(y) & page_bits);
3757 }
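// Illustrative behaviour of same_page(), assuming a 4K page size:
//   same_page((address)0x12345, (address)0x12fff) == (address)0x12345  (same page)
//   same_page((address)0x13000, (address)0x12abc) == (address)0x13000  (x above y: first address past y's page)
//   same_page((address)0x11000, (address)0x12abc) == (address)0x12000  (x below y: start of y's page)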
3758 
3759 bool os::find(address addr, outputStream* st) {
3760 
3761   st->print(PTR_FORMAT ": ", addr);
3762 
3763   loaded_module_t lm;
3764   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3765       LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3766     st->print("%s", lm.path);
3767     return true;
3768   }
3769 
3770   return false;
3771 }
3772 
3773 ////////////////////////////////////////////////////////////////////////////////
3774 // misc
3775 
3776 // This does not do anything on Aix. This is basically a hook for being
3777 // able to use structured exception handling (thread-local exception filters)
3778 // on, e.g., Win32.


4102   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4103   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4104   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4105 }
4106 
4107 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4108   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4109   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4110   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4111   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4112 }
4113 
4114 bool os::is_thread_cpu_time_supported() {
4115   return true;
4116 }
4117 
4118 // System loadavg support. Returns -1 if load average cannot be obtained.
4119 // For now just return the system wide load average (no processor sets).
4120 int os::loadavg(double values[], int nelem) {
4121 
4122   // Implemented using libperfstat on AIX.
4123 
4124   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4125   guarantee(values, "argument error");
4126 
4127   if (os::Aix::on_pase()) {
4128     Unimplemented();









4129     return -1;


4130   } else {

4131     // AIX: use libperfstat
4132     //
4133     // See also:
4134     // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4135     // /usr/include/libperfstat.h:
4136 
4137     // Use the already AIX version independent get_cpuinfo.
4138     os::Aix::cpuinfo_t ci;
4139     if (os::Aix::get_cpuinfo(&ci)) {
4140       for (int i = 0; i < nelem; i++) {
4141         values[i] = ci.loadavg[i];
4142       }
4143     } else {
4144       return -1;
4145     }
4146     return nelem;
4147   }
4148 }
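// Typical use (illustrative sketch, not part of this change):
//   double avg[3];
//   if (os::loadavg(avg, 3) == 3) {
//     // avg[0..2] now hold the 1, 5 and 15 minute load averages.
//   }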
4149 
4150 void os::pause() {
4151   char filename[MAX_PATH];
4152   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4153     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4154   } else {
4155     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4156   }
4157 
4158   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4159   if (fd != -1) {


4169 }
4170 
4171 bool os::Aix::is_primordial_thread() {
4172   if (pthread_self() == (pthread_t)1) {
4173     return true;
4174   } else {
4175     return false;
4176   }
4177 }
4178 
4179 // OS recognition (PASE/AIX, OS level). Call this before calling any of the
4180 // static functions Aix::on_pase() or Aix::os_version().
4181 void os::Aix::initialize_os_info() {
4182 
4183   assert(_on_pase == -1 && _os_version == -1, "already called.");
4184 
4185   struct utsname uts;
4186   memset(&uts, 0, sizeof(uts));
4187   strcpy(uts.sysname, "?");
4188   if (::uname(&uts) == -1) {
4189     trc("uname failed (%d)", errno);
4190     guarantee(0, "Could not determine whether we run on AIX or PASE");
4191   } else {
4192     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4193                "node \"%s\" machine \"%s\"\n",
4194                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4195     const int major = atoi(uts.version);
4196     assert(major > 0, "invalid OS version");
4197     const int minor = atoi(uts.release);
4198     assert(minor > 0, "invalid OS release");
4199     _os_version = (major << 8) | minor;
4200     if (strcmp(uts.sysname, "OS400") == 0) {
4201       Unimplemented();







4202     } else if (strcmp(uts.sysname, "AIX") == 0) {
4203       // We run on AIX. We do not support versions older than AIX 5.3.
4204       _on_pase = 0;
4205       if (_os_version < 0x0503) {
4206         trc("AIX release older than AIX 5.3 not supported.");
4207         assert(false, "AIX release too old.");
4208       } else {
4209         trcVerbose("We run on AIX %d.%d\n", major, minor);
4210       }
4211     } else {
4212       assert(false, "unknown OS");
4213     }
4214   }
4215 
4216   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4217 } // end: os::Aix::initialize_os_info()
4218 
4219 // Scan the environment for important settings which might affect the VM.
4220 // Trace out settings. Warn about invalid settings and/or correct them.
4221 //
4222 // Must run after os::Aix::initialize_os_info().
4223 void os::Aix::scan_environment() {
4224 
4225   char* p;
4226   int rc;
4227 
4228   // Warn explicitly if EXTSHM=ON is used. That switch changes how
4229   // System V shared memory behaves. One effect is that the page size of
4230   // shared memory cannot be changed dynamically, effectively preventing
4231   // large pages from working.
4232   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4233   // recommendation is (in OSS notes) to switch it off.
4234   p = ::getenv("EXTSHM");
4235   if (Verbose) {
4236     fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4237   }
4238   if (p && strcasecmp(p, "ON") == 0) {
4239     fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4240     _extshm = 1;








4241   } else {
4242     _extshm = 0;
4243   }
4244 
4245   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4246   // Not tested, not supported.
4247   //
4248   // Note that it might be worth the trouble to test and to require it, if only to
4249   // get useful return codes for mprotect.
4250   //
4251   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4252   // exec() ? before loading the libjvm ? ....)
4253   p = ::getenv("XPG_SUS_ENV");
4254   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4255   if (p && strcmp(p, "ON") == 0) {
4256     _xpg_sus_mode = 1;
4257     trc("Unsupported setting: XPG_SUS_ENV=ON");
4258     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4259     // clobber address ranges. If we ever want to support that, we have to do some
4260     // testing first.
4261     guarantee(false, "XPG_SUS_ENV=ON not supported");
4262   } else {
4263     _xpg_sus_mode = 0;
4264   }
4265 
4266   // Switch off AIX internal (pthread) guard pages. This has
4267   // immediate effect for any pthread_create calls which follow.












4268   p = ::getenv("AIXTHREAD_GUARDPAGES");
4269   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4270   rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4271   guarantee(rc == 0, "");
4272 
4273 } // end: os::Aix::scan_environment()
4274 
4275 // PASE: initialize the libo4 library (AS400 PASE porting library).
4276 void os::Aix::initialize_libo4() {
4277   Unimplemented();






4278 }
4279 
4280 // AIX: initialize the libperfstat library (we load this dynamically
4281 // because it is only available on AIX).
4282 void os::Aix::initialize_libperfstat() {
4283 
4284   assert(os::Aix::on_aix(), "AIX only");
4285 
4286   if (!libperfstat::init()) {
4287     trc("libperfstat initialization failed.");
4288     assert(false, "libperfstat initialization failed");
4289   } else {
4290     if (Verbose) {
4291       fprintf(stderr, "libperfstat initialized.\n");
4292     }
4293   }
4294 } // end: os::Aix::initialize_libperfstat
4295 
4296 /////////////////////////////////////////////////////////////////////////////
4297 // thread stack
4298 
4299 // Function to query the current stack size using pthread_getthrds_np.
4300 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4301   // This only works when invoked on a pthread. As we agreed not to use
4302   // primordial threads anyway, I assert here.
4303   guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4304 
4305   // Information about this api can be found (a) in the pthread.h header and
4306   // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4307   //
4308   // The use of this API to find out the current stack is kind of undefined.
4309   // But after a lot of tries and asking IBM about it, I concluded that it is safe
4310   // enough for cases where I let the pthread library create its stacks. For cases
4311   // where I create my own stack and pass it to pthread_create, it seems not to
4312   // work (the returned stack size in that case is 0).
4313 
4314   pthread_t tid = pthread_self();




  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 // According to the AIX OS doc #pragma alloca must be used
  27 // with C++ compiler before referencing the function alloca()
  28 #pragma alloca
  29 
  30 // no precompiled headers
  31 #include "classfile/classLoader.hpp"
  32 #include "classfile/systemDictionary.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "jvm_aix.h"
  39 #include "libo4.hpp"
  40 #include "libperfstat_aix.hpp"
  41 #include "loadlib_aix.hpp"
  42 #include "memory/allocation.inline.hpp"
  43 #include "memory/filemap.hpp"
  44 #include "misc_aix.hpp"
  45 #include "mutex_aix.inline.hpp"
  46 #include "oops/oop.inline.hpp"
  47 #include "os_aix.inline.hpp"
  48 #include "os_share_aix.hpp"
  49 #include "porting_aix.hpp"
  50 #include "prims/jniFastGetField.hpp"
  51 #include "prims/jvm.h"
  52 #include "prims/jvm_misc.hpp"
  53 #include "runtime/arguments.hpp"
  54 #include "runtime/atomic.inline.hpp"
  55 #include "runtime/extendedPC.hpp"
  56 #include "runtime/globals.hpp"
  57 #include "runtime/interfaceSupport.hpp"
  58 #include "runtime/java.hpp"
  59 #include "runtime/javaCalls.hpp"


  92 #include <stdio.h>
  93 #include <string.h>
  94 #include <unistd.h>
  95 #include <sys/ioctl.h>
  96 #include <sys/ipc.h>
  97 #include <sys/mman.h>
  98 #include <sys/resource.h>
  99 #include <sys/select.h>
 100 #include <sys/shm.h>
 101 #include <sys/socket.h>
 102 #include <sys/stat.h>
 103 #include <sys/sysinfo.h>
 104 #include <sys/systemcfg.h>
 105 #include <sys/time.h>
 106 #include <sys/times.h>
 107 #include <sys/types.h>
 108 #include <sys/utsname.h>
 109 #include <sys/vminfo.h>
 110 #include <sys/wait.h>
 111 
 112 // Missing prototypes for various system APIs.
 113 extern "C"
 114 int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);






 115 

 116 #if !defined(_AIXVERSION_610)
 117 extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
 118 extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
 119 extern "C" int getargs(procsinfo*, int, char*, int);




 120 #endif
 121 
 122 #define MAX_PATH (2 * K)
 123 
 124 // for timer info max values which include all bits
 125 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 126 // for multipage initialization error analysis (in 'g_multipage_error')
 127 #define ERROR_MP_OS_TOO_OLD                          100
 128 #define ERROR_MP_EXTSHM_ACTIVE                       101
 129 #define ERROR_MP_VMGETINFO_FAILED                    102
 130 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 131 
 132 // The semantics in this file are thus that codeptr_t is a *real code ptr*.
 133 // This means that any function taking codeptr_t as arguments will assume
 134 // a real codeptr and won't handle function descriptors (eg getFuncName),
 135 // whereas functions taking address as args will deal with function
 136 // descriptors (eg os::dll_address_to_library_name).
 137 typedef unsigned int* codeptr_t;
 138 
 139 // Typedefs for stackslots, stack pointers, pointers to op codes.
 140 typedef unsigned long stackslot_t;
 141 typedef stackslot_t* stackptr_t;
 142 










 143 // Query dimensions of the stack of the calling thread.
 144 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
 145 
 146 // Function to check a given stack pointer against given stack limits.
 147 inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
 148   if (((uintptr_t)sp) & 0x7) {
 149     return false;
 150   }
 151   if (sp > stack_base) {
 152     return false;
 153   }
 154   if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
 155     return false;
 156   }
 157   return true;
 158 }
 159 
 160 // Returns true if function is a valid codepointer.
 161 inline bool is_valid_codepointer(codeptr_t p) {
 162   if (!p) {
 163     return false;
 164   }
 165   if (((uintptr_t)p) & 0x3) {
 166     return false;
 167   }
 168   if (LoadedLibraries::find_for_text_address(p, NULL) == NULL) {
 169     return false;
 170   }
 171   return true;
 172 }
 173 
 174 // Macro to check a given stack pointer against given stack limits and to die if test fails.
 175 #define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
 176     guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
 177 }
 178 
 179 // Macro to check the current stack pointer against given stacklimits.
 180 #define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
 181   address sp; \
 182   sp = os::current_stack_pointer(); \
 183   CHECK_STACK_PTR(sp, stack_base, stack_size); \
 184 }
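// Typical use of these checks (see java_start() and create_attached_thread() below):
//   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());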
 185 
 186 static void vmembk_print_on(outputStream* os);
 187 
 188 ////////////////////////////////////////////////////////////////////////////////
 189 // global variables (for a description see os_aix.hpp)
 190 
 191 julong    os::Aix::_physical_memory = 0;
 192 
 193 pthread_t os::Aix::_main_thread = ((pthread_t)0);
 194 int       os::Aix::_page_size = -1;
 195 
 196 // -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
 197 int       os::Aix::_on_pase = -1;
 198 
 199 // -1 = uninitialized, otherwise os version in the form 0xMMmm - MM:major, mm:minor
 200 //  E.g. 0x0601 for  AIX 6.1 or 0x0504 for OS/400 V5R4
 201 int       os::Aix::_os_version = -1;
 202 
 203 int       os::Aix::_stack_page_size = -1;
 204 
 205 // -1 = uninitialized, 0 - no, 1 - yes
 206 int       os::Aix::_xpg_sus_mode = -1;
 207 
 208 // -1 = uninitialized, 0 - no, 1 - yes
 209 int       os::Aix::_extshm = -1;

 210 
 211 ////////////////////////////////////////////////////////////////////////////////
 212 // local variables
 213 

 214 static jlong    initial_time_count = 0;
 215 static int      clock_tics_per_sec = 100;
 216 static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
 217 static bool     check_signals      = true;

 218 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 219 static sigset_t SR_sigset;
 220 
 221 // process break recorded at startup
 222 static address g_brk_at_startup = NULL;
 223 
 224 // This describes the state of multipage support of the underlying
 225 // OS. Note that this is of no interest to the outside world and
 226 // therefore should not be defined in the AIX class.
 227 //
 228 // AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
 229 // latter two (16M "large" resp. 16G "huge" pages) require special
 230 // setup and are normally not available.
 231 //
 232 // AIX supports multiple page sizes per process, for:
 233 //  - Stack (of the primordial thread, so not relevant for us)
 234 //  - Data - data, bss, heap, for us also pthread stacks
 235 //  - Text - text code
 236 //  - shared memory
 237 //
 238 // Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
 239 // and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
 240 //
 241 // For shared memory, page size can be set dynamically via
 242 // shmctl(). Different shared memory regions can have different page
 243 // sizes.


 254   bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
 255   bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
 256   int error;                  // Error describing if something went wrong at multipage init.
 257 } g_multipage_support = {
 258   (size_t) -1,
 259   (size_t) -1,
 260   (size_t) -1,
 261   (size_t) -1,
 262   (size_t) -1,
 263   false, false,
 264   0
 265 };
 266 
 267 // We must not accidentally allocate memory close to the BRK - even if
 268 // that would work - because then we prevent the BRK segment from
 269 // growing which may result in a malloc OOM even though there is
 270 // enough memory. The problem only arises if we shmat() or mmap() at
 271 // a specific wish address, e.g. to place the heap in a
 272 // compressed-oops-friendly way.
 273 static bool is_close_to_brk(address a) {
 274   assert0(g_brk_at_startup != NULL);
 275   if (a >= g_brk_at_startup &&
 276       a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
 277     return true;
 278   }
 279   return false;
 280 }
 281 
 282 julong os::available_memory() {
 283   return Aix::available_memory();
 284 }
 285 
 286 julong os::Aix::available_memory() {
 287   // Avoid expensive API call here, as returned value will always be null.
 288   if (os::Aix::on_pase()) {
 289     return 0x0LL;
 290   }
 291   os::Aix::meminfo_t mi;
 292   if (os::Aix::get_meminfo(&mi)) {
 293     return mi.real_free;
 294   } else {
 295     return ULONG_MAX;
 296   }
 297 }
 298 
 299 julong os::physical_memory() {
 300   return Aix::physical_memory();
 301 }
 302 
 303 // Return true if user is running as root.
 304 
 305 bool os::have_special_privileges() {
 306   static bool init = false;
 307   static bool privileges = false;
 308   if (!init) {
 309     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 310     init = true;
 311   }
 312   return privileges;
 313 }
 314 
 315 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 316 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 317 static bool my_disclaim64(char* addr, size_t size) {
 318 
 319   if (size == 0) {
 320     return true;
 321   }
 322 
 323   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 324   const unsigned int maxDisclaimSize = 0x40000000;
 325 
 326   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 327   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 328 
 329   char* p = addr;
 330 
 331   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
 332     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 333       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 334       return false;
 335     }
 336     p += maxDisclaimSize;
 337   }
 338 
 339   if (lastDisclaimSize > 0) {
 340     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 341       trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 342       return false;
 343     }
 344   }
 345 
 346   return true;
 347 }
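// For example, disclaiming a 2.5 GB range (size = 0xA0000000) results in two
// full 1 GB disclaims plus one trailing 0.5 GB disclaim.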
 348 
 349 // Cpu architecture string
 350 #if defined(PPC32)
 351 static char cpu_arch[] = "ppc";
 352 #elif defined(PPC64)
 353 static char cpu_arch[] = "ppc64";
 354 #else
 355 #error Add appropriate cpu_arch setting
 356 #endif
 357 
 358 // Wrap the function "vmgetinfo" which is not available on older OS releases
 359 static int checked_vmgetinfo(void *out, int command, int arg) {
 360   if (os::Aix::on_pase() && os::Aix::os_version() < 0x0601) {
 361     guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
 362   }
 363   return ::vmgetinfo(out, command, arg);
 364 }
 365 
 366 // Given an address, returns the size of the page backing that address.
 367 size_t os::Aix::query_pagesize(void* addr) {
 368 
 369   if (os::Aix::on_pase() && os::Aix::os_version() < 0x0601) {
 370     // AS/400 older than V6R1: no vmgetinfo here, default to 4K
 371     return SIZE_4K;
 372   }
 373 
 374   vm_page_info pi;
 375   pi.addr = (uint64_t)addr;
 376   if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
 377     return pi.pagesize;
 378   } else {

 379     assert(false, "vmgetinfo failed to retrieve page size");
 380     return SIZE_4K;
 381   }






 382 }
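// Example (illustrative sketch only): this can be used to learn the page size
// backing any mapped address, e.g. a heap allocation or a stack address:
//   char* p = (char*) ::malloc(16);
//   const size_t psize = os::Aix::query_pagesize(p);   // typically 4K or 64K
//   ::free(p);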
 383 
 384 void os::Aix::initialize_system_info() {
 385 
 386   // Get the number of online (logical) cpus instead of configured ones.
 387   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
 388   assert(_processor_count > 0, "_processor_count must be > 0");
 389 
 390   // Retrieve total physical storage.
 391   os::Aix::meminfo_t mi;
 392   if (!os::Aix::get_meminfo(&mi)) {

 393     assert(false, "os::Aix::get_meminfo failed.");
 394   }
 395   _physical_memory = (julong) mi.real_total;
 396 }
 397 
 398 // Helper function for tracing page sizes.
 399 static const char* describe_pagesize(size_t pagesize) {
 400   switch (pagesize) {
 401     case SIZE_4K : return "4K";
 402     case SIZE_64K: return "64K";
 403     case SIZE_16M: return "16M";
 404     case SIZE_16G: return "16G";

 405     default:
 406       assert(false, "surprise");
 407       return "??";
 408   }
 409 }
 410 























































































































































 411 void os::init_system_properties_values() {
 412 
 413 #define DEFAULT_LIBPATH "/lib:/usr/lib"
 414 #define EXTENSIONS_DIR  "/lib/ext"
 415 
 416   // Buffer that fits several sprintfs.
 417   // Note that the space for the trailing null is provided
 418   // by the nulls included by the sizeof operator.
 419   const size_t bufsize =
 420     MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
 421          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
 422   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 423 
 424   // sysclasspath, java_home, dll_dir
 425   {
 426     char *pslash;
 427     os::jvm_path(buf, bufsize);
 428 
 429     // Found the full path to libjvm.so.
 430     // Now cut the path to <java_home>/jre if we can.
 431     pslash = strrchr(buf, '/');
 432     if (pslash != NULL) {
 433       *pslash = '\0';            // Get rid of /libjvm.so.
 434     }
 435     pslash = strrchr(buf, '/');
 436     if (pslash != NULL) {
 437       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
 438     }
 439     Arguments::set_dll_dir(buf);
 440 
 441     if (pslash != NULL) {
 442       pslash = strrchr(buf, '/');
 443       if (pslash != NULL) {
 444         *pslash = '\0';          // Get rid of /<arch>.
 445         pslash = strrchr(buf, '/');
 446         if (pslash != NULL) {
 447           *pslash = '\0';        // Get rid of /lib.
 448         }
 449       }
 450     }
 451     Arguments::set_java_home(buf);
 452     set_boot_path('/', ':');
 453   }
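  // Worked example (illustrative, assuming a classic <jdk>/jre/lib/<arch>/server layout):
  //   jvm_path:  /opt/jdk/jre/lib/ppc64/server/libjvm.so
  //   dll_dir:   /opt/jdk/jre/lib/ppc64   (after cutting /libjvm.so and /server)
  //   java_home: /opt/jdk/jre             (after further cutting /ppc64 and /lib)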
 454 


 590       // Only the VM thread handles BREAK_SIGNAL ...
 591       pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
 592     } else {
 593       // ... all other threads block BREAK_SIGNAL
 594       pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
 595     }
 596   }
 597 }
 598 
 599 // retrieve memory information.
 600 // Returns false if something went wrong;
 601 // content of pmi undefined in this case.
 602 bool os::Aix::get_meminfo(meminfo_t* pmi) {
 603 
 604   assert(pmi, "get_meminfo: invalid parameter");
 605 
 606   memset(pmi, 0, sizeof(meminfo_t));
 607 
 608   if (os::Aix::on_pase()) {
 609 
 610     // On PASE, use the libo4 porting library
 611 
 612     unsigned long long virt_total = 0;
 613     unsigned long long real_total = 0;
 614     unsigned long long real_free = 0;
 615     unsigned long long pgsp_total = 0;
 616     unsigned long long pgsp_free = 0;
 617 
 618     if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
 619       pmi->virt_total = virt_total;
 620       pmi->real_total = real_total;
 621       pmi->real_free = real_free;
 622       pmi->pgsp_total = pgsp_total;
 623       pmi->pgsp_free = pgsp_free;
 624       return true;
 625     }
 626 
 627     return false;
 628 
 629   } else {
 630 
 631     // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
 632     // See:
 633     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 634     //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
 635     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 636     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 637 
 638     perfstat_memory_total_t psmt;
 639     memset (&psmt, '\0', sizeof(psmt));
 640     const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
 641     if (rc == -1) {
 642       fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
 643       assert(0, "perfstat_memory_total() failed");
 644       return false;
 645     }
 646 


 650     // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
 651     //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
 652     // The fields of perfstat_memory_total_t:
 653     // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
 654     // u_longlong_t real_total         Total real memory (in 4 KB pages).
 655     // u_longlong_t real_free          Free real memory (in 4 KB pages).
 656     // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
 657     // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
 658 
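    // E.g. a real_total of 1048576 (4K pages) corresponds to 1048576 * 4096 bytes = 4 GB.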
 659     pmi->virt_total = psmt.virt_total * 4096;
 660     pmi->real_total = psmt.real_total * 4096;
 661     pmi->real_free = psmt.real_free * 4096;
 662     pmi->pgsp_total = psmt.pgsp_total * 4096;
 663     pmi->pgsp_free = psmt.pgsp_free * 4096;
 664 
 665     return true;
 666 
 667   }
 668 } // end os::Aix::get_meminfo
 669 











































































 670 //////////////////////////////////////////////////////////////////////////////
 671 // create new thread
 672 
 673 // Thread start routine for all newly created threads
 674 static void *java_start(Thread *thread) {
 675 
 676   // find out my own stack dimensions
 677   {
 678     // actually, this should do exactly the same as thread->record_stack_base_and_size...
 679     address base = 0;
 680     size_t size = 0;
 681     query_stack_dimensions(&base, &size);
 682     thread->set_stack_base(base);
 683     thread->set_stack_size(size);
 684   }
 685 
 686   const pthread_t pthread_id = ::pthread_self();
 687   const tid_t kernel_thread_id = ::thread_self();
 688 
 689   trcVerbose("newborn Thread : pthread-id %u, ktid " UINT64_FORMAT
 690     ", stack %p ... %p, stacksize 0x%IX (%IB)",
 691     pthread_id, kernel_thread_id,
 692     thread->stack_base() - thread->stack_size(),
 693     thread->stack_base(),
 694     thread->stack_size(),
 695     thread->stack_size());
 696 
 697   // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
 698   // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
 699   // tools hook pthread_create(). In this case, we may run into problems establishing
 700   // guard pages on those stacks, because the stacks may reside in memory which is not
 701   // protectable (shmated).
 702   if (thread->stack_base() > ::sbrk(0)) {
 703     fprintf(stderr, "Thread " UINT64_FORMAT ": stack not in data segment.", (uint64_t) pthread_id);
 704   }
 705 
 706   // Do some sanity checks.
 707   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
 708 
 709   // Try to randomize the cache line index of hot stack frames.
 710   // This helps when threads with the same stack traces evict each other's
 711   // cache lines. The threads can be either from the same JVM instance, or
 712   // from different JVM instances. The benefit is especially pronounced for
 713   // processors with hyperthreading technology.
 714 
 715   static int counter = 0;
 716   int pid = os::current_process_id();
 717   alloca(((pid ^ counter++) & 7) * 128);
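  // (pid ^ counter) & 7 yields a value in [0, 7], so the stack pointer is shifted
  // by a multiple of 128 bytes (0 .. 896), i.e. by whole cache lines on POWER.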
 718 
 719   ThreadLocalStorage::set_thread(thread);
 720 
 721   OSThread* osthread = thread->osthread();
 722 
 723   // Thread_id is pthread id.
 724   osthread->set_thread_id(pthread_id);
 725 
 726   // Initialize signal mask for this thread.
 727   os::Aix::hotspot_sigmask(thread);
 728 
 729   // Initialize floating point control register.
 730   os::Aix::init_thread_fpu_state();
 731 
 732   assert(osthread->get_state() == RUNNABLE, "invalid os thread state");
 733 
 734   // Call one more level start routine.
 735   thread->run();
 736 
 737   trcVerbose("Thread finished : pthread-id %u, ktid " UINT64_FORMAT ".",
 738     pthread_id, kernel_thread_id);
 739 
 740   return 0;
 741 }
 742 
 743 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 744 



 745   assert(thread->osthread() == NULL, "caller responsible");
 746 
 747   // Allocate the OSThread object
 748   OSThread* osthread = new OSThread(NULL, NULL);
 749   if (osthread == NULL) {
 750     return false;
 751   }
 752 
 753   // set the correct thread state
 754   osthread->set_thread_type(thr_type);
 755 
 756   // Initial state is ALLOCATED but not INITIALIZED
 757   osthread->set_state(ALLOCATED);
 758 
 759   thread->set_osthread(osthread);
 760 
 761   // init thread attributes
 762   pthread_attr_t attr;
 763   pthread_attr_init(&attr);
 764   guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");


 789       } // else fall through:
 790         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 791     case os::vm_thread:
 792     case os::pgc_thread:
 793     case os::cgc_thread:
 794     case os::watcher_thread:
 795       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 796       break;
 797     }
 798   }
 799 
 800   stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
 801   pthread_attr_setstacksize(&attr, stack_size);
 802 
 803   pthread_t tid;
 804   int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
 805 
 806   pthread_attr_destroy(&attr);
 807 
 808   if (ret == 0) {
 809     trcVerbose("Created New Thread : pthread-id %u", tid);
 810   } else {
 811     if (os::Aix::on_pase()) {
 812       // SAPJVM stuefe 2008-04-24:
 813       // QIBM_MULTI_THREADED=Y is needed when the launcher is started on iSeries
 814       // using QSH. Otherwise pthread_create fails with errno=11.
 815       trcVerbose("(Please make sure you set the environment variable "
 816               "QIBM_MULTI_THREADED=Y before running this program.)");
 817     }
 818     if (PrintMiscellaneous && (Verbose || WizardMode)) {
 819       perror("pthread_create()");
 820     }
 821     // Need to clean up stuff we've allocated so far
 822     thread->set_osthread(NULL);
 823     delete osthread;
 824     return false;
 825   }
 826 
 827   // OSThread::thread_id is the pthread id
 828   osthread->set_thread_id(tid);
 829 
 830   return true;
 831 }
 832 
 833 /////////////////////////////////////////////////////////////////////////////
 834 // attach existing thread
 835 
 836 // bootstrap the main thread
 837 bool os::create_main_thread(JavaThread* thread) {
 838   assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
 839   return create_attached_thread(thread);
 840 }
 841 
 842 bool os::create_attached_thread(JavaThread* thread) {
 843 #ifdef ASSERT
 844     thread->verify_not_published();
 845 #endif
 846 
 847   // Allocate the OSThread object
 848   OSThread* osthread = new OSThread(NULL, NULL);
 849 
 850   if (osthread == NULL) {
 851     return false;
 852   }
 853 
 854   const pthread_t pthread_id = ::pthread_self();
 855   const tid_t kernel_thread_id = ::thread_self();
 856 
 857   trcVerbose("attaching Thread : pthread-id %u, ktid " UINT64_FORMAT ", stack %p ... %p, stacksize 0x%IX (%IB)",
 858     pthread_id, kernel_thread_id,
 859     thread->stack_base() - thread->stack_size(),
 860     thread->stack_base(),
 861     thread->stack_size(),
 862     thread->stack_size());
 863 
 864   // OSThread::thread_id is the pthread id
 865   osthread->set_thread_id(pthread_id);
 866 
 867   // initialize floating point control register
 868   os::Aix::init_thread_fpu_state();
 869 
 870   // some sanity checks
 871   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
 872 
 873   // Initial thread state is RUNNABLE
 874   osthread->set_state(RUNNABLE);
 875 
 876   thread->set_osthread(osthread);
 877 
 878   if (UseNUMA) {
 879     int lgrp_id = os::numa_get_group_id();
 880     if (lgrp_id != -1) {
 881       thread->set_lgrp_id(lgrp_id);
 882     }
 883   }
 884 
 885   // initialize signal mask for this thread


 965     // better than nothing, but not much
 966     return elapsedTime();
 967   }
 968 }
 969 
 970 jlong os::javaTimeMillis() {
 971   timeval time;
 972   int status = gettimeofday(&time, NULL);
 973   assert(status != -1, "aix error at gettimeofday()");
 974   return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
 975 }
 976 
 977 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 978   timeval time;
 979   int status = gettimeofday(&time, NULL);
 980   assert(status != -1, "aix error at gettimeofday()");
 981   seconds = jlong(time.tv_sec);
 982   nanos = jlong(time.tv_usec) * 1000;
 983 }
 984 







 985 jlong os::javaTimeNanos() {
 986   if (os::Aix::on_pase()) {
 987 
 988     timeval time;
 989     int status = gettimeofday(&time, NULL);
 990     assert(status != -1, "PASE error at gettimeofday()");
 991     jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
 992     return 1000 * usecs;
 993 
 994   } else {
 995     // On AIX, use the precision of the processor's real time clock
 996     // or time base registers.
 997     timebasestruct_t time;
 998     int rc;
 999 
1000     // If the CPU has a time register, it will be used and
1001     // we have to convert to real time first. After conversion we have the following data:
1002     // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
1003     // time.tb_low  [nanoseconds after the last full second above]
1004     // It is better to use mread_real_time here instead of read_real_time
1005     // to ensure that we get a monotonically increasing time.
1006     if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
1007       rc = time_base_to_time(&time, TIMEBASE_SZ);
1008       assert(rc != -1, "aix error at time_base_to_time()");
1009     }
1010     return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
1011   }
1012 }
1013 


1102 void os::die() {
1103   ::abort();
1104 }
1105 
1106 // This method is a copy of JDK's sysGetLastErrorString
1107 // from src/solaris/hpi/src/system_md.c
1108 
1109 size_t os::lasterror(char *buf, size_t len) {
1110   if (errno == 0) return 0;
1111 
1112   const char *s = ::strerror(errno);
1113   size_t n = ::strlen(s);
1114   if (n >= len) {
1115     n = len - 1;
1116   }
1117   ::strncpy(buf, s, n);
1118   buf[n] = '\0';
1119   return n;
1120 }
1121 
1122 intx os::current_thread_id() {
1123   return (intx)pthread_self();
1124 }
1125 
1126 int os::current_process_id() {
1127   return getpid();












1128 }
1129 
1130 // DLL functions
1131 
1132 const char* os::dll_file_extension() { return ".so"; }
1133 
1134 // This must be hard coded because it's the system's temporary
1135 // directory, not the java application's temp directory, a la java.io.tmpdir.
1136 const char* os::get_temp_directory() { return "/tmp"; }
1137 
1138 static bool file_exists(const char* filename) {
1139   struct stat statbuf;
1140   if (filename == NULL || strlen(filename) == 0) {
1141     return false;
1142   }
1143   return os::stat(filename, &statbuf) == 0;
1144 }
1145 
1146 bool os::dll_build_name(char* buffer, size_t buflen,
1147                         const char* pname, const char* fname) {
1148   bool retval = false;
1149   // Copied from libhpi
1150   const size_t pnamelen = pname ? strlen(pname) : 0;
1151 
1152   // Return error on buffer overflow.
1153   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1154     *buffer = '\0';
1155     return retval;
1156   }
1157 
1158   if (pnamelen == 0) {
1159     snprintf(buffer, buflen, "lib%s.so", fname);
1160     retval = true;
1161   } else if (strchr(pname, *os::path_separator()) != NULL) {
1162     int n;
1163     char** pelements = split_path(pname, &n);
1164     if (pelements == NULL) {
1165       return false;
1166     }
1167     for (int i = 0; i < n; i++) {
1168       // Really shouldn't be NULL, but check can't hurt
1169       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1170         continue; // skip the empty path values
1171       }
1172       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1173       if (file_exists(buffer)) {
1174         retval = true;
1175         break;
1176       }
1177     }
1178     // release the storage
1179     for (int i = 0; i < n; i++) {
1180       if (pelements[i] != NULL) {
1181         FREE_C_HEAP_ARRAY(char, pelements[i]);
1182       }
1183     }
1184     if (pelements != NULL) {
1185       FREE_C_HEAP_ARRAY(char*, pelements);
1186     }


1390 
1391   // Print limits on DATA, because it limits the C-heap.
1392   st->print(", DATA ");
1393   getrlimit(RLIMIT_DATA, &rlim);
1394   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1395   else st->print(UINT64_FORMAT "k", (uint64_t)(rlim.rlim_cur >> 10));
1396   st->cr();
1397 
1398   // load average
1399   st->print("load average:");
1400   double loadavg[3] = {-1.L, -1.L, -1.L};
1401   os::loadavg(loadavg, 3);
1402   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1403   st->cr();
1404 }
1405 
1406 void os::print_memory_info(outputStream* st) {
1407 
1408   st->print_cr("Memory:");
1409 
1410   st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1411     describe_pagesize(g_multipage_support.pagesize));
1412   st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1413     describe_pagesize(g_multipage_support.datapsize));
1414   st->print_cr("  Text page size:                         %s",
1415     describe_pagesize(g_multipage_support.textpsize));
1416   st->print_cr("  Thread stack page size (pthread):       %s",
1417     describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1418   st->print_cr("  Default shared memory page size:        %s",
1419     describe_pagesize(g_multipage_support.shmpsize));
1420   st->print_cr("  Can use 64K pages dynamically with shared memory: %s",
1421     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1422   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1423     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1424   st->print_cr("  Multipage error: %d",
1425     g_multipage_support.error);
1426   st->cr();
1427   st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1428   // not used in OpenJDK st->print_cr("  os::stack_page_size:    %s", describe_pagesize(os::stack_page_size()));
1429 
1430   // print out LDR_CNTRL because it affects the default page sizes
1431   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1432   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1433 
1434   // print out EXTSHM because it is an unsupported setting
1435   const char* const extshm = ::getenv("EXTSHM");
1436   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1437   if (extshm != NULL && ((strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0))) {
1438     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1439   }
1440 
1441   // print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks
1442   const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1443   st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1444       aixthread_guardpages ? aixthread_guardpages : "<unset>");
1445 
1446   os::Aix::meminfo_t mi;
1447   if (os::Aix::get_meminfo(&mi)) {
1448     char buffer[256] = "";  // Zero-initialized; nothing below writes into it before it is printed.
1449     if (os::Aix::on_aix()) {
1450       st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1451       st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1452       st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1453       st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1454     } else {
1455       // PASE - Numbers are result of QWCRSSTS; they mean:
1456       // real_total: Sum of all system pools
1457       // real_free: always 0
1458       // pgsp_total: we take the size of the system ASP
1459       // pgsp_free: size of system ASP times percentage of system ASP unused
1460       st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1461       st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1462       st->print_cr("%% system asp used : %.2f",
1463         mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1464     }
1465     st->print_raw(buffer);


1466   }
1467   st->cr();
1468 
1469   // print segments allocated with os::reserve_memory
1470   st->print_cr("internal virtual memory regions used by vm:");
1471   vmembk_print_on(st);
1472 }
1473 
1474 // Get a string for the cpuinfo that is a summary of the cpu type
1475 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1476   // This looks good
1477   libperfstat::cpuinfo_t ci;
1478   if (libperfstat::get_cpuinfo(&ci)) {
1479     strncpy(buf, ci.version, buflen);
1480   } else {
1481     strncpy(buf, "AIX", buflen);
1482   }
1483 }
1484 
1485 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1486   st->print("CPU:");
1487   st->print("total %d", os::processor_count());
1488   // It's not safe to query number of active processors after crash
1489   // st->print("(active %d)", os::active_processor_count());
1490   st->print(" %s", VM_Version::cpu_features());
1491   st->cr();
1492 }
1493 
1494 void os::print_siginfo(outputStream* st, void* siginfo) {

1495   os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
1496   st->cr();
1497 }
1498 
1499 static void print_signal_handler(outputStream* st, int sig,
1500                                  char* buf, size_t buflen);
1501 
1502 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1503   st->print_cr("Signal Handlers:");
1504   print_signal_handler(st, SIGSEGV, buf, buflen);
1505   print_signal_handler(st, SIGBUS , buf, buflen);
1506   print_signal_handler(st, SIGFPE , buf, buflen);
1507   print_signal_handler(st, SIGPIPE, buf, buflen);
1508   print_signal_handler(st, SIGXFSZ, buf, buflen);
1509   print_signal_handler(st, SIGILL , buf, buflen);
1510   print_signal_handler(st, SR_signum, buf, buflen);
1511   print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1512   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1513   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1514   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);


1613   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1614 }
1615 
1616 void os::signal_raise(int signal_number) {
1617   ::raise(signal_number);
1618 }
1619 
1620 //
1621 // The following code was moved from os.cpp to make this
1622 // code platform specific, which it is by its very nature.
1623 //
1624 
1625 // Will be modified when max signal is changed to be dynamic
1626 int os::sigexitnum_pd() {
1627   return NSIG;
1628 }
1629 
1630 // a counter for each possible signal value
1631 static volatile jint pending_signals[NSIG+1] = { 0 };
1632 
1633 // Wrapper functions for: sem_init(), sem_post(), sem_wait()
1634 // On AIX, we use sem_init(), sem_post(), sem_wait()
1635 // On PASE, we need to use msem_lock() and msem_unlock(), because POSIX semaphores
1636 // do not seem to work at all on PASE (unimplemented, will cause SIGILL).
1637 // Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
1638 // on AIX, msem_..() calls are suspected of causing problems.
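     //
     // Illustrative sketch (not live code): both variants are hidden behind the
     // local_sem_*() wrappers below, so callers such as os::signal_init_pd(),
     // os::signal_notify() and check_pending_signals() stay platform-agnostic:
     //
     //   local_sem_init();   // sem_init() on AIX, msem_init() on PASE
     //   local_sem_post();   // sem_post() on AIX, msem_unlock() on PASE
     //   local_sem_wait();   // sem_wait() on AIX, msem_lock() on PASE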
1639 static sem_t sig_sem;
1640 static msemaphore* p_sig_msem = 0;
1641 
1642 static void local_sem_init() {
1643   if (os::Aix::on_aix()) {
1644     int rc = ::sem_init(&sig_sem, 0, 0);
1645     guarantee(rc != -1, "sem_init failed");
1646   } else {
1647     // memory semaphores must live in shared mem
1648     guarantee0(p_sig_msem == NULL);
1649     p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
1650     guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
1651     guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
1652   }
1653 }
1654 
1655 static void local_sem_post() {
1656   static bool warn_only_once = false;
1657   if (os::Aix::on_aix()) {
1658     int rc = ::sem_post(&sig_sem);
1659     if (rc == -1 && !warn_only_once) {
1660       trcVerbose("sem_post failed (errno = %d, %s)", errno, strerror(errno));
1661       warn_only_once = true;
1662     }
1663   } else {
1664     guarantee0(p_sig_msem != NULL);
1665     int rc = ::msem_unlock(p_sig_msem, 0);
1666     if (rc == -1 && !warn_only_once) {
1667       trcVerbose("msem_unlock failed (errno = %d, %s)", errno, strerror(errno));
1668       warn_only_once = true;
1669     }
1670   }
1671 }
1672 
1673 static void local_sem_wait() {
1674   static bool warn_only_once = false;
1675   if (os::Aix::on_aix()) {
1676     int rc = ::sem_wait(&sig_sem);
1677     if (rc == -1 && !warn_only_once) {
1678       trcVerbose("sem_wait failed (errno = %d, %s)", errno, strerror(errno));
1679       warn_only_once = true;
1680     }
1681   } else {
1682     guarantee0(p_sig_msem != NULL); // must init before use
1683     int rc = ::msem_lock(p_sig_msem, 0);
1684     if (rc == -1 && !warn_only_once) {
1685       trcVerbose("msem_lock failed (errno = %d, %s)", errno, strerror(errno));
1686       warn_only_once = true;
1687     }
1688   }
1689 }
1690 
1691 void os::signal_init_pd() {
1692   // Initialize signal structures
1693   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1694 
1695   // Initialize signal semaphore
1696   local_sem_init();

1697 }
1698 
1699 void os::signal_notify(int sig) {
1700   Atomic::inc(&pending_signals[sig]);
1701   local_sem_post();
1702 }
1703 
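     // Returns the number of the next pending signal and decrements its counter.
     // If 'wait' is false and nothing is pending, returns -1 immediately; if 'wait'
     // is true, blocks on the signal semaphore (local_sem_wait) and cooperates with
     // the external suspend/resume mechanism while blocked.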
1704 static int check_pending_signals(bool wait) {
1705   Atomic::store(0, &sigint_count);
1706   for (;;) {
1707     for (int i = 0; i < NSIG + 1; i++) {
1708       jint n = pending_signals[i];
1709       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1710         return i;
1711       }
1712     }
1713     if (!wait) {
1714       return -1;
1715     }
1716     JavaThread *thread = JavaThread::current();
1717     ThreadBlockInVM tbivm(thread);
1718 
1719     bool threadIsSuspended;
1720     do {
1721       thread->set_suspend_equivalent();
1722       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1723 
1724       local_sem_wait();
1725 
1726       // were we externally suspended while we were waiting?
1727       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1728       if (threadIsSuspended) {
1729         //
1730         // The semaphore has been incremented, but while we were waiting
1731         // another thread suspended us. We don't want to continue running
1732         // while suspended because that would surprise the thread that
1733         // suspended us.
1734         //
1735 
1736         local_sem_post();
1737 
1738         thread->java_suspend_self();
1739       }
1740     } while (threadIsSuspended);
1741   }
1742 }
1743 
1744 int os::signal_lookup() {
1745   return check_pending_signals(false);
1746 }
1747 
1748 int os::signal_wait() {
1749   return check_pending_signals(true);
1750 }
1751 
1752 ////////////////////////////////////////////////////////////////////////////////
1753 // Virtual Memory
1754 
1755 // We need to keep small simple bookkeeping for os::reserve_memory and friends.
1756 


1769   }
1770 
1771   bool contains_range(char* p, size_t s) const {
1772     return contains_addr(p) && contains_addr(p + s - 1);
1773   }
1774 
1775   void print_on(outputStream* os) const {
1776     os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1777       " bytes, %d %s pages), %s",
1778       addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1779       (type == VMEM_SHMATED ? "shmat" : "mmap")
1780     );
1781   }
1782 
1783   // Check that range is a sub range of memory block (or equal to memory block);
1784   // also check that the range is fully page aligned to the page size of the block.
1785   void assert_is_valid_subrange(char* p, size_t s) const {
1786     if (!contains_range(p, s)) {
1787       fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1788               "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
1789               p, p + s, addr, addr + size);
1790       guarantee0(false);
1791     }
1792     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1793       fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1794               " aligned to pagesize (%lu)\n", p, p + s, (unsigned long) pagesize);
1795       guarantee0(false);
1796     }
1797   }
1798 };
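     // Overview (summary of the bookkeeping below): reserved blocks are recorded as
     // vmembk_t nodes via vmembk_add(); pd_commit_memory, pd_uncommit_memory and
     // pd_release_memory later look a block up with vmembk_find() and validate the
     // requested range with assert_is_valid_subrange() before acting on it.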
1799 
1800 static struct {
1801   vmembk_t* first;
1802   MiscUtils::CritSect cs;
1803 } vmem;
1804 
1805 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1806   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1807   assert0(p);
1808   if (p) {
1809     MiscUtils::AutoCritSect lck(&vmem.cs);
1810     p->addr = addr; p->size = size;
1811     p->pagesize = pagesize;
1812     p->type = type;
1813     p->next = vmem.first;
1814     vmem.first = p;


1830   assert0(p0);
1831   assert0(vmem.first); // List should not be empty.
1832   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1833     if (*pp == p0) {
1834       *pp = p0->next;
1835       ::free(p0);
1836       return;
1837     }
1838   }
1839   assert0(false); // Not found?
1840 }
1841 
1842 static void vmembk_print_on(outputStream* os) {
1843   MiscUtils::AutoCritSect lck(&vmem.cs);
1844   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1845     vmi->print_on(os);
1846     os->cr();
1847   }
1848 }
1849 
1850 ////////////////////////////////  SAPJVM stuefe 2014-05-06: System V shm routines ///////////
1851 
1852 // Reserve and attach a section of System V memory.
1853 // If <requested_addr> is not NULL, the function will attempt to attach the memory at the given
1854 // address. Failing that, it will attach the memory anywhere.
1855 // If <requested_addr> is NULL, the function will attach the memory anywhere.
1856 //
1857 // <alignment_hint> is ignored by this function. However, it is very probable that the
1858 // alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
1859 // Should this not be enough, we can put more work into it.
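     //
     // Usage sketch (illustrative only, the size is made up): a caller that wants a
     // large 64K-backed region at no particular address would do something like
     //   char* p = reserve_shmated_memory(100 * M, NULL /* any address */, 0);
     // and later hand the block back through os::release_memory()/pd_release_memory().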
1860 static char* reserve_shmated_memory (
1861   size_t bytes,
1862   char* requested_addr,
1863   size_t alignment_hint) {
1864 
1865   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1866     PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1867     bytes, requested_addr, alignment_hint);
1868 
1869   // Either give me wish address or wish alignment but not both.
1870   assert0(!(requested_addr != NULL && alignment_hint != 0));
1871 


1873   // BRK because that may cause malloc OOM.
1874   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1875     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1876       "Will attach anywhere.", requested_addr);
1877     // Act like the OS refused to attach there.
1878     requested_addr = NULL;
1879   }
1880 
1881   // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1882   // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1883   if (os::Aix::on_pase_V5R4_or_older()) {
1884     ShouldNotReachHere();
1885   }
1886 
1887   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1888   const size_t size = align_size_up(bytes, SIZE_64K);
1889 
1890   // Reserve the shared segment.
1891   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1892   if (shmid == -1) {
1893     trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1894     return NULL;
1895   }
1896 
1897   // Important note:
1898   // It is very important that we do not leave a shm segment alive when leaving this function.
1899   // Right after attaching it, we must remove it from the system. System V shm segments are global and
1900   // survive the process.
1901   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
1902 
1903   struct shmid_ds shmbuf;
1904   memset(&shmbuf, 0, sizeof(shmbuf));
1905   shmbuf.shm_pagesize = SIZE_64K;
1906   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
1907     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
1908                size / SIZE_64K, errno);
1909     // I want to know if this ever happens.
1910     assert(false, "failed to set page size for shmat");
1911   }
1912 
1913   // Now attach the shared segment.
1914   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
1915   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
1916   // were not a segment boundary.
1917   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
1918   const int errno_shmat = errno;
1919 
1920   // (A) Right after shmat and before handing shmat errors delete the shm segment.
1921   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
1922     trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
1923     assert(false, "failed to remove shared memory segment!");
1924   }
1925 
1926   // Handle shmat error. If we failed to attach, just return.
1927   if (addr == (char*)-1) {
1928     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
1929     return NULL;
1930   }
1931 
1932   // Just for info: query the real page size. In case setting the page size did not
1933   // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
1934   const size_t real_pagesize = os::Aix::query_pagesize(addr);
1935   if (real_pagesize != shmbuf.shm_pagesize) {
1936     trcVerbose("pagesize is, surprisingly, " SIZE_FORMAT ".", real_pagesize);
1937   }
1938 
1939   if (addr) {
1940     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
1941       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
1942   } else {


1967   } else {
1968     trcVerbose("ok.");
1969     rc = true;
1970   }
1971   return rc;
1972 }
1973 
1974 static bool uncommit_shmated_memory(char* addr, size_t size) {
1975   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1976     addr, addr + size - 1);
1977 
1978   const bool rc = my_disclaim64(addr, size);
1979 
1980   if (!rc) {
1981     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
1982     return false;
1983   }
1984   return true;
1985 }
1986 
1987 ////////////////////////////////  mmap-based routines /////////////////////////////////
1988 
1989 // Reserve memory via mmap.
1990 // If <requested_addr> is given, an attempt is made to attach at the given address.
1991 // Failing that, memory is allocated at any address.
1992 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
1993 // allocate at an address aligned with the given alignment. Failing that, memory
1994 // is aligned anywhere.
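     //
     // Usage sketch (illustrative only, sizes are made up): reserving with an
     // alignment wish but no fixed address could look like
     //   char* p = reserve_mmaped_memory(8 * M, NULL, 64 * K);
     // The alignment is attempted on a best-effort basis, as described above.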
1995 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
1996   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
1997     "alignment_hint " UINTX_FORMAT "...",
1998     bytes, requested_addr, alignment_hint);
1999 
2000   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2001   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size())) {
2002     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2003     return NULL;
2004   }
2005 
2006   // We must prevent anyone from attaching too close to the
2007   // BRK because that may cause malloc OOM.
2008   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {


2114 
2115   assert0(is_aligned_to(addr, os::vm_page_size()));
2116   assert0(is_aligned_to(size, os::vm_page_size()));
2117 
2118   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2119     addr, addr + size - 1);
2120   bool rc = false;
2121 
2122   // Uncommit mmap memory with msync MS_INVALIDATE.
2123   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2124     trcVerbose("failed (%d)\n", errno);
2125     rc = false;
2126   } else {
2127     trcVerbose("ok.");
2128     rc = true;
2129   }
2130 
2131   return rc;
2132 }
2133 



2134 int os::vm_page_size() {
2135   // Seems redundant as all get out.
2136   assert(os::Aix::page_size() != -1, "must call os::init");
2137   return os::Aix::page_size();
2138 }
2139 
2140 // Aix allocates memory by pages.
2141 int os::vm_allocation_granularity() {
2142   assert(os::Aix::page_size() != -1, "must call os::init");
2143   return os::Aix::page_size();
2144 }
2145 
2146 #ifdef PRODUCT
2147 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2148                                     int err) {
2149   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2150           ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2151           strerror(err), err);
2152 }
2153 #endif
2154 
2155 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2156                                   const char* mesg) {
2157   assert(mesg != NULL, "mesg must be specified");
2158   if (!pd_commit_memory(addr, size, exec)) {
2159     // Add extra info in product mode for vm_exit_out_of_memory():
2160     PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2161     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2162   }
2163 }
2164 
2165 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2166 
2167   assert0(is_aligned_to(addr, os::vm_page_size()));
2168   assert0(is_aligned_to(size, os::vm_page_size()));
2169 
2170   vmembk_t* const vmi = vmembk_find(addr);
2171   guarantee0(vmi);
2172   vmi->assert_is_valid_subrange(addr, size);
2173 
2174   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2175 
2176   if (UseExplicitCommit) {
2177     // AIX commits memory on touch. So, touch all pages to be committed.
2178     for (char* p = addr; p < (addr + size); p += SIZE_4K) {
2179       *p = '\0';
2180     }
2181   }
2182 
2183   return true;
2184 }
2185 
2186 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2187   return pd_commit_memory(addr, size, exec);
2188 }
2189 
2190 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2191                                   size_t alignment_hint, bool exec,
2192                                   const char* mesg) {
2193   // Alignment_hint is ignored on this OS.
2194   pd_commit_memory_or_exit(addr, size, exec, mesg);
2195 }
2196 
2197 bool os::pd_uncommit_memory(char* addr, size_t size) {
2198   assert0(is_aligned_to(addr, os::vm_page_size()));
2199   assert0(is_aligned_to(size, os::vm_page_size()));
2200 
2201   // Dynamically do different things for mmap/shmat.
2202   const vmembk_t* const vmi = vmembk_find(addr);
2203   guarantee0(vmi);
2204   vmi->assert_is_valid_subrange(addr, size);
2205 
2206   if (vmi->type == VMEM_SHMATED) {
2207     return uncommit_shmated_memory(addr, size);
2208   } else {
2209     return uncommit_mmaped_memory(addr, size);
2210   }
2211 }
2212 
2213 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2214   // Do not call this; no need to commit stack pages on AIX.
2215   ShouldNotReachHere();
2216   return true;
2217 }
2218 
2219 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2220   // Do not call this; no need to commit stack pages on AIX.
2221   ShouldNotReachHere();
2222   return true;
2223 }


2281   const size_t alignment_hint0 =
2282     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2283 
2284   // In 4K mode always use mmap.
2285   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
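       // For example (sketch, sizes illustrative): with 64K pages active, a request
       // smaller than Use64KPagesThreshold goes through reserve_mmaped_memory(),
       // while a larger request (e.g. a multi-MB heap chunk) is routed to
       // reserve_shmated_memory().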
2286   if (os::vm_page_size() == SIZE_4K) {
2287     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2288   } else {
2289     if (bytes >= Use64KPagesThreshold) {
2290       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2291     } else {
2292       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2293     }
2294   }
2295 }
2296 
2297 bool os::pd_release_memory(char* addr, size_t size) {
2298 
2299   // Dynamically do different things for mmap/shmat.
2300   vmembk_t* const vmi = vmembk_find(addr);
2301   guarantee0(vmi);
2302 
2303   // Always round to os::vm_page_size(), which may be larger than 4K.
2304   size = align_size_up(size, os::vm_page_size());
2305   addr = (char *)align_ptr_up(addr, os::vm_page_size());
2306 
2307   bool rc = false;
2308   bool remove_bookkeeping = false;
2309   if (vmi->type == VMEM_SHMATED) {
2310     // For shmatted memory, we do:
2311     // - If user wants to release the whole range, release the memory (shmdt).
2312     // - If user only wants to release a partial range, uncommit (disclaim) that
2313     //   range. That way, at least, we do not use the memory anymore (but still use page
2314     //   table space).
2315     vmi->assert_is_valid_subrange(addr, size);
2316     if (addr == vmi->addr && size == vmi->size) {
2317       rc = release_shmated_memory(addr, size);
2318       remove_bookkeeping = true;
2319     } else {
2320       rc = uncommit_shmated_memory(addr, size);
2321     }


2357   // mprotect success check
2358   //
2359   // Mprotect said it changed the protection but can I believe it?
2360   //
2361   // To be sure I need to check the protection afterwards. Try to
2362   // read from protected memory and check whether that causes a segfault.
2363   //
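       // The check below works as follows (sketch): SafeFetch32() probes the first
       // word of the range with two different default values; if both probes come
       // back as their defaults, the reads faulted and the page is treated as
       // read-protected, which is then compared against the requested protection.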
2364   if (!os::Aix::xpg_sus_mode()) {
2365 
2366     if (CanUseSafeFetch32()) {
2367 
2368       const bool read_protected =
2369         (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2370          SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2371 
2372       if (prot & PROT_READ) {
2373         rc = !read_protected;
2374       } else {
2375         rc = read_protected;
2376       }
2377 
2378       if (!rc) {
2379         if (os::Aix::on_pase()) {
2380           // There is an issue on older PASE systems where mprotect() will return success but the
2381           // memory will not be protected.
2382           // This has nothing to do with the problem of using mprotect() on SPEC1170 incompatible
2383           // machines; we only see it rarely, when using mprotect() to protect the guard page of
2384           // a stack. It is an OS error.
2385           //
2386           // A valid strategy is just to try again. This usually works. :-/
2387 
2388           MiscUtils::sleep_ms(1);
2389           if (::mprotect(addr, size, prot) == 0) {
2390             const bool read_protected_2 =
2391               (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2392               SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2393             rc = (prot & PROT_READ) ? !read_protected_2 : read_protected_2;  // Use the re-check result.
2394           }
2395         }
2396       }


2397     }
2398   }
2399 
2400   assert(rc == true, "mprotect failed.");
2401 
2402   return rc;
2403 }
2404 
2405 // Set protections specified
2406 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2407   unsigned int p = 0;
2408   switch (prot) {
2409   case MEM_PROT_NONE: p = PROT_NONE; break;
2410   case MEM_PROT_READ: p = PROT_READ; break;
2411   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2412   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2413   default:
2414     ShouldNotReachHere();
2415   }
2416   // is_committed is unused.
2417   return checked_mprotect(addr, size, p);
2418 }
2419 
2420 bool os::guard_memory(char* addr, size_t size) {
2421   return checked_mprotect(addr, size, PROT_NONE);
2422 }
2423 
2424 bool os::unguard_memory(char* addr, size_t size) {
2425   return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2426 }
2427 
2428 // Large page support
2429 
2430 static size_t _large_page_size = 0;
2431 
2432 // Enable large page support if OS allows that.
2433 void os::large_page_init() {
2434   return; // Nothing to do. See query_multipage_support and friends.
2435 }
2436 
2437 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2438   // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
2439   // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
2440   // so this is not needed.
2441   assert(false, "should not be called on AIX");
2442   return NULL;
2443 }
2444 
2445 bool os::release_memory_special(char* base, size_t bytes) {
2446   // Detaching the SHM segment will also delete it, see reserve_memory_special().
2447   Unimplemented();
2448   return false;
2449 }
2450 
2451 size_t os::large_page_size() {
2452   return _large_page_size;
2453 }
2454 
2455 bool os::can_commit_large_page_memory() {
2456   // Does not matter, we do not support huge pages.
2457   return false;
2458 }
2459 
2460 bool os::can_execute_large_page_memory() {
2461   // Does not matter, we do not support huge pages.
2462   return false;


2874 // they typically will bring down the process immediately.
2875 bool unblock_program_error_signals() {
2876   sigset_t set;
2877   ::sigemptyset(&set);
2878   ::sigaddset(&set, SIGILL);
2879   ::sigaddset(&set, SIGBUS);
2880   ::sigaddset(&set, SIGFPE);
2881   ::sigaddset(&set, SIGSEGV);
2882   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2883 }
2884 
2885 // Renamed from 'signalHandler' to avoid collision with other shared libs.
2886 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2887   assert(info != NULL && uc != NULL, "it must be old kernel");
2888 
2889   // Never leave program error signals blocked;
2890   // on all our platforms they would bring down the process immediately when
2891   // getting raised while being blocked.
2892   unblock_program_error_signals();
2893 
2894   int orig_errno = errno;  // Preserve errno value over signal handler.
2895   JVM_handle_aix_signal(sig, info, uc, true);
2896   errno = orig_errno;
2897 }
2898 
2899 // This boolean allows users to forward their own non-matching signals
2900 // to JVM_handle_aix_signal, harmlessly.
2901 bool os::Aix::signal_handlers_are_installed = false;
2902 
2903 // For signal-chaining
2904 struct sigaction sigact[NSIG];
2905 sigset_t sigs;
2906 bool os::Aix::libjsig_is_loaded = false;
2907 typedef struct sigaction *(*get_signal_t)(int);
2908 get_signal_t os::Aix::get_signal_action = NULL;
2909 
2910 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2911   struct sigaction *actp = NULL;
2912 
2913   if (libjsig_is_loaded) {
2914     // Retrieve the old signal handler from libjsig
2915     actp = (*get_signal_action)(sig);
2916   }


2998 int os::Aix::get_our_sigflags(int sig) {
2999   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3000   return sigflags[sig];
3001 }
3002 
3003 void os::Aix::set_our_sigflags(int sig, int flags) {
3004   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3005   if (sig > 0 && sig < NSIG) {
3006     sigflags[sig] = flags;
3007   }
3008 }
3009 
3010 void os::Aix::set_signal_handler(int sig, bool set_installed) {
3011   // Check for overwrite.
3012   struct sigaction oldAct;
3013   sigaction(sig, (struct sigaction*)NULL, &oldAct);
3014 
3015   void* oldhand = oldAct.sa_sigaction
3016     ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3017     : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);

3018   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3019       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3020       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3021     if (AllowUserSignalHandlers || !set_installed) {
3022       // Do not overwrite; user takes responsibility to forward to us.
3023       return;
3024     } else if (UseSignalChaining) {
3025       // save the old handler in jvm
3026       save_preinstalled_handler(sig, oldAct);
3027       // libjsig also interposes the sigaction() call below and saves the
3028       // old sigaction on its own.
3029     } else {
3030       fatal("Encountered unexpected pre-existing sigaction handler "
3031             "%#lx for signal %d.", (long)oldhand, sig);
3032     }
3033   }
3034 
3035   struct sigaction sigAct;
3036   sigfillset(&(sigAct.sa_mask));
3037   if (!set_installed) {
3038     sigAct.sa_handler = SIG_DFL;
3039     sigAct.sa_flags = SA_RESTART;
3040   } else {

3041     sigAct.sa_sigaction = javaSignalHandler;
3042     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3043   }
3044   // Save the flags that we set ourselves.
3045   assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3046   sigflags[sig] = sigAct.sa_flags;
3047 
3048   int ret = sigaction(sig, &sigAct, &oldAct);
3049   assert(ret == 0, "check");
3050 
3051   void* oldhand2 = oldAct.sa_sigaction
3052                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3053                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3054   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3055 }
3056 
3057 // install signal handlers for signals that HotSpot needs to
3058 // handle in order to support Java-level exception handling.
3059 void os::Aix::install_signal_handlers() {
3060   if (!signal_handlers_are_installed) {


3212     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3213     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3214     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3215     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3216   }
3217 
3218   DO_SIGNAL_CHECK(SR_signum);
3219 }
3220 
3221 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3222 
3223 static os_sigaction_t os_sigaction = NULL;
3224 
3225 void os::Aix::check_signal_handler(int sig) {
3226   char buf[O_BUFLEN];
3227   address jvmHandler = NULL;
3228 
3229   struct sigaction act;
3230   if (os_sigaction == NULL) {
3231     // only trust the default sigaction, in case it has been interposed
3232     os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
3233     if (os_sigaction == NULL) return;
3234   }
3235 
3236   os_sigaction(sig, (struct sigaction*)NULL, &act);
3237 
3238   address thisHandler = (act.sa_flags & SA_SIGINFO)
3239     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3240     : CAST_FROM_FN_PTR(address, act.sa_handler);
3241 
3242   switch(sig) {
3243   case SIGSEGV:
3244   case SIGBUS:
3245   case SIGFPE:
3246   case SIGPIPE:
3247   case SIGILL:
3248   case SIGXFSZ:

3249     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3250     break;
3251 
3252   case SHUTDOWN1_SIGNAL:
3253   case SHUTDOWN2_SIGNAL:
3254   case SHUTDOWN3_SIGNAL:
3255   case BREAK_SIGNAL:
3256     jvmHandler = (address)user_handler();
3257     break;
3258 
3259   default:
3260     if (sig == SR_signum) {
3261       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3262     } else {
3263       return;
3264     }
3265     break;
3266   }
3267 
3268   if (thisHandler != jvmHandler) {


3294 
3295 const char* os::exception_name(int exception_code, char* buf, size_t size) {
3296   if (0 < exception_code && exception_code <= SIGRTMAX) {
3297     // signal
3298     if (!signal_name(exception_code, buf, size)) {
3299       jio_snprintf(buf, size, "SIG%d", exception_code);
3300     }
3301     return buf;
3302   } else {
3303     return NULL;
3304   }
3305 }
3306 
3307 // To install functions for atexit system call
3308 extern "C" {
3309   static void perfMemory_exit_helper() {
3310     perfMemory_exit();
3311   }
3312 }
3313 
3314 // Probe OS for multipage support.
3315 // Will fill the global g_multipage_support structure.
3316 // Must be called before calling os::large_page_init().
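     // In outline (see the code below): the base page size is queried via sysconf,
     // the data/text/stack/shm page sizes are derived with os::Aix::query_pagesize(),
     // and a throwaway System V segment is used to check whether 64K and 16M pages
     // can actually be requested via shmctl(SHM_PAGESIZE).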
3317 static void query_multipage_support() {
3318 
3319   guarantee(g_multipage_support.pagesize == -1,
3320             "do not call twice");
3321 
3322   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
3323 
3324   // This really would surprise me.
3325   assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");
3326 
3327   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
3328   // Default data page size is defined either by linker options (-bdatapsize)
3329   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
3330   // default should be 4K.
3331   {
3332     void* p = ::malloc(SIZE_16M);
3333     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
3334     ::free(p);
3335   }
3336 
3337   // Query default shm page size (LDR_CNTRL SHMPSIZE).
3338   // Note that this is pure curiosity. We do not rely on default page size but set
3339   // our own page size after allocating.
3340   {
3341     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
3342     guarantee(shmid != -1, "shmget failed");
3343     void* p = ::shmat(shmid, NULL, 0);
3344     ::shmctl(shmid, IPC_RMID, NULL);
3345     guarantee(p != (void*) -1, "shmat failed");
3346     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
3347     ::shmdt(p);
3348   }
3349 
3350   // Before querying the stack page size, make sure we are not running as the primordial
3351   // thread (because the primordial thread's stack may have a different page size than
3352   // pthread thread stacks). Running a VM on the primordial thread won't work for a
3353   // number of reasons so we may just as well guarantee it here.
3354   guarantee0(!os::Aix::is_primordial_thread());
3355 
3356   // Query pthread stack page size. Should be the same as data page size because
3357   // pthread stacks are allocated from C-Heap.
3358   {
3359     int dummy = 0;
3360     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
3361   }
3362 
3363   // Query default text page size (LDR_CNTRL TEXTPSIZE).
3364   {
3365     address any_function =
3366       resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
3367     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
3368   }
3369 
3370   // Now probe for support of 64K pages and 16M pages.
3371 
3372   // Before OS/400 V6R1, there is no support for pages other than 4K.
3373   if (os::Aix::on_pase_V5R4_or_older()) {
3374     trcVerbose("OS/400 < V6R1 - no large page support.");
3375     g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
3376     goto query_multipage_support_end;
3377   }
3378 
3379   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
3380   {
3381     const int MAX_PAGE_SIZES = 4;
3382     psize_t sizes[MAX_PAGE_SIZES];
3383     const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
3384     if (num_psizes == -1) {
3385       trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
3386       trcVerbose("disabling multipage support.");
3387       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
3388       goto query_multipage_support_end;
3389     }
3390     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
3391     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
3392     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
3393     for (int i = 0; i < num_psizes; i ++) {
3394       trcVerbose(" %s ", describe_pagesize(sizes[i]));
3395     }
3396 
3397     // Can we use 64K, 16M pages?
3398     for (int i = 0; i < num_psizes; i ++) {
3399       const size_t pagesize = sizes[i];
3400       if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
3401         continue;
3402       }
3403       bool can_use = false;
3404       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
3405       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
3406         IPC_CREAT | S_IRUSR | S_IWUSR);
3407       guarantee0(shmid != -1); // Should always work.
3408       // Try to set pagesize.
3409       struct shmid_ds shm_buf = { 0 };
3410       shm_buf.shm_pagesize = pagesize;
3411       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
3412         const int en = errno;
3413         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
3414         trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
3415           MiscUtils::describe_errno(en));
3416       } else {
3417         // Attach and double-check the pagesize.
3418         void* p = ::shmat(shmid, NULL, 0);
3419         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
3420         guarantee0(p != (void*) -1); // Should always work.
3421         const size_t real_pagesize = os::Aix::query_pagesize(p);
3422         if (real_pagesize != pagesize) {
3423           trcVerbose("real page size (0x%llX) differs.", real_pagesize);
3424         } else {
3425           can_use = true;
3426         }
3427         ::shmdt(p);
3428       }
3429       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
3430       if (pagesize == SIZE_64K) {
3431         g_multipage_support.can_use_64K_pages = can_use;
3432       } else if (pagesize == SIZE_16M) {
3433         g_multipage_support.can_use_16M_pages = can_use;
3434       }
3435     }
3436 
3437   } // end: check which pages can be used for shared memory
3438 
3439 query_multipage_support_end:
3440 
3441   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
3442       describe_pagesize(g_multipage_support.pagesize));
3443   trcVerbose("Data page size (C-Heap, bss, etc): %s",
3444       describe_pagesize(g_multipage_support.datapsize));
3445   trcVerbose("Text page size: %s",
3446       describe_pagesize(g_multipage_support.textpsize));
3447   trcVerbose("Thread stack page size (pthread): %s",
3448       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
3449   trcVerbose("Default shared memory page size: %s",
3450       describe_pagesize(g_multipage_support.shmpsize));
3451   trcVerbose("Can use 64K pages dynamically with shared memory: %s",
3452       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
3453   trcVerbose("Can use 16M pages dynamically with shared memory: %s",
3454       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
3455   trcVerbose("Multipage error details: %d",
3456       g_multipage_support.error);
3457 
3458   // sanity checks
3459   assert0(g_multipage_support.pagesize == SIZE_4K);
3460   assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
3461   assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
3462   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
3463   assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);
3464 
3465 }
3466 
3467 // This is called _before_ most of the global arguments have been parsed.
3468 void os::init(void) {
3469 
3470   // This is basic, we want to know if that ever changes.
3471   // (The shared memory boundary is supposed to be 256M aligned.)
3472   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3473 
3474   // record process break at startup
3475   g_brk_at_startup = (address) ::sbrk(0);
3476   assert(g_brk_at_startup != (address) -1, "sbrk failed");
3477 
3478   // First off, we need to know whether we run on AIX or PASE, and
3479   // the OS level we run on.
3480   os::Aix::initialize_os_info();
3481 
3482   // Scan environment (SPEC1170 behaviour, etc).
3483   os::Aix::scan_environment();
3484 
3485   // Probe multipage support.
3486   query_multipage_support();
3487 
3488   // Act like we only have one page size by eliminating corner cases which
3489   // we did not support very well anyway.
3490   // We have two input conditions:
3491   // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3492   //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3493   //    setting.
3494   //    Data segment page size is important for us because it defines the thread stack page
3495   //    size, which is needed for guard page handling, stack banging etc.
3496   // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3497   //    and should be allocated with 64k pages.
3498   //
3499   // So, we do the following:
3500   // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
3501   // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
3502   // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
3503   // 64k          no              --- AIX 5.2 ? ---
3504   // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)
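       //
       // Hedged example (not a recommendation): a 64k data page size would typically
       // come either from linking the launcher with the datapsize linker option or
       // from the user exporting something like LDR_CNTRL=DATAPSIZE=64K before
       // starting the VM - the two knobs mentioned in 1) above.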
3505 


3534       FLAG_SET_ERGO(bool, Use64KPages, false);
3535     }
3536   } else {
3537     // datapsize = 64k. Data segment, thread stacks are 64k paged.
3538     // This normally means that we can allocate 64k pages dynamically.
3539     // (There is one special case where this may be false: EXTSHM=on.
3540     // but we decided to not support that mode).
3541     assert0(g_multipage_support.can_use_64K_pages);
3542     Aix::_page_size = SIZE_64K;
3543     trcVerbose("64K page mode");
3544     FLAG_SET_ERGO(bool, Use64KPages, true);
3545   }
3546 
3547   // Hard-wire the stack page size to the base page size; if that works, we can later remove
3548   // the separate stack page size altogether.
3549   Aix::_stack_page_size = Aix::_page_size;
3550 
3551   // For now UseLargePages is just ignored.
3552   FLAG_SET_ERGO(bool, UseLargePages, false);
3553   _page_sizes[0] = 0;
3554   _large_page_size = -1;
3555 
3556   // debug trace
3557   trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));
3558 
3559   // Next, we need to initialize libo4 and libperfstat libraries.
3560   if (os::Aix::on_pase()) {
3561     os::Aix::initialize_libo4();
3562   } else {
3563     os::Aix::initialize_libperfstat();
3564   }
3565 
3566   // Reset the perfstat information provided by ODM.
3567   if (os::Aix::on_aix()) {
3568     libperfstat::perfstat_reset();
3569   }
3570 
3571   // Now initialize basic system properties. Note that for some of the values we
3572   // need libperfstat etc.
3573   os::Aix::initialize_system_info();
3574 


3575   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3576 
3577   init_random(1234567);
3578 
3579   ThreadCritical::initialize();
3580 
3581   // Main_thread points to the aboriginal thread.
3582   Aix::_main_thread = pthread_self();
3583 
3584   initial_time_count = os::elapsed_counter();
3585 
3586   // If the pagesize of the VM is greater than 8K, determine the appropriate
3587   // number of initial guard pages. The user can change this with the
3588   // command line arguments, if needed.
3589   if (vm_page_size() > (int)Aix::vm_default_page_size()) {
3590     StackYellowPages = 1;
3591     StackRedPages = 1;
3592     StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
3593   }
3594 }
3595 
3596 // This is called _after_ the global arguments have been parsed.
3597 jint os::init_2(void) {
3598 
3599   if (os::Aix::on_pase()) {
3600     trcVerbose("Running on PASE.");
3601   } else {
3602     trcVerbose("Running on AIX (not PASE).");
3603   }
3604 
3605   trcVerbose("processor count: %d", os::_processor_count);
3606   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3607 
3608   // Initially build up the loaded dll map.
3609   LoadedLibraries::reload();
3610   if (Verbose) {
3611     trcVerbose("Loaded Libraries: ");
3612     LoadedLibraries::print(tty);
3613   }
3614 
3615   const int page_size = Aix::page_size();
3616   const int map_size = page_size;
3617 
3618   address map_address = (address) MAP_FAILED;
3619   const int prot  = PROT_READ;
3620   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3621 
3622   // Use optimized addresses for the polling page,
3623   // e.g. map it to a special 32-bit address.
3624   if (OptimizePollingPageLocation) {
3625     // architecture-specific list of address wishes:
3626     address address_wishes[] = {
3627       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3628       // PPC64: all address wishes are non-negative 32 bit values where
3629       // the lower 16 bits are all zero. we can load these addresses
3630       // with a single ppc_lis instruction.
3631       (address) 0x30000000, (address) 0x31000000,
3632       (address) 0x32000000, (address) 0x33000000,
3633       (address) 0x40000000, (address) 0x41000000,
3634       (address) 0x42000000, (address) 0x43000000,
3635       (address) 0x50000000, (address) 0x51000000,
3636       (address) 0x52000000, (address) 0x53000000,
3637       (address) 0x60000000, (address) 0x61000000,
3638       (address) 0x62000000, (address) 0x63000000
3639     };
3640     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3641 
3642     // iterate over the list of address wishes:
3643     for (int i=0; i<address_wishes_length; i++) {
3644       // Try to map with current address wish.
3645       // AIX needs MAP_FIXED if we provide an address, and mmap will
3646       // fail if the address is already mapped.
3647       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3648                                      map_size, prot,
3649                                      flags | MAP_FIXED,
3650                                      -1, 0);
3651       trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",

3652                    address_wishes[i], map_address + (ssize_t)page_size);

3653 
3654       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3655         // Map succeeded and map_address is at wished address, exit loop.
3656         break;
3657       }
3658 
3659       if (map_address != (address) MAP_FAILED) {
3660         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3661         ::munmap(map_address, map_size);
3662         map_address = (address) MAP_FAILED;
3663       }
3664       // Map failed, continue loop.
3665     }
3666   } // end OptimizePollingPageLocation
3667 
3668   if (map_address == (address) MAP_FAILED) {
3669     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3670   }
3671   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3672   os::set_polling_page(map_address);
3673 
3674   if (!UseMembar) {
3675     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3676     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3677     os::set_memory_serialize_page(mem_serialize_page);
3678 
3679     trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
3680         mem_serialize_page, mem_serialize_page + Aix::page_size(),
3681         Aix::page_size(), Aix::page_size());


3682   }
3683 
3684   // initialize suspend/resume support - must do this before signal_sets_init()
3685   if (SR_initialize() != 0) {
3686     perror("SR_initialize failed");
3687     return JNI_ERR;
3688   }
3689 
3690   Aix::signal_sets_init();
3691   Aix::install_signal_handlers();
3692 
3693   // Check minimum allowable stack size for thread creation and to initialize
3694   // the java system classes, including StackOverflowError - depends on page
3695   // size. Add a page for compiler2 recursion in main thread.
3696   // Add in 2*BytesPerWord times page size to account for VM stack during
3697   // class initialization depending on 32 or 64 bit VM.
3698   os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3699             (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
3700                      (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
3701 
3702   os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
3703 
3704   size_t threadStackSizeInBytes = ThreadStackSize * K;
3705   if (threadStackSizeInBytes != 0 &&
3706       threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3707     tty->print_cr("\nThe stack size specified is too small. "
3708                   "Specify at least " SIZE_FORMAT "k",
3709                   os::Aix::min_stack_allowed / K);
3710     return JNI_ERR;
3711   }
3712 
3713   // Make the stack size a multiple of the page size so that
3714   // the yellow/red zones can be guarded.
3715   // Note that this can be 0, if no default stacksize was set.
3716   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3717 
3718   if (UseNUMA) {
3719     UseNUMA = false;
3720     warning("NUMA optimizations are not available on this OS.");
3721   }
3722 
3723   if (MaxFDLimit) {
3724     // Set the number of file descriptors to the maximum. Print out an error
3725     // if getrlimit/setrlimit fails, but continue regardless.
3726     struct rlimit nbr_files;
3727     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3728     if (status != 0) {
3729       if (PrintMiscellaneous && (Verbose || WizardMode))
3730         perror("os::init_2 getrlimit failed");
3731     } else {
3732       nbr_files.rlim_cur = nbr_files.rlim_max;
3733       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3734       if (status != 0) {
3735         if (PrintMiscellaneous && (Verbose || WizardMode))
3736           perror("os::init_2 setrlimit failed");
3737       }
3738     }
3739   }
3740 
3741   if (PerfAllowAtExitRegistration) {
3742     // Only register atexit functions if PerfAllowAtExitRegistration is set.
3743     // At exit functions can be delayed until process exit time, which
3744     // can be problematic for embedded VM situations. Embedded VMs should
3745     // call DestroyJavaVM() to assure that VM resources are released.
3746 
3747     // Note: perfMemory_exit_helper atexit function may be removed in
3748     // the future if the appropriate cleanup code can be added to the
3749     // VM_Exit VMOperation's doit method.
3750     if (atexit(perfMemory_exit_helper) != 0) {
3751       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3752     }
3753   }
3754 
3755   return JNI_OK;
3756 }
3757 
3758 // Mark the polling page as unreadable
3759 void os::make_polling_page_unreadable(void) {
3760   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3761     fatal("Could not disable polling page");
3762   }
3763 };


3823     // NULL context is unexpected, double-check this is the VMThread.
3824     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3825   }
3826 }
3827 
3828 // Suspends the target using the signal mechanism and then grabs the PC before
3829 // resuming the target. Used by the flat-profiler only
3830 ExtendedPC os::get_thread_pc(Thread* thread) {
3831   // Make sure that it is called by the watcher for the VMThread.
3832   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3833   assert(thread->is_VM_thread(), "Can only be called for VMThread");
3834 
3835   PcFetcher fetcher(thread);
3836   fetcher.run();
3837   return fetcher.result();
3838 }
3839 
3840 ////////////////////////////////////////////////////////////////////////////////
3841 // debug support
3842 
3843 bool os::find(address addr, outputStream* st) {
3844 
3845   st->print(PTR_FORMAT ": ", addr);
3846 
3847   loaded_module_t lm;
3848   if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3849       LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3850     st->print("%s", lm.path);
3851     return true;
3852   }
3853 
3854   return false;
3855 }
3856 
3857 ////////////////////////////////////////////////////////////////////////////////
3858 // misc
3859 
3860 // This does not do anything on Aix. This is basically a hook for being
3861 // able to use structured exception handling (thread-local exception filters)
3862 // on, e.g., Win32.


4186   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4187   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4188   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4189 }
4190 
4191 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4192   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4193   info_ptr->may_skip_backward = false;     // elapsed time not wall time
4194   info_ptr->may_skip_forward = false;      // elapsed time not wall time
4195   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4196 }
4197 
4198 bool os::is_thread_cpu_time_supported() {
4199   return true;
4200 }
4201 
4202 // System loadavg support. Returns -1 if load average cannot be obtained.
4203 // For now just return the system wide load average (no processor sets).
4204 int os::loadavg(double values[], int nelem) {
4205 
4206   guarantee(nelem >= 0 && nelem <= 3, "argument error");
4207   guarantee(values, "argument error");
4208 
4209   if (os::Aix::on_pase()) {
4210 
4211     // AS/400 PASE: use libo4 porting library
4212     double v[3] = { 0.0, 0.0, 0.0 };
4213 
4214     if (libo4::get_load_avg(v, v + 1, v + 2)) {
4215       for (int i = 0; i < nelem; i++) {
4216         values[i] = v[i];
4217       }
4218       return nelem;
4219     } else {
4220       return -1;
4221     }
4222 
4223   } else {
4224 
4225     // AIX: use libperfstat
4226     libperfstat::cpuinfo_t ci;
4227     if (libperfstat::get_cpuinfo(&ci)) {
4228       for (int i = 0; i < nelem; i++) {
4229         values[i] = ci.loadavg[i];
4230       }
4231     } else {
4232       return -1;
4233     }
4234     return nelem;
4235   }
4236 }
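// A minimal usage sketch for os::loadavg() (hypothetical caller, for
// illustration only):
//
//   double avg[3];
//   if (os::loadavg(avg, 3) == 3) {
//     // avg[0..2] hold the 1, 5 and 15 minute load averages.
//   } else {
//     // -1: neither libo4 (PASE) nor libperfstat (AIX) could provide them.
//   }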
4237 
4238 void os::pause() {
4239   char filename[MAX_PATH];
4240   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4241     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4242   } else {
4243     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4244   }
4245 
4246   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4247   if (fd != -1) {


4257 }
4258 
4259 bool os::Aix::is_primordial_thread() {
4260   if (pthread_self() == (pthread_t)1) {
4261     return true;
4262   } else {
4263     return false;
4264   }
4265 }
4266 
4267 // OS recognition (PASE/AIX, OS level). Call this before calling any of
4268 // the Aix::on_pase() or Aix::os_version() static methods.
4269 void os::Aix::initialize_os_info() {
4270 
4271   assert(_on_pase == -1 && _os_version == -1, "already called.");
4272 
4273   struct utsname uts;
4274   memset(&uts, 0, sizeof(uts));
4275   strcpy(uts.sysname, "?");
4276   if (::uname(&uts) == -1) {
4277     trcVerbose("uname failed (%d)", errno);
4278     guarantee(0, "Could not determine whether we run on AIX or PASE");
4279   } else {
4280     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4281                "node \"%s\" machine \"%s\"\n",
4282                uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4283     const int major = atoi(uts.version);
4284     assert(major > 0, "invalid OS version");
4285     const int minor = atoi(uts.release);
4286     assert(minor > 0, "invalid OS release");
4287     _os_version = (major << 8) | minor;
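      // Example of the encoding (values for illustration only): on AIX 7.1,
      // uname reports version "7" and release "1", so _os_version becomes
      // (7 << 8) | 1 == 0x0701, matching the version checks below.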
4288     if (strcmp(uts.sysname, "OS400") == 0) {
4289       // We run on AS/400 PASE. We do not support versions older than V5R4M0.
4290       _on_pase = 1;
4291       if (_os_version < 0x0504) {
4292         trcVerbose("OS/400 releases older than V5R4M0 not supported.");
4293         assert(false, "OS/400 release too old.");
4294       } else {
4295         trcVerbose("We run on OS/400 (pase) V%dR%d", major, minor);
4296       }
4297     } else if (strcmp(uts.sysname, "AIX") == 0) {
4298       // We run on AIX. We do not support versions older than AIX 5.3.
4299       _on_pase = 0;
4300       if (_os_version < 0x0503) {
4301         trcVerbose("AIX release older than AIX 5.3 not supported.");
4302         assert(false, "AIX release too old.");
4303       } else {
4304         trcVerbose("We run on AIX %d.%d", major, minor);
4305       }
4306     } else {
4307       assert(false, "unknown OS");
4308     }
4309   }
4310 
4311   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4312 } // end: os::Aix::initialize_os_info()
4313 
4314 // Scan the environment for important settings which might affect the VM.
4315 // Trace out settings. Warn about invalid settings and/or correct them.
4316 //
4317 // Must run after os::Aix::initialize_os_info().
4318 void os::Aix::scan_environment() {
4319 
4320   char* p;
4321   int rc;
4322 
4323   // Warn explicitly if EXTSHM=ON is used. That switch changes how
4324   // System V shared memory behaves. One effect is that the page size of
4325   // shared memory cannot be changed dynamically, effectively preventing
4326   // large pages from working.
4327   // This switch was needed on 32-bit AIX, but on 64-bit AIX the general
4328   // recommendation (in OSS notes) is to switch it off.
4329   p = ::getenv("EXTSHM");
4330   trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
4331   if (p && strcasecmp(p, "ON") == 0) {
4332     _extshm = 1;
4333     trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
4334     if (!AllowExtshm) {
4335       // Under certain conditions we allow the user to continue. However, we want this
4336       // to be a fatal error by default, because on certain AIX systems leaving
4337       // EXTSHM=ON prevents the VM from allocating 64k pages for the heap.
4338       // We do not want to run with reduced performance.
4339       vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
4340     }
4341   } else {
4342     _extshm = 0;
4343   }
4344 
4345   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4346   // Not tested, not supported.
4347   //
4348   // Note that it might be worth the trouble to test and to require it, if only to
4349   // get useful return codes for mprotect.
4350   //
4351   // Note: Setting XPG_SUS_ENV inside the process is too late. It must be set earlier
4352   // (before exec()? before loading the libjvm?).
4353   p = ::getenv("XPG_SUS_ENV");
4354   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4355   if (p && strcmp(p, "ON") == 0) {
4356     _xpg_sus_mode = 1;
4357     trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
4358     // This is not supported. Worst of all, it changes the behaviour of mmap with
4359     // MAP_FIXED to clobber existing address ranges. If we ever want to support this,
4360     // we would have to do some testing first.
4361     guarantee(false, "XPG_SUS_ENV=ON not supported");
4362   } else {
4363     _xpg_sus_mode = 0;
4364   }
4365 
4366   if (os::Aix::on_pase()) {
4367     p = ::getenv("QIBM_MULTI_THREADED");
4368     trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
4369   }
4370 
4371   p = ::getenv("LDR_CNTRL");
4372   trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
4373   if (os::Aix::on_pase() && os::Aix::os_version() == 0x0701) {
4374     if (p && ::strstr(p, "TEXTPSIZE")) {
4375       trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
4376         "you may experience hangs or crashes on OS/400 V7R1.");
4377     }
4378   }
4379 
4380   p = ::getenv("AIXTHREAD_GUARDPAGES");
4381   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4382 
4383 } // end: os::Aix::scan_environment()
4384 
4385 // PASE: initialize the libo4 library (PASE porting library).
4386 void os::Aix::initialize_libo4() {
4387   guarantee(os::Aix::on_pase(), "OS/400 only.");
4388   if (!libo4::init()) {
4389     trcVerbose("libo4 initialization failed.");
4390     assert(false, "libo4 initialization failed");
4391   } else {
4392     trcVerbose("libo4 initialized.");
4393   }
4394 }
4395 
4396 // AIX: initialize the libperfstat library.
4397 void os::Aix::initialize_libperfstat() {
4398   assert(os::Aix::on_aix(), "AIX only");
4399   if (!libperfstat::init()) {
4400     trcVerbose("libperfstat initialization failed.");
4401     assert(false, "libperfstat initialization failed");
4402   } else {
4403     trcVerbose("libperfstat initialized.");
4404   }
4405 }
4406 
4407 /////////////////////////////////////////////////////////////////////////////
4408 // thread stack
4409 
4410 // Function to query the current stack size using pthread_getthrds_np.
4411 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4412   // This only works when invoked on a pthread. As we agreed not to use
4413   // primordial threads anyway, we assert that here.
4414   guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4415 
4416   // Information about this api can be found (a) in the pthread.h header and
4417   // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4418   //
4419   // Using this API to find out the current stack dimensions is not well defined.
4420   // But after a lot of tries and asking IBM about it, we concluded that it is safe
4421   // enough for cases where the pthread library creates the stacks itself. For cases
4422   // where we create our own stack and pass it to pthread_create, it does not seem
4423   // to work (the returned stack size in that case is 0).
4424 
4425   pthread_t tid = pthread_self();
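  // A minimal sketch of how the query typically proceeds from here, assuming
  // the pthread_getthrds_np() signature documented for AIX (a sketch for
  // illustration, not necessarily the exact code that follows):
  //
  //   struct __pthrdsinfo pinfo;
  //   int regbuf_size = 0;   // no register buffer requested
  //   const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL,
  //                                      &pinfo, sizeof(pinfo),
  //                                      NULL, &regbuf_size);
  //   // On success, pinfo.__pi_stackaddr and pinfo.__pi_stacksize describe
  //   // the thread's stack and can be used to fill p_stack_base/p_stack_size.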


< prev index next >