1 /* 2 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2012, 2017 SAP SE. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
23 * 24 */ 25 26 // According to the AIX OS doc #pragma alloca must be used 27 // with C++ compiler before referencing the function alloca() 28 #pragma alloca 29 30 // no precompiled headers 31 #include "jvm.h" 32 #include "classfile/classLoader.hpp" 33 #include "classfile/systemDictionary.hpp" 34 #include "classfile/vmSymbols.hpp" 35 #include "code/icBuffer.hpp" 36 #include "code/vtableStubs.hpp" 37 #include "compiler/compileBroker.hpp" 38 #include "interpreter/interpreter.hpp" 39 #include "logging/log.hpp" 40 #include "libo4.hpp" 41 #include "libperfstat_aix.hpp" 42 #include "libodm_aix.hpp" 43 #include "loadlib_aix.hpp" 44 #include "memory/allocation.inline.hpp" 45 #include "memory/filemap.hpp" 46 #include "misc_aix.hpp" 47 #include "oops/oop.inline.hpp" 48 #include "os_aix.inline.hpp" 49 #include "os_share_aix.hpp" 50 #include "porting_aix.hpp" 51 #include "prims/jniFastGetField.hpp" 52 #include "prims/jvm_misc.hpp" 53 #include "runtime/arguments.hpp" 54 #include "runtime/atomic.hpp" 55 #include "runtime/extendedPC.hpp" 56 #include "runtime/globals.hpp" 57 #include "runtime/interfaceSupport.hpp" 58 #include "runtime/java.hpp" 59 #include "runtime/javaCalls.hpp" 60 #include "runtime/mutexLocker.hpp" 61 #include "runtime/objectMonitor.hpp" 62 #include "runtime/orderAccess.inline.hpp" 63 #include "runtime/os.hpp" 64 #include "runtime/osThread.hpp" 65 #include "runtime/perfMemory.hpp" 66 #include "runtime/sharedRuntime.hpp" 67 #include "runtime/statSampler.hpp" 68 #include "runtime/stubRoutines.hpp" 69 #include "runtime/thread.inline.hpp" 70 #include "runtime/threadCritical.hpp" 71 #include "runtime/timer.hpp" 72 #include "runtime/vm_version.hpp" 73 #include "services/attachListener.hpp" 74 #include "services/runtimeService.hpp" 75 #include "utilities/align.hpp" 76 #include "utilities/decoder.hpp" 77 #include "utilities/defaultStream.hpp" 78 #include "utilities/events.hpp" 79 #include "utilities/growableArray.hpp" 80 #include "utilities/vmError.hpp" 81 82 // put 
// OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// Missing prototypes for various system APIs.
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

#if !defined(_AIXVERSION_610)
// These kernel interfaces only gained headers with AIX 6.1; declare them
// ourselves when building against older headers.
extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
extern "C" int getargs (procsinfo*, int, char*, int);
#endif

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD 100
#define ERROR_MP_EXTSHM_ACTIVE 101
#define ERROR_MP_VMGETINFO_FAILED 102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// Forward declarations of file-local helpers defined further down.
static address resolve_function_descriptor_to_code_pointer(address p);

static void vmembk_print_on(outputStream* os);

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong os::Aix::_physical_memory = 0;

pthread_t os::Aix::_main_thread = ((pthread_t)0);
int os::Aix::_page_size = -1;

// -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
int os::Aix::_on_pase = -1;

// 0 = uninitialized, otherwise 32 bit number:
//  0xVVRRTTSS
//  VV - major version
//  RR - minor version
//  TT - tech level, if known, 0 otherwise
//  SS - service pack, if known, 0 otherwise
uint32_t os::Aix::_os_version = 0;

// -1 = uninitialized, 0 - no, 1 - yes
int os::Aix::_xpg_sus_mode = -1;

// -1 = uninitialized, 0 - no, 1 - yes
int os::Aix::_extshm = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static jlong initial_time_count = 0;
static int clock_tics_per_sec = 100;
static sigset_t check_signal_done; // For diagnostics to print a message once (see run_periodic_checks)
static bool check_signals = true;
static int SR_signum = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;

// Process break recorded at startup.
static address g_brk_at_startup = NULL;

// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
// therefore should not be defined in AIX class.
//
// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
// latter two (16M "large" resp. 16G "huge" pages) require special
// setup and are normally not available.
//
// AIX supports multiple page sizes per process, for:
//  - Stack (of the primordial thread, so not relevant for us)
//  - Data - data, bss, heap, for us also pthread stacks
//  - Text - text code
//  - shared memory
//
// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
// and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
//
// For shared memory, page size can be set dynamically via
// shmctl().
Different shared memory regions can have different page 194 // sizes. 195 // 196 // More information can be found at AIBM info center: 197 // http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm 198 // 199 static struct { 200 size_t pagesize; // sysconf _SC_PAGESIZE (4K) 201 size_t datapsize; // default data page size (LDR_CNTRL DATAPSIZE) 202 size_t shmpsize; // default shared memory page size (LDR_CNTRL SHMPSIZE) 203 size_t pthr_stack_pagesize; // stack page size of pthread threads 204 size_t textpsize; // default text page size (LDR_CNTRL STACKPSIZE) 205 bool can_use_64K_pages; // True if we can alloc 64K pages dynamically with Sys V shm. 206 bool can_use_16M_pages; // True if we can alloc 16M pages dynamically with Sys V shm. 207 int error; // Error describing if something went wrong at multipage init. 208 } g_multipage_support = { 209 (size_t) -1, 210 (size_t) -1, 211 (size_t) -1, 212 (size_t) -1, 213 (size_t) -1, 214 false, false, 215 0 216 }; 217 218 // We must not accidentally allocate memory close to the BRK - even if 219 // that would work - because then we prevent the BRK segment from 220 // growing which may result in a malloc OOM even though there is 221 // enough memory. The problem only arises if we shmat() or mmap() at 222 // a specific wish address, e.g. to place the heap in a 223 // compressed-oops-friendly way. 224 static bool is_close_to_brk(address a) { 225 assert0(g_brk_at_startup != NULL); 226 if (a >= g_brk_at_startup && 227 a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) { 228 return true; 229 } 230 return false; 231 } 232 233 julong os::available_memory() { 234 return Aix::available_memory(); 235 } 236 237 julong os::Aix::available_memory() { 238 // Avoid expensive API call here, as returned value will always be null. 
239 if (os::Aix::on_pase()) { 240 return 0x0LL; 241 } 242 os::Aix::meminfo_t mi; 243 if (os::Aix::get_meminfo(&mi)) { 244 return mi.real_free; 245 } else { 246 return ULONG_MAX; 247 } 248 } 249 250 julong os::physical_memory() { 251 return Aix::physical_memory(); 252 } 253 254 // Return true if user is running as root. 255 256 bool os::have_special_privileges() { 257 static bool init = false; 258 static bool privileges = false; 259 if (!init) { 260 privileges = (getuid() != geteuid()) || (getgid() != getegid()); 261 init = true; 262 } 263 return privileges; 264 } 265 266 // Helper function, emulates disclaim64 using multiple 32bit disclaims 267 // because we cannot use disclaim64() on AS/400 and old AIX releases. 268 static bool my_disclaim64(char* addr, size_t size) { 269 270 if (size == 0) { 271 return true; 272 } 273 274 // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.) 275 const unsigned int maxDisclaimSize = 0x40000000; 276 277 const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize); 278 const unsigned int lastDisclaimSize = (size % maxDisclaimSize); 279 280 char* p = addr; 281 282 for (int i = 0; i < numFullDisclaimsNeeded; i ++) { 283 if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) { 284 trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno); 285 return false; 286 } 287 p += maxDisclaimSize; 288 } 289 290 if (lastDisclaimSize > 0) { 291 if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) { 292 trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno); 293 return false; 294 } 295 } 296 297 return true; 298 } 299 300 // Cpu architecture string 301 #if defined(PPC32) 302 static char cpu_arch[] = "ppc"; 303 #elif defined(PPC64) 304 static char cpu_arch[] = "ppc64"; 305 #else 306 #error Add appropriate cpu_arch setting 307 #endif 308 309 // Wrap the function "vmgetinfo" which is not available on older OS releases. 
310 static int checked_vmgetinfo(void *out, int command, int arg) { 311 if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) { 312 guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1"); 313 } 314 return ::vmgetinfo(out, command, arg); 315 } 316 317 // Given an address, returns the size of the page backing that address. 318 size_t os::Aix::query_pagesize(void* addr) { 319 320 if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) { 321 // AS/400 older than V6R1: no vmgetinfo here, default to 4K 322 return 4*K; 323 } 324 325 vm_page_info pi; 326 pi.addr = (uint64_t)addr; 327 if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) { 328 return pi.pagesize; 329 } else { 330 assert(false, "vmgetinfo failed to retrieve page size"); 331 return 4*K; 332 } 333 } 334 335 void os::Aix::initialize_system_info() { 336 337 // Get the number of online(logical) cpus instead of configured. 338 os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN); 339 assert(_processor_count > 0, "_processor_count must be > 0"); 340 341 // Retrieve total physical storage. 342 os::Aix::meminfo_t mi; 343 if (!os::Aix::get_meminfo(&mi)) { 344 assert(false, "os::Aix::get_meminfo failed."); 345 } 346 _physical_memory = (julong) mi.real_total; 347 } 348 349 // Helper function for tracing page sizes. 350 static const char* describe_pagesize(size_t pagesize) { 351 switch (pagesize) { 352 case 4*K : return "4K"; 353 case 64*K: return "64K"; 354 case 16*M: return "16M"; 355 case 16*G: return "16G"; 356 default: 357 assert(false, "surprise"); 358 return "??"; 359 } 360 } 361 362 // Probe OS for multipage support. 363 // Will fill the global g_multipage_support structure. 364 // Must be called before calling os::large_page_init(). 365 static void query_multipage_support() { 366 367 guarantee(g_multipage_support.pagesize == -1, 368 "do not call twice"); 369 370 g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE); 371 372 // This really would surprise me. 
373 assert(g_multipage_support.pagesize == 4*K, "surprise!"); 374 375 // Query default data page size (default page size for C-Heap, pthread stacks and .bss). 376 // Default data page size is defined either by linker options (-bdatapsize) 377 // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given, 378 // default should be 4K. 379 { 380 void* p = ::malloc(16*M); 381 g_multipage_support.datapsize = os::Aix::query_pagesize(p); 382 ::free(p); 383 } 384 385 // Query default shm page size (LDR_CNTRL SHMPSIZE). 386 // Note that this is pure curiosity. We do not rely on default page size but set 387 // our own page size after allocated. 388 { 389 const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR); 390 guarantee(shmid != -1, "shmget failed"); 391 void* p = ::shmat(shmid, NULL, 0); 392 ::shmctl(shmid, IPC_RMID, NULL); 393 guarantee(p != (void*) -1, "shmat failed"); 394 g_multipage_support.shmpsize = os::Aix::query_pagesize(p); 395 ::shmdt(p); 396 } 397 398 // Before querying the stack page size, make sure we are not running as primordial 399 // thread (because primordial thread's stack may have different page size than 400 // pthread thread stacks). Running a VM on the primordial thread won't work for a 401 // number of reasons so we may just as well guarantee it here. 402 guarantee0(!os::Aix::is_primordial_thread()); 403 404 // Query pthread stack page size. Should be the same as data page size because 405 // pthread stacks are allocated from C-Heap. 406 { 407 int dummy = 0; 408 g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy); 409 } 410 411 // Query default text page size (LDR_CNTRL TEXTPSIZE). 412 { 413 address any_function = 414 resolve_function_descriptor_to_code_pointer((address)describe_pagesize); 415 g_multipage_support.textpsize = os::Aix::query_pagesize(any_function); 416 } 417 418 // Now probe for support of 64K pages and 16M pages. 
419 420 // Before OS/400 V6R1, there is no support for pages other than 4K. 421 if (os::Aix::on_pase_V5R4_or_older()) { 422 trcVerbose("OS/400 < V6R1 - no large page support."); 423 g_multipage_support.error = ERROR_MP_OS_TOO_OLD; 424 goto query_multipage_support_end; 425 } 426 427 // Now check which page sizes the OS claims it supports, and of those, which actually can be used. 428 { 429 const int MAX_PAGE_SIZES = 4; 430 psize_t sizes[MAX_PAGE_SIZES]; 431 const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES); 432 if (num_psizes == -1) { 433 trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno); 434 trcVerbose("disabling multipage support."); 435 g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED; 436 goto query_multipage_support_end; 437 } 438 guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed."); 439 assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?"); 440 trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes); 441 for (int i = 0; i < num_psizes; i ++) { 442 trcVerbose(" %s ", describe_pagesize(sizes[i])); 443 } 444 445 // Can we use 64K, 16M pages? 446 for (int i = 0; i < num_psizes; i ++) { 447 const size_t pagesize = sizes[i]; 448 if (pagesize != 64*K && pagesize != 16*M) { 449 continue; 450 } 451 bool can_use = false; 452 trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize)); 453 const int shmid = ::shmget(IPC_PRIVATE, pagesize, 454 IPC_CREAT | S_IRUSR | S_IWUSR); 455 guarantee0(shmid != -1); // Should always work. 456 // Try to set pagesize. 457 struct shmid_ds shm_buf = { 0 }; 458 shm_buf.shm_pagesize = pagesize; 459 if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) { 460 const int en = errno; 461 ::shmctl(shmid, IPC_RMID, NULL); // As early as possible! 462 trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%n", 463 errno); 464 } else { 465 // Attach and double check pageisze. 
466 void* p = ::shmat(shmid, NULL, 0); 467 ::shmctl(shmid, IPC_RMID, NULL); // As early as possible! 468 guarantee0(p != (void*) -1); // Should always work. 469 const size_t real_pagesize = os::Aix::query_pagesize(p); 470 if (real_pagesize != pagesize) { 471 trcVerbose("real page size (0x%llX) differs.", real_pagesize); 472 } else { 473 can_use = true; 474 } 475 ::shmdt(p); 476 } 477 trcVerbose("Can use: %s", (can_use ? "yes" : "no")); 478 if (pagesize == 64*K) { 479 g_multipage_support.can_use_64K_pages = can_use; 480 } else if (pagesize == 16*M) { 481 g_multipage_support.can_use_16M_pages = can_use; 482 } 483 } 484 485 } // end: check which pages can be used for shared memory 486 487 query_multipage_support_end: 488 489 trcVerbose("base page size (sysconf _SC_PAGESIZE): %s", 490 describe_pagesize(g_multipage_support.pagesize)); 491 trcVerbose("Data page size (C-Heap, bss, etc): %s", 492 describe_pagesize(g_multipage_support.datapsize)); 493 trcVerbose("Text page size: %s", 494 describe_pagesize(g_multipage_support.textpsize)); 495 trcVerbose("Thread stack page size (pthread): %s", 496 describe_pagesize(g_multipage_support.pthr_stack_pagesize)); 497 trcVerbose("Default shared memory page size: %s", 498 describe_pagesize(g_multipage_support.shmpsize)); 499 trcVerbose("Can use 64K pages dynamically with shared meory: %s", 500 (g_multipage_support.can_use_64K_pages ? "yes" :"no")); 501 trcVerbose("Can use 16M pages dynamically with shared memory: %s", 502 (g_multipage_support.can_use_16M_pages ? 
"yes" :"no")); 503 trcVerbose("Multipage error details: %d", 504 g_multipage_support.error); 505 506 // sanity checks 507 assert0(g_multipage_support.pagesize == 4*K); 508 assert0(g_multipage_support.datapsize == 4*K || g_multipage_support.datapsize == 64*K); 509 assert0(g_multipage_support.textpsize == 4*K || g_multipage_support.textpsize == 64*K); 510 assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize); 511 assert0(g_multipage_support.shmpsize == 4*K || g_multipage_support.shmpsize == 64*K); 512 513 } 514 515 void os::init_system_properties_values() { 516 517 #define DEFAULT_LIBPATH "/lib:/usr/lib" 518 #define EXTENSIONS_DIR "/lib/ext" 519 520 // Buffer that fits several sprintfs. 521 // Note that the space for the trailing null is provided 522 // by the nulls included by the sizeof operator. 523 const size_t bufsize = 524 MAX2((size_t)MAXPATHLEN, // For dll_dir & friends. 525 (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir 526 char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); 527 528 // sysclasspath, java_home, dll_dir 529 { 530 char *pslash; 531 os::jvm_path(buf, bufsize); 532 533 // Found the full path to libjvm.so. 534 // Now cut the path to <java_home>/jre if we can. 535 pslash = strrchr(buf, '/'); 536 if (pslash != NULL) { 537 *pslash = '\0'; // Get rid of /libjvm.so. 538 } 539 pslash = strrchr(buf, '/'); 540 if (pslash != NULL) { 541 *pslash = '\0'; // Get rid of /{client|server|hotspot}. 542 } 543 Arguments::set_dll_dir(buf); 544 545 if (pslash != NULL) { 546 pslash = strrchr(buf, '/'); 547 if (pslash != NULL) { 548 *pslash = '\0'; // Get rid of /lib. 549 } 550 } 551 Arguments::set_java_home(buf); 552 set_boot_path('/', ':'); 553 } 554 555 // Where to look for native libraries. 556 557 // On Aix we get the user setting of LIBPATH. 558 // Eventually, all the library path setting will be done here. 559 // Get the user setting of LIBPATH. 
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs;

// Returns true if the current disposition of 'sig' is SIG_IGN.
// Note: checks sa_sigaction first; if it is set, that (not sa_handler)
// is taken as the installed handler.
bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
    return true;
  } else {
    return false;
  }
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
    // Only unblock the shutdown signals the application has not chosen
    // to ignore - an ignored disposition is respected.
    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
    }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java.
// Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// Establish the VM's standard signal mask on 'thread', remembering the
// caller's mask so it can be restored later.
void os::Aix::hotspot_sigmask(Thread* thread) {

  //Save caller's signal mask before setting VM signal mask
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// retrieve memory information.
// Returns false if something went wrong;
// content of pmi undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {
    // On PASE, use the libo4 porting library.

    unsigned long long virt_total = 0;
    unsigned long long real_total = 0;
    unsigned long long real_free = 0;
    unsigned long long pgsp_total = 0;
    unsigned long long pgsp_free = 0;
    if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
      // libo4 already reports byte counts - copy through unchanged.
      pmi->virt_total = virt_total;
      pmi->real_total = real_total;
      pmi->real_free = real_free;
      pmi->pgsp_total = pgsp_total;
      pmi->pgsp_free = pgsp_free;
      return true;
    }
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //    ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //    ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    // perfstat_memory_total() returns the number of structures filled in;
    // we requested exactly one.
    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //    ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total Total real memory (in 4 KB pages).
    // u_longlong_t real_free Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free Free paging space (in 4 KB pages).

    // perfstat counts in 4 KB pages - convert to bytes.
    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *thread_native_entry(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    thread->set_stack_base(os::current_stack_base());
    thread->set_stack_size(os::current_stack_size());
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  LogTarget(Info, os, thread) lt;
  if (lt.is_enabled()) {
    address low_address = thread->stack_end();
    address high_address = thread->stack_base();
    lt.print("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT
             ", stack [" PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "k using %uk pages)).",
             os::current_thread_id(), (uintx) kernel_thread_id, low_address, high_address,
             (high_address - low_address) / K, os::Aix::query_pagesize(low_address) / K);
  }

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    log_warning(os, thread)("Thread stack not in data segment.");
  }

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  // Shift this thread's frames down by a pseudo-random 0..7 x 128 bytes.
  alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // Thread_id is pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  os::Aix::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->run();

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  return 0;
}

// Create a new OS-level thread for 'thread'. Returns false (with the
// OSThread cleaned up again) if thread creation failed.
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t req_stack_size) {

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object.
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Set the correct thread state.
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // Init thread attributes.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  }

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // Calculate stack size if it's not specified by caller.
  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);

  // JDK-8187028: It was observed that on some configurations (4K backed thread stacks)
  // the real thread stack size may be smaller than the requested stack size, by as much as 64K.
  // This very much looks like a pthread lib error. As a workaround, increase the stack size
  // by 64K for small thread stacks (arbitrarily choosen to be < 4MB)
  if (stack_size < 4096 * K) {
    stack_size += 64 * K;
  }

  // On Aix, pthread_attr_setstacksize fails with huge values and leaves the
  // thread size in attr unchanged. If this is the minimal stack size as set
  // by pthread_attr_init this leads to crashes after thread creation. E.g. the
  // guard pages might not fit on the tiny stack created.
  int ret = pthread_attr_setstacksize(&attr, stack_size);
  if (ret != 0) {
    log_warning(os, thread)("The thread stack size specified is invalid: " SIZE_FORMAT "k",
                            stack_size / K);
  }

  // Save some cycles and a page by disabling OS guard pages where we have our own
  // VM guard pages (in java threads). For other threads, keep system default guard
  // pages in place.
  if (thr_type == java_thread || thr_type == compiler_thread) {
    ret = pthread_attr_setguardsize(&attr, 0);
  }

  pthread_t tid = 0;
  // Only attempt creation if the attribute setup above succeeded.
  if (ret == 0) {
    ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);
  }

  if (ret == 0) {
    char buf[64];
    log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
      (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  } else {
    char buf[64];
    log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.",
      ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  }

  pthread_attr_destroy(&attr);

  if (ret != 0) {
    // Need to clean up stuff we've allocated so far.
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

// Wrap an already-running native thread (not created by us) in an OSThread.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  return true;
}

// Resume a thread created suspended (see PTHREAD_CREATE_SUSPENDED_NP above).
void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "thr_continue failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");
// We are told to free resources of the argument thread, 993 // but we can only really operate on the current thread. 994 assert(Thread::current()->osthread() == osthread, 995 "os::free_thread but not current thread"); 996 997 // Restore caller's signal mask 998 sigset_t sigmask = osthread->caller_sigmask(); 999 pthread_sigmask(SIG_SETMASK, &sigmask, NULL); 1000 1001 delete osthread; 1002 } 1003 1004 //////////////////////////////////////////////////////////////////////////////// 1005 // time support 1006 1007 // Time since start-up in seconds to a fine granularity. 1008 // Used by VMSelfDestructTimer and the MemProfiler. 1009 double os::elapsedTime() { 1010 return (double)(os::elapsed_counter()) * 0.000001; 1011 } 1012 1013 jlong os::elapsed_counter() { 1014 timeval time; 1015 int status = gettimeofday(&time, NULL); 1016 return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count; 1017 } 1018 1019 jlong os::elapsed_frequency() { 1020 return (1000 * 1000); 1021 } 1022 1023 bool os::supports_vtime() { return true; } 1024 bool os::enable_vtime() { return false; } 1025 bool os::vtime_enabled() { return false; } 1026 1027 double os::elapsedVTime() { 1028 struct rusage usage; 1029 int retval = getrusage(RUSAGE_THREAD, &usage); 1030 if (retval == 0) { 1031 return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000); 1032 } else { 1033 // better than nothing, but not much 1034 return elapsedTime(); 1035 } 1036 } 1037 1038 jlong os::javaTimeMillis() { 1039 timeval time; 1040 int status = gettimeofday(&time, NULL); 1041 assert(status != -1, "aix error at gettimeofday()"); 1042 return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000); 1043 } 1044 1045 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) { 1046 timeval time; 1047 int status = gettimeofday(&time, NULL); 1048 assert(status != -1, "aix error at gettimeofday()"); 1049 seconds = jlong(time.tv_sec); 1050 nanos = 
jlong(time.tv_usec) * 1000; 1051 } 1052 1053 jlong os::javaTimeNanos() { 1054 if (os::Aix::on_pase()) { 1055 1056 timeval time; 1057 int status = gettimeofday(&time, NULL); 1058 assert(status != -1, "PASE error at gettimeofday()"); 1059 jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec); 1060 return 1000 * usecs; 1061 1062 } else { 1063 // On AIX use the precision of processors real time clock 1064 // or time base registers. 1065 timebasestruct_t time; 1066 int rc; 1067 1068 // If the CPU has a time register, it will be used and 1069 // we have to convert to real time first. After convertion we have following data: 1070 // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970] 1071 // time.tb_low [nanoseconds after the last full second above] 1072 // We better use mread_real_time here instead of read_real_time 1073 // to ensure that we will get a monotonic increasing time. 1074 if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) { 1075 rc = time_base_to_time(&time, TIMEBASE_SZ); 1076 assert(rc != -1, "aix error at time_base_to_time()"); 1077 } 1078 return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low); 1079 } 1080 } 1081 1082 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 1083 info_ptr->max_value = ALL_64_BITS; 1084 // mread_real_time() is monotonic (see 'os::javaTimeNanos()') 1085 info_ptr->may_skip_backward = false; 1086 info_ptr->may_skip_forward = false; 1087 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 1088 } 1089 1090 // Return the real, user, and system times in seconds from an 1091 // arbitrary fixed point in the past. 
1092 bool os::getTimesSecs(double* process_real_time, 1093 double* process_user_time, 1094 double* process_system_time) { 1095 struct tms ticks; 1096 clock_t real_ticks = times(&ticks); 1097 1098 if (real_ticks == (clock_t) (-1)) { 1099 return false; 1100 } else { 1101 double ticks_per_second = (double) clock_tics_per_sec; 1102 *process_user_time = ((double) ticks.tms_utime) / ticks_per_second; 1103 *process_system_time = ((double) ticks.tms_stime) / ticks_per_second; 1104 *process_real_time = ((double) real_ticks) / ticks_per_second; 1105 1106 return true; 1107 } 1108 } 1109 1110 char * os::local_time_string(char *buf, size_t buflen) { 1111 struct tm t; 1112 time_t long_time; 1113 time(&long_time); 1114 localtime_r(&long_time, &t); 1115 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 1116 t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, 1117 t.tm_hour, t.tm_min, t.tm_sec); 1118 return buf; 1119 } 1120 1121 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 1122 return localtime_r(clock, res); 1123 } 1124 1125 //////////////////////////////////////////////////////////////////////////////// 1126 // runtime exit support 1127 1128 // Note: os::shutdown() might be called very early during initialization, or 1129 // called from signal handler. Before adding something to os::shutdown(), make 1130 // sure it is async-safe and can handle partially initialized VM. 1131 void os::shutdown() { 1132 1133 // allow PerfMemory to attempt cleanup of any persistent resources 1134 perfMemory_exit(); 1135 1136 // needs to remove object in file system 1137 AttachListener::abort(); 1138 1139 // flush buffered output, finish log files 1140 ostream_abort(); 1141 1142 // Check for abort hook 1143 abort_hook_t abort_hook = Arguments::abort_hook(); 1144 if (abort_hook != NULL) { 1145 abort_hook(); 1146 } 1147 } 1148 1149 // Note: os::abort() might be called very early during initialization, or 1150 // called from signal handler. 
Before adding something to os::abort(), make 1151 // sure it is async-safe and can handle partially initialized VM. 1152 void os::abort(bool dump_core, void* siginfo, const void* context) { 1153 os::shutdown(); 1154 if (dump_core) { 1155 #ifndef PRODUCT 1156 fdStream out(defaultStream::output_fd()); 1157 out.print_raw("Current thread is "); 1158 char buf[16]; 1159 jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id()); 1160 out.print_raw_cr(buf); 1161 out.print_raw_cr("Dumping core ..."); 1162 #endif 1163 ::abort(); // dump core 1164 } 1165 1166 ::exit(1); 1167 } 1168 1169 // Die immediately, no exit hook, no abort hook, no cleanup. 1170 void os::die() { 1171 ::abort(); 1172 } 1173 1174 // This method is a copy of JDK's sysGetLastErrorString 1175 // from src/solaris/hpi/src/system_md.c 1176 1177 size_t os::lasterror(char *buf, size_t len) { 1178 if (errno == 0) return 0; 1179 1180 const char *s = os::strerror(errno); 1181 size_t n = ::strlen(s); 1182 if (n >= len) { 1183 n = len - 1; 1184 } 1185 ::strncpy(buf, s, n); 1186 buf[n] = '\0'; 1187 return n; 1188 } 1189 1190 intx os::current_thread_id() { 1191 return (intx)pthread_self(); 1192 } 1193 1194 int os::current_process_id() { 1195 return getpid(); 1196 } 1197 1198 // DLL functions 1199 1200 const char* os::dll_file_extension() { return ".so"; } 1201 1202 // This must be hard coded because it's the system's temporary 1203 // directory not the java application's temp directory, ala java.io.tmpdir. 1204 const char* os::get_temp_directory() { return "/tmp"; } 1205 1206 // Check if addr is inside libjvm.so. 1207 bool os::address_is_in_vm(address addr) { 1208 1209 // Input could be a real pc or a function pointer literal. The latter 1210 // would be a function descriptor residing in the data segment of a module. 
1211 loaded_module_t lm; 1212 if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) { 1213 return lm.is_in_vm; 1214 } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) { 1215 return lm.is_in_vm; 1216 } else { 1217 return false; 1218 } 1219 1220 } 1221 1222 // Resolve an AIX function descriptor literal to a code pointer. 1223 // If the input is a valid code pointer to a text segment of a loaded module, 1224 // it is returned unchanged. 1225 // If the input is a valid AIX function descriptor, it is resolved to the 1226 // code entry point. 1227 // If the input is neither a valid function descriptor nor a valid code pointer, 1228 // NULL is returned. 1229 static address resolve_function_descriptor_to_code_pointer(address p) { 1230 1231 if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) { 1232 // It is a real code pointer. 1233 return p; 1234 } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) { 1235 // Pointer to data segment, potential function descriptor. 1236 address code_entry = (address)(((FunctionDescriptor*)p)->entry()); 1237 if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) { 1238 // It is a function descriptor. 1239 return code_entry; 1240 } 1241 } 1242 1243 return NULL; 1244 } 1245 1246 bool os::dll_address_to_function_name(address addr, char *buf, 1247 int buflen, int *offset, 1248 bool demangle) { 1249 if (offset) { 1250 *offset = -1; 1251 } 1252 // Buf is not optional, but offset is optional. 1253 assert(buf != NULL, "sanity check"); 1254 buf[0] = '\0'; 1255 1256 // Resolve function ptr literals first. 
1257 addr = resolve_function_descriptor_to_code_pointer(addr); 1258 if (!addr) { 1259 return false; 1260 } 1261 1262 return AixSymbols::get_function_name(addr, buf, buflen, offset, NULL, demangle); 1263 } 1264 1265 bool os::dll_address_to_library_name(address addr, char* buf, 1266 int buflen, int* offset) { 1267 if (offset) { 1268 *offset = -1; 1269 } 1270 // Buf is not optional, but offset is optional. 1271 assert(buf != NULL, "sanity check"); 1272 buf[0] = '\0'; 1273 1274 // Resolve function ptr literals first. 1275 addr = resolve_function_descriptor_to_code_pointer(addr); 1276 if (!addr) { 1277 return false; 1278 } 1279 1280 return AixSymbols::get_module_name(addr, buf, buflen); 1281 } 1282 1283 // Loads .dll/.so and in case of error it checks if .dll/.so was built 1284 // for the same architecture as Hotspot is running on. 1285 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) { 1286 1287 if (ebuf && ebuflen > 0) { 1288 ebuf[0] = '\0'; 1289 ebuf[ebuflen - 1] = '\0'; 1290 } 1291 1292 if (!filename || strlen(filename) == 0) { 1293 ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1); 1294 return NULL; 1295 } 1296 1297 // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants. 1298 void * result= ::dlopen(filename, RTLD_LAZY); 1299 if (result != NULL) { 1300 // Reload dll cache. Don't do this in signal handling. 
1301 LoadedLibraries::reload(); 1302 return result; 1303 } else { 1304 // error analysis when dlopen fails 1305 const char* const error_report = ::dlerror(); 1306 if (error_report && ebuf && ebuflen > 0) { 1307 snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s", 1308 filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report); 1309 } 1310 } 1311 return NULL; 1312 } 1313 1314 void* os::dll_lookup(void* handle, const char* name) { 1315 void* res = dlsym(handle, name); 1316 return res; 1317 } 1318 1319 void* os::get_default_process_handle() { 1320 return (void*)::dlopen(NULL, RTLD_LAZY); 1321 } 1322 1323 void os::print_dll_info(outputStream *st) { 1324 st->print_cr("Dynamic libraries:"); 1325 LoadedLibraries::print(st); 1326 } 1327 1328 void os::get_summary_os_info(char* buf, size_t buflen) { 1329 // There might be something more readable than uname results for AIX. 1330 struct utsname name; 1331 uname(&name); 1332 snprintf(buf, buflen, "%s %s", name.release, name.version); 1333 } 1334 1335 void os::print_os_info(outputStream* st) { 1336 st->print("OS:"); 1337 1338 st->print("uname:"); 1339 struct utsname name; 1340 uname(&name); 1341 st->print(name.sysname); st->print(" "); 1342 st->print(name.nodename); st->print(" "); 1343 st->print(name.release); st->print(" "); 1344 st->print(name.version); st->print(" "); 1345 st->print(name.machine); 1346 st->cr(); 1347 1348 uint32_t ver = os::Aix::os_version(); 1349 st->print_cr("AIX kernel version %u.%u.%u.%u", 1350 (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF); 1351 1352 os::Posix::print_rlimit_info(st); 1353 1354 // load average 1355 st->print("load average:"); 1356 double loadavg[3] = {-1.L, -1.L, -1.L}; 1357 os::loadavg(loadavg, 3); 1358 st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]); 1359 st->cr(); 1360 1361 // print wpar info 1362 libperfstat::wparinfo_t wi; 1363 if (libperfstat::get_wparinfo(&wi)) { 1364 st->print_cr("wpar info"); 1365 
st->print_cr("name: %s", wi.name); 1366 st->print_cr("id: %d", wi.wpar_id); 1367 st->print_cr("type: %s", (wi.app_wpar ? "application" : "system")); 1368 } 1369 1370 // print partition info 1371 libperfstat::partitioninfo_t pi; 1372 if (libperfstat::get_partitioninfo(&pi)) { 1373 st->print_cr("partition info"); 1374 st->print_cr(" name: %s", pi.name); 1375 } 1376 1377 } 1378 1379 void os::print_memory_info(outputStream* st) { 1380 1381 st->print_cr("Memory:"); 1382 1383 st->print_cr(" Base page size (sysconf _SC_PAGESIZE): %s", 1384 describe_pagesize(g_multipage_support.pagesize)); 1385 st->print_cr(" Data page size (C-Heap, bss, etc): %s", 1386 describe_pagesize(g_multipage_support.datapsize)); 1387 st->print_cr(" Text page size: %s", 1388 describe_pagesize(g_multipage_support.textpsize)); 1389 st->print_cr(" Thread stack page size (pthread): %s", 1390 describe_pagesize(g_multipage_support.pthr_stack_pagesize)); 1391 st->print_cr(" Default shared memory page size: %s", 1392 describe_pagesize(g_multipage_support.shmpsize)); 1393 st->print_cr(" Can use 64K pages dynamically with shared meory: %s", 1394 (g_multipage_support.can_use_64K_pages ? "yes" :"no")); 1395 st->print_cr(" Can use 16M pages dynamically with shared memory: %s", 1396 (g_multipage_support.can_use_16M_pages ? "yes" :"no")); 1397 st->print_cr(" Multipage error: %d", 1398 g_multipage_support.error); 1399 st->cr(); 1400 st->print_cr(" os::vm_page_size: %s", describe_pagesize(os::vm_page_size())); 1401 1402 // print out LDR_CNTRL because it affects the default page sizes 1403 const char* const ldr_cntrl = ::getenv("LDR_CNTRL"); 1404 st->print_cr(" LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>"); 1405 1406 // Print out EXTSHM because it is an unsupported setting. 1407 const char* const extshm = ::getenv("EXTSHM"); 1408 st->print_cr(" EXTSHM=%s.", extshm ? extshm : "<unset>"); 1409 if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) { 1410 st->print_cr(" *** Unsupported! 
Please remove EXTSHM from your environment! ***"); 1411 } 1412 1413 // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks. 1414 const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES"); 1415 st->print_cr(" AIXTHREAD_GUARDPAGES=%s.", 1416 aixthread_guardpages ? aixthread_guardpages : "<unset>"); 1417 1418 os::Aix::meminfo_t mi; 1419 if (os::Aix::get_meminfo(&mi)) { 1420 char buffer[256]; 1421 if (os::Aix::on_aix()) { 1422 st->print_cr("physical total : " SIZE_FORMAT, mi.real_total); 1423 st->print_cr("physical free : " SIZE_FORMAT, mi.real_free); 1424 st->print_cr("swap total : " SIZE_FORMAT, mi.pgsp_total); 1425 st->print_cr("swap free : " SIZE_FORMAT, mi.pgsp_free); 1426 } else { 1427 // PASE - Numbers are result of QWCRSSTS; they mean: 1428 // real_total: Sum of all system pools 1429 // real_free: always 0 1430 // pgsp_total: we take the size of the system ASP 1431 // pgsp_free: size of system ASP times percentage of system ASP unused 1432 st->print_cr("physical total : " SIZE_FORMAT, mi.real_total); 1433 st->print_cr("system asp total : " SIZE_FORMAT, mi.pgsp_total); 1434 st->print_cr("%% system asp used : " SIZE_FORMAT, 1435 mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f); 1436 } 1437 st->print_raw(buffer); 1438 } 1439 st->cr(); 1440 1441 // Print segments allocated with os::reserve_memory. 1442 st->print_cr("internal virtual memory regions used by vm:"); 1443 vmembk_print_on(st); 1444 } 1445 1446 // Get a string for the cpuinfo that is a summary of the cpu type 1447 void os::get_summary_cpu_info(char* buf, size_t buflen) { 1448 // This looks good 1449 libperfstat::cpuinfo_t ci; 1450 if (libperfstat::get_cpuinfo(&ci)) { 1451 strncpy(buf, ci.version, buflen); 1452 } else { 1453 strncpy(buf, "AIX", buflen); 1454 } 1455 } 1456 1457 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) { 1458 // Nothing to do beyond what os::print_cpu_info() does. 
}

// Forward declaration; defined further down in this file.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen);

// Print the installed handler for every signal the VM cares about.
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, SR_signum, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SIGTRAP, buf, buflen);
  // We also want to know if someone else adds a SIGDANGER handler because
  // that will interfere with OOM killling.
  print_signal_handler(st, SIGDANGER, buf, buflen);
}

// Cached result of os::jvm_path(); filled lazily on first call.
static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // dladdr on this function's own address yields the module containing it.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");

  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
    // value for buf is "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.so".
    // If "/jre/lib/" appears at the right place in the string, then
    // assume we are installed in a JDK and we're done. Otherwise, check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like libjvm.so is installed there (append a fake suffix
    // hotspot/libjvm.so).
    // Walk back over the last four '/'-separated components of the path.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 4; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char* jrelib_p;
        int len;

        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        if (p == NULL) {
          return;
        }
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        rp = os::Posix::realpath(java_home_var, buf, buflen);
        if (rp == NULL) {
          return;
        }

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer room");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib");
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib");
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
          if (rp == NULL) {
            return;
          }
        }
      }
    }
  }

  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}

void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}

////////////////////////////////////////////////////////////////////////////////
// sun.misc.Signal support

// Counts SIGINTs seen since the last check_pending_signals() reset; used to
// throttle duplicate Ctrl-C deliveries (one per thread).
static volatile jint sigint_count = 0;

// Native handler installed by os::signal(); forwards the signal to the Java
// signal dispatcher via signal_notify().
static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let VM die immediately.
  if (sig == SIGINT && VMError::is_error_reported()) {
    os::die();
  }

  os::signal_notify(sig);
}

void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}

extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}

// Install 'handler' for 'signal_number' via sigaction. Returns the previous
// handler, or (void*)-1 if registration failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  sigfillset(&(sigAct.sa_mask));

  // Do not block out synchronous signals in the signal handler.
  // Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies. Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
  // On AIX, PASE there was a case where a SIGSEGV happened, followed
  // by a SIGILL, which was blocked due to the signal mask. The process
  // just hung forever.
  // Better to crash from a secondary signal than to hang.
  sigdelset(&(sigAct.sa_mask), SIGSEGV);
  sigdelset(&(sigAct.sa_mask), SIGBUS);
  sigdelset(&(sigAct.sa_mask), SIGILL);
  sigdelset(&(sigAct.sa_mask), SIGFPE);
  sigdelset(&(sigAct.sa_mask), SIGTRAP);

  sigAct.sa_flags = SA_RESTART|SA_SIGINFO;

  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}

// Send 'signal_number' to the current process.
void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}

//
// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.
//

// Will be modified when max signal is changed to be dynamic
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value
static volatile jint pending_signals[NSIG+1] = { 0 };

// Wrapper functions for: sem_init(), sem_post(), sem_wait()
// On AIX, we use sem_init(), sem_post(), sem_wait()
// On Pase, we need to use msem_lock() and msem_unlock(), because Posix Semaphores
// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
// Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
// on AIX, msem_..() calls are suspected of causing problems.
static sem_t sig_sem;
static msemaphore* p_sig_msem = 0;

// One-time initialization of the signal-notification semaphore
// (POSIX semaphore on AIX, memory semaphore on PASE).
static void local_sem_init() {
  if (os::Aix::on_aix()) {
    int rc = ::sem_init(&sig_sem, 0, 0);
    guarantee(rc != -1, "sem_init failed");
  } else {
    // Memory semaphores must live in shared mem.
    guarantee0(p_sig_msem == NULL);
    p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
    guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
    guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
  }
}

// Signal the semaphore (wake one waiter). Failures are logged once only,
// because this can run in signal-handler context.
static void local_sem_post() {
  static bool warn_only_once = false;
  if (os::Aix::on_aix()) {
    int rc = ::sem_post(&sig_sem);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  } else {
    guarantee0(p_sig_msem != NULL);
    int rc = ::msem_unlock(p_sig_msem, 0);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  }
}

// Block until the semaphore is signalled. Failures are logged once only.
static void local_sem_wait() {
  static bool warn_only_once = false;
  if (os::Aix::on_aix()) {
    int rc = ::sem_wait(&sig_sem);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  } else {
    guarantee0(p_sig_msem != NULL); // must init before use
    int rc = ::msem_lock(p_sig_msem, 0);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  }
}

void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  local_sem_init();
}

// Record a pending signal (called from handler context) and wake the Java
// signal dispatcher thread.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  local_sem_post();
}

// Scan the pending-signal counters and atomically claim one occurrence.
// Returns the signal number, or -1 if none pending and 'wait' is false;
// if 'wait' is true, blocks on the semaphore until a signal arrives,
// honoring the external-suspend protocol while blocked.
static int check_pending_signals(bool wait) {
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // CAS decrement: only the thread that wins the exchange consumes
      // this occurrence of signal i.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      local_sem_wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //

        // Re-post so the wakeup is not lost, then self-suspend.
        local_sem_post();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

int os::signal_lookup() {
  return check_pending_signals(false);
}

int os::signal_wait() {
  return check_pending_signals(true);
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

// We need to keep small simple bookkeeping for os::reserve_memory and friends.

#define VMEM_MAPPED  1
#define VMEM_SHMATED 2

// Bookkeeping record for one region reserved via os::reserve_memory,
// kept in a singly-linked list (see 'vmem' below).
struct vmembk_t {
  int type;         // 1 - mmap, 2 - shmat
  char* addr;
  size_t size;      // Real size, may be larger than usersize.
  size_t pagesize;  // page size of area
  vmembk_t* next;

  // True if p lies inside [addr, addr + size).
  bool contains_addr(char* p) const {
    return p >= addr && p < (addr + size);
  }

  // True if the whole range [p, p + s) lies inside this block.
  bool contains_range(char* p, size_t s) const {
    return contains_addr(p) && contains_addr(p + s - 1);
  }

  void print_on(outputStream* os) const {
    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
      " bytes, %d %s pages), %s",
      addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
      (type == VMEM_SHMATED ? "shmat" : "mmap")
    );
  }

  // Check that range is a sub range of memory block (or equal to memory block);
  // also check that range is fully page aligned to the page size of the block.
  void assert_is_valid_subrange(char* p, size_t s) const {
    if (!contains_range(p, s)) {
      trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
              "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
              p, p + s, addr, addr + size);
      guarantee0(false);
    }
    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
      trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
              " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
      guarantee0(false);
    }
  }
};

// Global list of reserved regions, protected by its own critical section.
static struct {
  vmembk_t* first;
  MiscUtils::CritSect cs;
} vmem;

// Prepend a new bookkeeping record for [addr, addr + size) to the list.
static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
  vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
  assert0(p);
  if (p) {
    MiscUtils::AutoCritSect lck(&vmem.cs);
    p->addr = addr; p->size = size;
    p->pagesize = pagesize;
    p->type = type;
    p->next = vmem.first;
    vmem.first = p;
  }
}

// Find the bookkeeping record containing 'addr', or NULL.
static vmembk_t* vmembk_find(char* addr) {
  MiscUtils::AutoCritSect lck(&vmem.cs);
  for (vmembk_t* p = vmem.first; p; p = p->next) {
    if (p->addr <= addr && (p->addr + p->size) > addr) {
      return p;
    }
  }
  return NULL;
}

// Unlink and free the given bookkeeping record.
static void vmembk_remove(vmembk_t* p0) {
  MiscUtils::AutoCritSect lck(&vmem.cs);
  assert0(p0);
  assert0(vmem.first); // List should not be empty.
  for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
    if (*pp == p0) {
      *pp = p0->next;
      ::free(p0);
      return;
    }
  }
  assert0(false); // Not found?
}

// Print all reserved regions (used by os::print_memory_info).
static void vmembk_print_on(outputStream* os) {
  MiscUtils::AutoCritSect lck(&vmem.cs);
  for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
    vmi->print_on(os);
    os->cr();
  }
}

// Reserve and attach a section of System V memory.
// If <requested_addr> is not NULL, function will attempt to attach the memory at the given
// address. Failing that, it will attach the memory anywhere.
// If <requested_addr> is NULL, function will attach the memory anywhere.
//
// <alignment_hint> is being ignored by this function. It is very probable however that the
// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
// Should this be not enough, we can put more work into it.
static char* reserve_shmated_memory (
  size_t bytes,
  char* requested_addr,
  size_t alignment_hint) {

  trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
    PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
    bytes, requested_addr, alignment_hint);

  // Either give me wish address or wish alignment but not both.
  assert0(!(requested_addr != NULL && alignment_hint != 0));

  // We must prevent anyone from attaching too close to the
  // BRK because that may cause malloc OOM.
  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
      "Will attach anywhere.", requested_addr);
    // Act like the OS refused to attach there.
    requested_addr = NULL;
  }

  // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
  // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
  if (os::Aix::on_pase_V5R4_or_older()) {
    ShouldNotReachHere();
  }

  // Align size of shm up to 64K to avoid errors if we later try to change the page size.
  const size_t size = align_up(bytes, 64*K);

  // Reserve the shared segment.
  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
  if (shmid == -1) {
    trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
    return NULL;
  }

  // Important note:
  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
  // We must right after attaching it remove it from the system. System V shm segments are global and
  // survive the process.
  // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).

  // Request 64K pages for the segment before attaching.
  struct shmid_ds shmbuf;
  memset(&shmbuf, 0, sizeof(shmbuf));
  shmbuf.shm_pagesize = 64*K;
  if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
    trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
               size / (64*K), errno);
    // I want to know if this ever happens.
    assert(false, "failed to set page size for shmat");
  }

  // Now attach the shared segment.
  // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
  // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
  // were not a segment boundary.
  char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
  const int errno_shmat = errno;

  // (A) Right after shmat and before handing shmat errors delete the shm segment.
1941 if (::shmctl(shmid, IPC_RMID, NULL) == -1) { 1942 trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno); 1943 assert(false, "failed to remove shared memory segment!"); 1944 } 1945 1946 // Handle shmat error. If we failed to attach, just return. 1947 if (addr == (char*)-1) { 1948 trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat); 1949 return NULL; 1950 } 1951 1952 // Just for info: query the real page size. In case setting the page size did not 1953 // work (see above), the system may have given us something other then 4K (LDR_CNTRL). 1954 const size_t real_pagesize = os::Aix::query_pagesize(addr); 1955 if (real_pagesize != shmbuf.shm_pagesize) { 1956 trcVerbose("pagesize is, surprisingly, %h.", real_pagesize); 1957 } 1958 1959 if (addr) { 1960 trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)", 1961 addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize)); 1962 } else { 1963 if (requested_addr != NULL) { 1964 trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr); 1965 } else { 1966 trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size); 1967 } 1968 } 1969 1970 // book-keeping 1971 vmembk_add(addr, size, real_pagesize, VMEM_SHMATED); 1972 assert0(is_aligned_to(addr, os::vm_page_size())); 1973 1974 return addr; 1975 } 1976 1977 static bool release_shmated_memory(char* addr, size_t size) { 1978 1979 trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].", 1980 addr, addr + size - 1); 1981 1982 bool rc = false; 1983 1984 // TODO: is there a way to verify shm size without doing bookkeeping? 
1985 if (::shmdt(addr) != 0) { 1986 trcVerbose("error (%d).", errno); 1987 } else { 1988 trcVerbose("ok."); 1989 rc = true; 1990 } 1991 return rc; 1992 } 1993 1994 static bool uncommit_shmated_memory(char* addr, size_t size) { 1995 trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].", 1996 addr, addr + size - 1); 1997 1998 const bool rc = my_disclaim64(addr, size); 1999 2000 if (!rc) { 2001 trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size); 2002 return false; 2003 } 2004 return true; 2005 } 2006 2007 //////////////////////////////// mmap-based routines ///////////////////////////////// 2008 2009 // Reserve memory via mmap. 2010 // If <requested_addr> is given, an attempt is made to attach at the given address. 2011 // Failing that, memory is allocated at any address. 2012 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to 2013 // allocate at an address aligned with the given alignment. Failing that, memory 2014 // is aligned anywhere. 2015 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) { 2016 trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", " 2017 "alignment_hint " UINTX_FORMAT "...", 2018 bytes, requested_addr, alignment_hint); 2019 2020 // If a wish address is given, but not aligned to 4K page boundary, mmap will fail. 2021 if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) { 2022 trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr); 2023 return NULL; 2024 } 2025 2026 // We must prevent anyone from attaching too close to the 2027 // BRK because that may cause malloc OOM. 2028 if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) { 2029 trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. " 2030 "Will attach anywhere.", requested_addr); 2031 // Act like the OS refused to attach there. 
    requested_addr = NULL;
  }

  // Specify one or the other but not both.
  assert0(!(requested_addr != NULL && alignment_hint > 0));

  // In 64K mode, we claim the global page size (os::vm_page_size())
  // is 64K. This is one of the few points where that illusion may
  // break, because mmap() will always return memory aligned to 4K. So
  // we must ensure we only ever return memory aligned to 64k.
  if (alignment_hint) {
    alignment_hint = lcm(alignment_hint, os::vm_page_size());
  } else {
    alignment_hint = os::vm_page_size();
  }

  // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
  const size_t size = align_up(bytes, os::vm_page_size());

  // alignment: Allocate memory large enough to include an aligned range of the right size and
  // cut off the leading and trailing waste pages.
  assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
  const size_t extra_size = size + alignment_hint;

  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
  // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
  int flags = MAP_ANONYMOUS | MAP_SHARED;

  // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
  // it means if wishaddress is given but MAP_FIXED is not set.
  //
  // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
  // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
  // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
  // get clobbered.
  if (requested_addr != NULL) {
    if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
      flags |= MAP_FIXED;
    }
  }

  char* addr = (char*)::mmap(requested_addr, extra_size,
      PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);

  if (addr == MAP_FAILED) {
    trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
    return NULL;
  }

  // Handle alignment: trim the leading and trailing waste so the returned
  // range [addr_aligned, addr_aligned + size) is aligned to alignment_hint.
  char* const addr_aligned = align_up(addr, alignment_hint);
  const size_t waste_pre = addr_aligned - addr;
  char* const addr_aligned_end = addr_aligned + size;
  const size_t waste_post = extra_size - waste_pre - size;
  if (waste_pre > 0) {
    ::munmap(addr, waste_pre);
  }
  if (waste_post > 0) {
    ::munmap(addr_aligned_end, waste_post);
  }
  addr = addr_aligned;

  if (addr) {
    trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
      addr, addr + bytes, bytes);
  } else {
    if (requested_addr != NULL) {
      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
    } else {
      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
    }
  }

  // bookkeeping: mmap'ed memory always has 4K pages.
  vmembk_add(addr, size, 4*K, VMEM_MAPPED);

  // Test alignment, see above.
  assert0(is_aligned_to(addr, os::vm_page_size()));

  return addr;
}

// Unmap a previously mmap'ed range. Returns true on success.
static bool release_mmaped_memory(char* addr, size_t size) {
  assert0(is_aligned_to(addr, os::vm_page_size()));
  assert0(is_aligned_to(size, os::vm_page_size()));

  trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
    addr, addr + size - 1);
  bool rc = false;

  if (::munmap(addr, size) != 0) {
    trcVerbose("failed (%d)\n", errno);
    rc = false;
  } else {
    trcVerbose("ok.");
    rc = true;
  }

  return rc;
}

// Uncommit mmap'ed memory: invalidate the pages (giving the physical memory
// back) while keeping the mapping itself. Returns true on success.
static bool uncommit_mmaped_memory(char* addr, size_t size) {

  assert0(is_aligned_to(addr, os::vm_page_size()));
  assert0(is_aligned_to(size, os::vm_page_size()));

  trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
    addr, addr + size - 1);
  bool rc = false;

  // Uncommit mmap memory with msync MS_INVALIDATE.
  if (::msync(addr, size, MS_INVALIDATE) != 0) {
    trcVerbose("failed (%d)\n", errno);
    rc = false;
  } else {
    trcVerbose("ok.");
    rc = true;
  }

  return rc;
}

int os::vm_page_size() {
  // Seems redundant as all get out.
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}

// Aix allocates memory by pages.
int os::vm_allocation_granularity() {
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}

#ifdef PRODUCT
// Emit a warning with the commit_memory arguments and errno; only compiled
// in product builds, where vm_exit_out_of_memory carries less detail.
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
  int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
    ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
    os::errno_name(err), err);
}
#endif

// Commit memory or exit the VM with an OOM message if committing fails.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    // Add extra info in product mode for vm_exit_out_of_memory():
    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}

bool os::pd_commit_memory(char* addr, size_t size, bool exec) {

  assert(is_aligned_to(addr, os::vm_page_size()),
    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    p2i(addr), os::vm_page_size());
  assert(is_aligned_to(size, os::vm_page_size()),
    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    size, os::vm_page_size());

  // The range must be covered by exactly one bookkeeping record.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);
  vmi->assert_is_valid_subrange(addr, size);

  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);

  if (UseExplicitCommit) {
    // AIX commits memory on touch. So, touch all pages to be committed.
    for (char* p = addr; p < (addr + size); p += 4*K) {
      *p = '\0';
    }
  }

  return true;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
  return pd_commit_memory(addr, size, exec);
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
  size_t alignment_hint, bool exec,
  const char* mesg) {
  // Alignment_hint is ignored on this OS.
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

bool os::pd_uncommit_memory(char* addr, size_t size) {
  assert(is_aligned_to(addr, os::vm_page_size()),
    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    p2i(addr), os::vm_page_size());
  assert(is_aligned_to(size, os::vm_page_size()),
    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    size, os::vm_page_size());

  // Dynamically do different things for mmap/shmat.
  const vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);
  vmi->assert_is_valid_subrange(addr, size);

  if (vmi->type == VMEM_SHMATED) {
    return uncommit_shmated_memory(addr, size);
  } else {
    return uncommit_mmaped_memory(addr, size);
  }
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}

void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  // No-op on AIX.
}

void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
  // No-op on AIX.
}

// NUMA support is not implemented on AIX; the following are trivial stubs
// describing a single, flat memory node.
void os::numa_make_global(char *addr, size_t bytes) {
}

void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}

bool os::numa_topology_changed() {
  return false;
}

size_t os::numa_get_groups_num() {
  return 1;
}

int os::numa_get_group_id() {
  return 0;
}

size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (size > 0) {
    ids[0] = 0;
    return 1;
  }
  return 0;
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}

// Reserves and attaches a shared memory segment.
// Will assert if a wish address is given and could not be obtained.
char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {

  // All other Unices do a mmap(MAP_FIXED) if the addr is given,
  // thereby clobbering old mappings at that place. That is probably
  // not intended, never used and almost certainly an error were it
  // ever be used this way (to try attaching at a specified address
  // without clobbering old mappings an alternate API exists,
  // os::attempt_reserve_memory_at()).
  // Instead of mimicking the dangerous coding of the other platforms, here I
  // just ignore the request address (release) or assert(debug).
  assert0(requested_addr == NULL);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  bytes = align_up(bytes, os::vm_page_size());
  // NOTE(review): alignment_hint0 is computed but never used below - the raw
  // alignment_hint is passed on instead; TODO confirm whether this is intended.
  const size_t alignment_hint0 =
    alignment_hint ?
    align_up(alignment_hint, os::vm_page_size()) : 0;

  // In 4K mode always use mmap.
  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
  if (os::vm_page_size() == 4*K) {
    return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
  } else {
    if (bytes >= Use64KPagesThreshold) {
      return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
    } else {
      return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
    }
  }
}

bool os::pd_release_memory(char* addr, size_t size) {

  // Dynamically do different things for mmap/shmat.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  size = align_up(size, os::vm_page_size());
  addr = align_up(addr, os::vm_page_size());

  bool rc = false;
  bool remove_bookkeeping = false;
  if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use memory anymore (but still page
    //   table space).
    vmi->assert_is_valid_subrange(addr, size);
    if (addr == vmi->addr && size == vmi->size) {
      rc = release_shmated_memory(addr, size);
      remove_bookkeeping = true;
    } else {
      rc = uncommit_shmated_memory(addr, size);
    }
  } else {
    // User may unmap partial regions but region has to be fully contained.
#ifdef ASSERT
    vmi->assert_is_valid_subrange(addr, size);
#endif
    rc = release_mmaped_memory(addr, size);
    remove_bookkeeping = true;
  }

  // update bookkeeping
  if (rc && remove_bookkeeping) {
    vmembk_remove(vmi);
  }

  return rc;
}

// mprotect() wrapper which, where possible, verifies that the protection
// actually took effect (see the notes below on AIX/shmat peculiarities).
static bool checked_mprotect(char* addr, size_t size, int prot) {

  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
  // not tell me if protection failed when trying to protect an un-protectable range.
  //
  // This means if the memory was allocated using shmget/shmat, protection wont work
  // but mprotect will still return 0:
  //
  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm

  bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;

  if (!rc) {
    const char* const s_errno = os::errno_name(errno);
    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
    return false;
  }

  // mprotect success check
  //
  // Mprotect said it changed the protection but can I believe it?
  //
  // To be sure I need to check the protection afterwards. Try to
  // read from protected memory and check whether that causes a segfault.
  //
  if (!os::Aix::xpg_sus_mode()) {

    if (CanUseSafeFetch32()) {

      const bool read_protected =
        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;

      if (prot & PROT_READ) {
        rc = !read_protected;
      } else {
        rc = read_protected;
      }

      if (!rc) {
        if (os::Aix::on_pase()) {
          // There is an issue on older PASE systems where mprotect() will return success but the
          // memory will not be protected.
          // This has nothing to do with the problem of using mprotect() on SPEC1170 incompatible
          // machines; we only see it rarely, when using mprotect() to protect the guard page of
          // a stack. It is an OS error.
          //
          // A valid strategy is just to try again. This usually works. :-/

          ::usleep(1000);
          if (::mprotect(addr, size, prot) == 0) {
            // NOTE(review): read_protected_2 is computed but not consulted - rc
            // is set to true regardless of the re-check result; TODO confirm
            // whether rc should depend on it as in the first check above.
            const bool read_protected_2 =
              (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
               SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
            rc = true;
          }
        }
      }
    }
  }

  assert(rc == true, "mprotect failed.");

  return rc;
}

// Set protections specified
bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return checked_mprotect(addr, size, p);
}

bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}

bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}

// Large page support

static size_t _large_page_size = 0;

// Enable large page support if OS allows that.
void os::large_page_init() {
  return; // Nothing to do. See query_multipage_support and friends.
}

char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
  // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
  // so this is not needed.
  assert(false, "should not be called on AIX");
  return NULL;
}

bool os::release_memory_special(char* base, size_t bytes) {
  // Detaching the SHM segment will also delete it, see reserve_memory_special().
  Unimplemented();
  return false;
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}

bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  char* addr = NULL;

  // Always round to os::vm_page_size(), which may be larger than 4K.
  bytes = align_up(bytes, os::vm_page_size());

  // In 4K mode always use mmap.
  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
  if (os::vm_page_size() == 4*K) {
    return reserve_mmaped_memory(bytes, requested_addr, 0);
  } else {
    if (bytes >= Use64KPagesThreshold) {
      return reserve_shmated_memory(bytes, requested_addr, 0);
    } else {
      return reserve_mmaped_memory(bytes, requested_addr, 0);
    }
  }

  // NOTE(review): unreachable - every path above already returns.
  return addr;
}

size_t os::read(int fd, void *buf, unsigned int nBytes) {
  return ::read(fd, buf, nBytes);
}

size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  return ::pread(fd, buf, nBytes, offset);
}

// Sleep for less than one second without being interruptible.
void os::naked_short_sleep(jlong ms) {
  struct timespec req;

  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  req.tv_sec = 0;
  if (ms > 0) {
    req.tv_nsec = (ms % 1000) * 1000000;
  }
  else {
    // Sleep for the minimum representable interval.
    req.tv_nsec = 1;
  }

  nanosleep(&req, NULL);

  return;
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) { // sleep forever ...
    ::sleep(100); // ... 100 seconds at a time
  }
}

// Used to convert frequent JVM_Yield() to nops
bool os::dont_yield() {
  return DontYieldALot;
}

void os::naked_yield() {
  sched_yield();
}

////////////////////////////////////////////////////////////////////////////////
// thread priority support

// From AIX manpage to pthread_setschedparam
// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
//   topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
//
// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
// range from 40 to 80, where 40 is the least favored priority and 80
// is the most favored."
//
// (Actually, I doubt this even has an impact on AIX, as we do kernel
// scheduling there; however, this still leaves iSeries.)
//
// We use the same values for AIX and PASE.
int os::java_to_os_priority[CriticalPriority + 1] = {
  54,             // 0 Entry should never be used

  55,             // 1 MinPriority
  55,             // 2
  56,             // 3

  56,             // 4
  57,             // 5 NormPriority
  57,             // 6

  58,             // 7
  58,             // 8
  59,             // 9 NearMaxPriority

  60,             // 10 MaxPriority

  60              // 11 CriticalPriority
};

// Set the OS-level priority of the given thread; no-op unless
// UseThreadPriorities is enabled.
OSReturn os::set_native_priority(Thread* thread, int newpri) {
  if (!UseThreadPriorities) return OS_OK;
  pthread_t thr = thread->osthread()->pthread_id();
  int policy = SCHED_OTHER;
  struct sched_param param;
  param.sched_priority = newpri;
  int ret = pthread_setschedparam(thr, policy, &param);

  if (ret != 0) {
    trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
        (int)thr, newpri, ret, os::errno_name(ret));
  }
  return (ret == 0) ? OS_OK : OS_ERR;
}

// Query the OS-level priority of the given thread; returns the mapped
// NormPriority value when UseThreadPriorities is disabled.
OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  pthread_t thr = thread->osthread()->pthread_id();
  int policy = SCHED_OTHER;
  struct sched_param param;
  int ret = pthread_getschedparam(thr, &policy, &param);
  *priority_ptr = param.sched_priority;

  return (ret == 0) ? OS_OK : OS_ERR;
}

// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

////////////////////////////////////////////////////////////////////////////////
// suspend/resume support

// The low-level signal-based suspend/resume support is a remnant from the
// old VM-suspension that used to be for java-suspension, safepoints etc,
// within hotspot. Currently used by JFR's OSThreadSampler
//
// The remaining code is greatly simplified from the more general suspension
// code that used to be used.
//
// The protocol is quite simple:
// - suspend:
//   - sends a signal to the target thread
//   - polls the suspend state of the osthread using a yield loop
//   - target thread signal handler (SR_handler) sets suspend state
//     and blocks in sigsuspend until continued
// - resume:
//   - sets target osthread state to continue
//   - sends signal to end the sigsuspend loop in the SR_handler
//
// Note that the SR_lock plays no role in this suspend/resume protocol,
// but is checked for NULL in SR_handler as a thread termination indicator.
// The SR_lock is, however, used by JavaThread::java_suspend()/java_resume() APIs.
//
// Note that resume_clear_context() and suspend_save_context() are needed
// by SR_handler(), so that fetch_frame_from_ucontext() works,
// which in part is used by:
// - Forte Analyzer: AsyncGetCallTrace()
// - StackBanging: get_frame_at_stack_banging_point()

// Drop the saved signal context after the thread has been resumed.
static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
  osthread->set_siginfo(NULL);
}

// Save the signal context so the suspended thread's frame can be inspected
// (fetch_frame_from_ucontext and friends).
static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
  osthread->set_ucontext(context);
  osthread->set_siginfo(siginfo);
}

//
// Handler function invoked when a thread's execution is suspended or
// resumed. We have to be careful that only async-safe functions are
// called here (Note: most pthread functions are not async safe and
// should be avoided.)
//
// Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal handler
// from being run.
// libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal.
//
// Currently only ever called on the VMThread and JavaThreads (PC sampling).
//
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current_or_null_safe();
  assert(thread != NULL, "Missing current thread in SR_handler");

  // On some systems we have seen signal delivery get "stuck" until the signal
  // mask is changed as part of thread termination. Check that the current thread
  // has not already terminated (via SR_lock()) - else the following assertion
  // will fail because the thread is no longer a JavaThread as the ~JavaThread
  // destructor has completed.

  if (thread->SR_lock() == NULL) {
    return;
  }

  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  OSThread* osthread = thread->osthread();

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    ShouldNotReachHere();
  }

  errno = old_errno;
}

// Choose and install the suspend/resume signal handler. Returns 0 on
// success, -1 if sigaction failed.
static int SR_initialize() {
  struct sigaction act;
  char *s;
  // Get signal number to use for suspend/resume
  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
    int sig = ::strtol(s, 0, 10);
    if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
        sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
      SR_signum = sig;
    } else {
      warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
              sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
    }
  }

  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");

  sigemptyset(&SR_sigset);
  sigaddset(&SR_sigset, SR_signum);

  // Set up signal handler for suspend/resume.
  act.sa_flags = SA_RESTART|SA_SIGINFO;
  // NOTE(review): with SA_SIGINFO set, POSIX specifies the handler is taken
  // from sa_sigaction, not sa_handler; presumably the two fields overlay in
  // the AIX struct sigaction so this cast works - TODO confirm.
  act.sa_handler = (void (*)(int)) SR_handler;

  // SR_signum is blocked by default.
  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);

  if (sigaction(SR_signum, &act, 0) == -1) {
    return -1;
  }

  // Save signal flag
  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
  return 0;
}

static int SR_finalize() {
  return 0;
}

// Deliver the suspend/resume signal to the target thread.
static int sr_notify(OSThread* osthread) {
  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  assert_status(status == 0, status, "pthread_kill");
  return status;
}

// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;

// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::naked_yield();
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}

// Wake a thread suspended via do_suspend(); retries the signal until the
// target reports SR_RUNNING.
static void do_resume(OSThread* osthread) {
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (!osthread->sr.is_running()) {
    if (sr_notify(osthread) == 0) {
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::naked_yield();
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}

///////////////////////////////////////////////////////////////////////////////////
// signal handling (except suspend/resume)

// This routine may be used by user applications as a "hook" to catch signals.
// The user-defined signal handler must pass unrecognized signals to this
// routine, and if it returns true (non-zero), then the signal handler must
// return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
//
// If this routine returns false, it is OK to call it again. This allows
// the user-defined signal handler to perform checks either before or after
// the VM performs its own checks. Naturally, the user code would be making
// a serious error if it tried to handle an exception (such as a null check
// or breakpoint) that the VM was generating for its own correct operation.
//
// This routine may recognize any of the following kinds of signals:
// SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
// It should be consulted by handlers for any of those signals.
//
// The caller of this routine must pass in the three arguments supplied
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
// field of the structure passed to sigaction(). This routine assumes that
// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
//
// Note that the VM will print warnings if it detects conflicting signal
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
//
extern "C" JNIEXPORT int
JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);

// Set thread signal mask (for some reason on AIX sigthreadmask() seems
// to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same.
// Returns true on success (rc 0 from pthread_sigmask), false otherwise.
bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  const int rc = ::pthread_sigmask(how, set, oset);
  // return value semantics differ slightly for error case:
  // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno
  // (so, pthread_sigmask is more thread-safe for error handling)
  // But success is always 0.
  return rc == 0 ? true : false;
}

// Function to unblock all signals which are, according
// to POSIX, typical program error signals. If they happen while being blocked,
// they typically will bring down the process immediately.
bool unblock_program_error_signals() {
  sigset_t set;
  ::sigemptyset(&set);
  ::sigaddset(&set, SIGILL);
  ::sigaddset(&set, SIGBUS);
  ::sigaddset(&set, SIGFPE);
  ::sigaddset(&set, SIGSEGV);
  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
}

// Renamed from 'signalHandler' to avoid collision with other shared libs.
// Top-level VM signal handler: forwards every signal to JVM_handle_aix_signal
// with abort_if_unrecognized == true.
void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
  assert(info != NULL && uc != NULL, "it must be old kernel");

  // Never leave program error signals blocked;
  // on all our platforms they would bring down the process immediately when
  // getting raised while being blocked.
  unblock_program_error_signals();

  int orig_errno = errno;  // Preserve errno value over signal handler.
  JVM_handle_aix_signal(sig, info, uc, true);
  errno = orig_errno;
}

// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining
struct sigaction sigact[NSIG];   // handlers saved before the VM installed its own
sigset_t sigs;                   // which entries of sigact[] are valid
bool os::Aix::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Aix::get_signal_action = NULL;

// Look up the handler to chain to for sig: first ask libjsig (if loaded),
// then fall back to the handler the VM saved when it installed its own.
struct sigaction* os::Aix::get_chained_signal_action(int sig) {
  struct sigaction *actp = NULL;

  if (libjsig_is_loaded) {
    // Retrieve the old signal handler from libjsig
    actp = (*get_signal_action)(sig);
  }
  if (actp == NULL) {
    // Retrieve the preinstalled signal handler from jvm
    actp = get_preinstalled_handler(sig);
  }

  return actp;
}

// Invoke the chained handler described by actp, emulating the sigaction()
// semantics (SA_NODEFER, SA_SIGINFO, SA_RESETHAND, signal mask) that the
// kernel would have applied had the handler been installed directly.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}

// Invoke a chained (pre-installed or libjsig-registered) handler for sig,
// if any. Returns true if a chained handler consumed the signal.
bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  bool chained = false;
  // signal-chaining
  if (UseSignalChaining) {
    struct sigaction *actp = get_chained_signal_action(sig);
    if (actp != NULL) {
      chained = call_chained_handler(actp, sig, siginfo, context);
    }
  }
  return chained;
}

// Return the sigaction that was installed before the VM replaced it for sig,
// or NULL if none was saved.
struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
  if (sigismember(&sigs, sig)) {
    return &sigact[sig];
  }
  return NULL;
}

// Remember the handler installed before ours, for later signal-chaining.
void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigact[sig] = oldAct;
  sigaddset(&sigs, sig);
}

// for diagnostic
int sigflags[NSIG];

// Flags the VM used when it installed its own handler for sig (diagnostics).
int os::Aix::get_our_sigflags(int sig) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  return sigflags[sig];
}

void
os::Aix::set_our_sigflags(int sig, int flags) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  // Range-check again in product builds where assert compiles away.
  if (sig > 0 && sig < NSIG) {
    sigflags[sig] = flags;
  }
}

// Install the VM's handler for sig (set_installed == true) or restore the
// default disposition (false), honoring AllowUserSignalHandlers and
// UseSignalChaining when a foreign handler is already installed.
void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal("Encountered unexpected pre-existing sigaction handler "
            "%#lx for signal %d.", (long)oldhand, sig);
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Re-read what sigaction() reported as the previous handler; it must match
  // what we saw above, otherwise someone installed a handler concurrently.
  void* oldhand2 = oldAct.sa_sigaction
                 ?
CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}

// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
void os::Aix::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining: probe for the libjsig interposition entry points;
    // their presence means libjsig is preloaded and wants to be notified
    // around our sigaction() calls.
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                          dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                          dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers.
      (*begin_signal_setting)();
    }

    ::sigemptyset(&sigs);
    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers.
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // Need to initialize check_signal_done.
      ::sigemptyset(&check_signal_done);
    }
  }
}

// Format a human-readable name for a signal handler address into buf:
// the basename of the containing library when it can be resolved,
// otherwise the raw address. Returns buf.
static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    // The way os::dll_address_to_library_name is implemented on Aix
    // right now, it always returns -1 for the offset which is not
    // terribly informative.
    // Will fix that. For now, omit the offset.
    jio_snprintf(buf, buflen, "%s", p1);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}

// Print the currently-installed handler, signal mask and flags for sig to st.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  // With SA_SIGINFO the active entry point is sa_sigaction, else sa_handler.
  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // May be, handler was resetted by VMError?
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  // Print textual representation of sa_flags.
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Check for flags, reset system-used one!
    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
    }
  }
  st->cr();
}

// Check sig's handler at most once; check_signal_done records signals
// already found to be tampered with.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Aix::check_signal_handler(sig)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);
  if (UseSIGTRAP) {
    DO_SIGNAL_CHECK(SIGTRAP);
  }

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_md.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  DO_SIGNAL_CHECK(SR_signum);
}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// Cached pointer to the C library's real sigaction(), resolved lazily.
static os_sigaction_t os_sigaction = NULL;

// Compare the handler currently installed for sig against the one the VM
// expects to be there; warn (once per signal) when a foreign handler has
// replaced ours, or when the sigaction flags were changed behind our back.
void os::Aix::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler);

  // Determine which handler the VM expects for this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL &&
        !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:");
    os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
    tty->cr();
    tty->print(" found:");
    os::Posix::print_sa_flags(tty, act.sa_flags);
    tty->cr();
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}

// To install functions for atexit system call
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

// This is called _before_ the most of global arguments have been parsed.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (Shared memory boundary is supposed to be a 256M aligned.)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // Record process break at startup.
  g_brk_at_startup = (address) ::sbrk(0);
  assert(g_brk_at_startup != (address) -1, "sbrk failed");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc).
  os::Aix::scan_environment();

  // Probe multipage support.
  query_multipage_support();

  // Act like we only have one page size by eliminating corner cases which
  // we did not support very well anyway.
  // We have two input conditions:
  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
  //    setting.
  //    Data segment page size is important for us because it defines the thread stack page
  //    size, which is needed for guard page handling, stack banging etc.
  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
  //    and should be allocated with 64k pages.
  //
  // So, we do the following:
  // LDR_CNTRL    can_use_64K_pages_dynamically   what we do                    remarks
  // 4K           no                              4K                            old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
  // 4k           yes                             64k (treat 4k stacks as 64k)  different loader than java and standard settings
  // 64k          no                              --- AIX 5.2 ? ---
  // 64k          yes                             64k                           new systems and standard java loader (we set datapsize=64k when linking)

  // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if stack page size is 64k you cannot pretend its 4k).

  if (g_multipage_support.datapsize == 4*K) {
    // datapsize = 4K. Data segment, thread stacks are 4K paged.
    if (g_multipage_support.can_use_64K_pages) {
      // .. but we are able to use 64K pages dynamically.
      // This would be typical for java launchers which are not linked
      // with datapsize=64K (like, any other launcher but our own).
      //
      // In this case it would be smart to allocate the java heap with 64K
      // to get the performance benefit, and to fake 64k pages for the
      // data segment (when dealing with thread stacks).
      //
      // However, leave a possibility to downgrade to 4K, using
      // -XX:-Use64KPages.
      if (Use64KPages) {
        trcVerbose("64K page mode (faked for data segment)");
        Aix::_page_size = 64*K;
      } else {
        trcVerbose("4K page mode (Use64KPages=off)");
        Aix::_page_size = 4*K;
      }
    } else {
      // .. and not able to allocate 64k pages dynamically. Here, just
      // fall back to 4K paged mode and use mmap for everything.
      trcVerbose("4K page mode");
      Aix::_page_size = 4*K;
      FLAG_SET_ERGO(bool, Use64KPages, false);
    }
  } else {
    // datapsize = 64k. Data segment, thread stacks are 64k paged.
    // This normally means that we can allocate 64k pages dynamically.
    // (There is one special case where this may be false: EXTSHM=on.
    // but we decided to not support that mode).
    assert0(g_multipage_support.can_use_64K_pages);
    Aix::_page_size = 64*K;
    trcVerbose("64K page mode");
    FLAG_SET_ERGO(bool, Use64KPages, true);
  }

  // For now UseLargePages is just ignored.
  FLAG_SET_ERGO(bool, UseLargePages, false);
  _page_sizes[0] = 0;

  // debug trace
  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));

  // Next, we need to initialize libo4 and libperfstat libraries.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  init_random(1234567);

  // Main_thread points to the aboriginal thread.
  Aix::_main_thread = pthread_self();

  initial_time_count = os::elapsed_counter();

  os::Posix::init();
}

// This is called _after_ the global arguments have been parsed.
jint os::init_2(void) {

  os::Posix::init_2();

  if (os::Aix::on_pase()) {
    trcVerbose("Running on PASE.");
  } else {
    trcVerbose("Running on AIX (not PASE).");
  }

  trcVerbose("processor count: %d", os::_processor_count);
  trcVerbose("physical memory: %lu", Aix::_physical_memory);

  // Initially build up the loaded dll map.
  LoadedLibraries::reload();
  if (Verbose) {
    trcVerbose("Loaded Libraries: ");
    LoadedLibraries::print(tty);
  }

  const int page_size = Aix::page_size();
  const int map_size = page_size;

  address map_address = (address) MAP_FAILED;
  const int prot  = PROT_READ;
  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;

  // Use optimized addresses for the polling page,
  // e.g. map it to a special 32-bit address.
  if (OptimizePollingPageLocation) {
    // architecture-specific list of address wishes:
    address address_wishes[] = {
      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
      // PPC64: all address wishes are non-negative 32 bit values where
      // the lower 16 bits are all zero. we can load these addresses
      // with a single ppc_lis instruction.
      (address) 0x30000000, (address) 0x31000000,
      (address) 0x32000000, (address) 0x33000000,
      (address) 0x40000000, (address) 0x41000000,
      (address) 0x42000000, (address) 0x43000000,
      (address) 0x50000000, (address) 0x51000000,
      (address) 0x52000000, (address) 0x53000000,
      (address) 0x60000000, (address) 0x61000000,
      (address) 0x62000000, (address) 0x63000000
    };
    int address_wishes_length = sizeof(address_wishes)/sizeof(address);

    // iterate over the list of address wishes:
    for (int i=0; i<address_wishes_length; i++) {
      // Try to map with current address wish.
      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
      // fail if the address is already mapped.
      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
                                     map_size, prot,
                                     flags | MAP_FIXED,
                                     -1, 0);
      trcVerbose("SafePoint Polling Page address: %p (wish) => %p",
                 address_wishes[i], map_address + (ssize_t)page_size);

      if (map_address + (ssize_t)page_size == address_wishes[i]) {
        // Map succeeded and map_address is at wished address, exit loop.
        break;
      }

      if (map_address != (address) MAP_FAILED) {
        // Map succeeded, but polling_page is not at wished address, unmap and continue.
        ::munmap(map_address, map_size);
        map_address = (address) MAP_FAILED;
      }
      // Map failed, continue loop.
    }
  } // end OptimizePollingPageLocation

  // Fall back to letting the OS pick an address if no wish succeeded.
  if (map_address == (address) MAP_FAILED) {
    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
  }
  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
  os::set_polling_page(map_address);

  if (!UseMembar) {
    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page(mem_serialize_page);

    trcVerbose("Memory Serialize Page address: %p - %p, size %IX (%IB)",
               mem_serialize_page, mem_serialize_page + Aix::page_size(),
               Aix::page_size(), Aix::page_size());
  }

  // initialize suspend/resume support - must do this before signal_sets_init()
  if (SR_initialize() != 0) {
    perror("SR_initialize failed");
    return JNI_ERR;
  }

  Aix::signal_sets_init();
  Aix::install_signal_handlers();

  // Check and sets minimum stack sizes against command line options
  if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
    return JNI_ERR;
  }

  if (UseNUMA) {
    UseNUMA = false;
    warning("NUMA optimizations are not available on this OS.");
  }

  if (MaxFDLimit) {
    // Set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
      }
    }
  }

  if (PerfAllowAtExitRegistration) {
    // Only register atexit functions if PerfAllowAtExitRegistration is set.
    // At exit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // Note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

  return JNI_OK;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
    fatal("Could not disable polling page");
  }
};

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  // Changed according to os_linux.cpp.
  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
    fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
  }
};

int os::active_processor_count() {
  // Ask the OS for the number of CPUs currently online.
  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
  return online_cpus;
}

void os::set_native_thread_name(const char *name) {
  // Not yet implemented.
  return;
}

bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}

bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}

// Suspend the target thread, run do_task() against its saved context,
// then resume it. The task is skipped entirely if the suspend fails.
void os::SuspendedThreadTask::internal_do_task() {
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    do_resume(_thread->osthread());
  }
}

////////////////////////////////////////////////////////////////////////////////
// debug support

// Print the library containing addr (if any) to st. Returns true when addr
// falls into the text or data segment of a loaded module.
bool os::find(address addr, outputStream* st) {

  st->print(PTR_FORMAT ": ", addr);

  loaded_module_t lm;
  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
      LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
    st->print_cr("%s", lm.path);
    return true;
  }

  return false;
}

////////////////////////////////////////////////////////////////////////////////
// misc

// This does not do anything on Aix. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters)
// on, e.g., Win32.
3668 void 3669 os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method, 3670 JavaCallArguments* args, Thread* thread) { 3671 f(value, method, args, thread); 3672 } 3673 3674 void os::print_statistics() { 3675 } 3676 3677 bool os::message_box(const char* title, const char* message) { 3678 int i; 3679 fdStream err(defaultStream::error_fd()); 3680 for (i = 0; i < 78; i++) err.print_raw("="); 3681 err.cr(); 3682 err.print_raw_cr(title); 3683 for (i = 0; i < 78; i++) err.print_raw("-"); 3684 err.cr(); 3685 err.print_raw_cr(message); 3686 for (i = 0; i < 78; i++) err.print_raw("="); 3687 err.cr(); 3688 3689 char buf[16]; 3690 // Prevent process from exiting upon "read error" without consuming all CPU 3691 while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); } 3692 3693 return buf[0] == 'y' || buf[0] == 'Y'; 3694 } 3695 3696 int os::stat(const char *path, struct stat *sbuf) { 3697 char pathbuf[MAX_PATH]; 3698 if (strlen(path) > MAX_PATH - 1) { 3699 errno = ENAMETOOLONG; 3700 return -1; 3701 } 3702 os::native_path(strcpy(pathbuf, path)); 3703 return ::stat(pathbuf, sbuf); 3704 } 3705 3706 // Is a (classpath) directory empty? 
3707 bool os::dir_is_empty(const char* path) { 3708 DIR *dir = NULL; 3709 struct dirent *ptr; 3710 3711 dir = opendir(path); 3712 if (dir == NULL) return true; 3713 3714 /* Scan the directory */ 3715 bool result = true; 3716 char buf[sizeof(struct dirent) + MAX_PATH]; 3717 while (result && (ptr = ::readdir(dir)) != NULL) { 3718 if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) { 3719 result = false; 3720 } 3721 } 3722 closedir(dir); 3723 return result; 3724 } 3725 3726 // This code originates from JDK's sysOpen and open64_w 3727 // from src/solaris/hpi/src/system_md.c 3728 3729 int os::open(const char *path, int oflag, int mode) { 3730 3731 if (strlen(path) > MAX_PATH - 1) { 3732 errno = ENAMETOOLONG; 3733 return -1; 3734 } 3735 int fd; 3736 3737 fd = ::open64(path, oflag, mode); 3738 if (fd == -1) return -1; 3739 3740 // If the open succeeded, the file might still be a directory. 3741 { 3742 struct stat64 buf64; 3743 int ret = ::fstat64(fd, &buf64); 3744 int st_mode = buf64.st_mode; 3745 3746 if (ret != -1) { 3747 if ((st_mode & S_IFMT) == S_IFDIR) { 3748 errno = EISDIR; 3749 ::close(fd); 3750 return -1; 3751 } 3752 } else { 3753 ::close(fd); 3754 return -1; 3755 } 3756 } 3757 3758 // All file descriptors that are opened in the JVM and not 3759 // specifically destined for a subprocess should have the 3760 // close-on-exec flag set. If we don't set it, then careless 3rd 3761 // party native code might fork and exec without closing all 3762 // appropriate file descriptors (e.g. as we do in closeDescriptors in 3763 // UNIXProcess.c), and this in turn might: 3764 // 3765 // - cause end-of-file to fail to be detected on some file 3766 // descriptors, resulting in mysterious hangs, or 3767 // 3768 // - might cause an fopen in the subprocess to fail on a system 3769 // suffering from bug 1085341. 3770 // 3771 // (Yes, the default setting of the close-on-exec flag is a Unix 3772 // design flaw.) 
3773 // 3774 // See: 3775 // 1085341: 32-bit stdio routines should support file descriptors >255 3776 // 4843136: (process) pipe file descriptor from Runtime.exec not being closed 3777 // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9 3778 #ifdef FD_CLOEXEC 3779 { 3780 int flags = ::fcntl(fd, F_GETFD); 3781 if (flags != -1) 3782 ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC); 3783 } 3784 #endif 3785 3786 return fd; 3787 } 3788 3789 // create binary file, rewriting existing file if required 3790 int os::create_binary_file(const char* path, bool rewrite_existing) { 3791 int oflags = O_WRONLY | O_CREAT; 3792 if (!rewrite_existing) { 3793 oflags |= O_EXCL; 3794 } 3795 return ::open64(path, oflags, S_IREAD | S_IWRITE); 3796 } 3797 3798 // return current position of file pointer 3799 jlong os::current_file_offset(int fd) { 3800 return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR); 3801 } 3802 3803 // move file pointer to the specified offset 3804 jlong os::seek_to_file_offset(int fd, jlong offset) { 3805 return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET); 3806 } 3807 3808 // This code originates from JDK's sysAvailable 3809 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c 3810 3811 int os::available(int fd, jlong *bytes) { 3812 jlong cur, end; 3813 int mode; 3814 struct stat64 buf64; 3815 3816 if (::fstat64(fd, &buf64) >= 0) { 3817 mode = buf64.st_mode; 3818 if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) { 3819 int n; 3820 if (::ioctl(fd, FIONREAD, &n) >= 0) { 3821 *bytes = n; 3822 return 1; 3823 } 3824 } 3825 } 3826 if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) { 3827 return 0; 3828 } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) { 3829 return 0; 3830 } else if (::lseek64(fd, cur, SEEK_SET) == -1) { 3831 return 0; 3832 } 3833 *bytes = end - cur; 3834 return 1; 3835 } 3836 3837 // Map a block of memory. 
3838 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 3839 char *addr, size_t bytes, bool read_only, 3840 bool allow_exec) { 3841 int prot; 3842 int flags = MAP_PRIVATE; 3843 3844 if (read_only) { 3845 prot = PROT_READ; 3846 flags = MAP_SHARED; 3847 } else { 3848 prot = PROT_READ | PROT_WRITE; 3849 flags = MAP_PRIVATE; 3850 } 3851 3852 if (allow_exec) { 3853 prot |= PROT_EXEC; 3854 } 3855 3856 if (addr != NULL) { 3857 flags |= MAP_FIXED; 3858 } 3859 3860 // Allow anonymous mappings if 'fd' is -1. 3861 if (fd == -1) { 3862 flags |= MAP_ANONYMOUS; 3863 } 3864 3865 char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags, 3866 fd, file_offset); 3867 if (mapped_address == MAP_FAILED) { 3868 return NULL; 3869 } 3870 return mapped_address; 3871 } 3872 3873 // Remap a block of memory. 3874 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 3875 char *addr, size_t bytes, bool read_only, 3876 bool allow_exec) { 3877 // same as map_memory() on this OS 3878 return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only, 3879 allow_exec); 3880 } 3881 3882 // Unmap a block of memory. 3883 bool os::pd_unmap_memory(char* addr, size_t bytes) { 3884 return munmap(addr, bytes) == 0; 3885 } 3886 3887 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 3888 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 3889 // of a thread. 3890 // 3891 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns 3892 // the fast estimate available on the platform. 
3893 3894 jlong os::current_thread_cpu_time() { 3895 // return user + sys since the cost is the same 3896 const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */); 3897 assert(n >= 0, "negative CPU time"); 3898 return n; 3899 } 3900 3901 jlong os::thread_cpu_time(Thread* thread) { 3902 // consistent with what current_thread_cpu_time() returns 3903 const jlong n = os::thread_cpu_time(thread, true /* user + sys */); 3904 assert(n >= 0, "negative CPU time"); 3905 return n; 3906 } 3907 3908 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 3909 const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 3910 assert(n >= 0, "negative CPU time"); 3911 return n; 3912 } 3913 3914 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) { 3915 bool error = false; 3916 3917 jlong sys_time = 0; 3918 jlong user_time = 0; 3919 3920 // Reimplemented using getthrds64(). 3921 // 3922 // Works like this: 3923 // For the thread in question, get the kernel thread id. Then get the 3924 // kernel thread statistics using that id. 3925 // 3926 // This only works of course when no pthread scheduling is used, 3927 // i.e. there is a 1:1 relationship to kernel threads. 3928 // On AIX, see AIXTHREAD_SCOPE variable. 3929 3930 pthread_t pthtid = thread->osthread()->pthread_id(); 3931 3932 // retrieve kernel thread id for the pthread: 3933 tid64_t tid = 0; 3934 struct __pthrdsinfo pinfo; 3935 // I just love those otherworldly IBM APIs which force me to hand down 3936 // dummy buffers for stuff I dont care for... 
3937 char dummy[1]; 3938 int dummy_size = sizeof(dummy); 3939 if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo), 3940 dummy, &dummy_size) == 0) { 3941 tid = pinfo.__pi_tid; 3942 } else { 3943 tty->print_cr("pthread_getthrds_np failed."); 3944 error = true; 3945 } 3946 3947 // retrieve kernel timing info for that kernel thread 3948 if (!error) { 3949 struct thrdentry64 thrdentry; 3950 if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) { 3951 sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL; 3952 user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL; 3953 } else { 3954 tty->print_cr("pthread_getthrds_np failed."); 3955 error = true; 3956 } 3957 } 3958 3959 if (p_sys_time) { 3960 *p_sys_time = sys_time; 3961 } 3962 3963 if (p_user_time) { 3964 *p_user_time = user_time; 3965 } 3966 3967 if (error) { 3968 return false; 3969 } 3970 3971 return true; 3972 } 3973 3974 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) { 3975 jlong sys_time; 3976 jlong user_time; 3977 3978 if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) { 3979 return -1; 3980 } 3981 3982 return user_sys_cpu_time ? 
sys_time + user_time : user_time; 3983 } 3984 3985 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 3986 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits 3987 info_ptr->may_skip_backward = false; // elapsed time not wall time 3988 info_ptr->may_skip_forward = false; // elapsed time not wall time 3989 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 3990 } 3991 3992 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 3993 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits 3994 info_ptr->may_skip_backward = false; // elapsed time not wall time 3995 info_ptr->may_skip_forward = false; // elapsed time not wall time 3996 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 3997 } 3998 3999 bool os::is_thread_cpu_time_supported() { 4000 return true; 4001 } 4002 4003 // System loadavg support. Returns -1 if load average cannot be obtained. 4004 // For now just return the system wide load average (no processor sets). 
4005 int os::loadavg(double values[], int nelem) { 4006 4007 guarantee(nelem >= 0 && nelem <= 3, "argument error"); 4008 guarantee(values, "argument error"); 4009 4010 if (os::Aix::on_pase()) { 4011 4012 // AS/400 PASE: use libo4 porting library 4013 double v[3] = { 0.0, 0.0, 0.0 }; 4014 4015 if (libo4::get_load_avg(v, v + 1, v + 2)) { 4016 for (int i = 0; i < nelem; i ++) { 4017 values[i] = v[i]; 4018 } 4019 return nelem; 4020 } else { 4021 return -1; 4022 } 4023 4024 } else { 4025 4026 // AIX: use libperfstat 4027 libperfstat::cpuinfo_t ci; 4028 if (libperfstat::get_cpuinfo(&ci)) { 4029 for (int i = 0; i < nelem; i++) { 4030 values[i] = ci.loadavg[i]; 4031 } 4032 } else { 4033 return -1; 4034 } 4035 return nelem; 4036 } 4037 } 4038 4039 void os::pause() { 4040 char filename[MAX_PATH]; 4041 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4042 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4043 } else { 4044 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4045 } 4046 4047 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4048 if (fd != -1) { 4049 struct stat buf; 4050 ::close(fd); 4051 while (::stat(filename, &buf) == 0) { 4052 (void)::poll(NULL, 0, 100); 4053 } 4054 } else { 4055 trcVerbose("Could not open pause file '%s', continuing immediately.", filename); 4056 } 4057 } 4058 4059 bool os::Aix::is_primordial_thread() { 4060 if (pthread_self() == (pthread_t)1) { 4061 return true; 4062 } else { 4063 return false; 4064 } 4065 } 4066 4067 // OS recognitions (PASE/AIX, OS level) call this before calling any 4068 // one of Aix::on_pase(), Aix::os_version() static 4069 void os::Aix::initialize_os_info() { 4070 4071 assert(_on_pase == -1 && _os_version == 0, "already called."); 4072 4073 struct utsname uts; 4074 memset(&uts, 0, sizeof(uts)); 4075 strcpy(uts.sysname, "?"); 4076 if (::uname(&uts) == -1) { 4077 trcVerbose("uname failed (%d)", errno); 4078 guarantee(0, "Could not determine whether we run on AIX or PASE"); 
4079 } else { 4080 trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" " 4081 "node \"%s\" machine \"%s\"\n", 4082 uts.sysname, uts.version, uts.release, uts.nodename, uts.machine); 4083 const int major = atoi(uts.version); 4084 assert(major > 0, "invalid OS version"); 4085 const int minor = atoi(uts.release); 4086 assert(minor > 0, "invalid OS release"); 4087 _os_version = (major << 24) | (minor << 16); 4088 char ver_str[20] = {0}; 4089 char *name_str = "unknown OS"; 4090 if (strcmp(uts.sysname, "OS400") == 0) { 4091 // We run on AS/400 PASE. We do not support versions older than V5R4M0. 4092 _on_pase = 1; 4093 if (os_version_short() < 0x0504) { 4094 trcVerbose("OS/400 releases older than V5R4M0 not supported."); 4095 assert(false, "OS/400 release too old."); 4096 } 4097 name_str = "OS/400 (pase)"; 4098 jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor); 4099 } else if (strcmp(uts.sysname, "AIX") == 0) { 4100 // We run on AIX. We do not support versions older than AIX 5.3. 4101 _on_pase = 0; 4102 // Determine detailed AIX version: Version, Release, Modification, Fix Level. 4103 odmWrapper::determine_os_kernel_version(&_os_version); 4104 if (os_version_short() < 0x0503) { 4105 trcVerbose("AIX release older than AIX 5.3 not supported."); 4106 assert(false, "AIX release too old."); 4107 } 4108 name_str = "AIX"; 4109 jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u", 4110 major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF); 4111 } else { 4112 assert(false, name_str); 4113 } 4114 trcVerbose("We run on %s %s", name_str, ver_str); 4115 } 4116 4117 guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release"); 4118 } // end: os::Aix::initialize_os_info() 4119 4120 // Scan environment for important settings which might effect the VM. 4121 // Trace out settings. Warn about invalid settings and/or correct them. 4122 // 4123 // Must run after os::Aix::initialue_os_info(). 
void os::Aix::scan_environment() {

  char* p;
  int rc;   // NOTE(review): appears unused in this function

  // Warn explicitly if EXTSHM=ON is used. That switch changes how
  // System V shared memory behaves. One effect is that page size of
  // shared memory cannot be changed dynamically, effectively preventing
  // large pages from working.
  // This switch was needed on AIX 32bit, but on AIX 64bit the general
  // recommendation is (in OSS notes) to switch it off.
  p = ::getenv("EXTSHM");
  trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
  if (p && strcasecmp(p, "ON") == 0) {
    _extshm = 1;
    trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
    if (!AllowExtshm) {
      // We allow under certain conditions the user to continue. However, we want this
      // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
      // that the VM is not able to allocate 64k pages for the heap.
      // We do not want to run with reduced performance.
      vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
    }
  } else {
    _extshm = 0;
  }

  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
  // Not tested, not supported.
  //
  // Note that it might be worth the trouble to test and to require it, if only to
  // get useful return codes for mprotect.
  //
  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
  // exec() ? before loading the libjvm ? ....)
  p = ::getenv("XPG_SUS_ENV");
  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
  if (p && strcmp(p, "ON") == 0) {
    _xpg_sus_mode = 1;
    trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
    // clobber address ranges. If we ever want to support that, we have to do some
    // testing first.
    guarantee(false, "XPG_SUS_ENV=ON not supported");
  } else {
    _xpg_sus_mode = 0;
  }

  // Trace the PASE threading mode setting, for diagnostics only.
  if (os::Aix::on_pase()) {
    p = ::getenv("QIBM_MULTI_THREADED");
    trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
  }

  p = ::getenv("LDR_CNTRL");
  trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
  // Known problem on OS/400 V7R1: TEXTPSIZE in LDR_CNTRL can cause hangs.
  if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
    if (p && ::strstr(p, "TEXTPSIZE")) {
      trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
                 "you may experience hangs or crashes on OS/400 V7R1.");
    }
  }

  // Traced for diagnostics; the value influences pthread guard page setup.
  p = ::getenv("AIXTHREAD_GUARDPAGES");
  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");

} // end: os::Aix::scan_environment()

// PASE: initialize the libo4 library (PASE porting library).
void os::Aix::initialize_libo4() {
  guarantee(os::Aix::on_pase(), "OS/400 only.");
  if (!libo4::init()) {
    trcVerbose("libo4 initialization failed.");
    assert(false, "libo4 initialization failed");
  } else {
    trcVerbose("libo4 initialized.");
  }
}

// AIX: initialize the libperfstat library.
void os::Aix::initialize_libperfstat() {
  assert(os::Aix::on_aix(), "AIX only");
  if (!libperfstat::init()) {
    trcVerbose("libperfstat initialization failed.");
    assert(false, "libperfstat initialization failed");
  } else {
    trcVerbose("libperfstat initialized.");
  }
}

/////////////////////////////////////////////////////////////////////////////
// thread stack

// Get the current stack base from the OS (actually, the pthread library).
// Note: usually not page aligned.
address os::current_stack_base() {
  AixMisc::stackbounds_t bounds;
  bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
  guarantee(rc, "Unable to retrieve stack bounds.");
  return bounds.base;
}

// Get the current stack size from the OS (actually, the pthread library).
// Returned size is such that (base - size) is always aligned to page size.
size_t os::current_stack_size() {
  AixMisc::stackbounds_t bounds;
  bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
  guarantee(rc, "Unable to retrieve stack bounds.");
  // Align the returned stack size such that the stack low address
  // is aligned to page size (Note: base is usually not and we do not care).
  // We need to do this because caller code will assume stack low address is
  // page aligned and will place guard pages without checking.
  address low = bounds.base - bounds.size;
  address low_aligned = (address)align_up(low, os::vm_page_size());
  size_t s = bounds.base - low_aligned;
  return s;
}

extern char** environ;

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  char * argv[4] = {"sh", "-c", cmd, NULL};

  pid_t pid = fork();

  if (pid < 0) {
    // fork failed
    return -1;

  } else if (pid == 0) {
    // child process

    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
    execve("/usr/bin/sh", argv, environ);

    // execve failed
    _exit(-1);

  } else {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit. This returns immediately if
    // the child has already exited.
    while (waitpid(pid, &status, 0) < 0) {
      switch (errno) {
      case ECHILD: return 0;
      case EINTR: break;
      default: return -1;
      }
    }

    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal.
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through.
      return status;
    }
  }
  // not reached: all paths above return
  return -1;
}

// is_headless_jre()
//
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
// in order to report if we are running in a headless jre.
//
// Since JDK8 xawt/libmawt.so is moved into the same directory
// as libawt.so, and renamed libawt_xawt.so
bool os::is_headless_jre() {
  struct stat statbuf;
  char buf[MAXPATHLEN];
  char libmawtpath[MAXPATHLEN];
  const char *xawtstr = "/xawt/libmawt.so";
  const char *new_xawtstr = "/libawt_xawt.so";

  char *p;

  // Get path to libjvm.so
  os::jvm_path(buf, sizeof(buf));

  // Get rid of libjvm.so
  p = strrchr(buf, '/');
  if (p == NULL) return false;
  else *p = '\0';

  // Get rid of client or server
  p = strrchr(buf, '/');
  if (p == NULL) return false;
  else *p = '\0';

  // check xawt/libmawt.so
  strcpy(libmawtpath, buf);
  strcat(libmawtpath, xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  // check libawt_xawt.so
  strcpy(libmawtpath, buf);
  strcat(libmawtpath, new_xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  // Neither library exists next to libjvm.so: headless.
  return true;
}

// Get the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {
  const char* p = get_current_directory(buffer, bufferSize);

  if (p == NULL) {
    assert(p != NULL, "failed to get current directory");
    return 0;
  }

  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
               p, current_process_id());

  return strlen(buffer);
}

#ifndef PRODUCT
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
#endif

// Append a dbx-attach prompt to 'buf', show it in a message box and, if
// the user agrees, launch dbx attached to this process. Returns false
// after launching (the debugger takes over), or the user's refusal.
bool os::start_debugging(char *buf, int buflen) {
  int len = (int)strlen(buf);
  char *p = &buf[len];

  jio_snprintf(p, buflen -len,
               "\n\n"
               "Do you want to debug the problem?\n\n"
               "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
               "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
               "Otherwise, press RETURN to abort...",
               os::current_process_id(),
               os::current_thread_id(), thread_self());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // yes, user asked VM to launch debugger
    jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());

    os::fork_and_exec(buf);
    yes = false;
  }
  return yes;
}

// Modification time of 'filename'.
// NOTE(review): in product builds (assert compiled out) a failed stat()
// leaves 'st' partially undefined — presumably callers only pass files
// known to exist; verify at call sites.
static inline time_t get_mtime(const char* filename) {
  struct stat st;
  int ret = os::stat(filename, &st);
  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
  return st.st_mtime;
}

// Compare modification times of two files: negative/zero/positive like
// strcmp. NOTE(review): the time_t difference is truncated to int; only
// the sign is meaningful to callers — confirm no caller relies on magnitude.
int os::compare_file_modified_times(const char* file1, const char* file2) {
  time_t t1 = get_mtime(file1);
  time_t t2 = get_mtime(file2);
  return t1 - t2;
}