1 /* 2 * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved. 3 * Copyright 2012, 2014 SAP AG. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
23 * 24 */ 25 26 // According to the AIX OS doc #pragma alloca must be used 27 // with C++ compiler before referencing the function alloca() 28 #pragma alloca 29 30 // no precompiled headers 31 #include "classfile/classLoader.hpp" 32 #include "classfile/systemDictionary.hpp" 33 #include "classfile/vmSymbols.hpp" 34 #include "code/icBuffer.hpp" 35 #include "code/vtableStubs.hpp" 36 #include "compiler/compileBroker.hpp" 37 #include "interpreter/interpreter.hpp" 38 #include "jvm_aix.h" 39 #include "libperfstat_aix.hpp" 40 #include "loadlib_aix.hpp" 41 #include "memory/allocation.inline.hpp" 42 #include "memory/filemap.hpp" 43 #include "mutex_aix.inline.hpp" 44 #include "oops/oop.inline.hpp" 45 #include "os_aix.inline.hpp" 46 #include "os_share_aix.hpp" 47 #include "porting_aix.hpp" 48 #include "prims/jniFastGetField.hpp" 49 #include "prims/jvm.h" 50 #include "prims/jvm_misc.hpp" 51 #include "runtime/arguments.hpp" 52 #include "runtime/atomic.inline.hpp" 53 #include "runtime/extendedPC.hpp" 54 #include "runtime/globals.hpp" 55 #include "runtime/interfaceSupport.hpp" 56 #include "runtime/java.hpp" 57 #include "runtime/javaCalls.hpp" 58 #include "runtime/mutexLocker.hpp" 59 #include "runtime/objectMonitor.hpp" 60 #include "runtime/orderAccess.inline.hpp" 61 #include "runtime/os.hpp" 62 #include "runtime/osThread.hpp" 63 #include "runtime/perfMemory.hpp" 64 #include "runtime/sharedRuntime.hpp" 65 #include "runtime/statSampler.hpp" 66 #include "runtime/stubRoutines.hpp" 67 #include "runtime/thread.inline.hpp" 68 #include "runtime/threadCritical.hpp" 69 #include "runtime/timer.hpp" 70 #include "runtime/vm_version.hpp" 71 #include "services/attachListener.hpp" 72 #include "services/runtimeService.hpp" 73 #include "utilities/decoder.hpp" 74 #include "utilities/defaultStream.hpp" 75 #include "utilities/events.hpp" 76 #include "utilities/growableArray.hpp" 77 #include "utilities/vmError.hpp" 78 79 // put OS-includes here (sorted alphabetically) 80 #include <errno.h> 81 #include 
<fcntl.h> 82 #include <inttypes.h> 83 #include <poll.h> 84 #include <procinfo.h> 85 #include <pthread.h> 86 #include <pwd.h> 87 #include <semaphore.h> 88 #include <signal.h> 89 #include <stdint.h> 90 #include <stdio.h> 91 #include <string.h> 92 #include <unistd.h> 93 #include <sys/ioctl.h> 94 #include <sys/ipc.h> 95 #include <sys/mman.h> 96 #include <sys/resource.h> 97 #include <sys/select.h> 98 #include <sys/shm.h> 99 #include <sys/socket.h> 100 #include <sys/stat.h> 101 #include <sys/sysinfo.h> 102 #include <sys/systemcfg.h> 103 #include <sys/time.h> 104 #include <sys/times.h> 105 #include <sys/types.h> 106 #include <sys/utsname.h> 107 #include <sys/vminfo.h> 108 #include <sys/wait.h> 109 110 // Add missing declarations (should be in procinfo.h but isn't until AIX 6.1). 111 #if !defined(_AIXVERSION_610) 112 extern "C" { 113 int getthrds64(pid_t ProcessIdentifier, 114 struct thrdentry64* ThreadBuffer, 115 int ThreadSize, 116 tid64_t* IndexPointer, 117 int Count); 118 } 119 #endif 120 121 // Excerpts from systemcfg.h definitions newer than AIX 5.3 122 #ifndef PV_7 123 # define PV_7 0x200000 // Power PC 7 124 # define PV_7_Compat 0x208000 // Power PC 7 125 #endif 126 127 #define MAX_PATH (2 * K) 128 129 // for timer info max values which include all bits 130 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 131 // for multipage initialization error analysis (in 'g_multipage_error') 132 #define ERROR_MP_OS_TOO_OLD 100 133 #define ERROR_MP_EXTSHM_ACTIVE 101 134 #define ERROR_MP_VMGETINFO_FAILED 102 135 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103 136 137 // the semantics in this file are thus that codeptr_t is a *real code ptr* 138 // This means that any function taking codeptr_t as arguments will assume 139 // a real codeptr and won't handle function descriptors (eg getFuncName), 140 // whereas functions taking address as args will deal with function 141 // descriptors (eg os::dll_address_to_library_name) 142 typedef unsigned int* codeptr_t; 143 144 // 
typedefs for stackslots, stack pointers, pointers to op codes 145 typedef unsigned long stackslot_t; 146 typedef stackslot_t* stackptr_t; 147 148 // query dimensions of the stack of the calling thread 149 static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size); 150 151 // function to check a given stack pointer against given stack limits 152 inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) { 153 if (((uintptr_t)sp) & 0x7) { 154 return false; 155 } 156 if (sp > stack_base) { 157 return false; 158 } 159 if (sp < (stackptr_t) ((address)stack_base - stack_size)) { 160 return false; 161 } 162 return true; 163 } 164 165 // returns true if function is a valid codepointer 166 inline bool is_valid_codepointer(codeptr_t p) { 167 if (!p) { 168 return false; 169 } 170 if (((uintptr_t)p) & 0x3) { 171 return false; 172 } 173 if (LoadedLibraries::find_for_text_address((address)p) == NULL) { 174 return false; 175 } 176 return true; 177 } 178 179 // macro to check a given stack pointer against given stack limits and to die if test fails 180 #define CHECK_STACK_PTR(sp, stack_base, stack_size) { \ 181 guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \ 182 } 183 184 // macro to check the current stack pointer against given stacklimits 185 #define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \ 186 address sp; \ 187 sp = os::current_stack_pointer(); \ 188 CHECK_STACK_PTR(sp, stack_base, stack_size); \ 189 } 190 191 //////////////////////////////////////////////////////////////////////////////// 192 // global variables (for a description see os_aix.hpp) 193 194 julong os::Aix::_physical_memory = 0; 195 pthread_t os::Aix::_main_thread = ((pthread_t)0); 196 int os::Aix::_page_size = -1; 197 int os::Aix::_on_pase = -1; 198 int os::Aix::_os_version = -1; 199 int os::Aix::_stack_page_size = -1; 200 size_t os::Aix::_shm_default_page_size = -1; 201 int 
os::Aix::_can_use_64K_pages = -1; 202 int os::Aix::_can_use_16M_pages = -1; 203 int os::Aix::_xpg_sus_mode = -1; 204 int os::Aix::_extshm = -1; 205 int os::Aix::_logical_cpus = -1; 206 207 //////////////////////////////////////////////////////////////////////////////// 208 // local variables 209 210 static int g_multipage_error = -1; // error analysis for multipage initialization 211 static jlong initial_time_count = 0; 212 static int clock_tics_per_sec = 100; 213 static sigset_t check_signal_done; // For diagnostics to print a message once (see run_periodic_checks) 214 static bool check_signals = true; 215 static pid_t _initial_pid = 0; 216 static int SR_signum = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769) 217 static sigset_t SR_sigset; 218 static pthread_mutex_t dl_mutex; // Used to protect dlsym() calls */ 219 220 julong os::available_memory() { 221 return Aix::available_memory(); 222 } 223 224 julong os::Aix::available_memory() { 225 os::Aix::meminfo_t mi; 226 if (os::Aix::get_meminfo(&mi)) { 227 return mi.real_free; 228 } else { 229 return 0xFFFFFFFFFFFFFFFFLL; 230 } 231 } 232 233 julong os::physical_memory() { 234 return Aix::physical_memory(); 235 } 236 237 //////////////////////////////////////////////////////////////////////////////// 238 // environment support 239 240 bool os::getenv(const char* name, char* buf, int len) { 241 const char* val = ::getenv(name); 242 if (val != NULL && strlen(val) < (size_t)len) { 243 strcpy(buf, val); 244 return true; 245 } 246 if (len > 0) buf[0] = 0; // return a null string 247 return false; 248 } 249 250 251 // Return true if user is running as root. 
252 253 bool os::have_special_privileges() { 254 static bool init = false; 255 static bool privileges = false; 256 if (!init) { 257 privileges = (getuid() != geteuid()) || (getgid() != getegid()); 258 init = true; 259 } 260 return privileges; 261 } 262 263 // Helper function, emulates disclaim64 using multiple 32bit disclaims 264 // because we cannot use disclaim64() on AS/400 and old AIX releases. 265 static bool my_disclaim64(char* addr, size_t size) { 266 267 if (size == 0) { 268 return true; 269 } 270 271 // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.) 272 const unsigned int maxDisclaimSize = 0x80000000; 273 274 const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize); 275 const unsigned int lastDisclaimSize = (size % maxDisclaimSize); 276 277 char* p = addr; 278 279 for (int i = 0; i < numFullDisclaimsNeeded; i ++) { 280 if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) { 281 //if (Verbose) 282 fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno); 283 return false; 284 } 285 p += maxDisclaimSize; 286 } 287 288 if (lastDisclaimSize > 0) { 289 if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) { 290 //if (Verbose) 291 fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno); 292 return false; 293 } 294 } 295 296 return true; 297 } 298 299 // Cpu architecture string 300 #if defined(PPC32) 301 static char cpu_arch[] = "ppc"; 302 #elif defined(PPC64) 303 static char cpu_arch[] = "ppc64"; 304 #else 305 #error Add appropriate cpu_arch setting 306 #endif 307 308 309 // Given an address, returns the size of the page backing that address. 
310 size_t os::Aix::query_pagesize(void* addr) { 311 312 vm_page_info pi; 313 pi.addr = (uint64_t)addr; 314 if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) { 315 return pi.pagesize; 316 } else { 317 fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno); 318 assert(false, "vmgetinfo failed to retrieve page size"); 319 return SIZE_4K; 320 } 321 322 } 323 324 // Returns the kernel thread id of the currently running thread. 325 pid_t os::Aix::gettid() { 326 return (pid_t) thread_self(); 327 } 328 329 void os::Aix::initialize_system_info() { 330 331 // get the number of online(logical) cpus instead of configured 332 os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN); 333 assert(_processor_count > 0, "_processor_count must be > 0"); 334 335 // retrieve total physical storage 336 os::Aix::meminfo_t mi; 337 if (!os::Aix::get_meminfo(&mi)) { 338 fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr); 339 assert(false, "os::Aix::get_meminfo failed."); 340 } 341 _physical_memory = (julong) mi.real_total; 342 } 343 344 // Helper function for tracing page sizes. 345 static const char* describe_pagesize(size_t pagesize) { 346 switch (pagesize) { 347 case SIZE_4K : return "4K"; 348 case SIZE_64K: return "64K"; 349 case SIZE_16M: return "16M"; 350 case SIZE_16G: return "16G"; 351 default: 352 assert(false, "surprise"); 353 return "??"; 354 } 355 } 356 357 // Retrieve information about multipage size support. Will initialize 358 // Aix::_page_size, Aix::_stack_page_size, Aix::_can_use_64K_pages, 359 // Aix::_can_use_16M_pages. 360 // Must be called before calling os::large_page_init(). 361 void os::Aix::query_multipage_support() { 362 363 guarantee(_page_size == -1 && 364 _stack_page_size == -1 && 365 _can_use_64K_pages == -1 && 366 _can_use_16M_pages == -1 && 367 g_multipage_error == -1, 368 "do not call twice"); 369 370 _page_size = ::sysconf(_SC_PAGESIZE); 371 372 // This really would surprise me. 
373 assert(_page_size == SIZE_4K, "surprise!"); 374 375 376 // Query default data page size (default page size for C-Heap, pthread stacks and .bss). 377 // Default data page size is influenced either by linker options (-bdatapsize) 378 // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given, 379 // default should be 4K. 380 size_t data_page_size = SIZE_4K; 381 { 382 void* p = os::malloc(SIZE_16M, mtInternal); 383 guarantee(p != NULL, "malloc failed"); 384 data_page_size = os::Aix::query_pagesize(p); 385 os::free(p); 386 } 387 388 // query default shm page size (LDR_CNTRL SHMPSIZE) 389 { 390 const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR); 391 guarantee(shmid != -1, "shmget failed"); 392 void* p = ::shmat(shmid, NULL, 0); 393 ::shmctl(shmid, IPC_RMID, NULL); 394 guarantee(p != (void*) -1, "shmat failed"); 395 _shm_default_page_size = os::Aix::query_pagesize(p); 396 ::shmdt(p); 397 } 398 399 // before querying the stack page size, make sure we are not running as primordial 400 // thread (because primordial thread's stack may have different page size than 401 // pthread thread stacks). 
Running a VM on the primordial thread won't work for a 402 // number of reasons so we may just as well guarantee it here 403 guarantee(!os::Aix::is_primordial_thread(), "Must not be called for primordial thread"); 404 405 // query stack page size 406 { 407 int dummy = 0; 408 _stack_page_size = os::Aix::query_pagesize(&dummy); 409 // everything else would surprise me and should be looked into 410 guarantee(_stack_page_size == SIZE_4K || _stack_page_size == SIZE_64K, "Wrong page size"); 411 // also, just for completeness: pthread stacks are allocated from C heap, so 412 // stack page size should be the same as data page size 413 guarantee(_stack_page_size == data_page_size, "stack page size should be the same as data page size"); 414 } 415 416 // EXTSHM is bad: among other things, it prevents setting pagesize dynamically 417 // for system V shm. 418 if (Aix::extshm()) { 419 if (Verbose) { 420 fprintf(stderr, "EXTSHM is active - will disable large page support.\n" 421 "Please make sure EXTSHM is OFF for large page support.\n"); 422 } 423 g_multipage_error = ERROR_MP_EXTSHM_ACTIVE; 424 _can_use_64K_pages = _can_use_16M_pages = 0; 425 goto query_multipage_support_end; 426 } 427 428 // now check which page sizes the OS claims it supports, and of those, which actually can be used. 429 { 430 const int MAX_PAGE_SIZES = 4; 431 psize_t sizes[MAX_PAGE_SIZES]; 432 const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES); 433 if (num_psizes == -1) { 434 if (Verbose) { 435 fprintf(stderr, "vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno); 436 fprintf(stderr, "disabling multipage support.\n"); 437 } 438 g_multipage_error = ERROR_MP_VMGETINFO_FAILED; 439 _can_use_64K_pages = _can_use_16M_pages = 0; 440 goto query_multipage_support_end; 441 } 442 guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed."); 443 assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! 
more than 4 page sizes?"); 444 if (Verbose) { 445 fprintf(stderr, "vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes); 446 for (int i = 0; i < num_psizes; i ++) { 447 fprintf(stderr, " %s ", describe_pagesize(sizes[i])); 448 } 449 fprintf(stderr, " .\n"); 450 } 451 452 // Can we use 64K, 16M pages? 453 _can_use_64K_pages = 0; 454 _can_use_16M_pages = 0; 455 for (int i = 0; i < num_psizes; i ++) { 456 if (sizes[i] == SIZE_64K) { 457 _can_use_64K_pages = 1; 458 } else if (sizes[i] == SIZE_16M) { 459 _can_use_16M_pages = 1; 460 } 461 } 462 463 if (!_can_use_64K_pages) { 464 g_multipage_error = ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K; 465 } 466 467 // Double-check for 16M pages: Even if AIX claims to be able to use 16M pages, 468 // there must be an actual 16M page pool, and we must run with enough rights. 469 if (_can_use_16M_pages) { 470 const int shmid = ::shmget(IPC_PRIVATE, SIZE_16M, IPC_CREAT | S_IRUSR | S_IWUSR); 471 guarantee(shmid != -1, "shmget failed"); 472 struct shmid_ds shm_buf = { 0 }; 473 shm_buf.shm_pagesize = SIZE_16M; 474 const bool can_set_pagesize = ::shmctl(shmid, SHM_PAGESIZE, &shm_buf) == 0 ? true : false; 475 const int en = errno; 476 ::shmctl(shmid, IPC_RMID, NULL); 477 if (!can_set_pagesize) { 478 if (Verbose) { 479 fprintf(stderr, "Failed to allocate even one misely 16M page. 
shmctl failed with %d (%s).\n" 480 "Will deactivate 16M support.\n", en, strerror(en)); 481 } 482 _can_use_16M_pages = 0; 483 } 484 } 485 486 } // end: check which pages can be used for shared memory 487 488 query_multipage_support_end: 489 490 guarantee(_page_size != -1 && 491 _stack_page_size != -1 && 492 _can_use_64K_pages != -1 && 493 _can_use_16M_pages != -1, "Page sizes not properly initialized"); 494 495 if (_can_use_64K_pages) { 496 g_multipage_error = 0; 497 } 498 499 if (Verbose) { 500 fprintf(stderr, "Data page size (C-Heap, bss, etc): %s\n", describe_pagesize(data_page_size)); 501 fprintf(stderr, "Thread stack page size (pthread): %s\n", describe_pagesize(_stack_page_size)); 502 fprintf(stderr, "Default shared memory page size: %s\n", describe_pagesize(_shm_default_page_size)); 503 fprintf(stderr, "Can use 64K pages dynamically with shared meory: %s\n", (_can_use_64K_pages ? "yes" :"no")); 504 fprintf(stderr, "Can use 16M pages dynamically with shared memory: %s\n", (_can_use_16M_pages ? "yes" :"no")); 505 fprintf(stderr, "Multipage error details: %d\n", g_multipage_error); 506 } 507 508 } // end os::Aix::query_multipage_support() 509 510 // The code for this method was initially derived from the version in os_linux.cpp. 511 void os::init_system_properties_values() { 512 513 #define DEFAULT_LIBPATH "/usr/lib:/lib" 514 #define EXTENSIONS_DIR "/lib/ext" 515 #define ENDORSED_DIR "/lib/endorsed" 516 517 // Buffer that fits several sprintfs. 518 // Note that the space for the trailing null is provided 519 // by the nulls included by the sizeof operator. 520 const size_t bufsize = 521 MAX3((size_t)MAXPATHLEN, // For dll_dir & friends. 
522 (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR), // extensions dir 523 (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir 524 char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); 525 526 // sysclasspath, java_home, dll_dir 527 { 528 char *pslash; 529 os::jvm_path(buf, bufsize); 530 531 // Found the full path to libjvm.so. 532 // Now cut the path to <java_home>/jre if we can. 533 *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so. 534 pslash = strrchr(buf, '/'); 535 if (pslash != NULL) { 536 *pslash = '\0'; // Get rid of /{client|server|hotspot}. 537 } 538 Arguments::set_dll_dir(buf); 539 540 if (pslash != NULL) { 541 pslash = strrchr(buf, '/'); 542 if (pslash != NULL) { 543 *pslash = '\0'; // Get rid of /<arch>. 544 pslash = strrchr(buf, '/'); 545 if (pslash != NULL) { 546 *pslash = '\0'; // Get rid of /lib. 547 } 548 } 549 } 550 Arguments::set_java_home(buf); 551 set_boot_path('/', ':'); 552 } 553 554 // Where to look for native libraries. 555 556 // On Aix we get the user setting of LIBPATH. 557 // Eventually, all the library path setting will be done here. 558 // Get the user setting of LIBPATH. 559 const char *v = ::getenv("LIBPATH"); 560 const char *v_colon = ":"; 561 if (v == NULL) { v = ""; v_colon = ""; } 562 563 // Concatenate user and invariant part of ld_library_path. 564 // That's +1 for the colon and +1 for the trailing '\0'. 565 char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal); 566 sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon); 567 Arguments::set_library_path(ld_library_path); 568 FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal); 569 570 // Extensions directories. 571 sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home()); 572 Arguments::set_ext_dirs(buf); 573 574 // Endorsed standards default directory. 
575 sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home()); 576 Arguments::set_endorsed_dirs(buf); 577 578 FREE_C_HEAP_ARRAY(char, buf, mtInternal); 579 580 #undef DEFAULT_LIBPATH 581 #undef EXTENSIONS_DIR 582 #undef ENDORSED_DIR 583 } 584 585 //////////////////////////////////////////////////////////////////////////////// 586 // breakpoint support 587 588 void os::breakpoint() { 589 BREAKPOINT; 590 } 591 592 extern "C" void breakpoint() { 593 // use debugger to set breakpoint here 594 } 595 596 //////////////////////////////////////////////////////////////////////////////// 597 // signal support 598 599 debug_only(static bool signal_sets_initialized = false); 600 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs; 601 602 bool os::Aix::is_sig_ignored(int sig) { 603 struct sigaction oact; 604 sigaction(sig, (struct sigaction*)NULL, &oact); 605 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction) 606 : CAST_FROM_FN_PTR(void*, oact.sa_handler); 607 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) 608 return true; 609 else 610 return false; 611 } 612 613 void os::Aix::signal_sets_init() { 614 // Should also have an assertion stating we are still single-threaded. 615 assert(!signal_sets_initialized, "Already initialized"); 616 // Fill in signals that are necessarily unblocked for all threads in 617 // the VM. Currently, we unblock the following signals: 618 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden 619 // by -Xrs (=ReduceSignalUsage)); 620 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all 621 // other threads. The "ReduceSignalUsage" boolean tells us not to alter 622 // the dispositions or masks wrt these signals. 623 // Programs embedding the VM that want to use the above signals for their 624 // own purposes must, at this time, use the "-Xrs" option to prevent 625 // interference with shutdown hooks and BREAK_SIGNAL thread dumping. 
626 // (See bug 4345157, and other related bugs). 627 // In reality, though, unblocking these signals is really a nop, since 628 // these signals are not blocked by default. 629 sigemptyset(&unblocked_sigs); 630 sigemptyset(&allowdebug_blocked_sigs); 631 sigaddset(&unblocked_sigs, SIGILL); 632 sigaddset(&unblocked_sigs, SIGSEGV); 633 sigaddset(&unblocked_sigs, SIGBUS); 634 sigaddset(&unblocked_sigs, SIGFPE); 635 sigaddset(&unblocked_sigs, SIGTRAP); 636 sigaddset(&unblocked_sigs, SIGDANGER); 637 sigaddset(&unblocked_sigs, SR_signum); 638 639 if (!ReduceSignalUsage) { 640 if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) { 641 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL); 642 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL); 643 } 644 if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) { 645 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL); 646 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL); 647 } 648 if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) { 649 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL); 650 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL); 651 } 652 } 653 // Fill in signals that are blocked by all but the VM thread. 654 sigemptyset(&vm_sigs); 655 if (!ReduceSignalUsage) 656 sigaddset(&vm_sigs, BREAK_SIGNAL); 657 debug_only(signal_sets_initialized = true); 658 } 659 660 // These are signals that are unblocked while a thread is running Java. 661 // (For some reason, they get blocked by default.) 662 sigset_t* os::Aix::unblocked_signals() { 663 assert(signal_sets_initialized, "Not initialized"); 664 return &unblocked_sigs; 665 } 666 667 // These are the signals that are blocked while a (non-VM) thread is 668 // running Java. Only the VM thread handles these signals. 
669 sigset_t* os::Aix::vm_signals() { 670 assert(signal_sets_initialized, "Not initialized"); 671 return &vm_sigs; 672 } 673 674 // These are signals that are blocked during cond_wait to allow debugger in 675 sigset_t* os::Aix::allowdebug_blocked_signals() { 676 assert(signal_sets_initialized, "Not initialized"); 677 return &allowdebug_blocked_sigs; 678 } 679 680 void os::Aix::hotspot_sigmask(Thread* thread) { 681 682 //Save caller's signal mask before setting VM signal mask 683 sigset_t caller_sigmask; 684 pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask); 685 686 OSThread* osthread = thread->osthread(); 687 osthread->set_caller_sigmask(caller_sigmask); 688 689 pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL); 690 691 if (!ReduceSignalUsage) { 692 if (thread->is_VM_thread()) { 693 // Only the VM thread handles BREAK_SIGNAL ... 694 pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL); 695 } else { 696 // ... all other threads block BREAK_SIGNAL 697 pthread_sigmask(SIG_BLOCK, vm_signals(), NULL); 698 } 699 } 700 } 701 702 // retrieve memory information. 703 // Returns false if something went wrong; 704 // content of pmi undefined in this case. 
705 bool os::Aix::get_meminfo(meminfo_t* pmi) { 706 707 assert(pmi, "get_meminfo: invalid parameter"); 708 709 memset(pmi, 0, sizeof(meminfo_t)); 710 711 if (os::Aix::on_pase()) { 712 713 Unimplemented(); 714 return false; 715 716 } else { 717 718 // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics 719 // See: 720 // http://publib.boulder.ibm.com/infocenter/systems/index.jsp 721 // ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm 722 // http://publib.boulder.ibm.com/infocenter/systems/index.jsp 723 // ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm 724 725 perfstat_memory_total_t psmt; 726 memset (&psmt, '\0', sizeof(psmt)); 727 const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1); 728 if (rc == -1) { 729 fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno); 730 assert(0, "perfstat_memory_total() failed"); 731 return false; 732 } 733 734 assert(rc == 1, "perfstat_memory_total() - weird return code"); 735 736 // excerpt from 737 // http://publib.boulder.ibm.com/infocenter/systems/index.jsp 738 // ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm 739 // The fields of perfstat_memory_total_t: 740 // u_longlong_t virt_total Total virtual memory (in 4 KB pages). 741 // u_longlong_t real_total Total real memory (in 4 KB pages). 742 // u_longlong_t real_free Free real memory (in 4 KB pages). 743 // u_longlong_t pgsp_total Total paging space (in 4 KB pages). 744 // u_longlong_t pgsp_free Free paging space (in 4 KB pages). 745 746 pmi->virt_total = psmt.virt_total * 4096; 747 pmi->real_total = psmt.real_total * 4096; 748 pmi->real_free = psmt.real_free * 4096; 749 pmi->pgsp_total = psmt.pgsp_total * 4096; 750 pmi->pgsp_free = psmt.pgsp_free * 4096; 751 752 return true; 753 754 } 755 } // end os::Aix::get_meminfo 756 757 // Retrieve global cpu information. 758 // Returns false if something went wrong; 759 // the content of pci is undefined in this case. 
760 bool os::Aix::get_cpuinfo(cpuinfo_t* pci) { 761 assert(pci, "get_cpuinfo: invalid parameter"); 762 memset(pci, 0, sizeof(cpuinfo_t)); 763 764 perfstat_cpu_total_t psct; 765 memset (&psct, '\0', sizeof(psct)); 766 767 if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) { 768 fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno); 769 assert(0, "perfstat_cpu_total() failed"); 770 return false; 771 } 772 773 // global cpu information 774 strcpy (pci->description, psct.description); 775 pci->processorHZ = psct.processorHZ; 776 pci->ncpus = psct.ncpus; 777 os::Aix::_logical_cpus = psct.ncpus; 778 for (int i = 0; i < 3; i++) { 779 pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS); 780 } 781 782 // get the processor version from _system_configuration 783 switch (_system_configuration.version) { 784 case PV_7: 785 strcpy(pci->version, "Power PC 7"); 786 break; 787 case PV_6_1: 788 strcpy(pci->version, "Power PC 6 DD1.x"); 789 break; 790 case PV_6: 791 strcpy(pci->version, "Power PC 6"); 792 break; 793 case PV_5: 794 strcpy(pci->version, "Power PC 5"); 795 break; 796 case PV_5_2: 797 strcpy(pci->version, "Power PC 5_2"); 798 break; 799 case PV_5_3: 800 strcpy(pci->version, "Power PC 5_3"); 801 break; 802 case PV_5_Compat: 803 strcpy(pci->version, "PV_5_Compat"); 804 break; 805 case PV_6_Compat: 806 strcpy(pci->version, "PV_6_Compat"); 807 break; 808 case PV_7_Compat: 809 strcpy(pci->version, "PV_7_Compat"); 810 break; 811 default: 812 strcpy(pci->version, "unknown"); 813 } 814 815 return true; 816 817 } //end os::Aix::get_cpuinfo 818 819 ////////////////////////////////////////////////////////////////////////////// 820 // detecting pthread library 821 822 void os::Aix::libpthread_init() { 823 return; 824 } 825 826 ////////////////////////////////////////////////////////////////////////////// 827 // create new thread 828 829 // Thread start routine for all newly created threads 830 static void *java_start(Thread 
*thread) {

  // Find out my own stack dimensions.
  {
    // Actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  // Make this thread reachable via TLS before it runs any VM code.
  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is kernel thread id (similar to Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level start routine
  thread->run();

  return 0;
}

// Create an OS-level pthread for 'thread'. The thread is created suspended
// (see pthread_attr_setsuspendstate_np below) and is woken up later via
// os::pd_start_thread(). Returns false - with thread->osthread() reset to
// NULL - if the OSThread allocation or pthread_create() fails.
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (os::Aix::supports_variable_stack_size()) {
    if (stack_size == 0) {
      stack_size = os::Aix::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } //else let thread_create() pick the default value (96 K on AIX)

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  pthread_attr_destroy(&attr);

  if (ret != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

// Wrap an already-running native thread (e.g. the launcher thread or a JNI
// AttachCurrentThread caller) in an OSThread so the VM can manage it.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}

// Wake up a thread created suspended by os::create_thread().
void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "thr_continue failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// thread local storage

// Create a pthread TLS key; the key is returned to the caller as an int index.
int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, NULL);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}

// Note: This is currently not used by VM, as we don't destroy TLS key
// on VM exit.
1042 void os::free_thread_local_storage(int index) { 1043 int rslt = pthread_key_delete((pthread_key_t)index); 1044 assert(rslt == 0, "invalid index"); 1045 } 1046 1047 void os::thread_local_storage_at_put(int index, void* value) { 1048 int rslt = pthread_setspecific((pthread_key_t)index, value); 1049 assert(rslt == 0, "pthread_setspecific failed"); 1050 } 1051 1052 extern "C" Thread* get_thread() { 1053 return ThreadLocalStorage::thread(); 1054 } 1055 1056 //////////////////////////////////////////////////////////////////////////////// 1057 // time support 1058 1059 // Time since start-up in seconds to a fine granularity. 1060 // Used by VMSelfDestructTimer and the MemProfiler. 1061 double os::elapsedTime() { 1062 return (double)(os::elapsed_counter()) * 0.000001; 1063 } 1064 1065 jlong os::elapsed_counter() { 1066 timeval time; 1067 int status = gettimeofday(&time, NULL); 1068 return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count; 1069 } 1070 1071 jlong os::elapsed_frequency() { 1072 return (1000 * 1000); 1073 } 1074 1075 // For now, we say that linux does not support vtime. I have no idea 1076 // whether it can actually be made to (DLD, 9/13/05). 1077 1078 bool os::supports_vtime() { return false; } 1079 bool os::enable_vtime() { return false; } 1080 bool os::vtime_enabled() { return false; } 1081 double os::elapsedVTime() { 1082 // better than nothing, but not much 1083 return elapsedTime(); 1084 } 1085 1086 jlong os::javaTimeMillis() { 1087 timeval time; 1088 int status = gettimeofday(&time, NULL); 1089 assert(status != -1, "aix error at gettimeofday()"); 1090 return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000); 1091 } 1092 1093 // We need to manually declare mread_real_time, 1094 // because IBM didn't provide a prototype in time.h. 
// (they probably only ever tested in C, not C++)
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

// Nanoseconds from the AIX real time clock / time base registers.
// Monotonic on AIX (see comment below); unimplemented on PASE.
jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {
    Unimplemented();
    return 0;
  }
  else {
    // On AIX use the precision of processors real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After convertion we have following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonic increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      // Result was raw time base ticks; convert in place to seconds/nanos.
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}

// Describe the properties of the javaTimeNanos() clock for JVMTI.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
1134 bool os::getTimesSecs(double* process_real_time, 1135 double* process_user_time, 1136 double* process_system_time) { 1137 struct tms ticks; 1138 clock_t real_ticks = times(&ticks); 1139 1140 if (real_ticks == (clock_t) (-1)) { 1141 return false; 1142 } else { 1143 double ticks_per_second = (double) clock_tics_per_sec; 1144 *process_user_time = ((double) ticks.tms_utime) / ticks_per_second; 1145 *process_system_time = ((double) ticks.tms_stime) / ticks_per_second; 1146 *process_real_time = ((double) real_ticks) / ticks_per_second; 1147 1148 return true; 1149 } 1150 } 1151 1152 1153 char * os::local_time_string(char *buf, size_t buflen) { 1154 struct tm t; 1155 time_t long_time; 1156 time(&long_time); 1157 localtime_r(&long_time, &t); 1158 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 1159 t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, 1160 t.tm_hour, t.tm_min, t.tm_sec); 1161 return buf; 1162 } 1163 1164 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 1165 return localtime_r(clock, res); 1166 } 1167 1168 //////////////////////////////////////////////////////////////////////////////// 1169 // runtime exit support 1170 1171 // Note: os::shutdown() might be called very early during initialization, or 1172 // called from signal handler. Before adding something to os::shutdown(), make 1173 // sure it is async-safe and can handle partially initialized VM. 1174 void os::shutdown() { 1175 1176 // allow PerfMemory to attempt cleanup of any persistent resources 1177 perfMemory_exit(); 1178 1179 // needs to remove object in file system 1180 AttachListener::abort(); 1181 1182 // flush buffered output, finish log files 1183 ostream_abort(); 1184 1185 // Check for abort hook 1186 abort_hook_t abort_hook = Arguments::abort_hook(); 1187 if (abort_hook != NULL) { 1188 abort_hook(); 1189 } 1190 1191 } 1192 1193 // Note: os::abort() might be called very early during initialization, or 1194 // called from signal handler. 
Before adding something to os::abort(), make 1195 // sure it is async-safe and can handle partially initialized VM. 1196 void os::abort(bool dump_core) { 1197 os::shutdown(); 1198 if (dump_core) { 1199 #ifndef PRODUCT 1200 fdStream out(defaultStream::output_fd()); 1201 out.print_raw("Current thread is "); 1202 char buf[16]; 1203 jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id()); 1204 out.print_raw_cr(buf); 1205 out.print_raw_cr("Dumping core ..."); 1206 #endif 1207 ::abort(); // dump core 1208 } 1209 1210 ::exit(1); 1211 } 1212 1213 // Die immediately, no exit hook, no abort hook, no cleanup. 1214 void os::die() { 1215 ::abort(); 1216 } 1217 1218 // This method is a copy of JDK's sysGetLastErrorString 1219 // from src/solaris/hpi/src/system_md.c 1220 1221 size_t os::lasterror(char *buf, size_t len) { 1222 1223 if (errno == 0) return 0; 1224 1225 const char *s = ::strerror(errno); 1226 size_t n = ::strlen(s); 1227 if (n >= len) { 1228 n = len - 1; 1229 } 1230 ::strncpy(buf, s, n); 1231 buf[n] = '\0'; 1232 return n; 1233 } 1234 1235 intx os::current_thread_id() { return (intx)pthread_self(); } 1236 int os::current_process_id() { 1237 1238 // This implementation returns a unique pid, the pid of the 1239 // launcher thread that starts the vm 'process'. 1240 1241 // Under POSIX, getpid() returns the same pid as the 1242 // launcher thread rather than a unique pid per thread. 1243 // Use gettid() if you want the old pre NPTL behaviour. 1244 1245 // if you are looking for the result of a call to getpid() that 1246 // returns a unique pid for the calling thread, then look at the 1247 // OSThread::thread_id() method in osThread_linux.hpp file 1248 1249 return (int)(_initial_pid ? _initial_pid : getpid()); 1250 } 1251 1252 // DLL functions 1253 1254 const char* os::dll_file_extension() { return ".so"; } 1255 1256 // This must be hard coded because it's the system's temporary 1257 // directory not the java application's temp directory, ala java.io.tmpdir. 
1258 const char* os::get_temp_directory() { return "/tmp"; } 1259 1260 static bool file_exists(const char* filename) { 1261 struct stat statbuf; 1262 if (filename == NULL || strlen(filename) == 0) { 1263 return false; 1264 } 1265 return os::stat(filename, &statbuf) == 0; 1266 } 1267 1268 bool os::dll_build_name(char* buffer, size_t buflen, 1269 const char* pname, const char* fname) { 1270 bool retval = false; 1271 // Copied from libhpi 1272 const size_t pnamelen = pname ? strlen(pname) : 0; 1273 1274 // Return error on buffer overflow. 1275 if (pnamelen + strlen(fname) + 10 > (size_t) buflen) { 1276 *buffer = '\0'; 1277 return retval; 1278 } 1279 1280 if (pnamelen == 0) { 1281 snprintf(buffer, buflen, "lib%s.so", fname); 1282 retval = true; 1283 } else if (strchr(pname, *os::path_separator()) != NULL) { 1284 int n; 1285 char** pelements = split_path(pname, &n); 1286 for (int i = 0; i < n; i++) { 1287 // Really shouldn't be NULL, but check can't hurt 1288 if (pelements[i] == NULL || strlen(pelements[i]) == 0) { 1289 continue; // skip the empty path values 1290 } 1291 snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname); 1292 if (file_exists(buffer)) { 1293 retval = true; 1294 break; 1295 } 1296 } 1297 // release the storage 1298 for (int i = 0; i < n; i++) { 1299 if (pelements[i] != NULL) { 1300 FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal); 1301 } 1302 } 1303 if (pelements != NULL) { 1304 FREE_C_HEAP_ARRAY(char*, pelements, mtInternal); 1305 } 1306 } else { 1307 snprintf(buffer, buflen, "%s/lib%s.so", pname, fname); 1308 retval = true; 1309 } 1310 return retval; 1311 } 1312 1313 // Check if addr is inside libjvm.so. 1314 bool os::address_is_in_vm(address addr) { 1315 1316 // Input could be a real pc or a function pointer literal. The latter 1317 // would be a function descriptor residing in the data segment of a module. 
1318 1319 const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr); 1320 if (lib) { 1321 if (strcmp(lib->get_shortname(), "libjvm.so") == 0) { 1322 return true; 1323 } else { 1324 return false; 1325 } 1326 } else { 1327 lib = LoadedLibraries::find_for_data_address(addr); 1328 if (lib) { 1329 if (strcmp(lib->get_shortname(), "libjvm.so") == 0) { 1330 return true; 1331 } else { 1332 return false; 1333 } 1334 } else { 1335 return false; 1336 } 1337 } 1338 } 1339 1340 // Resolve an AIX function descriptor literal to a code pointer. 1341 // If the input is a valid code pointer to a text segment of a loaded module, 1342 // it is returned unchanged. 1343 // If the input is a valid AIX function descriptor, it is resolved to the 1344 // code entry point. 1345 // If the input is neither a valid function descriptor nor a valid code pointer, 1346 // NULL is returned. 1347 static address resolve_function_descriptor_to_code_pointer(address p) { 1348 1349 const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p); 1350 if (lib) { 1351 // its a real code pointer 1352 return p; 1353 } else { 1354 lib = LoadedLibraries::find_for_data_address(p); 1355 if (lib) { 1356 // pointer to data segment, potential function descriptor 1357 address code_entry = (address)(((FunctionDescriptor*)p)->entry()); 1358 if (LoadedLibraries::find_for_text_address(code_entry)) { 1359 // Its a function descriptor 1360 return code_entry; 1361 } 1362 } 1363 } 1364 return NULL; 1365 } 1366 1367 bool os::dll_address_to_function_name(address addr, char *buf, 1368 int buflen, int *offset) { 1369 if (offset) { 1370 *offset = -1; 1371 } 1372 if (buf) { 1373 buf[0] = '\0'; 1374 } 1375 1376 // Resolve function ptr literals first. 1377 addr = resolve_function_descriptor_to_code_pointer(addr); 1378 if (!addr) { 1379 return false; 1380 } 1381 1382 // Go through Decoder::decode to call getFuncName which reads the name from the traceback table. 
1383 return Decoder::decode(addr, buf, buflen, offset); 1384 } 1385 1386 static int getModuleName(codeptr_t pc, // [in] program counter 1387 char* p_name, size_t namelen, // [out] optional: function name 1388 char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages 1389 ) { 1390 1391 // initialize output parameters 1392 if (p_name && namelen > 0) { 1393 *p_name = '\0'; 1394 } 1395 if (p_errmsg && errmsglen > 0) { 1396 *p_errmsg = '\0'; 1397 } 1398 1399 const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc); 1400 if (lib) { 1401 if (p_name && namelen > 0) { 1402 sprintf(p_name, "%.*s", namelen, lib->get_shortname()); 1403 } 1404 return 0; 1405 } 1406 1407 if (Verbose) { 1408 fprintf(stderr, "pc outside any module"); 1409 } 1410 1411 return -1; 1412 1413 } 1414 1415 bool os::dll_address_to_library_name(address addr, char* buf, 1416 int buflen, int* offset) { 1417 if (offset) { 1418 *offset = -1; 1419 } 1420 if (buf) { 1421 buf[0] = '\0'; 1422 } 1423 1424 // Resolve function ptr literals first. 1425 addr = resolve_function_descriptor_to_code_pointer(addr); 1426 if (!addr) { 1427 return false; 1428 } 1429 1430 if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) { 1431 return true; 1432 } 1433 return false; 1434 } 1435 1436 // Loads .dll/.so and in case of error it checks if .dll/.so was built 1437 // for the same architecture as Hotspot is running on 1438 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) { 1439 1440 if (ebuf && ebuflen > 0) { 1441 ebuf[0] = '\0'; 1442 ebuf[ebuflen - 1] = '\0'; 1443 } 1444 1445 if (!filename || strlen(filename) == 0) { 1446 ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1); 1447 return NULL; 1448 } 1449 1450 // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants. 1451 void * result= ::dlopen(filename, RTLD_LAZY); 1452 if (result != NULL) { 1453 // Reload dll cache. 
Don't do this in signal handling. 1454 LoadedLibraries::reload(); 1455 return result; 1456 } else { 1457 // error analysis when dlopen fails 1458 const char* const error_report = ::dlerror(); 1459 if (error_report && ebuf && ebuflen > 0) { 1460 snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s", 1461 filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report); 1462 } 1463 } 1464 return NULL; 1465 } 1466 1467 // Glibc-2.0 libdl is not MT safe. If you are building with any glibc, 1468 // chances are you might want to run the generated bits against glibc-2.0 1469 // libdl.so, so always use locking for any version of glibc. 1470 void* os::dll_lookup(void* handle, const char* name) { 1471 pthread_mutex_lock(&dl_mutex); 1472 void* res = dlsym(handle, name); 1473 pthread_mutex_unlock(&dl_mutex); 1474 return res; 1475 } 1476 1477 void* os::get_default_process_handle() { 1478 return (void*)::dlopen(NULL, RTLD_LAZY); 1479 } 1480 1481 void os::print_dll_info(outputStream *st) { 1482 st->print_cr("Dynamic libraries:"); 1483 LoadedLibraries::print(st); 1484 } 1485 1486 void os::print_os_info(outputStream* st) { 1487 st->print("OS:"); 1488 1489 st->print("uname:"); 1490 struct utsname name; 1491 uname(&name); 1492 st->print(name.sysname); st->print(" "); 1493 st->print(name.nodename); st->print(" "); 1494 st->print(name.release); st->print(" "); 1495 st->print(name.version); st->print(" "); 1496 st->print(name.machine); 1497 st->cr(); 1498 1499 // rlimit 1500 st->print("rlimit:"); 1501 struct rlimit rlim; 1502 1503 st->print(" STACK "); 1504 getrlimit(RLIMIT_STACK, &rlim); 1505 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 1506 else st->print("%uk", rlim.rlim_cur >> 10); 1507 1508 st->print(", CORE "); 1509 getrlimit(RLIMIT_CORE, &rlim); 1510 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 1511 else st->print("%uk", rlim.rlim_cur >> 10); 1512 1513 st->print(", NPROC "); 1514 st->print("%d", sysconf(_SC_CHILD_MAX)); 1515 
1516 st->print(", NOFILE "); 1517 getrlimit(RLIMIT_NOFILE, &rlim); 1518 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 1519 else st->print("%d", rlim.rlim_cur); 1520 1521 st->print(", AS "); 1522 getrlimit(RLIMIT_AS, &rlim); 1523 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 1524 else st->print("%uk", rlim.rlim_cur >> 10); 1525 1526 // Print limits on DATA, because it limits the C-heap. 1527 st->print(", DATA "); 1528 getrlimit(RLIMIT_DATA, &rlim); 1529 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 1530 else st->print("%uk", rlim.rlim_cur >> 10); 1531 st->cr(); 1532 1533 // load average 1534 st->print("load average:"); 1535 double loadavg[3] = {-1.L, -1.L, -1.L}; 1536 os::loadavg(loadavg, 3); 1537 st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]); 1538 st->cr(); 1539 } 1540 1541 void os::print_memory_info(outputStream* st) { 1542 1543 st->print_cr("Memory:"); 1544 1545 st->print_cr(" default page size: %s", describe_pagesize(os::vm_page_size())); 1546 st->print_cr(" default stack page size: %s", describe_pagesize(os::vm_page_size())); 1547 st->print_cr(" default shm page size: %s", describe_pagesize(os::Aix::shm_default_page_size())); 1548 st->print_cr(" can use 64K pages dynamically: %s", (os::Aix::can_use_64K_pages() ? "yes" :"no")); 1549 st->print_cr(" can use 16M pages dynamically: %s", (os::Aix::can_use_16M_pages() ? "yes" :"no")); 1550 if (g_multipage_error != 0) { 1551 st->print_cr(" multipage error: %d", g_multipage_error); 1552 } 1553 1554 // print out LDR_CNTRL because it affects the default page sizes 1555 const char* const ldr_cntrl = ::getenv("LDR_CNTRL"); 1556 st->print_cr(" LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>"); 1557 1558 const char* const extshm = ::getenv("EXTSHM"); 1559 st->print_cr(" EXTSHM=%s.", extshm ? extshm : "<unset>"); 1560 1561 // Call os::Aix::get_meminfo() to retrieve memory statistics. 
1562 os::Aix::meminfo_t mi; 1563 if (os::Aix::get_meminfo(&mi)) { 1564 char buffer[256]; 1565 if (os::Aix::on_aix()) { 1566 jio_snprintf(buffer, sizeof(buffer), 1567 " physical total : %llu\n" 1568 " physical free : %llu\n" 1569 " swap total : %llu\n" 1570 " swap free : %llu\n", 1571 mi.real_total, 1572 mi.real_free, 1573 mi.pgsp_total, 1574 mi.pgsp_free); 1575 } else { 1576 Unimplemented(); 1577 } 1578 st->print_raw(buffer); 1579 } else { 1580 st->print_cr(" (no more information available)"); 1581 } 1582 } 1583 1584 void os::pd_print_cpu_info(outputStream* st) { 1585 // cpu 1586 st->print("CPU:"); 1587 st->print("total %d", os::processor_count()); 1588 // It's not safe to query number of active processors after crash 1589 // st->print("(active %d)", os::active_processor_count()); 1590 st->print(" %s", VM_Version::cpu_features()); 1591 st->cr(); 1592 } 1593 1594 void os::print_siginfo(outputStream* st, void* siginfo) { 1595 // Use common posix version. 1596 os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo); 1597 st->cr(); 1598 } 1599 1600 1601 static void print_signal_handler(outputStream* st, int sig, 1602 char* buf, size_t buflen); 1603 1604 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 1605 st->print_cr("Signal Handlers:"); 1606 print_signal_handler(st, SIGSEGV, buf, buflen); 1607 print_signal_handler(st, SIGBUS , buf, buflen); 1608 print_signal_handler(st, SIGFPE , buf, buflen); 1609 print_signal_handler(st, SIGPIPE, buf, buflen); 1610 print_signal_handler(st, SIGXFSZ, buf, buflen); 1611 print_signal_handler(st, SIGILL , buf, buflen); 1612 print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen); 1613 print_signal_handler(st, SR_signum, buf, buflen); 1614 print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen); 1615 print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen); 1616 print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen); 1617 print_signal_handler(st, BREAK_SIGNAL, buf, buflen); 1618 
print_signal_handler(st, SIGTRAP, buf, buflen); 1619 print_signal_handler(st, SIGDANGER, buf, buflen); 1620 } 1621 1622 static char saved_jvm_path[MAXPATHLEN] = {0}; 1623 1624 // Find the full path to the current module, libjvm.so or libjvm_g.so 1625 void os::jvm_path(char *buf, jint buflen) { 1626 // Error checking. 1627 if (buflen < MAXPATHLEN) { 1628 assert(false, "must use a large-enough buffer"); 1629 buf[0] = '\0'; 1630 return; 1631 } 1632 // Lazy resolve the path to current module. 1633 if (saved_jvm_path[0] != 0) { 1634 strcpy(buf, saved_jvm_path); 1635 return; 1636 } 1637 1638 Dl_info dlinfo; 1639 int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo); 1640 assert(ret != 0, "cannot locate libjvm"); 1641 char* rp = realpath((char *)dlinfo.dli_fname, buf); 1642 assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?"); 1643 1644 strcpy(saved_jvm_path, buf); 1645 } 1646 1647 void os::print_jni_name_prefix_on(outputStream* st, int args_size) { 1648 // no prefix required, not even "_" 1649 } 1650 1651 void os::print_jni_name_suffix_on(outputStream* st, int args_size) { 1652 // no suffix required 1653 } 1654 1655 //////////////////////////////////////////////////////////////////////////////// 1656 // sun.misc.Signal support 1657 1658 static volatile jint sigint_count = 0; 1659 1660 static void 1661 UserHandler(int sig, void *siginfo, void *context) { 1662 // 4511530 - sem_post is serialized and handled by the manager thread. When 1663 // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We 1664 // don't want to flood the manager thread with sem_post requests. 1665 if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1) 1666 return; 1667 1668 // Ctrl-C is pressed during error reporting, likely because the error 1669 // handler fails to abort. Let VM die immediately. 
1670 if (sig == SIGINT && is_error_reported()) { 1671 os::die(); 1672 } 1673 1674 os::signal_notify(sig); 1675 } 1676 1677 void* os::user_handler() { 1678 return CAST_FROM_FN_PTR(void*, UserHandler); 1679 } 1680 1681 extern "C" { 1682 typedef void (*sa_handler_t)(int); 1683 typedef void (*sa_sigaction_t)(int, siginfo_t *, void *); 1684 } 1685 1686 void* os::signal(int signal_number, void* handler) { 1687 struct sigaction sigAct, oldSigAct; 1688 1689 sigfillset(&(sigAct.sa_mask)); 1690 1691 // Do not block out synchronous signals in the signal handler. 1692 // Blocking synchronous signals only makes sense if you can really 1693 // be sure that those signals won't happen during signal handling, 1694 // when the blocking applies. Normal signal handlers are lean and 1695 // do not cause signals. But our signal handlers tend to be "risky" 1696 // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen. 1697 // On AIX, PASE there was a case where a SIGSEGV happened, followed 1698 // by a SIGILL, which was blocked due to the signal mask. The process 1699 // just hung forever. Better to crash from a secondary signal than to hang. 1700 sigdelset(&(sigAct.sa_mask), SIGSEGV); 1701 sigdelset(&(sigAct.sa_mask), SIGBUS); 1702 sigdelset(&(sigAct.sa_mask), SIGILL); 1703 sigdelset(&(sigAct.sa_mask), SIGFPE); 1704 sigdelset(&(sigAct.sa_mask), SIGTRAP); 1705 1706 sigAct.sa_flags = SA_RESTART|SA_SIGINFO; 1707 1708 sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler); 1709 1710 if (sigaction(signal_number, &sigAct, &oldSigAct)) { 1711 // -1 means registration failed 1712 return (void *)-1; 1713 } 1714 1715 return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler); 1716 } 1717 1718 void os::signal_raise(int signal_number) { 1719 ::raise(signal_number); 1720 } 1721 1722 // 1723 // The following code is moved from os.cpp for making this 1724 // code platform specific, which it is by its very nature. 
//

// Will be modified when max signal is changed to be dynamic
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value
static volatile jint pending_signals[NSIG+1] = { 0 };

// Linux(POSIX) specific hand shaking semaphore.
static sem_t sig_sem;

void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  int rc = ::sem_init(&sig_sem, 0, 0);
  guarantee(rc != -1, "sem_init failed");
}

// Record a pending signal and wake up one waiter in check_pending_signals().
// Async-signal-safe: only an atomic increment and sem_post.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  ::sem_post(&sig_sem);
}

// Scan the pending_signals counters and claim one pending signal, returning
// its number. If 'wait' is false, returns -1 when nothing is pending;
// otherwise blocks on sig_sem until a signal arrives, cooperating with the
// VM's external-suspension protocol while blocked.
static int check_pending_signals(bool wait) {
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // cmpxchg claims one occurrence of signal i; retry if another thread
      // raced us and changed the counter in between.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      ::sem_wait(&sig_sem);

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        ::sem_post(&sig_sem);

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

// Non-blocking poll for a pending signal; -1 if none.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Block until a signal is pending and return its number.
int os::signal_wait() {
  return check_pending_signals(true);
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

// AddrRange describes an immutable address range
//
// This is a helper class for the 'shared memory bookkeeping' below.
class AddrRange {
  friend class ShmBkBlock;

  char* _start;
  size_t _size;

public:

  AddrRange(char* start, size_t size)
    : _start(start), _size(size)
  {}

  AddrRange(const AddrRange& r)
    : _start(r.start()), _size(r.size())
  {}

  char* start() const { return _start; }
  size_t size() const { return _size; }
  // One-past-the-end address of the range.
  char* end() const { return _start + _size; }
  bool is_empty() const { return _size == 0 ? true : false; }

  static AddrRange empty_range() { return AddrRange(NULL, 0); }

  // True if p lies within [start, end).
  bool contains(const char* p) const {
    return start() <= p && end() > p;
  }

  // True if 'range' lies completely within this range.
  bool contains(const AddrRange& range) const {
    return start() <= range.start() && end() >= range.end();
  }

  // True if 'range' overlaps this range at any point.
  bool intersects(const AddrRange& range) const {
    return (range.start() <= start() && range.end() > start()) ||
           (range.start() < end() && range.end() >= end()) ||
           contains(range);
  }

  bool is_same_range(const AddrRange& range) const {
    return start() == range.start() && size() == range.size();
  }

  // return the closest inside range consisting of whole pages
  AddrRange find_closest_aligned_range(size_t pagesize) const {
    if (pagesize == 0 || is_empty()) {
      return empty_range();
    }
    // Round the start up and the end down to page boundaries; the result
    // may be empty if the original range spans less than one whole page.
    char* const from = (char*)align_size_up((intptr_t)_start, pagesize);
    char* const to = (char*)align_size_down((intptr_t)end(), pagesize);
    if (from > to) {
      return empty_range();
    }
    return AddrRange(from, to - from);
  }
};

////////////////////////////////////////////////////////////////////////////
// shared memory bookkeeping
//
// the os::reserve_memory() API and friends hand out different kind of memory, depending
// on need and circumstances. Memory may be allocated with mmap() or with shmget/shmat.
//
// But these memory types have to be treated differently. For example, to uncommit
// mmap-based memory, msync(MS_INVALIDATE) is needed, to uncommit shmat-based memory,
// disclaim64() is needed.
//
// Therefore we need to keep track of the allocated memory segments and their
// properties.
// ShmBkBlock: base class for all blocks in the shared memory bookkeeping
//
// A block describes one memory segment handed out by os::reserve_memory() and
// friends. Blocks form a singly linked list (see next()/set_next()); the list
// head is g_shmbk_list below. All list traversal/mutation in this file happens
// inside LOCK_SHMBK/UNLOCK_SHMBK scopes.
class ShmBkBlock : public CHeapObj<mtInternal> {

  ShmBkBlock* _next;

protected:

  AddrRange _range;            // the address range covered by this segment
  const size_t _pagesize;      // page size of the segment (asserted 4K/64K/16M below)
  const bool _pinned;          // true if the memory is pinned (16M pages are pinned on AIX)

public:

  // range must be non-empty; pagesize must be one of the three sizes AIX supports.
  ShmBkBlock(AddrRange range, size_t pagesize, bool pinned)
    : _range(range), _pagesize(pagesize), _pinned(pinned) , _next(NULL) {

    assert(_pagesize == SIZE_4K || _pagesize == SIZE_64K || _pagesize == SIZE_16M, "invalid page size");
    assert(!_range.is_empty(), "invalid range");
  }

  // Print a one-line description (no trailing newline; subclasses append to it).
  virtual void print(outputStream* st) const {
    st->print("0x%p ... 0x%p (%llu) - %d %s pages - %s",
      _range.start(), _range.end(), _range.size(),
      _range.size() / _pagesize, describe_pagesize(_pagesize),
      _pinned ? "pinned" : "");
  }

  // Discriminator: segments are either mmap()-based or shmget/shmat-based.
  enum Type { MMAP, SHMAT };
  virtual Type getType() = 0;

  char* base() const { return _range.start(); }
  size_t size() const { return _range.size(); }

  void setAddrRange(AddrRange range) {
    _range = range;
  }

  bool containsAddress(const char* p) const {
    return _range.contains(p);
  }

  bool containsRange(const char* p, size_t size) const {
    return _range.contains(AddrRange((char*)p, size));
  }

  bool isSameRange(const char* p, size_t size) const {
    return _range.is_same_range(AddrRange((char*)p, size));
  }

  // Uncommit (part of) the segment. Mechanism depends on segment type.
  virtual bool disclaim(char* p, size_t size) = 0;
  // Release the whole segment back to the OS.
  virtual bool release() = 0;

  // blocks live in a list.
  ShmBkBlock* next() const { return _next; }
  void set_next(ShmBkBlock* blk) { _next = blk; }

}; // end: ShmBkBlock


// ShmBkMappedBlock: describes an block allocated with mmap()
class ShmBkMappedBlock : public ShmBkBlock {
public:

  ShmBkMappedBlock(AddrRange range)
    : ShmBkBlock(range, SIZE_4K, false) {} // mmap: always 4K, never pinned

  void print(outputStream* st) const {
    ShmBkBlock::print(st);
    st->print_cr(" - mmap'ed");
  }

  Type getType() {
    return MMAP;
  }

  // Uncommit via msync(MS_INVALIDATE); the sub-range is first shrunk to whole
  // pages (an empty result is treated as success - nothing to do).
  bool disclaim(char* p, size_t size) {

    AddrRange r(p, size);

    guarantee(_range.contains(r), "invalid disclaim");

    // only disclaim whole ranges.
    const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
    if (r2.is_empty()) {
      return true;
    }

    const int rc = ::msync(r2.start(), r2.size(), MS_INVALIDATE);

    if (rc != 0) {
      warning("msync(0x%p, %llu, MS_INVALIDATE) failed (%d)\n", r2.start(), r2.size(), errno);
    }

    return rc == 0 ? true : false;
  }

  bool release() {
    // mmap'ed blocks are released using munmap
    if (::munmap(_range.start(), _range.size()) != 0) {
      warning("munmap(0x%p, %llu) failed (%d)\n", _range.start(), _range.size(), errno);
      return false;
    }
    return true;
  }
}; // end: ShmBkMappedBlock

// ShmBkShmatedBlock: describes an block allocated with shmget/shmat()
class ShmBkShmatedBlock : public ShmBkBlock {
public:

  ShmBkShmatedBlock(AddrRange range, size_t pagesize, bool pinned)
    : ShmBkBlock(range, pagesize, pinned) {}

  void print(outputStream* st) const {
    ShmBkBlock::print(st);
    st->print_cr(" - shmat'ed");
  }

  Type getType() {
    return SHMAT;
  }

  // Uncommit via disclaim64(). Pinned memory cannot be disclaimed; that case
  // is silently treated as success.
  bool disclaim(char* p, size_t size) {

    AddrRange r(p, size);

    if (_pinned) {
      return true;
    }

    // shmat'ed blocks are disclaimed using disclaim64
    guarantee(_range.contains(r), "invalid disclaim");

    // only disclaim whole ranges.
    const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
    if (r2.is_empty()) {
      return true;
    }

    const bool rc = my_disclaim64(r2.start(), r2.size());

    if (Verbose && !rc) {
      warning("failed to disclaim shm %p-%p\n", r2.start(), r2.end());
    }

    return rc;
  }

  // Detach the segment. Note: the segment id was already removed (IPC_RMID)
  // right after attach - see reserve_shmatted_memory() - so detaching also
  // destroys it.
  bool release() {
    bool rc = false;
    if (::shmdt(_range.start()) != 0) {
      warning("shmdt(0x%p) failed (%d)\n", _range.start(), errno);
    } else {
      rc = true;
    }
    return rc;
  }

}; // end: ShmBkShmatedBlock

// Head of the bookkeeping list. Guarded by LOCK_SHMBK/UNLOCK_SHMBK.
static ShmBkBlock* g_shmbk_list = NULL;
static volatile jint g_shmbk_table_lock = 0;

// keep some usage statistics
static struct {
  int nodes;      // number of nodes in list
  size_t bytes;   // reserved - not committed - bytes.
  int reserves;   // how often reserve was called
  int lookups;    // how often a lookup was made
} g_shmbk_stats = { 0, 0, 0, 0 };

// add information about a shared memory segment to the bookkeeping
// (callers in this file invoke this inside LOCK_SHMBK scopes)
static void shmbk_register(ShmBkBlock* p_block) {
  guarantee(p_block, "logic error");
  p_block->set_next(g_shmbk_list);
  g_shmbk_list = p_block;
  g_shmbk_stats.reserves ++;
  g_shmbk_stats.bytes += p_block->size();
  g_shmbk_stats.nodes ++;
}

// remove information about a shared memory segment by its starting address
// (does not free the block itself; asserts if the block is not in the list)
static void shmbk_unregister(ShmBkBlock* p_block) {
  ShmBkBlock* p = g_shmbk_list;
  ShmBkBlock* prev = NULL;
  while (p) {
    if (p == p_block) {
      if (prev) {
        prev->set_next(p->next());
      } else {
        g_shmbk_list = p->next();
      }
      g_shmbk_stats.nodes --;
      g_shmbk_stats.bytes -= p->size();
      return;
    }
    prev = p;
    p = p->next();
  }
  assert(false, "should not happen");
}

// given a pointer, return shared memory bookkeeping record for the segment it points into
// using the returned block info must happen under lock protection
static ShmBkBlock* shmbk_find_by_containing_address(const char* addr) {
  g_shmbk_stats.lookups ++;
  ShmBkBlock* p = g_shmbk_list;
  while (p) {
    if (p->containsAddress(addr)) {
      return p;
    }
    p = p->next();
  }
  return NULL;
}

// dump all information about all memory segments allocated with os::reserve_memory()
void shmbk_dump_info() {
  tty->print_cr("-- shared mem bookkeeping (alive: %d segments, %llu bytes, "
    "total reserves: %d total lookups: %d)",
    g_shmbk_stats.nodes, g_shmbk_stats.bytes, g_shmbk_stats.reserves, g_shmbk_stats.lookups);
  const ShmBkBlock* p = g_shmbk_list;
  int i = 0;
  while (p) {
    p->print(tty);
    p = p->next();
    i ++;
  }
}

// LOCK_SHMBK opens a scope and enters a ThreadCritical section; UNLOCK_SHMBK
// closes that scope (the ThreadCritical destructor releases the lock, also on
// early return out of the scope).
#define LOCK_SHMBK   { ThreadCritical _LOCK_SHMBK;
#define UNLOCK_SHMBK }

// End: shared memory bookkeeping
////////////////////////////////////////////////////////////////////////////////////////////////////

int os::vm_page_size() {
  // Seems redundant as all get out
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}

// Aix allocates memory by pages.
int os::vm_allocation_granularity() {
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}

int os::Aix::commit_memory_impl(char* addr, size_t size, bool exec) {

  // Commit is a noop. There is no explicit commit
  // needed on AIX. Memory is committed when touched.
  //
  // Debug : check address range for validity
#ifdef ASSERT
  LOCK_SHMBK
    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
    if (!block) {
      fprintf(stderr, "invalid pointer: " INTPTR_FORMAT "\n", addr);
      shmbk_dump_info();
      assert(false, "invalid pointer");
      // note: 'false' converts to 0, which is this function's success value;
      // this line is only reached after the assert above fired.
      return false;
    } else if (!block->containsRange(addr, size)) {
      fprintf(stderr, "invalid range: " INTPTR_FORMAT " .. " INTPTR_FORMAT "\n", addr, addr + size);
      shmbk_dump_info();
      assert(false, "invalid range");
      return false;
    }
  UNLOCK_SHMBK
#endif // ASSERT

  return 0;
}

bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
  return os::Aix::commit_memory_impl(addr, size, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  // commit never fails on AIX (it is a noop, see commit_memory_impl above),
  // so there is no exit path here.
  os::Aix::commit_memory_impl(addr, size, exec);
}

int os::Aix::commit_memory_impl(char* addr, size_t size,
                                size_t alignment_hint, bool exec) {
  // alignment_hint is ignored on AIX.
  return os::Aix::commit_memory_impl(addr, size, exec);
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  return os::Aix::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  os::Aix::commit_memory_impl(addr, size, alignment_hint, exec);
}

bool os::pd_uncommit_memory(char* addr, size_t size) {

  // Delegate to ShmBkBlock class which knows how to uncommit its memory.

  bool rc = false;
  LOCK_SHMBK
    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
    if (!block) {
      fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
      shmbk_dump_info();
      assert(false, "invalid pointer");
      return false;
    } else if (!block->containsRange(addr, size)) {
      fprintf(stderr, "invalid range: 0x%p .. 0x%p.\n", addr, addr + size);
      shmbk_dump_info();
      assert(false, "invalid range");
      return false;
    }
    rc = block->disclaim(addr, size);
  UNLOCK_SHMBK

  if (Verbose && !rc) {
    warning("failed to disclaim 0x%p .. 0x%p (0x%llX bytes).", addr, addr + size, size);
  }
  return rc;
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::guard_memory(addr, size);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::unguard_memory(addr, size);
}

void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

// NUMA support: AIX port reports a single flat node (see stubs below).
void os::numa_make_global(char *addr, size_t bytes) {
}

void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}

bool os::numa_topology_changed() {
  return false;
}

size_t os::numa_get_groups_num() {
  return 1;
}

int os::numa_get_group_id() {
  return 0;
}

size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (size > 0) {
    ids[0] = 0;
    return 1;
  }
  return 0;
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}

// Flags for reserve_shmatted_memory:
#define RESSHM_WISHADDR_OR_FAIL                     1
#define RESSHM_TRY_16M_PAGES                        2
#define RESSHM_16M_PAGES_OR_FAIL                    4

// Result of reserve_shmatted_memory:
struct shmatted_memory_info_t {
  char* addr;
  size_t pagesize;
  bool pinned;
};

// Reserve a section of shmatted memory.
// params:
// bytes [in]: size of memory, in bytes
// requested_addr [in]: wish address.
//                      NULL = no wish.
//                      If RESSHM_WISHADDR_OR_FAIL is set in flags and wish address cannot
//                      be obtained, function will fail. Otherwise wish address is treated as hint and
//                      another pointer is returned.
// flags [in]:          some flags.
Valid flags are: 2269 // RESSHM_WISHADDR_OR_FAIL - fail if wish address is given and cannot be obtained. 2270 // RESSHM_TRY_16M_PAGES - try to allocate from 16M page pool 2271 // (requires UseLargePages and Use16MPages) 2272 // RESSHM_16M_PAGES_OR_FAIL - if you cannot allocate from 16M page pool, fail. 2273 // Otherwise any other page size will do. 2274 // p_info [out] : holds information about the created shared memory segment. 2275 static bool reserve_shmatted_memory(size_t bytes, char* requested_addr, int flags, shmatted_memory_info_t* p_info) { 2276 2277 assert(p_info, "parameter error"); 2278 2279 // init output struct. 2280 p_info->addr = NULL; 2281 2282 // neither should we be here for EXTSHM=ON. 2283 if (os::Aix::extshm()) { 2284 ShouldNotReachHere(); 2285 } 2286 2287 // extract flags. sanity checks. 2288 const bool wishaddr_or_fail = 2289 flags & RESSHM_WISHADDR_OR_FAIL; 2290 const bool try_16M_pages = 2291 flags & RESSHM_TRY_16M_PAGES; 2292 const bool f16M_pages_or_fail = 2293 flags & RESSHM_16M_PAGES_OR_FAIL; 2294 2295 // first check: if a wish address is given and it is mandatory, but not aligned to segment boundary, 2296 // shmat will fail anyway, so save some cycles by failing right away 2297 if (requested_addr && ((uintptr_t)requested_addr % SIZE_256M == 0)) { 2298 if (wishaddr_or_fail) { 2299 return false; 2300 } else { 2301 requested_addr = NULL; 2302 } 2303 } 2304 2305 char* addr = NULL; 2306 2307 // Align size of shm up to the largest possible page size, to avoid errors later on when we try to change 2308 // pagesize dynamically. 2309 const size_t size = align_size_up(bytes, SIZE_16M); 2310 2311 // reserve the shared segment 2312 int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR); 2313 if (shmid == -1) { 2314 warning("shmget(.., %lld, ..) failed (errno: %d).", size, errno); 2315 return false; 2316 } 2317 2318 // Important note: 2319 // It is very important that we, upon leaving this function, do not leave a shm segment alive. 
2320 // We must right after attaching it remove it from the system. System V shm segments are global and 2321 // survive the process. 2322 // So, from here on: Do not assert. Do not return. Always do a "goto cleanup_shm". 2323 2324 // try forcing the page size 2325 size_t pagesize = -1; // unknown so far 2326 2327 if (UseLargePages) { 2328 2329 struct shmid_ds shmbuf; 2330 memset(&shmbuf, 0, sizeof(shmbuf)); 2331 2332 // First, try to take from 16M page pool if... 2333 if (os::Aix::can_use_16M_pages() // we can ... 2334 && Use16MPages // we are not explicitly forbidden to do so (-XX:-Use16MPages).. 2335 && try_16M_pages) { // caller wants us to. 2336 shmbuf.shm_pagesize = SIZE_16M; 2337 if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) { 2338 pagesize = SIZE_16M; 2339 } else { 2340 warning("Failed to allocate %d 16M pages. 16M page pool might be exhausted. (shmctl failed with %d)", 2341 size / SIZE_16M, errno); 2342 if (f16M_pages_or_fail) { 2343 goto cleanup_shm; 2344 } 2345 } 2346 } 2347 2348 // Nothing yet? Try setting 64K pages. Note that I never saw this fail, but in theory it might, 2349 // because the 64K page pool may also be exhausted. 2350 if (pagesize == -1) { 2351 shmbuf.shm_pagesize = SIZE_64K; 2352 if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) { 2353 pagesize = SIZE_64K; 2354 } else { 2355 warning("Failed to allocate %d 64K pages. (shmctl failed with %d)", 2356 size / SIZE_64K, errno); 2357 // here I give up. leave page_size -1 - later, after attaching, we will query the 2358 // real page size of the attached memory. (in theory, it may be something different 2359 // from 4K if LDR_CNTRL SHM_PSIZE is set) 2360 } 2361 } 2362 } 2363 2364 // sanity point 2365 assert(pagesize == -1 || pagesize == SIZE_16M || pagesize == SIZE_64K, "wrong page size"); 2366 2367 // Now attach the shared segment. 
2368 addr = (char*) shmat(shmid, requested_addr, 0); 2369 if (addr == (char*)-1) { 2370 // How to handle attach failure: 2371 // If it failed for a specific wish address, tolerate this: in that case, if wish address was 2372 // mandatory, fail, if not, retry anywhere. 2373 // If it failed for any other reason, treat that as fatal error. 2374 addr = NULL; 2375 if (requested_addr) { 2376 if (wishaddr_or_fail) { 2377 goto cleanup_shm; 2378 } else { 2379 addr = (char*) shmat(shmid, NULL, 0); 2380 if (addr == (char*)-1) { // fatal 2381 addr = NULL; 2382 warning("shmat failed (errno: %d)", errno); 2383 goto cleanup_shm; 2384 } 2385 } 2386 } else { // fatal 2387 addr = NULL; 2388 warning("shmat failed (errno: %d)", errno); 2389 goto cleanup_shm; 2390 } 2391 } 2392 2393 // sanity point 2394 assert(addr && addr != (char*) -1, "wrong address"); 2395 2396 // after successful Attach remove the segment - right away. 2397 if (::shmctl(shmid, IPC_RMID, NULL) == -1) { 2398 warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno); 2399 guarantee(false, "failed to remove shared memory segment!"); 2400 } 2401 shmid = -1; 2402 2403 // query the real page size. In case setting the page size did not work (see above), the system 2404 // may have given us something other then 4K (LDR_CNTRL) 2405 { 2406 const size_t real_pagesize = os::Aix::query_pagesize(addr); 2407 if (pagesize != -1) { 2408 assert(pagesize == real_pagesize, "unexpected pagesize after shmat"); 2409 } else { 2410 pagesize = real_pagesize; 2411 } 2412 } 2413 2414 // Now register the reserved block with internal book keeping. 2415 LOCK_SHMBK 2416 const bool pinned = pagesize >= SIZE_16M ? true : false; 2417 ShmBkShmatedBlock* const p_block = new ShmBkShmatedBlock(AddrRange(addr, size), pagesize, pinned); 2418 assert(p_block, ""); 2419 shmbk_register(p_block); 2420 UNLOCK_SHMBK 2421 2422 cleanup_shm: 2423 2424 // if we have not done so yet, remove the shared memory segment. This is very important. 
2425 if (shmid != -1) { 2426 if (::shmctl(shmid, IPC_RMID, NULL) == -1) { 2427 warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno); 2428 guarantee(false, "failed to remove shared memory segment!"); 2429 } 2430 shmid = -1; 2431 } 2432 2433 // trace 2434 if (Verbose && !addr) { 2435 if (requested_addr != NULL) { 2436 warning("failed to shm-allocate 0x%llX bytes at wish address 0x%p.", size, requested_addr); 2437 } else { 2438 warning("failed to shm-allocate 0x%llX bytes at any address.", size); 2439 } 2440 } 2441 2442 // hand info to caller 2443 if (addr) { 2444 p_info->addr = addr; 2445 p_info->pagesize = pagesize; 2446 p_info->pinned = pagesize == SIZE_16M ? true : false; 2447 } 2448 2449 // sanity test: 2450 if (requested_addr && addr && wishaddr_or_fail) { 2451 guarantee(addr == requested_addr, "shmat error"); 2452 } 2453 2454 // just one more test to really make sure we have no dangling shm segments. 2455 guarantee(shmid == -1, "dangling shm segments"); 2456 2457 return addr ? true : false; 2458 2459 } // end: reserve_shmatted_memory 2460 2461 // Reserve memory using mmap. Behaves the same as reserve_shmatted_memory(): 2462 // will return NULL in case of an error. 2463 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) { 2464 2465 // if a wish address is given, but not aligned to 4K page boundary, mmap will fail. 2466 if (requested_addr && ((uintptr_t)requested_addr % os::vm_page_size() != 0)) { 2467 warning("Wish address 0x%p not aligned to page boundary.", requested_addr); 2468 return NULL; 2469 } 2470 2471 const size_t size = align_size_up(bytes, SIZE_4K); 2472 2473 // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to 2474 // msync(MS_INVALIDATE) (see os::uncommit_memory) 2475 int flags = MAP_ANONYMOUS | MAP_SHARED; 2476 2477 // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what 2478 // it means if wishaddress is given but MAP_FIXED is not set. 
2479 // 2480 // Note however that this changes semantics in SPEC1170 mode insofar as MAP_FIXED 2481 // clobbers the address range, which is probably not what the caller wants. That's 2482 // why I assert here (again) that the SPEC1170 compat mode is off. 2483 // If we want to be able to run under SPEC1170, we have to do some porting and 2484 // testing. 2485 if (requested_addr != NULL) { 2486 assert(!os::Aix::xpg_sus_mode(), "SPEC1170 mode not allowed."); 2487 flags |= MAP_FIXED; 2488 } 2489 2490 char* addr = (char*)::mmap(requested_addr, size, PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0); 2491 2492 if (addr == MAP_FAILED) { 2493 // attach failed: tolerate for specific wish addresses. Not being able to attach 2494 // anywhere is a fatal error. 2495 if (requested_addr == NULL) { 2496 // It's ok to fail here if the machine has not enough memory. 2497 warning("mmap(NULL, 0x%llX, ..) failed (%d)", size, errno); 2498 } 2499 addr = NULL; 2500 goto cleanup_mmap; 2501 } 2502 2503 // If we did request a specific address and that address was not available, fail. 2504 if (addr && requested_addr) { 2505 guarantee(addr == requested_addr, "unexpected"); 2506 } 2507 2508 // register this mmap'ed segment with book keeping 2509 LOCK_SHMBK 2510 ShmBkMappedBlock* const p_block = new ShmBkMappedBlock(AddrRange(addr, size)); 2511 assert(p_block, ""); 2512 shmbk_register(p_block); 2513 UNLOCK_SHMBK 2514 2515 cleanup_mmap: 2516 2517 // trace 2518 if (Verbose) { 2519 if (addr) { 2520 fprintf(stderr, "mmap-allocated 0x%p .. 0x%p (0x%llX bytes)\n", addr, addr + bytes, bytes); 2521 } 2522 else { 2523 if (requested_addr != NULL) { 2524 warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.", bytes, requested_addr); 2525 } else { 2526 warning("failed to mmap-allocate 0x%llX bytes at any address.", bytes); 2527 } 2528 } 2529 } 2530 2531 return addr; 2532 2533 } // end: reserve_mmaped_memory 2534 2535 // Reserves and attaches a shared memory segment. 
2536 // Will assert if a wish address is given and could not be obtained. 2537 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) { 2538 return os::attempt_reserve_memory_at(bytes, requested_addr); 2539 } 2540 2541 bool os::pd_release_memory(char* addr, size_t size) { 2542 2543 // delegate to ShmBkBlock class which knows how to uncommit its memory. 2544 2545 bool rc = false; 2546 LOCK_SHMBK 2547 ShmBkBlock* const block = shmbk_find_by_containing_address(addr); 2548 if (!block) { 2549 fprintf(stderr, "invalid pointer: 0x%p.\n", addr); 2550 shmbk_dump_info(); 2551 assert(false, "invalid pointer"); 2552 return false; 2553 } 2554 else if (!block->isSameRange(addr, size)) { 2555 if (block->getType() == ShmBkBlock::MMAP) { 2556 // Release only the same range or a the beginning or the end of a range. 2557 if (block->base() == addr && size < block->size()) { 2558 ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base() + size, block->size() - size)); 2559 assert(b, ""); 2560 shmbk_register(b); 2561 block->setAddrRange(AddrRange(addr, size)); 2562 } 2563 else if (addr > block->base() && addr + size == block->base() + block->size()) { 2564 ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base(), block->size() - size)); 2565 assert(b, ""); 2566 shmbk_register(b); 2567 block->setAddrRange(AddrRange(addr, size)); 2568 } 2569 else { 2570 fprintf(stderr, "invalid mmap range: 0x%p .. 0x%p.\n", addr, addr + size); 2571 shmbk_dump_info(); 2572 assert(false, "invalid mmap range"); 2573 return false; 2574 } 2575 } 2576 else { 2577 // Release only the same range. No partial release allowed. 2578 // Soften the requirement a bit, because the user may think he owns a smaller size 2579 // than the block is due to alignment etc. 2580 if (block->base() != addr || block->size() < size) { 2581 fprintf(stderr, "invalid shmget range: 0x%p .. 
0x%p.\n", addr, addr + size); 2582 shmbk_dump_info(); 2583 assert(false, "invalid shmget range"); 2584 return false; 2585 } 2586 } 2587 } 2588 rc = block->release(); 2589 assert(rc, "release failed"); 2590 // remove block from bookkeeping 2591 shmbk_unregister(block); 2592 delete block; 2593 UNLOCK_SHMBK 2594 2595 if (!rc) { 2596 warning("failed to released %lu bytes at 0x%p", size, addr); 2597 } 2598 2599 return rc; 2600 } 2601 2602 static bool checked_mprotect(char* addr, size_t size, int prot) { 2603 2604 // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will 2605 // not tell me if protection failed when trying to protect an un-protectable range. 2606 // 2607 // This means if the memory was allocated using shmget/shmat, protection wont work 2608 // but mprotect will still return 0: 2609 // 2610 // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm 2611 2612 bool rc = ::mprotect(addr, size, prot) == 0 ? true : false; 2613 2614 if (!rc) { 2615 const char* const s_errno = strerror(errno); 2616 warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno); 2617 return false; 2618 } 2619 2620 // mprotect success check 2621 // 2622 // Mprotect said it changed the protection but can I believe it? 2623 // 2624 // To be sure I need to check the protection afterwards. Try to 2625 // read from protected memory and check whether that causes a segfault. 2626 // 2627 if (!os::Aix::xpg_sus_mode()) { 2628 2629 if (StubRoutines::SafeFetch32_stub()) { 2630 2631 const bool read_protected = 2632 (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 && 2633 SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? 
true : false; 2634 2635 if (prot & PROT_READ) { 2636 rc = !read_protected; 2637 } else { 2638 rc = read_protected; 2639 } 2640 } 2641 } 2642 if (!rc) { 2643 assert(false, "mprotect failed."); 2644 } 2645 return rc; 2646 } 2647 2648 // Set protections specified 2649 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) { 2650 unsigned int p = 0; 2651 switch (prot) { 2652 case MEM_PROT_NONE: p = PROT_NONE; break; 2653 case MEM_PROT_READ: p = PROT_READ; break; 2654 case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break; 2655 case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break; 2656 default: 2657 ShouldNotReachHere(); 2658 } 2659 // is_committed is unused. 2660 return checked_mprotect(addr, size, p); 2661 } 2662 2663 bool os::guard_memory(char* addr, size_t size) { 2664 return checked_mprotect(addr, size, PROT_NONE); 2665 } 2666 2667 bool os::unguard_memory(char* addr, size_t size) { 2668 return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC); 2669 } 2670 2671 // Large page support 2672 2673 static size_t _large_page_size = 0; 2674 2675 // Enable large page support if OS allows that. 2676 void os::large_page_init() { 2677 2678 // Note: os::Aix::query_multipage_support must run first. 2679 2680 if (!UseLargePages) { 2681 return; 2682 } 2683 2684 if (!Aix::can_use_64K_pages()) { 2685 assert(!Aix::can_use_16M_pages(), "64K is a precondition for 16M."); 2686 UseLargePages = false; 2687 return; 2688 } 2689 2690 if (!Aix::can_use_16M_pages() && Use16MPages) { 2691 fprintf(stderr, "Cannot use 16M pages. Please ensure that there is a 16M page pool " 2692 " and that the VM runs with CAP_BYPASS_RAC_VMM and CAP_PROPAGATE capabilities.\n"); 2693 } 2694 2695 // Do not report 16M page alignment as part of os::_page_sizes if we are 2696 // explicitly forbidden from using 16M pages. Doing so would increase the 2697 // alignment the garbage collector calculates with, slightly increasing 2698 // heap usage. 
We should only pay for 16M alignment if we really want to 2699 // use 16M pages. 2700 if (Use16MPages && Aix::can_use_16M_pages()) { 2701 _large_page_size = SIZE_16M; 2702 _page_sizes[0] = SIZE_16M; 2703 _page_sizes[1] = SIZE_64K; 2704 _page_sizes[2] = SIZE_4K; 2705 _page_sizes[3] = 0; 2706 } else if (Aix::can_use_64K_pages()) { 2707 _large_page_size = SIZE_64K; 2708 _page_sizes[0] = SIZE_64K; 2709 _page_sizes[1] = SIZE_4K; 2710 _page_sizes[2] = 0; 2711 } 2712 2713 if (Verbose) { 2714 ("Default large page size is 0x%llX.", _large_page_size); 2715 } 2716 } // end: os::large_page_init() 2717 2718 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) { 2719 // "exec" is passed in but not used. Creating the shared image for 2720 // the code cache doesn't have an SHM_X executable permission to check. 2721 Unimplemented(); 2722 return 0; 2723 } 2724 2725 bool os::release_memory_special(char* base, size_t bytes) { 2726 // detaching the SHM segment will also delete it, see reserve_memory_special() 2727 Unimplemented(); 2728 return false; 2729 } 2730 2731 size_t os::large_page_size() { 2732 return _large_page_size; 2733 } 2734 2735 bool os::can_commit_large_page_memory() { 2736 // Well, sadly we cannot commit anything at all (see comment in 2737 // os::commit_memory) but we claim to so we can make use of large pages 2738 return true; 2739 } 2740 2741 bool os::can_execute_large_page_memory() { 2742 // We can do that 2743 return true; 2744 } 2745 2746 // Reserve memory at an arbitrary address, only if that area is 2747 // available (and not reserved for something else). 
2748 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) { 2749 2750 bool use_mmap = false; 2751 2752 // mmap: smaller graining, no large page support 2753 // shm: large graining (256M), large page support, limited number of shm segments 2754 // 2755 // Prefer mmap wherever we either do not need large page support or have OS limits 2756 2757 if (!UseLargePages || bytes < SIZE_16M) { 2758 use_mmap = true; 2759 } 2760 2761 char* addr = NULL; 2762 if (use_mmap) { 2763 addr = reserve_mmaped_memory(bytes, requested_addr); 2764 } else { 2765 // shmat: wish address is mandatory, and do not try 16M pages here. 2766 shmatted_memory_info_t info; 2767 const int flags = RESSHM_WISHADDR_OR_FAIL; 2768 if (reserve_shmatted_memory(bytes, requested_addr, flags, &info)) { 2769 addr = info.addr; 2770 } 2771 } 2772 2773 return addr; 2774 } 2775 2776 size_t os::read(int fd, void *buf, unsigned int nBytes) { 2777 return ::read(fd, buf, nBytes); 2778 } 2779 2780 void os::naked_short_sleep(jlong ms) { 2781 struct timespec req; 2782 2783 assert(ms < 1000, "Un-interruptable sleep, short time use only"); 2784 req.tv_sec = 0; 2785 if (ms > 0) { 2786 req.tv_nsec = (ms % 1000) * 1000000; 2787 } 2788 else { 2789 req.tv_nsec = 1; 2790 } 2791 2792 nanosleep(&req, NULL); 2793 2794 return; 2795 } 2796 2797 // Sleep forever; naked call to OS-specific sleep; use with CAUTION 2798 void os::infinite_sleep() { 2799 while (true) { // sleep forever ... 2800 ::sleep(100); // ... 100 seconds at a time 2801 } 2802 } 2803 2804 // Used to convert frequent JVM_Yield() to nops 2805 bool os::dont_yield() { 2806 return DontYieldALot; 2807 } 2808 2809 void os::naked_yield() { 2810 sched_yield(); 2811 } 2812 2813 //////////////////////////////////////////////////////////////////////////////// 2814 // thread priority support 2815 2816 // From AIX manpage to pthread_setschedparam 2817 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp? 
2818 // topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm): 2819 // 2820 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the 2821 // range from 40 to 80, where 40 is the least favored priority and 80 2822 // is the most favored." 2823 // 2824 // (Actually, I doubt this even has an impact on AIX, as we do kernel 2825 // scheduling there; however, this still leaves iSeries.) 2826 // 2827 // We use the same values for AIX and PASE. 2828 int os::java_to_os_priority[CriticalPriority + 1] = { 2829 54, // 0 Entry should never be used 2830 2831 55, // 1 MinPriority 2832 55, // 2 2833 56, // 3 2834 2835 56, // 4 2836 57, // 5 NormPriority 2837 57, // 6 2838 2839 58, // 7 2840 58, // 8 2841 59, // 9 NearMaxPriority 2842 2843 60, // 10 MaxPriority 2844 2845 60 // 11 CriticalPriority 2846 }; 2847 2848 OSReturn os::set_native_priority(Thread* thread, int newpri) { 2849 if (!UseThreadPriorities) return OS_OK; 2850 pthread_t thr = thread->osthread()->pthread_id(); 2851 int policy = SCHED_OTHER; 2852 struct sched_param param; 2853 param.sched_priority = newpri; 2854 int ret = pthread_setschedparam(thr, policy, ¶m); 2855 2856 if (Verbose) { 2857 if (ret == 0) { 2858 fprintf(stderr, "changed priority of thread %d to %d\n", (int)thr, newpri); 2859 } else { 2860 fprintf(stderr, "Could not changed priority for thread %d to %d (error %d, %s)\n", 2861 (int)thr, newpri, ret, strerror(ret)); 2862 } 2863 } 2864 return (ret == 0) ? OS_OK : OS_ERR; 2865 } 2866 2867 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) { 2868 if (!UseThreadPriorities) { 2869 *priority_ptr = java_to_os_priority[NormPriority]; 2870 return OS_OK; 2871 } 2872 pthread_t thr = thread->osthread()->pthread_id(); 2873 int policy = SCHED_OTHER; 2874 struct sched_param param; 2875 int ret = pthread_getschedparam(thr, &policy, ¶m); 2876 *priority_ptr = param.sched_priority; 2877 2878 return (ret == 0) ? 
OS_OK : OS_ERR; 2879 } 2880 2881 // Hint to the underlying OS that a task switch would not be good. 2882 // Void return because it's a hint and can fail. 2883 void os::hint_no_preempt() {} 2884 2885 //////////////////////////////////////////////////////////////////////////////// 2886 // suspend/resume support 2887 2888 // the low-level signal-based suspend/resume support is a remnant from the 2889 // old VM-suspension that used to be for java-suspension, safepoints etc, 2890 // within hotspot. Now there is a single use-case for this: 2891 // - calling get_thread_pc() on the VMThread by the flat-profiler task 2892 // that runs in the watcher thread. 2893 // The remaining code is greatly simplified from the more general suspension 2894 // code that used to be used. 2895 // 2896 // The protocol is quite simple: 2897 // - suspend: 2898 // - sends a signal to the target thread 2899 // - polls the suspend state of the osthread using a yield loop 2900 // - target thread signal handler (SR_handler) sets suspend state 2901 // and blocks in sigsuspend until continued 2902 // - resume: 2903 // - sets target osthread state to continue 2904 // - sends signal to end the sigsuspend loop in the SR_handler 2905 // 2906 // Note that the SR_lock plays no role in this suspend/resume protocol. 2907 // 2908 2909 static void resume_clear_context(OSThread *osthread) { 2910 osthread->set_ucontext(NULL); 2911 osthread->set_siginfo(NULL); 2912 } 2913 2914 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) { 2915 osthread->set_ucontext(context); 2916 osthread->set_siginfo(siginfo); 2917 } 2918 2919 // 2920 // Handler function invoked when a thread's execution is suspended or 2921 // resumed. We have to be careful that only async-safe functions are 2922 // called here (Note: most pthread functions are not async safe and 2923 // should be avoided.) 
2924 // 2925 // Note: sigwait() is a more natural fit than sigsuspend() from an 2926 // interface point of view, but sigwait() prevents the signal hander 2927 // from being run. libpthread would get very confused by not having 2928 // its signal handlers run and prevents sigwait()'s use with the 2929 // mutex granting granting signal. 2930 // 2931 // Currently only ever called on the VMThread and JavaThreads (PC sampling). 2932 // 2933 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) { 2934 // Save and restore errno to avoid confusing native code with EINTR 2935 // after sigsuspend. 2936 int old_errno = errno; 2937 2938 Thread* thread = Thread::current(); 2939 OSThread* osthread = thread->osthread(); 2940 assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread"); 2941 2942 os::SuspendResume::State current = osthread->sr.state(); 2943 if (current == os::SuspendResume::SR_SUSPEND_REQUEST) { 2944 suspend_save_context(osthread, siginfo, context); 2945 2946 // attempt to switch the state, we assume we had a SUSPEND_REQUEST 2947 os::SuspendResume::State state = osthread->sr.suspended(); 2948 if (state == os::SuspendResume::SR_SUSPENDED) { 2949 sigset_t suspend_set; // signals for sigsuspend() 2950 2951 // get current set of blocked signals and unblock resume signal 2952 pthread_sigmask(SIG_BLOCK, NULL, &suspend_set); 2953 sigdelset(&suspend_set, SR_signum); 2954 2955 // wait here until we are resumed 2956 while (1) { 2957 sigsuspend(&suspend_set); 2958 2959 os::SuspendResume::State result = osthread->sr.running(); 2960 if (result == os::SuspendResume::SR_RUNNING) { 2961 break; 2962 } 2963 } 2964 2965 } else if (state == os::SuspendResume::SR_RUNNING) { 2966 // request was cancelled, continue 2967 } else { 2968 ShouldNotReachHere(); 2969 } 2970 2971 resume_clear_context(osthread); 2972 } else if (current == os::SuspendResume::SR_RUNNING) { 2973 // request was cancelled, continue 2974 } else if (current == 
os::SuspendResume::SR_WAKEUP_REQUEST) { 2975 // ignore 2976 } else { 2977 ShouldNotReachHere(); 2978 } 2979 2980 errno = old_errno; 2981 } 2982 2983 2984 static int SR_initialize() { 2985 struct sigaction act; 2986 char *s; 2987 // Get signal number to use for suspend/resume 2988 if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) { 2989 int sig = ::strtol(s, 0, 10); 2990 if (sig > 0 || sig < NSIG) { 2991 SR_signum = sig; 2992 } 2993 } 2994 2995 assert(SR_signum > SIGSEGV && SR_signum > SIGBUS, 2996 "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769"); 2997 2998 sigemptyset(&SR_sigset); 2999 sigaddset(&SR_sigset, SR_signum); 3000 3001 // Set up signal handler for suspend/resume. 3002 act.sa_flags = SA_RESTART|SA_SIGINFO; 3003 act.sa_handler = (void (*)(int)) SR_handler; 3004 3005 // SR_signum is blocked by default. 3006 // 4528190 - We also need to block pthread restart signal (32 on all 3007 // supported Linux platforms). Note that LinuxThreads need to block 3008 // this signal for all threads to work properly. So we don't have 3009 // to use hard-coded signal number when setting up the mask. 
3010 pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask); 3011 3012 if (sigaction(SR_signum, &act, 0) == -1) { 3013 return -1; 3014 } 3015 3016 // Save signal flag 3017 os::Aix::set_our_sigflags(SR_signum, act.sa_flags); 3018 return 0; 3019 } 3020 3021 static int SR_finalize() { 3022 return 0; 3023 } 3024 3025 static int sr_notify(OSThread* osthread) { 3026 int status = pthread_kill(osthread->pthread_id(), SR_signum); 3027 assert_status(status == 0, status, "pthread_kill"); 3028 return status; 3029 } 3030 3031 // "Randomly" selected value for how long we want to spin 3032 // before bailing out on suspending a thread, also how often 3033 // we send a signal to a thread we want to resume 3034 static const int RANDOMLY_LARGE_INTEGER = 1000000; 3035 static const int RANDOMLY_LARGE_INTEGER2 = 100; 3036 3037 // returns true on success and false on error - really an error is fatal 3038 // but this seems the normal response to library errors 3039 static bool do_suspend(OSThread* osthread) { 3040 assert(osthread->sr.is_running(), "thread should be running"); 3041 // mark as suspended and send signal 3042 3043 if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) { 3044 // failed to switch, state wasn't running? 
3045 ShouldNotReachHere(); 3046 return false; 3047 } 3048 3049 if (sr_notify(osthread) != 0) { 3050 // try to cancel, switch to running 3051 3052 os::SuspendResume::State result = osthread->sr.cancel_suspend(); 3053 if (result == os::SuspendResume::SR_RUNNING) { 3054 // cancelled 3055 return false; 3056 } else if (result == os::SuspendResume::SR_SUSPENDED) { 3057 // somehow managed to suspend 3058 return true; 3059 } else { 3060 ShouldNotReachHere(); 3061 return false; 3062 } 3063 } 3064 3065 // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED 3066 3067 for (int n = 0; !osthread->sr.is_suspended(); n++) { 3068 for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) { 3069 os::naked_yield(); 3070 } 3071 3072 // timeout, try to cancel the request 3073 if (n >= RANDOMLY_LARGE_INTEGER) { 3074 os::SuspendResume::State cancelled = osthread->sr.cancel_suspend(); 3075 if (cancelled == os::SuspendResume::SR_RUNNING) { 3076 return false; 3077 } else if (cancelled == os::SuspendResume::SR_SUSPENDED) { 3078 return true; 3079 } else { 3080 ShouldNotReachHere(); 3081 return false; 3082 } 3083 } 3084 } 3085 3086 guarantee(osthread->sr.is_suspended(), "Must be suspended"); 3087 return true; 3088 } 3089 3090 static void do_resume(OSThread* osthread) { 3091 //assert(osthread->sr.is_suspended(), "thread should be suspended"); 3092 3093 if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) { 3094 // failed to switch to WAKEUP_REQUEST 3095 ShouldNotReachHere(); 3096 return; 3097 } 3098 3099 while (!osthread->sr.is_running()) { 3100 if (sr_notify(osthread) == 0) { 3101 for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) { 3102 for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) { 3103 os::naked_yield(); 3104 } 3105 } 3106 } else { 3107 ShouldNotReachHere(); 3108 } 3109 } 3110 3111 guarantee(osthread->sr.is_running(), "Must be running!"); 3112 } 3113 3114 
///////////////////////////////////////////////////////////////////////////////////
// signal handling (except suspend/resume)

// This routine may be used by user applications as a "hook" to catch signals.
// The user-defined signal handler must pass unrecognized signals to this
// routine, and if it returns true (non-zero), then the signal handler must
// return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
//
// If this routine returns false, it is OK to call it again. This allows
// the user-defined signal handler to perform checks either before or after
// the VM performs its own checks. Naturally, the user code would be making
// a serious error if it tried to handle an exception (such as a null check
// or breakpoint) that the VM was generating for its own correct operation.
//
// This routine may recognize any of the following kinds of signals:
// SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
// It should be consulted by handlers for any of those signals.
//
// The caller of this routine must pass in the three arguments supplied
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
// field of the structure passed to sigaction(). This routine assumes that
// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
//
// Note that the VM will print warnings if it detects conflicting signal
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3141 // 3142 extern "C" JNIEXPORT int 3143 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized); 3144 3145 // Set thread signal mask (for some reason on AIX sigthreadmask() seems 3146 // to be the thing to call; documentation is not terribly clear about whether 3147 // pthread_sigmask also works, and if it does, whether it does the same. 3148 bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) { 3149 const int rc = ::pthread_sigmask(how, set, oset); 3150 // return value semantics differ slightly for error case: 3151 // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno 3152 // (so, pthread_sigmask is more theadsafe for error handling) 3153 // But success is always 0. 3154 return rc == 0 ? true : false; 3155 } 3156 3157 // Function to unblock all signals which are, according 3158 // to POSIX, typical program error signals. If they happen while being blocked, 3159 // they typically will bring down the process immediately. 3160 bool unblock_program_error_signals() { 3161 sigset_t set; 3162 ::sigemptyset(&set); 3163 ::sigaddset(&set, SIGILL); 3164 ::sigaddset(&set, SIGBUS); 3165 ::sigaddset(&set, SIGFPE); 3166 ::sigaddset(&set, SIGSEGV); 3167 return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL); 3168 } 3169 3170 // Renamed from 'signalHandler' to avoid collision with other shared libs. 3171 void javaSignalHandler(int sig, siginfo_t* info, void* uc) { 3172 assert(info != NULL && uc != NULL, "it must be old kernel"); 3173 3174 // Never leave program error signals blocked; 3175 // on all our platforms they would bring down the process immediately when 3176 // getting raised while being blocked. 3177 unblock_program_error_signals(); 3178 3179 JVM_handle_aix_signal(sig, info, uc, true); 3180 } 3181 3182 3183 // This boolean allows users to forward their own non-matching signals 3184 // to JVM_handle_aix_signal, harmlessly. 
3185 bool os::Aix::signal_handlers_are_installed = false; 3186 3187 // For signal-chaining 3188 struct sigaction os::Aix::sigact[MAXSIGNUM]; 3189 unsigned int os::Aix::sigs = 0; 3190 bool os::Aix::libjsig_is_loaded = false; 3191 typedef struct sigaction *(*get_signal_t)(int); 3192 get_signal_t os::Aix::get_signal_action = NULL; 3193 3194 struct sigaction* os::Aix::get_chained_signal_action(int sig) { 3195 struct sigaction *actp = NULL; 3196 3197 if (libjsig_is_loaded) { 3198 // Retrieve the old signal handler from libjsig 3199 actp = (*get_signal_action)(sig); 3200 } 3201 if (actp == NULL) { 3202 // Retrieve the preinstalled signal handler from jvm 3203 actp = get_preinstalled_handler(sig); 3204 } 3205 3206 return actp; 3207 } 3208 3209 static bool call_chained_handler(struct sigaction *actp, int sig, 3210 siginfo_t *siginfo, void *context) { 3211 // Call the old signal handler 3212 if (actp->sa_handler == SIG_DFL) { 3213 // It's more reasonable to let jvm treat it as an unexpected exception 3214 // instead of taking the default action. 
3215 return false; 3216 } else if (actp->sa_handler != SIG_IGN) { 3217 if ((actp->sa_flags & SA_NODEFER) == 0) { 3218 // automaticlly block the signal 3219 sigaddset(&(actp->sa_mask), sig); 3220 } 3221 3222 sa_handler_t hand = NULL; 3223 sa_sigaction_t sa = NULL; 3224 bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0; 3225 // retrieve the chained handler 3226 if (siginfo_flag_set) { 3227 sa = actp->sa_sigaction; 3228 } else { 3229 hand = actp->sa_handler; 3230 } 3231 3232 if ((actp->sa_flags & SA_RESETHAND) != 0) { 3233 actp->sa_handler = SIG_DFL; 3234 } 3235 3236 // try to honor the signal mask 3237 sigset_t oset; 3238 pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset); 3239 3240 // call into the chained handler 3241 if (siginfo_flag_set) { 3242 (*sa)(sig, siginfo, context); 3243 } else { 3244 (*hand)(sig); 3245 } 3246 3247 // restore the signal mask 3248 pthread_sigmask(SIG_SETMASK, &oset, 0); 3249 } 3250 // Tell jvm's signal handler the signal is taken care of. 3251 return true; 3252 } 3253 3254 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) { 3255 bool chained = false; 3256 // signal-chaining 3257 if (UseSignalChaining) { 3258 struct sigaction *actp = get_chained_signal_action(sig); 3259 if (actp != NULL) { 3260 chained = call_chained_handler(actp, sig, siginfo, context); 3261 } 3262 } 3263 return chained; 3264 } 3265 3266 struct sigaction* os::Aix::get_preinstalled_handler(int sig) { 3267 if ((((unsigned int)1 << sig) & sigs) != 0) { 3268 return &sigact[sig]; 3269 } 3270 return NULL; 3271 } 3272 3273 void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) { 3274 assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range"); 3275 sigact[sig] = oldAct; 3276 sigs |= (unsigned int)1 << sig; 3277 } 3278 3279 // for diagnostic 3280 int os::Aix::sigflags[MAXSIGNUM]; 3281 3282 int os::Aix::get_our_sigflags(int sig) { 3283 assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range"); 3284 return 
sigflags[sig]; 3285 } 3286 3287 void os::Aix::set_our_sigflags(int sig, int flags) { 3288 assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range"); 3289 sigflags[sig] = flags; 3290 } 3291 3292 void os::Aix::set_signal_handler(int sig, bool set_installed) { 3293 // Check for overwrite. 3294 struct sigaction oldAct; 3295 sigaction(sig, (struct sigaction*)NULL, &oldAct); 3296 3297 void* oldhand = oldAct.sa_sigaction 3298 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction) 3299 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler); 3300 // Renamed 'signalHandler' to avoid collision with other shared libs. 3301 if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) && 3302 oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) && 3303 oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) { 3304 if (AllowUserSignalHandlers || !set_installed) { 3305 // Do not overwrite; user takes responsibility to forward to us. 3306 return; 3307 } else if (UseSignalChaining) { 3308 // save the old handler in jvm 3309 save_preinstalled_handler(sig, oldAct); 3310 // libjsig also interposes the sigaction() call below and saves the 3311 // old sigaction on it own. 3312 } else { 3313 fatal(err_msg("Encountered unexpected pre-existing sigaction handler " 3314 "%#lx for signal %d.", (long)oldhand, sig)); 3315 } 3316 } 3317 3318 struct sigaction sigAct; 3319 sigfillset(&(sigAct.sa_mask)); 3320 if (!set_installed) { 3321 sigAct.sa_handler = SIG_DFL; 3322 sigAct.sa_flags = SA_RESTART; 3323 } else { 3324 // Renamed 'signalHandler' to avoid collision with other shared libs. 3325 sigAct.sa_sigaction = javaSignalHandler; 3326 sigAct.sa_flags = SA_SIGINFO|SA_RESTART; 3327 } 3328 // Save flags, which are set by ours 3329 assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range"); 3330 sigflags[sig] = sigAct.sa_flags; 3331 3332 int ret = sigaction(sig, &sigAct, &oldAct); 3333 assert(ret == 0, "check"); 3334 3335 void* oldhand2 = oldAct.sa_sigaction 3336 ? 
CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction) 3337 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler); 3338 assert(oldhand2 == oldhand, "no concurrent signal handler installation"); 3339 } 3340 3341 // install signal handlers for signals that HotSpot needs to 3342 // handle in order to support Java-level exception handling. 3343 void os::Aix::install_signal_handlers() { 3344 if (!signal_handlers_are_installed) { 3345 signal_handlers_are_installed = true; 3346 3347 // signal-chaining 3348 typedef void (*signal_setting_t)(); 3349 signal_setting_t begin_signal_setting = NULL; 3350 signal_setting_t end_signal_setting = NULL; 3351 begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t, 3352 dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting")); 3353 if (begin_signal_setting != NULL) { 3354 end_signal_setting = CAST_TO_FN_PTR(signal_setting_t, 3355 dlsym(RTLD_DEFAULT, "JVM_end_signal_setting")); 3356 get_signal_action = CAST_TO_FN_PTR(get_signal_t, 3357 dlsym(RTLD_DEFAULT, "JVM_get_signal_action")); 3358 libjsig_is_loaded = true; 3359 assert(UseSignalChaining, "should enable signal-chaining"); 3360 } 3361 if (libjsig_is_loaded) { 3362 // Tell libjsig jvm is setting signal handlers 3363 (*begin_signal_setting)(); 3364 } 3365 3366 set_signal_handler(SIGSEGV, true); 3367 set_signal_handler(SIGPIPE, true); 3368 set_signal_handler(SIGBUS, true); 3369 set_signal_handler(SIGILL, true); 3370 set_signal_handler(SIGFPE, true); 3371 set_signal_handler(SIGTRAP, true); 3372 set_signal_handler(SIGXFSZ, true); 3373 set_signal_handler(SIGDANGER, true); 3374 3375 if (libjsig_is_loaded) { 3376 // Tell libjsig jvm finishes setting signal handlers 3377 (*end_signal_setting)(); 3378 } 3379 3380 // We don't activate signal checker if libjsig is in place, we trust ourselves 3381 // and if UserSignalHandler is installed all bets are off. 3382 // Log that signal checking is off only if -verbose:jni is specified. 
3383 if (CheckJNICalls) { 3384 if (libjsig_is_loaded) { 3385 tty->print_cr("Info: libjsig is activated, all active signal checking is disabled"); 3386 check_signals = false; 3387 } 3388 if (AllowUserSignalHandlers) { 3389 tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled"); 3390 check_signals = false; 3391 } 3392 // need to initialize check_signal_done 3393 ::sigemptyset(&check_signal_done); 3394 } 3395 } 3396 } 3397 3398 static const char* get_signal_handler_name(address handler, 3399 char* buf, int buflen) { 3400 int offset; 3401 bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset); 3402 if (found) { 3403 // skip directory names 3404 const char *p1, *p2; 3405 p1 = buf; 3406 size_t len = strlen(os::file_separator()); 3407 while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len; 3408 // The way os::dll_address_to_library_name is implemented on Aix 3409 // right now, it always returns -1 for the offset which is not 3410 // terribly informative. 3411 // Will fix that. For now, omit the offset. 3412 jio_snprintf(buf, buflen, "%s", p1); 3413 } else { 3414 jio_snprintf(buf, buflen, PTR_FORMAT, handler); 3415 } 3416 return buf; 3417 } 3418 3419 static void print_signal_handler(outputStream* st, int sig, 3420 char* buf, size_t buflen) { 3421 struct sigaction sa; 3422 sigaction(sig, NULL, &sa); 3423 3424 st->print("%s: ", os::exception_name(sig, buf, buflen)); 3425 3426 address handler = (sa.sa_flags & SA_SIGINFO) 3427 ? CAST_FROM_FN_PTR(address, sa.sa_sigaction) 3428 : CAST_FROM_FN_PTR(address, sa.sa_handler); 3429 3430 if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) { 3431 st->print("SIG_DFL"); 3432 } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) { 3433 st->print("SIG_IGN"); 3434 } else { 3435 st->print("[%s]", get_signal_handler_name(handler, buf, buflen)); 3436 } 3437 3438 // Print readable mask. 
3439 st->print(", sa_mask[0]="); 3440 os::Posix::print_signal_set_short(st, &sa.sa_mask); 3441 3442 address rh = VMError::get_resetted_sighandler(sig); 3443 // May be, handler was resetted by VMError? 3444 if (rh != NULL) { 3445 handler = rh; 3446 sa.sa_flags = VMError::get_resetted_sigflags(sig); 3447 } 3448 3449 // Print textual representation of sa_flags. 3450 st->print(", sa_flags="); 3451 os::Posix::print_sa_flags(st, sa.sa_flags); 3452 3453 // Check: is it our handler? 3454 if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) || 3455 handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) { 3456 // It is our signal handler. 3457 // Check for flags, reset system-used one! 3458 if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) { 3459 st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library", 3460 os::Aix::get_our_sigflags(sig)); 3461 } 3462 } 3463 st->cr(); 3464 } 3465 3466 3467 #define DO_SIGNAL_CHECK(sig) \ 3468 if (!sigismember(&check_signal_done, sig)) \ 3469 os::Aix::check_signal_handler(sig) 3470 3471 // This method is a periodic task to check for misbehaving JNI applications 3472 // under CheckJNI, we can add any periodic checks here 3473 3474 void os::run_periodic_checks() { 3475 3476 if (check_signals == false) return; 3477 3478 // SEGV and BUS if overridden could potentially prevent 3479 // generation of hs*.log in the event of a crash, debugging 3480 // such a case can be very challenging, so we absolutely 3481 // check the following for a good measure: 3482 DO_SIGNAL_CHECK(SIGSEGV); 3483 DO_SIGNAL_CHECK(SIGILL); 3484 DO_SIGNAL_CHECK(SIGFPE); 3485 DO_SIGNAL_CHECK(SIGBUS); 3486 DO_SIGNAL_CHECK(SIGPIPE); 3487 DO_SIGNAL_CHECK(SIGXFSZ); 3488 if (UseSIGTRAP) { 3489 DO_SIGNAL_CHECK(SIGTRAP); 3490 } 3491 DO_SIGNAL_CHECK(SIGDANGER); 3492 3493 // ReduceSignalUsage allows the user to override these handlers 3494 // see comments at the very top and jvm_solaris.h 3495 if (!ReduceSignalUsage) { 3496 
DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL); 3497 DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL); 3498 DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL); 3499 DO_SIGNAL_CHECK(BREAK_SIGNAL); 3500 } 3501 3502 DO_SIGNAL_CHECK(SR_signum); 3503 DO_SIGNAL_CHECK(INTERRUPT_SIGNAL); 3504 } 3505 3506 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *); 3507 3508 static os_sigaction_t os_sigaction = NULL; 3509 3510 void os::Aix::check_signal_handler(int sig) { 3511 char buf[O_BUFLEN]; 3512 address jvmHandler = NULL; 3513 3514 struct sigaction act; 3515 if (os_sigaction == NULL) { 3516 // only trust the default sigaction, in case it has been interposed 3517 os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction"); 3518 if (os_sigaction == NULL) return; 3519 } 3520 3521 os_sigaction(sig, (struct sigaction*)NULL, &act); 3522 3523 address thisHandler = (act.sa_flags & SA_SIGINFO) 3524 ? CAST_FROM_FN_PTR(address, act.sa_sigaction) 3525 : CAST_FROM_FN_PTR(address, act.sa_handler); 3526 3527 3528 switch(sig) { 3529 case SIGSEGV: 3530 case SIGBUS: 3531 case SIGFPE: 3532 case SIGPIPE: 3533 case SIGILL: 3534 case SIGXFSZ: 3535 // Renamed 'signalHandler' to avoid collision with other shared libs. 
3536 jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler); 3537 break; 3538 3539 case SHUTDOWN1_SIGNAL: 3540 case SHUTDOWN2_SIGNAL: 3541 case SHUTDOWN3_SIGNAL: 3542 case BREAK_SIGNAL: 3543 jvmHandler = (address)user_handler(); 3544 break; 3545 3546 case INTERRUPT_SIGNAL: 3547 jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL); 3548 break; 3549 3550 default: 3551 if (sig == SR_signum) { 3552 jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler); 3553 } else { 3554 return; 3555 } 3556 break; 3557 } 3558 3559 if (thisHandler != jvmHandler) { 3560 tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN)); 3561 tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN)); 3562 tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN)); 3563 // No need to check this sig any longer 3564 sigaddset(&check_signal_done, sig); 3565 // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN 3566 if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) { 3567 tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell", 3568 exception_name(sig, buf, O_BUFLEN)); 3569 } 3570 } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) { 3571 tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN)); 3572 tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig)); 3573 tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags); 3574 // No need to check this sig any longer 3575 sigaddset(&check_signal_done, sig); 3576 } 3577 3578 // Dump all the signal 3579 if (sigismember(&check_signal_done, sig)) { 3580 print_signal_handlers(tty, buf, O_BUFLEN); 3581 } 3582 } 3583 3584 extern bool signal_name(int signo, char* buf, size_t len); 3585 3586 const char* os::exception_name(int exception_code, char* buf, size_t size) { 3587 if (0 < exception_code && exception_code <= SIGRTMAX) { 3588 // signal 3589 if 
(!signal_name(exception_code, buf, size)) { 3590 jio_snprintf(buf, size, "SIG%d", exception_code); 3591 } 3592 return buf; 3593 } else { 3594 return NULL; 3595 } 3596 } 3597 3598 // To install functions for atexit system call 3599 extern "C" { 3600 static void perfMemory_exit_helper() { 3601 perfMemory_exit(); 3602 } 3603 } 3604 3605 // This is called _before_ the most of global arguments have been parsed. 3606 void os::init(void) { 3607 // This is basic, we want to know if that ever changes. 3608 // (shared memory boundary is supposed to be a 256M aligned) 3609 assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected"); 3610 3611 // First off, we need to know whether we run on AIX or PASE, and 3612 // the OS level we run on. 3613 os::Aix::initialize_os_info(); 3614 3615 // Scan environment (SPEC1170 behaviour, etc) 3616 os::Aix::scan_environment(); 3617 3618 // Check which pages are supported by AIX. 3619 os::Aix::query_multipage_support(); 3620 3621 // Next, we need to initialize libo4 and libperfstat libraries. 3622 if (os::Aix::on_pase()) { 3623 os::Aix::initialize_libo4(); 3624 } else { 3625 os::Aix::initialize_libperfstat(); 3626 } 3627 3628 // Reset the perfstat information provided by ODM. 3629 if (os::Aix::on_aix()) { 3630 libperfstat::perfstat_reset(); 3631 } 3632 3633 // Now initialze basic system properties. Note that for some of the values we 3634 // need libperfstat etc. 3635 os::Aix::initialize_system_info(); 3636 3637 // Initialize large page support. 
3638 if (UseLargePages) { 3639 os::large_page_init(); 3640 if (!UseLargePages) { 3641 // initialize os::_page_sizes 3642 _page_sizes[0] = Aix::page_size(); 3643 _page_sizes[1] = 0; 3644 if (Verbose) { 3645 fprintf(stderr, "Large Page initialization failed: setting UseLargePages=0.\n"); 3646 } 3647 } 3648 } else { 3649 // initialize os::_page_sizes 3650 _page_sizes[0] = Aix::page_size(); 3651 _page_sizes[1] = 0; 3652 } 3653 3654 // debug trace 3655 if (Verbose) { 3656 fprintf(stderr, "os::vm_page_size 0x%llX\n", os::vm_page_size()); 3657 fprintf(stderr, "os::large_page_size 0x%llX\n", os::large_page_size()); 3658 fprintf(stderr, "os::_page_sizes = ( "); 3659 for (int i = 0; _page_sizes[i]; i ++) { 3660 fprintf(stderr, " %s ", describe_pagesize(_page_sizes[i])); 3661 } 3662 fprintf(stderr, ")\n"); 3663 } 3664 3665 _initial_pid = getpid(); 3666 3667 clock_tics_per_sec = sysconf(_SC_CLK_TCK); 3668 3669 init_random(1234567); 3670 3671 ThreadCritical::initialize(); 3672 3673 // Main_thread points to the aboriginal thread. 3674 Aix::_main_thread = pthread_self(); 3675 3676 initial_time_count = os::elapsed_counter(); 3677 pthread_mutex_init(&dl_mutex, NULL); 3678 } 3679 3680 // this is called _after_ the global arguments have been parsed 3681 jint os::init_2(void) { 3682 3683 if (Verbose) { 3684 fprintf(stderr, "processor count: %d\n", os::_processor_count); 3685 fprintf(stderr, "physical memory: %lu\n", Aix::_physical_memory); 3686 } 3687 3688 // initially build up the loaded dll map 3689 LoadedLibraries::reload(); 3690 3691 const int page_size = Aix::page_size(); 3692 const int map_size = page_size; 3693 3694 address map_address = (address) MAP_FAILED; 3695 const int prot = PROT_READ; 3696 const int flags = MAP_PRIVATE|MAP_ANONYMOUS; 3697 3698 // use optimized addresses for the polling page, 3699 // e.g. map it to a special 32-bit address. 
3700 if (OptimizePollingPageLocation) { 3701 // architecture-specific list of address wishes: 3702 address address_wishes[] = { 3703 // AIX: addresses lower than 0x30000000 don't seem to work on AIX. 3704 // PPC64: all address wishes are non-negative 32 bit values where 3705 // the lower 16 bits are all zero. we can load these addresses 3706 // with a single ppc_lis instruction. 3707 (address) 0x30000000, (address) 0x31000000, 3708 (address) 0x32000000, (address) 0x33000000, 3709 (address) 0x40000000, (address) 0x41000000, 3710 (address) 0x42000000, (address) 0x43000000, 3711 (address) 0x50000000, (address) 0x51000000, 3712 (address) 0x52000000, (address) 0x53000000, 3713 (address) 0x60000000, (address) 0x61000000, 3714 (address) 0x62000000, (address) 0x63000000 3715 }; 3716 int address_wishes_length = sizeof(address_wishes)/sizeof(address); 3717 3718 // iterate over the list of address wishes: 3719 for (int i=0; i<address_wishes_length; i++) { 3720 // try to map with current address wish. 3721 // AIX: AIX needs MAP_FIXED if we provide an address and mmap will 3722 // fail if the address is already mapped. 3723 map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size, 3724 map_size, prot, 3725 flags | MAP_FIXED, 3726 -1, 0); 3727 if (Verbose) { 3728 fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n", 3729 address_wishes[i], map_address + (ssize_t)page_size); 3730 } 3731 3732 if (map_address + (ssize_t)page_size == address_wishes[i]) { 3733 // map succeeded and map_address is at wished address, exit loop. 3734 break; 3735 } 3736 3737 if (map_address != (address) MAP_FAILED) { 3738 // map succeeded, but polling_page is not at wished address, unmap and continue. 3739 ::munmap(map_address, map_size); 3740 map_address = (address) MAP_FAILED; 3741 } 3742 // map failed, continue loop. 
3743 } 3744 } // end OptimizePollingPageLocation 3745 3746 if (map_address == (address) MAP_FAILED) { 3747 map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0); 3748 } 3749 guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page"); 3750 os::set_polling_page(map_address); 3751 3752 if (!UseMembar) { 3753 address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); 3754 guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page"); 3755 os::set_memory_serialize_page(mem_serialize_page); 3756 3757 #ifndef PRODUCT 3758 if (Verbose && PrintMiscellaneous) 3759 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page); 3760 #endif 3761 } 3762 3763 // initialize suspend/resume support - must do this before signal_sets_init() 3764 if (SR_initialize() != 0) { 3765 perror("SR_initialize failed"); 3766 return JNI_ERR; 3767 } 3768 3769 Aix::signal_sets_init(); 3770 Aix::install_signal_handlers(); 3771 3772 // Check minimum allowable stack size for thread creation and to initialize 3773 // the java system classes, including StackOverflowError - depends on page 3774 // size. Add a page for compiler2 recursion in main thread. 3775 // Add in 2*BytesPerWord times page size to account for VM stack during 3776 // class initialization depending on 32 or 64 bit VM. 
3777 os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed, 3778 (size_t)(StackYellowPages+StackRedPages+StackShadowPages + 3779 2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::page_size()); 3780 3781 size_t threadStackSizeInBytes = ThreadStackSize * K; 3782 if (threadStackSizeInBytes != 0 && 3783 threadStackSizeInBytes < os::Aix::min_stack_allowed) { 3784 tty->print_cr("\nThe stack size specified is too small, " 3785 "Specify at least %dk", 3786 os::Aix::min_stack_allowed / K); 3787 return JNI_ERR; 3788 } 3789 3790 // Make the stack size a multiple of the page size so that 3791 // the yellow/red zones can be guarded. 3792 // note that this can be 0, if no default stacksize was set 3793 JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size())); 3794 3795 Aix::libpthread_init(); 3796 3797 if (MaxFDLimit) { 3798 // set the number of file descriptors to max. print out error 3799 // if getrlimit/setrlimit fails but continue regardless. 3800 struct rlimit nbr_files; 3801 int status = getrlimit(RLIMIT_NOFILE, &nbr_files); 3802 if (status != 0) { 3803 if (PrintMiscellaneous && (Verbose || WizardMode)) 3804 perror("os::init_2 getrlimit failed"); 3805 } else { 3806 nbr_files.rlim_cur = nbr_files.rlim_max; 3807 status = setrlimit(RLIMIT_NOFILE, &nbr_files); 3808 if (status != 0) { 3809 if (PrintMiscellaneous && (Verbose || WizardMode)) 3810 perror("os::init_2 setrlimit failed"); 3811 } 3812 } 3813 } 3814 3815 if (PerfAllowAtExitRegistration) { 3816 // only register atexit functions if PerfAllowAtExitRegistration is set. 3817 // atexit functions can be delayed until process exit time, which 3818 // can be problematic for embedded VM situations. Embedded VMs should 3819 // call DestroyJavaVM() to assure that VM resources are released. 3820 3821 // note: perfMemory_exit_helper atexit function may be removed in 3822 // the future if the appropriate cleanup code can be added to the 3823 // VM_Exit VMOperation's doit method. 
3824 if (atexit(perfMemory_exit_helper) != 0) { 3825 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 3826 } 3827 } 3828 3829 return JNI_OK; 3830 } 3831 3832 // this is called at the end of vm_initialization 3833 void os::init_3(void) { 3834 return; 3835 } 3836 3837 // Mark the polling page as unreadable 3838 void os::make_polling_page_unreadable(void) { 3839 if (!guard_memory((char*)_polling_page, Aix::page_size())) { 3840 fatal("Could not disable polling page"); 3841 } 3842 }; 3843 3844 // Mark the polling page as readable 3845 void os::make_polling_page_readable(void) { 3846 // Changed according to os_linux.cpp. 3847 if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) { 3848 fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page)); 3849 } 3850 }; 3851 3852 int os::active_processor_count() { 3853 int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN); 3854 assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check"); 3855 return online_cpus; 3856 } 3857 3858 void os::set_native_thread_name(const char *name) { 3859 // Not yet implemented. 3860 return; 3861 } 3862 3863 bool os::distribute_processes(uint length, uint* distribution) { 3864 // Not yet implemented. 3865 return false; 3866 } 3867 3868 bool os::bind_to_processor(uint processor_id) { 3869 // Not yet implemented. 
3870 return false; 3871 } 3872 3873 void os::SuspendedThreadTask::internal_do_task() { 3874 if (do_suspend(_thread->osthread())) { 3875 SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext()); 3876 do_task(context); 3877 do_resume(_thread->osthread()); 3878 } 3879 } 3880 3881 class PcFetcher : public os::SuspendedThreadTask { 3882 public: 3883 PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {} 3884 ExtendedPC result(); 3885 protected: 3886 void do_task(const os::SuspendedThreadTaskContext& context); 3887 private: 3888 ExtendedPC _epc; 3889 }; 3890 3891 ExtendedPC PcFetcher::result() { 3892 guarantee(is_done(), "task is not done yet."); 3893 return _epc; 3894 } 3895 3896 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) { 3897 Thread* thread = context.thread(); 3898 OSThread* osthread = thread->osthread(); 3899 if (osthread->ucontext() != NULL) { 3900 _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext()); 3901 } else { 3902 // NULL context is unexpected, double-check this is the VMThread. 3903 guarantee(thread->is_VM_thread(), "can only be called for VMThread"); 3904 } 3905 } 3906 3907 // Suspends the target using the signal mechanism and then grabs the PC before 3908 // resuming the target. Used by the flat-profiler only 3909 ExtendedPC os::get_thread_pc(Thread* thread) { 3910 // Make sure that it is called by the watcher for the VMThread. 3911 assert(Thread::current()->is_Watcher_thread(), "Must be watcher"); 3912 assert(thread->is_VM_thread(), "Can only be called for VMThread"); 3913 3914 PcFetcher fetcher(thread); 3915 fetcher.run(); 3916 return fetcher.result(); 3917 } 3918 3919 // Not neede on Aix. 
3920 // int os::Aix::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) { 3921 // } 3922 3923 //////////////////////////////////////////////////////////////////////////////// 3924 // debug support 3925 3926 static address same_page(address x, address y) { 3927 intptr_t page_bits = -os::vm_page_size(); 3928 if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits)) 3929 return x; 3930 else if (x > y) 3931 return (address)(intptr_t(y) | ~page_bits) + 1; 3932 else 3933 return (address)(intptr_t(y) & page_bits); 3934 } 3935 3936 bool os::find(address addr, outputStream* st) { 3937 3938 st->print(PTR_FORMAT ": ", addr); 3939 3940 const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr); 3941 if (lib) { 3942 lib->print(st); 3943 return true; 3944 } else { 3945 lib = LoadedLibraries::find_for_data_address(addr); 3946 if (lib) { 3947 lib->print(st); 3948 return true; 3949 } else { 3950 st->print_cr("(outside any module)"); 3951 } 3952 } 3953 3954 return false; 3955 } 3956 3957 //////////////////////////////////////////////////////////////////////////////// 3958 // misc 3959 3960 // This does not do anything on Aix. This is basically a hook for being 3961 // able to use structured exception handling (thread-local exception filters) 3962 // on, e.g., Win32. 
3963 void 3964 os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, 3965 JavaCallArguments* args, Thread* thread) { 3966 f(value, method, args, thread); 3967 } 3968 3969 void os::print_statistics() { 3970 } 3971 3972 int os::message_box(const char* title, const char* message) { 3973 int i; 3974 fdStream err(defaultStream::error_fd()); 3975 for (i = 0; i < 78; i++) err.print_raw("="); 3976 err.cr(); 3977 err.print_raw_cr(title); 3978 for (i = 0; i < 78; i++) err.print_raw("-"); 3979 err.cr(); 3980 err.print_raw_cr(message); 3981 for (i = 0; i < 78; i++) err.print_raw("="); 3982 err.cr(); 3983 3984 char buf[16]; 3985 // Prevent process from exiting upon "read error" without consuming all CPU 3986 while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); } 3987 3988 return buf[0] == 'y' || buf[0] == 'Y'; 3989 } 3990 3991 int os::stat(const char *path, struct stat *sbuf) { 3992 char pathbuf[MAX_PATH]; 3993 if (strlen(path) > MAX_PATH - 1) { 3994 errno = ENAMETOOLONG; 3995 return -1; 3996 } 3997 os::native_path(strcpy(pathbuf, path)); 3998 return ::stat(pathbuf, sbuf); 3999 } 4000 4001 bool os::check_heap(bool force) { 4002 return true; 4003 } 4004 4005 // Is a (classpath) directory empty? 4006 bool os::dir_is_empty(const char* path) { 4007 DIR *dir = NULL; 4008 struct dirent *ptr; 4009 4010 dir = opendir(path); 4011 if (dir == NULL) return true; 4012 4013 /* Scan the directory */ 4014 bool result = true; 4015 char buf[sizeof(struct dirent) + MAX_PATH]; 4016 while (result && (ptr = ::readdir(dir)) != NULL) { 4017 if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) { 4018 result = false; 4019 } 4020 } 4021 closedir(dir); 4022 return result; 4023 } 4024 4025 // This code originates from JDK's sysOpen and open64_w 4026 // from src/solaris/hpi/src/system_md.c 4027 4028 #ifndef O_DELETE 4029 #define O_DELETE 0x10000 4030 #endif 4031 4032 // Open a file. 
Unlink the file immediately after open returns 4033 // if the specified oflag has the O_DELETE flag set. 4034 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c 4035 4036 int os::open(const char *path, int oflag, int mode) { 4037 4038 if (strlen(path) > MAX_PATH - 1) { 4039 errno = ENAMETOOLONG; 4040 return -1; 4041 } 4042 int fd; 4043 int o_delete = (oflag & O_DELETE); 4044 oflag = oflag & ~O_DELETE; 4045 4046 fd = ::open64(path, oflag, mode); 4047 if (fd == -1) return -1; 4048 4049 // If the open succeeded, the file might still be a directory. 4050 { 4051 struct stat64 buf64; 4052 int ret = ::fstat64(fd, &buf64); 4053 int st_mode = buf64.st_mode; 4054 4055 if (ret != -1) { 4056 if ((st_mode & S_IFMT) == S_IFDIR) { 4057 errno = EISDIR; 4058 ::close(fd); 4059 return -1; 4060 } 4061 } else { 4062 ::close(fd); 4063 return -1; 4064 } 4065 } 4066 4067 // All file descriptors that are opened in the JVM and not 4068 // specifically destined for a subprocess should have the 4069 // close-on-exec flag set. If we don't set it, then careless 3rd 4070 // party native code might fork and exec without closing all 4071 // appropriate file descriptors (e.g. as we do in closeDescriptors in 4072 // UNIXProcess.c), and this in turn might: 4073 // 4074 // - cause end-of-file to fail to be detected on some file 4075 // descriptors, resulting in mysterious hangs, or 4076 // 4077 // - might cause an fopen in the subprocess to fail on a system 4078 // suffering from bug 1085341. 4079 // 4080 // (Yes, the default setting of the close-on-exec flag is a Unix 4081 // design flaw.) 
4082 // 4083 // See: 4084 // 1085341: 32-bit stdio routines should support file descriptors >255 4085 // 4843136: (process) pipe file descriptor from Runtime.exec not being closed 4086 // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9 4087 #ifdef FD_CLOEXEC 4088 { 4089 int flags = ::fcntl(fd, F_GETFD); 4090 if (flags != -1) 4091 ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC); 4092 } 4093 #endif 4094 4095 if (o_delete != 0) { 4096 ::unlink(path); 4097 } 4098 return fd; 4099 } 4100 4101 4102 // create binary file, rewriting existing file if required 4103 int os::create_binary_file(const char* path, bool rewrite_existing) { 4104 int oflags = O_WRONLY | O_CREAT; 4105 if (!rewrite_existing) { 4106 oflags |= O_EXCL; 4107 } 4108 return ::open64(path, oflags, S_IREAD | S_IWRITE); 4109 } 4110 4111 // return current position of file pointer 4112 jlong os::current_file_offset(int fd) { 4113 return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR); 4114 } 4115 4116 // move file pointer to the specified offset 4117 jlong os::seek_to_file_offset(int fd, jlong offset) { 4118 return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET); 4119 } 4120 4121 // This code originates from JDK's sysAvailable 4122 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c 4123 4124 int os::available(int fd, jlong *bytes) { 4125 jlong cur, end; 4126 int mode; 4127 struct stat64 buf64; 4128 4129 if (::fstat64(fd, &buf64) >= 0) { 4130 mode = buf64.st_mode; 4131 if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) { 4132 // XXX: is the following call interruptible? If so, this might 4133 // need to go through the INTERRUPT_IO() wrapper as for other 4134 // blocking, interruptible calls in this file. 
4135 int n; 4136 if (::ioctl(fd, FIONREAD, &n) >= 0) { 4137 *bytes = n; 4138 return 1; 4139 } 4140 } 4141 } 4142 if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) { 4143 return 0; 4144 } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) { 4145 return 0; 4146 } else if (::lseek64(fd, cur, SEEK_SET) == -1) { 4147 return 0; 4148 } 4149 *bytes = end - cur; 4150 return 1; 4151 } 4152 4153 int os::socket_available(int fd, jint *pbytes) { 4154 // Linux doc says EINTR not returned, unlike Solaris 4155 int ret = ::ioctl(fd, FIONREAD, pbytes); 4156 4157 //%% note ioctl can return 0 when successful, JVM_SocketAvailable 4158 // is expected to return 0 on failure and 1 on success to the jdk. 4159 return (ret < 0) ? 0 : 1; 4160 } 4161 4162 // Map a block of memory. 4163 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4164 char *addr, size_t bytes, bool read_only, 4165 bool allow_exec) { 4166 Unimplemented(); 4167 return NULL; 4168 } 4169 4170 4171 // Remap a block of memory. 4172 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4173 char *addr, size_t bytes, bool read_only, 4174 bool allow_exec) { 4175 // same as map_memory() on this OS 4176 return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only, 4177 allow_exec); 4178 } 4179 4180 // Unmap a block of memory. 4181 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4182 return munmap(addr, bytes) == 0; 4183 } 4184 4185 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 4186 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 4187 // of a thread. 4188 // 4189 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns 4190 // the fast estimate available on the platform. 
4191 4192 jlong os::current_thread_cpu_time() { 4193 // return user + sys since the cost is the same 4194 const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */); 4195 assert(n >= 0, "negative CPU time"); 4196 return n; 4197 } 4198 4199 jlong os::thread_cpu_time(Thread* thread) { 4200 // consistent with what current_thread_cpu_time() returns 4201 const jlong n = os::thread_cpu_time(thread, true /* user + sys */); 4202 assert(n >= 0, "negative CPU time"); 4203 return n; 4204 } 4205 4206 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 4207 const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 4208 assert(n >= 0, "negative CPU time"); 4209 return n; 4210 } 4211 4212 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) { 4213 bool error = false; 4214 4215 jlong sys_time = 0; 4216 jlong user_time = 0; 4217 4218 // reimplemented using getthrds64(). 4219 // 4220 // goes like this: 4221 // For the thread in question, get the kernel thread id. Then get the 4222 // kernel thread statistics using that id. 4223 // 4224 // This only works of course when no pthread scheduling is used, 4225 // ie there is a 1:1 relationship to kernel threads. 4226 // On AIX, see AIXTHREAD_SCOPE variable. 4227 4228 pthread_t pthtid = thread->osthread()->pthread_id(); 4229 4230 // retrieve kernel thread id for the pthread: 4231 tid64_t tid = 0; 4232 struct __pthrdsinfo pinfo; 4233 // I just love those otherworldly IBM APIs which force me to hand down 4234 // dummy buffers for stuff I dont care for... 
4235 char dummy[1]; 4236 int dummy_size = sizeof(dummy); 4237 if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo), 4238 dummy, &dummy_size) == 0) { 4239 tid = pinfo.__pi_tid; 4240 } else { 4241 tty->print_cr("pthread_getthrds_np failed."); 4242 error = true; 4243 } 4244 4245 // retrieve kernel timing info for that kernel thread 4246 if (!error) { 4247 struct thrdentry64 thrdentry; 4248 if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) { 4249 sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL; 4250 user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL; 4251 } else { 4252 tty->print_cr("pthread_getthrds_np failed."); 4253 error = true; 4254 } 4255 } 4256 4257 if (p_sys_time) { 4258 *p_sys_time = sys_time; 4259 } 4260 4261 if (p_user_time) { 4262 *p_user_time = user_time; 4263 } 4264 4265 if (error) { 4266 return false; 4267 } 4268 4269 return true; 4270 } 4271 4272 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) { 4273 jlong sys_time; 4274 jlong user_time; 4275 4276 if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) { 4277 return -1; 4278 } 4279 4280 return user_sys_cpu_time ? 
sys_time + user_time : user_time; 4281 } 4282 4283 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4284 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits 4285 info_ptr->may_skip_backward = false; // elapsed time not wall time 4286 info_ptr->may_skip_forward = false; // elapsed time not wall time 4287 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4288 } 4289 4290 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4291 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits 4292 info_ptr->may_skip_backward = false; // elapsed time not wall time 4293 info_ptr->may_skip_forward = false; // elapsed time not wall time 4294 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4295 } 4296 4297 bool os::is_thread_cpu_time_supported() { 4298 return true; 4299 } 4300 4301 // System loadavg support. Returns -1 if load average cannot be obtained. 4302 // For now just return the system wide load average (no processor sets). 4303 int os::loadavg(double values[], int nelem) { 4304 4305 // Implemented using libperfstat on AIX. 4306 4307 guarantee(nelem >= 0 && nelem <= 3, "argument error"); 4308 guarantee(values, "argument error"); 4309 4310 if (os::Aix::on_pase()) { 4311 Unimplemented(); 4312 return -1; 4313 } else { 4314 // AIX: use libperfstat 4315 // 4316 // See also: 4317 // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm 4318 // /usr/include/libperfstat.h: 4319 4320 // Use the already AIX version independent get_cpuinfo. 
4321 os::Aix::cpuinfo_t ci; 4322 if (os::Aix::get_cpuinfo(&ci)) { 4323 for (int i = 0; i < nelem; i++) { 4324 values[i] = ci.loadavg[i]; 4325 } 4326 } else { 4327 return -1; 4328 } 4329 return nelem; 4330 } 4331 } 4332 4333 void os::pause() { 4334 char filename[MAX_PATH]; 4335 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4336 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4337 } else { 4338 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4339 } 4340 4341 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4342 if (fd != -1) { 4343 struct stat buf; 4344 ::close(fd); 4345 while (::stat(filename, &buf) == 0) { 4346 (void)::poll(NULL, 0, 100); 4347 } 4348 } else { 4349 jio_fprintf(stderr, 4350 "Could not open pause file '%s', continuing immediately.\n", filename); 4351 } 4352 } 4353 4354 bool os::Aix::is_primordial_thread() { 4355 if (pthread_self() == (pthread_t)1) { 4356 return true; 4357 } else { 4358 return false; 4359 } 4360 } 4361 4362 // OS recognitions (PASE/AIX, OS level) call this before calling any 4363 // one of Aix::on_pase(), Aix::os_version() static 4364 void os::Aix::initialize_os_info() { 4365 4366 assert(_on_pase == -1 && _os_version == -1, "already called."); 4367 4368 struct utsname uts; 4369 memset(&uts, 0, sizeof(uts)); 4370 strcpy(uts.sysname, "?"); 4371 if (::uname(&uts) == -1) { 4372 fprintf(stderr, "uname failed (%d)\n", errno); 4373 guarantee(0, "Could not determine whether we run on AIX or PASE"); 4374 } else { 4375 if (Verbose) { 4376 fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" " 4377 "node \"%s\" machine \"%s\"\n", 4378 uts.sysname, uts.version, uts.release, uts.nodename, uts.machine); 4379 } 4380 const int major = atoi(uts.version); 4381 assert(major > 0, "invalid OS version"); 4382 const int minor = atoi(uts.release); 4383 assert(minor > 0, "invalid OS release"); 4384 _os_version = (major << 8) | minor; 4385 if (strcmp(uts.sysname, "OS400") == 0) { 4386 
Unimplemented(); 4387 } else if (strcmp(uts.sysname, "AIX") == 0) { 4388 // We run on AIX. We do not support versions older than AIX 5.3. 4389 _on_pase = 0; 4390 if (_os_version < 0x0503) { 4391 fprintf(stderr, "AIX release older than AIX 5.3 not supported.\n"); 4392 assert(false, "AIX release too old."); 4393 } else { 4394 if (Verbose) { 4395 fprintf(stderr, "We run on AIX %d.%d\n", major, minor); 4396 } 4397 } 4398 } else { 4399 assert(false, "unknown OS"); 4400 } 4401 } 4402 4403 guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release"); 4404 4405 } // end: os::Aix::initialize_os_info() 4406 4407 // Scan environment for important settings which might effect the VM. 4408 // Trace out settings. Warn about invalid settings and/or correct them. 4409 // 4410 // Must run after os::Aix::initialue_os_info(). 4411 void os::Aix::scan_environment() { 4412 4413 char* p; 4414 int rc; 4415 4416 // Warn explicity if EXTSHM=ON is used. That switch changes how 4417 // System V shared memory behaves. One effect is that page size of 4418 // shared memory cannot be change dynamically, effectivly preventing 4419 // large pages from working. 4420 // This switch was needed on AIX 32bit, but on AIX 64bit the general 4421 // recommendation is (in OSS notes) to switch it off. 4422 p = ::getenv("EXTSHM"); 4423 if (Verbose) { 4424 fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>"); 4425 } 4426 if (p && strcmp(p, "ON") == 0) { 4427 fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n"); 4428 _extshm = 1; 4429 } else { 4430 _extshm = 0; 4431 } 4432 4433 // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs. 4434 // Not tested, not supported. 4435 // 4436 // Note that it might be worth the trouble to test and to require it, if only to 4437 // get useful return codes for mprotect. 4438 // 4439 // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before 4440 // exec() ? 
before loading the libjvm ? ....) 4441 p = ::getenv("XPG_SUS_ENV"); 4442 if (Verbose) { 4443 fprintf(stderr, "XPG_SUS_ENV=%s.\n", p ? p : "<unset>"); 4444 } 4445 if (p && strcmp(p, "ON") == 0) { 4446 _xpg_sus_mode = 1; 4447 fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON\n"); 4448 // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to 4449 // clobber address ranges. If we ever want to support that, we have to do some 4450 // testing first. 4451 guarantee(false, "XPG_SUS_ENV=ON not supported"); 4452 } else { 4453 _xpg_sus_mode = 0; 4454 } 4455 4456 // Switch off AIX internal (pthread) guard pages. This has 4457 // immediate effect for any pthread_create calls which follow. 4458 p = ::getenv("AIXTHREAD_GUARDPAGES"); 4459 if (Verbose) { 4460 fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.\n", p ? p : "<unset>"); 4461 fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.\n"); 4462 } 4463 rc = ::putenv("AIXTHREAD_GUARDPAGES=0"); 4464 guarantee(rc == 0, ""); 4465 4466 } // end: os::Aix::scan_environment() 4467 4468 // PASE: initialize the libo4 library (AS400 PASE porting library). 4469 void os::Aix::initialize_libo4() { 4470 Unimplemented(); 4471 } 4472 4473 // AIX: initialize the libperfstat library (we load this dynamically 4474 // because it is only available on AIX. 4475 void os::Aix::initialize_libperfstat() { 4476 4477 assert(os::Aix::on_aix(), "AIX only"); 4478 4479 if (!libperfstat::init()) { 4480 fprintf(stderr, "libperfstat initialization failed.\n"); 4481 assert(false, "libperfstat initialization failed"); 4482 } else { 4483 if (Verbose) { 4484 fprintf(stderr, "libperfstat initialized.\n"); 4485 } 4486 } 4487 } // end: os::Aix::initialize_libperfstat 4488 4489 ///////////////////////////////////////////////////////////////////////////// 4490 // thread stack 4491 4492 // function to query the current stack size using pthread_getthrds_np 4493 // 4494 // ! do not change anything here unless you know what you are doing ! 
// Query stack base and size of the calling pthread via pthread_getthrds_np.
// Either out-parameter may be NULL if the caller only needs one of the two.
// Must NOT be called on the primordial thread (guaranteed below).
static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {

  // This only works when invoked on a pthread. As we agreed not to use
  // primordial threads anyway, I assert here
  guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");

  // information about this api can be found (a) in the pthread.h header and
  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
  //
  // The use of this API to find out the current stack is kind of undefined.
  // But after a lot of tries and asking IBM about it, I concluded that it is safe
  // enough for cases where I let the pthread library create its stacks. For cases
  // where I create an own stack and pass this to pthread_create, it seems not to
  // work (the returned stack size in that case is 0).

  pthread_t tid = pthread_self();
  struct __pthrdsinfo pinfo;
  // Dummy register-save buffer: the API requires a non-NULL buffer argument
  // even though we do not use the register contents.
  char dummy[1]; // we only need this to satisfy the api and to not get E
  int dummy_size = sizeof(dummy);

  memset(&pinfo, 0, sizeof(pinfo));

  const int rc = pthread_getthrds_np (&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
                                      sizeof(pinfo), dummy, &dummy_size);

  if (rc != 0) {
    fprintf(stderr, "pthread_getthrds_np failed (%d)\n", rc);
    guarantee(0, "pthread_getthrds_np failed");
  }

  guarantee(pinfo.__pi_stackend, "returned stack base invalid");

  // the following can happen when invoking pthread_getthrds_np on a pthread running on a user provided stack
  // (when handing down a stack to pthread create, see pthread_attr_setstackaddr).
  // Not sure what to do here - I feel inclined to forbid this use case completely.
  guarantee(pinfo.__pi_stacksize, "returned stack size invalid");

  // On AIX, stacks are not necessarily page aligned so round the base and size accordingly
  if (p_stack_base) {
    (*p_stack_base) = (address) align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size());
  }

  if (p_stack_size) {
    // Subtract one page to compensate for rounding the base up above.
    (*p_stack_size) = pinfo.__pi_stacksize - os::Aix::stack_page_size();
  }

#ifndef PRODUCT
  if (Verbose) {
    fprintf(stderr,
            "query_stack_dimensions() -> real stack_base=" INTPTR_FORMAT ", real stack_addr=" INTPTR_FORMAT
            ", real stack_size=" INTPTR_FORMAT
            ", stack_base=" INTPTR_FORMAT ", stack_size=" INTPTR_FORMAT "\n",
            (intptr_t)pinfo.__pi_stackend, (intptr_t)pinfo.__pi_stackaddr, pinfo.__pi_stacksize,
            (intptr_t)align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size()),
            pinfo.__pi_stacksize - os::Aix::stack_page_size());
  }
#endif

} // end query_stack_dimensions

// get the current stack base from the OS (actually, the pthread library)
address os::current_stack_base() {
  address p;
  query_stack_dimensions(&p, 0);
  return p;
}

// get the current stack size from the OS (actually, the pthread library)
size_t os::current_stack_size() {
  size_t s;
  query_stack_dimensions(0, &s);
  return s;
}

// Refer to the comments in os_solaris.cpp park-unpark.
//
// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
// For specifics regarding the bug see GLIBC BUGID 261237 :
//    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
// is used. (The simple C test-case provided in the GLIBC bug report manifests the
// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
// and monitorenter when we're using 1-0 locking. All those operations may result in
// calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
// of libpthread avoids the problem, but isn't practical.
//
// Possible remedies:
//
// 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
//      This is palliative and probabilistic, however. If the thread is preempted
//      between the call to compute_abstime() and pthread_cond_timedwait(), more
//      than the minimum period may have passed, and the abstime may be stale (in the
//      past) resulting in a hang. Using this technique reduces the odds of a hang
//      but the JVM is still vulnerable, particularly on heavily loaded systems.
//
// 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
//      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
//      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
//      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
//      thread.
//
// 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
//      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
//      a timeout request to the chron thread and then blocking via pthread_cond_wait().
//      This also works well. In fact it avoids kernel-level scalability impediments
//      on certain platforms that don't handle lots of active pthread_cond_timedwait()
//      timers in a graceful fashion.
//
// 4.   When the abstime value is in the past it appears that control returns
//      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
//      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
//      can avoid the problem by reinitializing the condvar -- by cond_destroy()
//      followed by cond_init() -- after all calls to pthread_cond_timedwait().
//      It may be possible to avoid reinitialization by checking the return
//      value from pthread_cond_timedwait(). In addition to reinitializing the
//      condvar we must establish the invariant that cond_signal() is only called
//      within critical sections protected by the adjunct mutex. This prevents
//      cond_signal() from "seeing" a condvar that's in the midst of being
//      reinitialized or that is corrupt. Sadly, this invariant obviates the
//      desirable signal-after-unlock optimization that avoids futile context switching.
//
//      I'm also concerned that some versions of NPTL might allocate an auxiliary
//      structure when a condvar is used or initialized. cond_destroy() would
//      release the helper structure. Our reinitialize-after-timedwait fix
//      put excessive stress on malloc/free and locks protecting the c-heap.
//
// We currently use (4). See the WorkAroundNTPLTimedWaitHang flag.
// It may be possible to refine (4) by checking the kernel and NPTL versions
// and only enabling the work-around for vulnerable environments.

// utility to compute the abstime argument to timedwait:
// millis is the relative timeout time
// abstime will be the absolute timeout time
// TODO: replace compute_abstime() with unpackTime()

static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
  // Negative timeouts are treated as "now".
  if (millis < 0) millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  millis %= 1000;
  if (seconds > 50000000) { // see man cond_timedwait(3T)
    seconds = 50000000;
  }
  abstime->tv_sec = now.tv_sec + seconds;
  long usec = now.tv_usec + millis * 1000;
  // Carry overflowed microseconds into the seconds field.
  if (usec >= 1000000) {
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}


// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).

int os::PlatformEvent::TryPark() {
  // CAS loop: atomically swap _Event (0 or 1) to 0 and return the prior value.
  for (;;) {
    const int v = _Event;
    guarantee ((v == 0) || (v == 1), "invariant");
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
  }
}

void os::PlatformEvent::park() {       // AKA "down()"
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // TODO: assert that _Assoc != NULL or _Assoc == Self
  // Atomically decrement _Event; a prior value of 1 means a permit was
  // available and we return without blocking.
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
  }
  guarantee (v >= 0, "invariant");
  if (v == 0) {
    // Do this the hard way by blocking ...
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee (_nParked == 0, "invariant");
    ++ _nParked;
    // Loop guards against spurious wakeups: only a real unpark() raises
    // _Event to >= 0.
    while (_Event < 0) {
      status = pthread_cond_wait(_cond, _mutex);
      assert_status(status == 0 || status == ETIMEDOUT, status, "cond_timedwait");
    }
    -- _nParked;

    // In theory we could move the ST of 0 into _Event past the unlock(),
    // but then we'd need a MEMBAR after the ST.
    _Event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }
  guarantee (_Event >= 0, "invariant");
}

// Timed park. Returns OS_OK if unparked (or a permit was already available),
// OS_TIMEOUT if the relative timeout (millis) elapsed first.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant");

  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
  }
  guarantee (v >= 0, "invariant");
  // A permit was available -- consume it and return immediately.
  if (v != 0) return OS_OK;

  // We do this the hard way, by blocking the thread.
  // Consider enforcing a minimum timeout value.
  struct timespec abst;
  compute_abstime(&abst, millis);

  int ret = OS_TIMEOUT;
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant");
  ++_nParked;

  // Object.wait(timo) will return because of
  // (a) notification
  // (b) timeout
  // (c) thread.interrupt
  //
  // Thread.interrupt and object.notify{All} both call Event::set.
  // That is, we treat thread.interrupt as a special case of notification.
  // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
  // We assume all ETIME returns are valid.
  //
  // TODO: properly differentiate simultaneous notify+interrupt.
  // In that case, we should propagate the notify to another waiter.

  while (_Event < 0) {
    status = pthread_cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break;         // previous semantics
    if (status == ETIMEDOUT) break;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked;
  if (_Event >= 0) {
    ret = OS_OK;
  }
  _Event = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  assert (_nParked == 0, "invariant");
  return ret;
}

void os::PlatformEvent::unpark() {
  // Atomically raise _Event toward 1; a prior value > 0 means a permit is
  // already available and there is nothing to do.
  int v, AnyWaiters;
  for (;;) {
    v = _Event;
    if (v > 0) {
      // The LD of _Event could have reordered or be satisfied
      // by a read-aside from this processor's write buffer.
      // To avoid problems execute a barrier and then
      // ratify the value.
      OrderAccess::fence();
      if (_Event == v) return;
      continue;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
  }
  if (v < 0) {
    // Wait for the thread associated with the event to vacate
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    AnyWaiters = _nParked;

    if (AnyWaiters != 0) {
      // NOTE(review): this implementation signals while still HOLDING the
      // mutex (an earlier comment here claimed the opposite). POSIX permits
      // pthread_cond_signal() with the mutex held, and keeping the lock is
      // required by work-around (4) described above: cond_signal() must not
      // observe a condvar that is being reinitialized.
      status = pthread_cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
    // Mutex should be locked for pthread_cond_signal(_cond).
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }

  // (Signalling after dropping the lock would be safe for "immortal" Events
  // and avoids a common class of futile wakeups. In rare circumstances a
  // wakeup can cause a thread to return prematurely from cond_{timed}wait()
  // but the spurious wakeup is benign and the victim will
  // simply re-test the condition and re-park itself.)
}


// JSR166
// -------------------------------------------------------

//
// The solaris and linux implementations of park/unpark are fairly
// conservative for now, but can be improved. They currently use a
// mutex/condvar pair, plus a count.
// Park decrements count if > 0, else does a condvar wait. Unpark
// sets count to 1 and signals condvar. Only one thread ever waits
// on the condvar. Contention seen when trying to park implies that someone
// is unparking you, so don't wait. And spurious returns are fine, so there
// is no need to track notifications.
//

#define MAX_SECS 100000000
//
// This code is common to linux and solaris and will be moved to a
// common place in dolphin.
//
// The passed in time value is either a relative time in nanoseconds
// or an absolute time in milliseconds. Either way it has to be unpacked
// into suitable seconds and nanoseconds components and stored in the
// given timespec structure.
// Given time is a 64-bit value and the time_t used in the timespec is only
// a signed-32-bit value (except on 64-bit Linux) we have to watch for
// overflow if times way in the future are given. Further on Solaris versions
// prior to 10 there is a restriction (see cond_timedwait) that the specified
// number of seconds, in abstime, is less than current_time + 100,000,000.
// As it will be 28 years before "now + 100000000" will overflow we can
// ignore overflow and just impose a hard-limit on seconds using the value
// of "now + 100,000,000". This places a limit on the timeout of about 3.17
// years from "now".
4821 // 4822 4823 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) { 4824 assert (time > 0, "convertTime"); 4825 4826 struct timeval now; 4827 int status = gettimeofday(&now, NULL); 4828 assert(status == 0, "gettimeofday"); 4829 4830 time_t max_secs = now.tv_sec + MAX_SECS; 4831 4832 if (isAbsolute) { 4833 jlong secs = time / 1000; 4834 if (secs > max_secs) { 4835 absTime->tv_sec = max_secs; 4836 } 4837 else { 4838 absTime->tv_sec = secs; 4839 } 4840 absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC; 4841 } 4842 else { 4843 jlong secs = time / NANOSECS_PER_SEC; 4844 if (secs >= MAX_SECS) { 4845 absTime->tv_sec = max_secs; 4846 absTime->tv_nsec = 0; 4847 } 4848 else { 4849 absTime->tv_sec = now.tv_sec + secs; 4850 absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000; 4851 if (absTime->tv_nsec >= NANOSECS_PER_SEC) { 4852 absTime->tv_nsec -= NANOSECS_PER_SEC; 4853 ++absTime->tv_sec; // note: this must be <= max_secs 4854 } 4855 } 4856 } 4857 assert(absTime->tv_sec >= 0, "tv_sec < 0"); 4858 assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs"); 4859 assert(absTime->tv_nsec >= 0, "tv_nsec < 0"); 4860 assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec"); 4861 } 4862 4863 void Parker::park(bool isAbsolute, jlong time) { 4864 // Optional fast-path check: 4865 // Return immediately if a permit is available. 4866 if (_counter > 0) { 4867 _counter = 0; 4868 OrderAccess::fence(); 4869 return; 4870 } 4871 4872 Thread* thread = Thread::current(); 4873 assert(thread->is_Java_thread(), "Must be JavaThread"); 4874 JavaThread *jt = (JavaThread *)thread; 4875 4876 // Optional optimization -- avoid state transitions if there's an interrupt pending. 
4877 // Check interrupt before trying to wait 4878 if (Thread::is_interrupted(thread, false)) { 4879 return; 4880 } 4881 4882 // Next, demultiplex/decode time arguments 4883 timespec absTime; 4884 if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all 4885 return; 4886 } 4887 if (time > 0) { 4888 unpackTime(&absTime, isAbsolute, time); 4889 } 4890 4891 4892 // Enter safepoint region 4893 // Beware of deadlocks such as 6317397. 4894 // The per-thread Parker:: mutex is a classic leaf-lock. 4895 // In particular a thread must never block on the Threads_lock while 4896 // holding the Parker:: mutex. If safepoints are pending both the 4897 // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock. 4898 ThreadBlockInVM tbivm(jt); 4899 4900 // Don't wait if cannot get lock since interference arises from 4901 // unblocking. Also. check interrupt before trying wait 4902 if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) { 4903 return; 4904 } 4905 4906 int status; 4907 if (_counter > 0) { // no wait needed 4908 _counter = 0; 4909 status = pthread_mutex_unlock(_mutex); 4910 assert (status == 0, "invariant"); 4911 OrderAccess::fence(); 4912 return; 4913 } 4914 4915 #ifdef ASSERT 4916 // Don't catch signals while blocked; let the running threads have the signals. 4917 // (This allows a debugger to break into the running thread.) 
4918 sigset_t oldsigs; 4919 sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals(); 4920 pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs); 4921 #endif 4922 4923 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); 4924 jt->set_suspend_equivalent(); 4925 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 4926 4927 if (time == 0) { 4928 status = pthread_cond_wait (_cond, _mutex); 4929 } else { 4930 status = pthread_cond_timedwait (_cond, _mutex, &absTime); 4931 if (status != 0 && WorkAroundNPTLTimedWaitHang) { 4932 pthread_cond_destroy (_cond); 4933 pthread_cond_init (_cond, NULL); 4934 } 4935 } 4936 assert_status(status == 0 || status == EINTR || 4937 status == ETIME || status == ETIMEDOUT, 4938 status, "cond_timedwait"); 4939 4940 #ifdef ASSERT 4941 pthread_sigmask(SIG_SETMASK, &oldsigs, NULL); 4942 #endif 4943 4944 _counter = 0; 4945 status = pthread_mutex_unlock(_mutex); 4946 assert_status(status == 0, status, "invariant"); 4947 // If externally suspended while waiting, re-suspend 4948 if (jt->handle_special_suspend_equivalent_condition()) { 4949 jt->java_suspend_self(); 4950 } 4951 4952 OrderAccess::fence(); 4953 } 4954 4955 void Parker::unpark() { 4956 int s, status; 4957 status = pthread_mutex_lock(_mutex); 4958 assert (status == 0, "invariant"); 4959 s = _counter; 4960 _counter = 1; 4961 if (s < 1) { 4962 if (WorkAroundNPTLTimedWaitHang) { 4963 status = pthread_cond_signal (_cond); 4964 assert (status == 0, "invariant"); 4965 status = pthread_mutex_unlock(_mutex); 4966 assert (status == 0, "invariant"); 4967 } else { 4968 status = pthread_mutex_unlock(_mutex); 4969 assert (status == 0, "invariant"); 4970 status = pthread_cond_signal (_cond); 4971 assert (status == 0, "invariant"); 4972 } 4973 } else { 4974 pthread_mutex_unlock(_mutex); 4975 assert (status == 0, "invariant"); 4976 } 4977 } 4978 4979 4980 extern char** environ; 4981 4982 // Run the specified command in a separate 
process. Return its exit value, 4983 // or -1 on failure (e.g. can't fork a new process). 4984 // Unlike system(), this function can be called from signal handler. It 4985 // doesn't block SIGINT et al. 4986 int os::fork_and_exec(char* cmd) { 4987 char * argv[4] = {"sh", "-c", cmd, NULL}; 4988 4989 pid_t pid = fork(); 4990 4991 if (pid < 0) { 4992 // fork failed 4993 return -1; 4994 4995 } else if (pid == 0) { 4996 // child process 4997 4998 // try to be consistent with system(), which uses "/usr/bin/sh" on AIX 4999 execve("/usr/bin/sh", argv, environ); 5000 5001 // execve failed 5002 _exit(-1); 5003 5004 } else { 5005 // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't 5006 // care about the actual exit code, for now. 5007 5008 int status; 5009 5010 // Wait for the child process to exit. This returns immediately if 5011 // the child has already exited. */ 5012 while (waitpid(pid, &status, 0) < 0) { 5013 switch (errno) { 5014 case ECHILD: return 0; 5015 case EINTR: break; 5016 default: return -1; 5017 } 5018 } 5019 5020 if (WIFEXITED(status)) { 5021 // The child exited normally; get its exit code. 5022 return WEXITSTATUS(status); 5023 } else if (WIFSIGNALED(status)) { 5024 // The child exited because of a signal 5025 // The best value to return is 0x80 + signal number, 5026 // because that is what all Unix shells do, and because 5027 // it allows callers to distinguish between process exit and 5028 // process death by signal. 5029 return 0x80 + WTERMSIG(status); 5030 } else { 5031 // Unknown exit code; pass it through 5032 return status; 5033 } 5034 } 5035 // Remove warning. 5036 return -1; 5037 } 5038 5039 // is_headless_jre() 5040 // 5041 // Test for the existence of xawt/libmawt.so or libawt_xawt.so 5042 // in order to report if we are running in a headless jre. 
5043 // 5044 // Since JDK8 xawt/libmawt.so is moved into the same directory 5045 // as libawt.so, and renamed libawt_xawt.so 5046 bool os::is_headless_jre() { 5047 struct stat statbuf; 5048 char buf[MAXPATHLEN]; 5049 char libmawtpath[MAXPATHLEN]; 5050 const char *xawtstr = "/xawt/libmawt.so"; 5051 const char *new_xawtstr = "/libawt_xawt.so"; 5052 5053 char *p; 5054 5055 // Get path to libjvm.so 5056 os::jvm_path(buf, sizeof(buf)); 5057 5058 // Get rid of libjvm.so 5059 p = strrchr(buf, '/'); 5060 if (p == NULL) return false; 5061 else *p = '\0'; 5062 5063 // Get rid of client or server 5064 p = strrchr(buf, '/'); 5065 if (p == NULL) return false; 5066 else *p = '\0'; 5067 5068 // check xawt/libmawt.so 5069 strcpy(libmawtpath, buf); 5070 strcat(libmawtpath, xawtstr); 5071 if (::stat(libmawtpath, &statbuf) == 0) return false; 5072 5073 // check libawt_xawt.so 5074 strcpy(libmawtpath, buf); 5075 strcat(libmawtpath, new_xawtstr); 5076 if (::stat(libmawtpath, &statbuf) == 0) return false; 5077 5078 return true; 5079 } 5080 5081 // Get the default path to the core file 5082 // Returns the length of the string 5083 int os::get_core_path(char* buffer, size_t bufferSize) { 5084 const char* p = get_current_directory(buffer, bufferSize); 5085 5086 if (p == NULL) { 5087 assert(p != NULL, "failed to get current directory"); 5088 return 0; 5089 } 5090 5091 return strlen(buffer); 5092 } 5093 5094 #ifndef PRODUCT 5095 void TestReserveMemorySpecial_test() { 5096 // No tests available for this platform 5097 } 5098 #endif