1 /* 2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // no precompiled headers 26 #include "classfile/classLoader.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "classfile/vmSymbols.hpp" 29 #include "code/icBuffer.hpp" 30 #include "code/vtableStubs.hpp" 31 #include "compiler/compileBroker.hpp" 32 #include "interpreter/interpreter.hpp" 33 #include "jvm_solaris.h" 34 #include "memory/allocation.inline.hpp" 35 #include "memory/filemap.hpp" 36 #include "mutex_solaris.inline.hpp" 37 #include "oops/oop.inline.hpp" 38 #include "os_share_solaris.hpp" 39 #include "prims/jniFastGetField.hpp" 40 #include "prims/jvm.h" 41 #include "prims/jvm_misc.hpp" 42 #include "runtime/arguments.hpp" 43 #include "runtime/extendedPC.hpp" 44 #include "runtime/globals.hpp" 45 #include "runtime/interfaceSupport.hpp" 46 #include "runtime/java.hpp" 47 #include "runtime/javaCalls.hpp" 48 #include "runtime/mutexLocker.hpp" 49 #include "runtime/objectMonitor.hpp" 50 #include "runtime/osThread.hpp" 51 #include "runtime/perfMemory.hpp" 52 #include "runtime/sharedRuntime.hpp" 53 #include "runtime/statSampler.hpp" 54 #include "runtime/stubRoutines.hpp" 55 #include "runtime/threadCritical.hpp" 56 #include "runtime/timer.hpp" 57 #include "services/attachListener.hpp" 58 #include "services/runtimeService.hpp" 59 #include "thread_solaris.inline.hpp" 60 #include "utilities/decoder.hpp" 61 #include "utilities/defaultStream.hpp" 62 #include "utilities/events.hpp" 63 #include "utilities/growableArray.hpp" 64 #include "utilities/vmError.hpp" 65 #ifdef TARGET_ARCH_x86 66 # include "assembler_x86.inline.hpp" 67 # include "nativeInst_x86.hpp" 68 #endif 69 #ifdef TARGET_ARCH_sparc 70 # include "assembler_sparc.inline.hpp" 71 # include "nativeInst_sparc.hpp" 72 #endif 73 #ifdef COMPILER1 74 #include "c1/c1_Runtime1.hpp" 75 #endif 76 #ifdef COMPILER2 77 #include "opto/runtime.hpp" 78 #endif 79 80 // put OS-includes here 81 # include <dlfcn.h> 82 # include <errno.h> 83 # include <exception> 84 # include <link.h> 85 # include <poll.h> 86 # 
include <pthread.h> 87 # include <pwd.h> 88 # include <schedctl.h> 89 # include <setjmp.h> 90 # include <signal.h> 91 # include <stdio.h> 92 # include <alloca.h> 93 # include <sys/filio.h> 94 # include <sys/ipc.h> 95 # include <sys/lwp.h> 96 # include <sys/machelf.h> // for elf Sym structure used by dladdr1 97 # include <sys/mman.h> 98 # include <sys/processor.h> 99 # include <sys/procset.h> 100 # include <sys/pset.h> 101 # include <sys/resource.h> 102 # include <sys/shm.h> 103 # include <sys/socket.h> 104 # include <sys/stat.h> 105 # include <sys/systeminfo.h> 106 # include <sys/time.h> 107 # include <sys/times.h> 108 # include <sys/types.h> 109 # include <sys/wait.h> 110 # include <sys/utsname.h> 111 # include <thread.h> 112 # include <unistd.h> 113 # include <sys/priocntl.h> 114 # include <sys/rtpriocntl.h> 115 # include <sys/tspriocntl.h> 116 # include <sys/iapriocntl.h> 117 # include <sys/fxpriocntl.h> 118 # include <sys/loadavg.h> 119 # include <string.h> 120 # include <stdio.h> 121 122 # define _STRUCTURED_PROC 1 // this gets us the new structured proc interfaces of 5.6 & later 123 # include <sys/procfs.h> // see comment in <sys/procfs.h> 124 125 #define MAX_PATH (2 * K) 126 127 // for timer info max values which include all bits 128 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 129 130 #ifdef _GNU_SOURCE 131 // See bug #6514594 132 extern "C" int madvise(caddr_t, size_t, int); 133 extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg, 134 int attr, int mask); 135 #endif //_GNU_SOURCE 136 137 /* 138 MPSS Changes Start. 139 The JVM binary needs to be built and run on pre-Solaris 9 140 systems, but the constants needed by MPSS are only in Solaris 9 141 header files. They are textually replicated here to allow 142 building on earlier systems. Once building on Solaris 8 is 143 no longer a requirement, these #defines can be replaced by ordinary 144 system .h inclusion. 
145 146 In earlier versions of the JDK and Solaris, we used ISM for large pages. 147 But ISM requires shared memory to achieve this and thus has many caveats. 148 MPSS is a fully transparent and is a cleaner way to get large pages. 149 Although we still require keeping ISM for backward compatiblitiy as well as 150 giving the opportunity to use large pages on older systems it is 151 recommended that MPSS be used for Solaris 9 and above. 152 153 */ 154 155 #ifndef MC_HAT_ADVISE 156 157 struct memcntl_mha { 158 uint_t mha_cmd; /* command(s) */ 159 uint_t mha_flags; 160 size_t mha_pagesize; 161 }; 162 #define MC_HAT_ADVISE 7 /* advise hat map size */ 163 #define MHA_MAPSIZE_VA 0x1 /* set preferred page size */ 164 #define MAP_ALIGN 0x200 /* addr specifies alignment */ 165 166 #endif 167 // MPSS Changes End. 168 169 170 // Here are some liblgrp types from sys/lgrp_user.h to be able to 171 // compile on older systems without this header file. 172 173 #ifndef MADV_ACCESS_LWP 174 # define MADV_ACCESS_LWP 7 /* next LWP to access heavily */ 175 #endif 176 #ifndef MADV_ACCESS_MANY 177 # define MADV_ACCESS_MANY 8 /* many processes to access heavily */ 178 #endif 179 180 #ifndef LGRP_RSRC_CPU 181 # define LGRP_RSRC_CPU 0 /* CPU resources */ 182 #endif 183 #ifndef LGRP_RSRC_MEM 184 # define LGRP_RSRC_MEM 1 /* memory resources */ 185 #endif 186 187 // Some more macros from sys/mman.h that are not present in Solaris 8. 
// Definitions for meminfo(2), textually replicated from the Solaris 9+
// <sys/mman.h> so the VM can still be built on releases that predate them.
#ifndef MAX_MEMINFO_CNT
/*
 * info_req request type definitions for meminfo
 * request types starting with MEMINFO_V are used for Virtual addresses
 * and should not be mixed with MEMINFO_PLGRP which is targeted for Physical
 * addresses
 */
# define MEMINFO_SHIFT          16
# define MEMINFO_MASK           (0xFF << MEMINFO_SHIFT)
# define MEMINFO_VPHYSICAL      (0x01 << MEMINFO_SHIFT) /* get physical addr */
# define MEMINFO_VLGRP          (0x02 << MEMINFO_SHIFT) /* get lgroup */
# define MEMINFO_VPAGESIZE      (0x03 << MEMINFO_SHIFT) /* size of phys page */
# define MEMINFO_VREPLCNT       (0x04 << MEMINFO_SHIFT) /* no. of replica */
# define MEMINFO_VREPL          (0x05 << MEMINFO_SHIFT) /* physical replica */
# define MEMINFO_VREPL_LGRP     (0x06 << MEMINFO_SHIFT) /* lgrp of replica */
# define MEMINFO_PLGRP          (0x07 << MEMINFO_SHIFT) /* lgroup for paddr */

/* maximum number of addresses meminfo() can process at a time */
# define MAX_MEMINFO_CNT 256

/* maximum number of request types */
# define MAX_MEMINFO_REQ 31
#endif

// see thr_setprio(3T) for the basis of these numbers
#define MinimumPriority 0
#define NormalPriority  64
#define MaximumPriority 127

// Values for ThreadPriorityPolicy == 1
// Maps Java priorities onto thr_setprio() values; slot 0 (-99999) appears to
// be an unusable-slot sentinel — confirm against the priority-mapping callers.
int prio_policy1[CriticalPriority+1] = {
  -99999,  0, 16,  32,  48,  64,
          80, 96, 112, 124, 127, 127 };

// System parameters used internally
static clock_t clock_tics_per_sec = 100;

// Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
static bool enabled_extended_FILE_stdio = false;

// For diagnostics to print a message once. see run_periodic_checks
static bool check_addr0_done = false;
static sigset_t check_signal_done;
static bool check_signals = true;

address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo

address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround


// "default" initializers for missing libc APIs.
// Fallbacks used when the real entry points are unavailable; they zero the
// primitive (a valid unlocked initial state) and report success.
extern "C" {
  static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int lwp_mutex_destroy(mutex_t *mx)                    { return 0; }

  static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
  static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
}

// "default" initializers for pthread-based synchronization
extern "C" {
  static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
}

// Thread Local Storage
// This is common to all Solaris platforms so it is defined here,
// in this common file.
// The declarations are in the os_cpu threadLS*.hpp files.
259 // 260 // Static member initialization for TLS 261 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL}; 262 263 #ifndef PRODUCT 264 #define _PCT(n,d) ((100.0*(double)(n))/(double)(d)) 265 266 int ThreadLocalStorage::_tcacheHit = 0; 267 int ThreadLocalStorage::_tcacheMiss = 0; 268 269 void ThreadLocalStorage::print_statistics() { 270 int total = _tcacheMiss+_tcacheHit; 271 tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n", 272 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total)); 273 } 274 #undef _PCT 275 #endif // PRODUCT 276 277 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id, 278 int index) { 279 Thread *thread = get_thread_slow(); 280 if (thread != NULL) { 281 address sp = os::current_stack_pointer(); 282 guarantee(thread->_stack_base == NULL || 283 (sp <= thread->_stack_base && 284 sp >= thread->_stack_base - thread->_stack_size) || 285 is_error_reported(), 286 "sp must be inside of selected thread stack"); 287 288 thread->set_self_raw_id(raw_id); // mark for quick retrieval 289 _get_thread_cache[ index ] = thread; 290 } 291 return thread; 292 } 293 294 295 static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0}; 296 #define NO_CACHED_THREAD ((Thread*)all_zero) 297 298 void ThreadLocalStorage::pd_set_thread(Thread* thread) { 299 300 // Store the new value before updating the cache to prevent a race 301 // between get_thread_via_cache_slowly() and this store operation. 302 os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread); 303 304 // Update thread cache with new thread if setting on thread create, 305 // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit. 306 uintptr_t raw = pd_raw_thread_id(); 307 int ix = pd_cache_index(raw); 308 _get_thread_cache[ix] = thread == NULL ? 
NO_CACHED_THREAD : thread; 309 } 310 311 void ThreadLocalStorage::pd_init() { 312 for (int i = 0; i < _pd_cache_size; i++) { 313 _get_thread_cache[i] = NO_CACHED_THREAD; 314 } 315 } 316 317 // Invalidate all the caches (happens to be the same as pd_init). 318 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); } 319 320 #undef NO_CACHED_THREAD 321 322 // END Thread Local Storage 323 324 static inline size_t adjust_stack_size(address base, size_t size) { 325 if ((ssize_t)size < 0) { 326 // 4759953: Compensate for ridiculous stack size. 327 size = max_intx; 328 } 329 if (size > (size_t)base) { 330 // 4812466: Make sure size doesn't allow the stack to wrap the address space. 331 size = (size_t)base; 332 } 333 return size; 334 } 335 336 static inline stack_t get_stack_info() { 337 stack_t st; 338 int retval = thr_stksegment(&st); 339 st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size); 340 assert(retval == 0, "incorrect return value from thr_stksegment"); 341 assert((address)&st < (address)st.ss_sp, "Invalid stack base returned"); 342 assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned"); 343 return st; 344 } 345 346 address os::current_stack_base() { 347 int r = thr_main() ; 348 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 349 bool is_primordial_thread = r; 350 351 // Workaround 4352906, avoid calls to thr_stksegment by 352 // thr_main after the first one (it looks like we trash 353 // some data, causing the value for ss_sp to be incorrect). 
354 if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) { 355 stack_t st = get_stack_info(); 356 if (is_primordial_thread) { 357 // cache initial value of stack base 358 os::Solaris::_main_stack_base = (address)st.ss_sp; 359 } 360 return (address)st.ss_sp; 361 } else { 362 guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base"); 363 return os::Solaris::_main_stack_base; 364 } 365 } 366 367 size_t os::current_stack_size() { 368 size_t size; 369 370 int r = thr_main() ; 371 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 372 if(!r) { 373 size = get_stack_info().ss_size; 374 } else { 375 struct rlimit limits; 376 getrlimit(RLIMIT_STACK, &limits); 377 size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur); 378 } 379 // base may not be page aligned 380 address base = current_stack_base(); 381 address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());; 382 return (size_t)(base - bottom); 383 } 384 385 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 386 return localtime_r(clock, res); 387 } 388 389 // interruptible infrastructure 390 391 // setup_interruptible saves the thread state before going into an 392 // interruptible system call. 393 // The saved state is used to restore the thread to 394 // its former state whether or not an interrupt is received. 
395 // Used by classloader os::read 396 // os::restartable_read calls skip this layer and stay in _thread_in_native 397 398 void os::Solaris::setup_interruptible(JavaThread* thread) { 399 400 JavaThreadState thread_state = thread->thread_state(); 401 402 assert(thread_state != _thread_blocked, "Coming from the wrong thread"); 403 assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible"); 404 OSThread* osthread = thread->osthread(); 405 osthread->set_saved_interrupt_thread_state(thread_state); 406 thread->frame_anchor()->make_walkable(thread); 407 ThreadStateTransition::transition(thread, thread_state, _thread_blocked); 408 } 409 410 // Version of setup_interruptible() for threads that are already in 411 // _thread_blocked. Used by os_sleep(). 412 void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) { 413 thread->frame_anchor()->make_walkable(thread); 414 } 415 416 JavaThread* os::Solaris::setup_interruptible() { 417 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread(); 418 setup_interruptible(thread); 419 return thread; 420 } 421 422 void os::Solaris::try_enable_extended_io() { 423 typedef int (*enable_extended_FILE_stdio_t)(int, int); 424 425 if (!UseExtendedFileIO) { 426 return; 427 } 428 429 enable_extended_FILE_stdio_t enabler = 430 (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT, 431 "enable_extended_FILE_stdio"); 432 if (enabler) { 433 enabler(-1, -1); 434 } 435 } 436 437 438 #ifdef ASSERT 439 440 JavaThread* os::Solaris::setup_interruptible_native() { 441 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread(); 442 JavaThreadState thread_state = thread->thread_state(); 443 assert(thread_state == _thread_in_native, "Assumed thread_in_native"); 444 return thread; 445 } 446 447 void os::Solaris::cleanup_interruptible_native(JavaThread* thread) { 448 JavaThreadState thread_state = thread->thread_state(); 449 assert(thread_state == _thread_in_native, "Assumed thread_in_native"); 450 } 451 #endif 
452 453 // cleanup_interruptible reverses the effects of setup_interruptible 454 // setup_interruptible_already_blocked() does not need any cleanup. 455 456 void os::Solaris::cleanup_interruptible(JavaThread* thread) { 457 OSThread* osthread = thread->osthread(); 458 459 ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state()); 460 } 461 462 // I/O interruption related counters called in _INTERRUPTIBLE 463 464 void os::Solaris::bump_interrupted_before_count() { 465 RuntimeService::record_interrupted_before_count(); 466 } 467 468 void os::Solaris::bump_interrupted_during_count() { 469 RuntimeService::record_interrupted_during_count(); 470 } 471 472 static int _processors_online = 0; 473 474 jint os::Solaris::_os_thread_limit = 0; 475 volatile jint os::Solaris::_os_thread_count = 0; 476 477 julong os::available_memory() { 478 return Solaris::available_memory(); 479 } 480 481 julong os::Solaris::available_memory() { 482 return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size(); 483 } 484 485 julong os::Solaris::_physical_memory = 0; 486 487 julong os::physical_memory() { 488 return Solaris::physical_memory(); 489 } 490 491 julong os::allocatable_physical_memory(julong size) { 492 #ifdef _LP64 493 return size; 494 #else 495 julong result = MIN2(size, (julong)3835*M); 496 if (!is_allocatable(result)) { 497 // Memory allocations will be aligned but the alignment 498 // is not known at this point. Alignments will 499 // be at most to LargePageSizeInBytes. Protect 500 // allocations from alignments up to illegal 501 // values. If at this point 2G is illegal. 
502 julong reasonable_size = (julong)2*G - 2 * LargePageSizeInBytes; 503 result = MIN2(size, reasonable_size); 504 } 505 return result; 506 #endif 507 } 508 509 static hrtime_t first_hrtime = 0; 510 static const hrtime_t hrtime_hz = 1000*1000*1000; 511 const int LOCK_BUSY = 1; 512 const int LOCK_FREE = 0; 513 const int LOCK_INVALID = -1; 514 static volatile hrtime_t max_hrtime = 0; 515 static volatile int max_hrtime_lock = LOCK_FREE; // Update counter with LSB as lock-in-progress 516 517 518 void os::Solaris::initialize_system_info() { 519 set_processor_count(sysconf(_SC_NPROCESSORS_CONF)); 520 _processors_online = sysconf (_SC_NPROCESSORS_ONLN); 521 _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE); 522 } 523 524 int os::active_processor_count() { 525 int online_cpus = sysconf(_SC_NPROCESSORS_ONLN); 526 pid_t pid = getpid(); 527 psetid_t pset = PS_NONE; 528 // Are we running in a processor set or is there any processor set around? 529 if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) { 530 uint_t pset_cpus; 531 // Query the number of cpus available to us. 532 if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) { 533 assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check"); 534 _processors_online = pset_cpus; 535 return pset_cpus; 536 } 537 } 538 // Otherwise return number of online cpus 539 return online_cpus; 540 } 541 542 static bool find_processors_in_pset(psetid_t pset, 543 processorid_t** id_array, 544 uint_t* id_length) { 545 bool result = false; 546 // Find the number of processors in the processor set. 547 if (pset_info(pset, NULL, id_length, NULL) == 0) { 548 // Make up an array to hold their ids. 549 *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length); 550 // Fill in the array with their processor ids. 
551 if (pset_info(pset, NULL, id_length, *id_array) == 0) { 552 result = true; 553 } 554 } 555 return result; 556 } 557 558 // Callers of find_processors_online() must tolerate imprecise results -- 559 // the system configuration can change asynchronously because of DR 560 // or explicit psradm operations. 561 // 562 // We also need to take care that the loop (below) terminates as the 563 // number of processors online can change between the _SC_NPROCESSORS_ONLN 564 // request and the loop that builds the list of processor ids. Unfortunately 565 // there's no reliable way to determine the maximum valid processor id, 566 // so we use a manifest constant, MAX_PROCESSOR_ID, instead. See p_online 567 // man pages, which claim the processor id set is "sparse, but 568 // not too sparse". MAX_PROCESSOR_ID is used to ensure that we eventually 569 // exit the loop. 570 // 571 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's 572 // not available on S8.0. 573 574 static bool find_processors_online(processorid_t** id_array, 575 uint* id_length) { 576 const processorid_t MAX_PROCESSOR_ID = 100000 ; 577 // Find the number of processors online. 578 *id_length = sysconf(_SC_NPROCESSORS_ONLN); 579 // Make up an array to hold their ids. 580 *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length); 581 // Processors need not be numbered consecutively. 582 long found = 0; 583 processorid_t next = 0; 584 while (found < *id_length && next < MAX_PROCESSOR_ID) { 585 processor_info_t info; 586 if (processor_info(next, &info) == 0) { 587 // NB, PI_NOINTR processors are effectively online ... 588 if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) { 589 (*id_array)[found] = next; 590 found += 1; 591 } 592 } 593 next += 1; 594 } 595 if (found < *id_length) { 596 // The loop above didn't identify the expected number of processors. 
597 // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN) 598 // and re-running the loop, above, but there's no guarantee of progress 599 // if the system configuration is in flux. Instead, we just return what 600 // we've got. Note that in the worst case find_processors_online() could 601 // return an empty set. (As a fall-back in the case of the empty set we 602 // could just return the ID of the current processor). 603 *id_length = found ; 604 } 605 606 return true; 607 } 608 609 static bool assign_distribution(processorid_t* id_array, 610 uint id_length, 611 uint* distribution, 612 uint distribution_length) { 613 // We assume we can assign processorid_t's to uint's. 614 assert(sizeof(processorid_t) == sizeof(uint), 615 "can't convert processorid_t to uint"); 616 // Quick check to see if we won't succeed. 617 if (id_length < distribution_length) { 618 return false; 619 } 620 // Assign processor ids to the distribution. 621 // Try to shuffle processors to distribute work across boards, 622 // assuming 4 processors per board. 623 const uint processors_per_board = ProcessDistributionStride; 624 // Find the maximum processor id. 625 processorid_t max_id = 0; 626 for (uint m = 0; m < id_length; m += 1) { 627 max_id = MAX2(max_id, id_array[m]); 628 } 629 // The next id, to limit loops. 630 const processorid_t limit_id = max_id + 1; 631 // Make up markers for available processors. 632 bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id); 633 for (uint c = 0; c < limit_id; c += 1) { 634 available_id[c] = false; 635 } 636 for (uint a = 0; a < id_length; a += 1) { 637 available_id[id_array[a]] = true; 638 } 639 // Step by "boards", then by "slot", copying to "assigned". 640 // NEEDS_CLEANUP: The assignment of processors should be stateful, 641 // remembering which processors have been assigned by 642 // previous calls, etc., so as to distribute several 643 // independent calls of this method. 
What we'd like is 644 // It would be nice to have an API that let us ask 645 // how many processes are bound to a processor, 646 // but we don't have that, either. 647 // In the short term, "board" is static so that 648 // subsequent distributions don't all start at board 0. 649 static uint board = 0; 650 uint assigned = 0; 651 // Until we've found enough processors .... 652 while (assigned < distribution_length) { 653 // ... find the next available processor in the board. 654 for (uint slot = 0; slot < processors_per_board; slot += 1) { 655 uint try_id = board * processors_per_board + slot; 656 if ((try_id < limit_id) && (available_id[try_id] == true)) { 657 distribution[assigned] = try_id; 658 available_id[try_id] = false; 659 assigned += 1; 660 break; 661 } 662 } 663 board += 1; 664 if (board * processors_per_board + 0 >= limit_id) { 665 board = 0; 666 } 667 } 668 if (available_id != NULL) { 669 FREE_C_HEAP_ARRAY(bool, available_id); 670 } 671 return true; 672 } 673 674 void os::set_native_thread_name(const char *name) { 675 // Not yet implemented. 676 return; 677 } 678 679 bool os::distribute_processes(uint length, uint* distribution) { 680 bool result = false; 681 // Find the processor id's of all the available CPUs. 682 processorid_t* id_array = NULL; 683 uint id_length = 0; 684 // There are some races between querying information and using it, 685 // since processor sets can change dynamically. 686 psetid_t pset = PS_NONE; 687 // Are we running in a processor set? 
688 if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) { 689 result = find_processors_in_pset(pset, &id_array, &id_length); 690 } else { 691 result = find_processors_online(&id_array, &id_length); 692 } 693 if (result == true) { 694 if (id_length >= length) { 695 result = assign_distribution(id_array, id_length, distribution, length); 696 } else { 697 result = false; 698 } 699 } 700 if (id_array != NULL) { 701 FREE_C_HEAP_ARRAY(processorid_t, id_array); 702 } 703 return result; 704 } 705 706 bool os::bind_to_processor(uint processor_id) { 707 // We assume that a processorid_t can be stored in a uint. 708 assert(sizeof(uint) == sizeof(processorid_t), 709 "can't convert uint to processorid_t"); 710 int bind_result = 711 processor_bind(P_LWPID, // bind LWP. 712 P_MYID, // bind current LWP. 713 (processorid_t) processor_id, // id. 714 NULL); // don't return old binding. 715 return (bind_result == 0); 716 } 717 718 bool os::getenv(const char* name, char* buffer, int len) { 719 char* val = ::getenv( name ); 720 if ( val == NULL 721 || strlen(val) + 1 > len ) { 722 if (len > 0) buffer[0] = 0; // return a null string 723 return false; 724 } 725 strcpy( buffer, val ); 726 return true; 727 } 728 729 730 // Return true if user is running as root. 731 732 bool os::have_special_privileges() { 733 static bool init = false; 734 static bool privileges = false; 735 if (!init) { 736 privileges = (getuid() != geteuid()) || (getgid() != getegid()); 737 init = true; 738 } 739 return privileges; 740 } 741 742 743 void os::init_system_properties_values() { 744 char arch[12]; 745 sysinfo(SI_ARCHITECTURE, arch, sizeof(arch)); 746 747 // The next steps are taken in the product version: 748 // 749 // Obtain the JAVA_HOME value from the location of libjvm[_g].so. 750 // This library should be located at: 751 // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm[_g].so. 
752 // 753 // If "/jre/lib/" appears at the right place in the path, then we 754 // assume libjvm[_g].so is installed in a JDK and we use this path. 755 // 756 // Otherwise exit with message: "Could not create the Java virtual machine." 757 // 758 // The following extra steps are taken in the debugging version: 759 // 760 // If "/jre/lib/" does NOT appear at the right place in the path 761 // instead of exit check for $JAVA_HOME environment variable. 762 // 763 // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>, 764 // then we append a fake suffix "hotspot/libjvm[_g].so" to this path so 765 // it looks like libjvm[_g].so is installed there 766 // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm[_g].so. 767 // 768 // Otherwise exit. 769 // 770 // Important note: if the location of libjvm.so changes this 771 // code needs to be changed accordingly. 772 773 // The next few definitions allow the code to be verbatim: 774 #define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n)) 775 #define free(p) FREE_C_HEAP_ARRAY(char, p) 776 #define getenv(n) ::getenv(n) 777 778 #define EXTENSIONS_DIR "/lib/ext" 779 #define ENDORSED_DIR "/lib/endorsed" 780 #define COMMON_DIR "/usr/jdk/packages" 781 782 { 783 /* sysclasspath, java_home, dll_dir */ 784 { 785 char *home_path; 786 char *dll_path; 787 char *pslash; 788 char buf[MAXPATHLEN]; 789 os::jvm_path(buf, sizeof(buf)); 790 791 // Found the full path to libjvm.so. 792 // Now cut the path to <java_home>/jre if we can. 
793 *(strrchr(buf, '/')) = '\0'; /* get rid of /libjvm.so */ 794 pslash = strrchr(buf, '/'); 795 if (pslash != NULL) 796 *pslash = '\0'; /* get rid of /{client|server|hotspot} */ 797 dll_path = malloc(strlen(buf) + 1); 798 if (dll_path == NULL) 799 return; 800 strcpy(dll_path, buf); 801 Arguments::set_dll_dir(dll_path); 802 803 if (pslash != NULL) { 804 pslash = strrchr(buf, '/'); 805 if (pslash != NULL) { 806 *pslash = '\0'; /* get rid of /<arch> */ 807 pslash = strrchr(buf, '/'); 808 if (pslash != NULL) 809 *pslash = '\0'; /* get rid of /lib */ 810 } 811 } 812 813 home_path = malloc(strlen(buf) + 1); 814 if (home_path == NULL) 815 return; 816 strcpy(home_path, buf); 817 Arguments::set_java_home(home_path); 818 819 if (!set_boot_path('/', ':')) 820 return; 821 } 822 823 /* 824 * Where to look for native libraries 825 */ 826 { 827 // Use dlinfo() to determine the correct java.library.path. 828 // 829 // If we're launched by the Java launcher, and the user 830 // does not set java.library.path explicitly on the commandline, 831 // the Java launcher sets LD_LIBRARY_PATH for us and unsets 832 // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case 833 // dlinfo returns LD_LIBRARY_PATH + crle settings (including 834 // /usr/lib), which is exactly what we want. 835 // 836 // If the user does set java.library.path, it completely 837 // overwrites this setting, and always has. 838 // 839 // If we're not launched by the Java launcher, we may 840 // get here with any/all of the LD_LIBRARY_PATH[_32|64] 841 // settings. Again, dlinfo does exactly what we want. 
842 843 Dl_serinfo _info, *info = &_info; 844 Dl_serpath *path; 845 char* library_path; 846 char *common_path; 847 int i; 848 849 // determine search path count and required buffer size 850 if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) { 851 vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror()); 852 } 853 854 // allocate new buffer and initialize 855 info = (Dl_serinfo*)malloc(_info.dls_size); 856 if (info == NULL) { 857 vm_exit_out_of_memory(_info.dls_size, 858 "init_system_properties_values info"); 859 } 860 info->dls_size = _info.dls_size; 861 info->dls_cnt = _info.dls_cnt; 862 863 // obtain search path information 864 if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) { 865 free(info); 866 vm_exit_during_initialization("dlinfo SERINFO request", dlerror()); 867 } 868 869 path = &info->dls_serpath[0]; 870 871 // Note: Due to a legacy implementation, most of the library path 872 // is set in the launcher. This was to accomodate linking restrictions 873 // on legacy Solaris implementations (which are no longer supported). 874 // Eventually, all the library path setting will be done here. 875 // 876 // However, to prevent the proliferation of improperly built native 877 // libraries, the new path component /usr/jdk/packages is added here. 878 879 // Determine the actual CPU architecture. 880 char cpu_arch[12]; 881 sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch)); 882 #ifdef _LP64 883 // If we are a 64-bit vm, perform the following translations: 884 // sparc -> sparcv9 885 // i386 -> amd64 886 if (strcmp(cpu_arch, "sparc") == 0) 887 strcat(cpu_arch, "v9"); 888 else if (strcmp(cpu_arch, "i386") == 0) 889 strcpy(cpu_arch, "amd64"); 890 #endif 891 892 // Construct the invariant part of ld_library_path. Note that the 893 // space for the colon and the trailing null are provided by the 894 // nulls included by the sizeof operator. 
895 size_t bufsize = sizeof(COMMON_DIR) + sizeof("/lib/") + strlen(cpu_arch); 896 common_path = malloc(bufsize); 897 if (common_path == NULL) { 898 free(info); 899 vm_exit_out_of_memory(bufsize, 900 "init_system_properties_values common_path"); 901 } 902 sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch); 903 904 // struct size is more than sufficient for the path components obtained 905 // through the dlinfo() call, so only add additional space for the path 906 // components explicitly added here. 907 bufsize = info->dls_size + strlen(common_path); 908 library_path = malloc(bufsize); 909 if (library_path == NULL) { 910 free(info); 911 free(common_path); 912 vm_exit_out_of_memory(bufsize, 913 "init_system_properties_values library_path"); 914 } 915 library_path[0] = '\0'; 916 917 // Construct the desired Java library path from the linker's library 918 // search path. 919 // 920 // For compatibility, it is optimal that we insert the additional path 921 // components specific to the Java VM after those components specified 922 // in LD_LIBRARY_PATH (if any) but before those added by the ld.so 923 // infrastructure. 
924 if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it 925 strcpy(library_path, common_path); 926 } else { 927 int inserted = 0; 928 for (i = 0; i < info->dls_cnt; i++, path++) { 929 uint_t flags = path->dls_flags & LA_SER_MASK; 930 if (((flags & LA_SER_LIBPATH) == 0) && !inserted) { 931 strcat(library_path, common_path); 932 strcat(library_path, os::path_separator()); 933 inserted = 1; 934 } 935 strcat(library_path, path->dls_name); 936 strcat(library_path, os::path_separator()); 937 } 938 // eliminate trailing path separator 939 library_path[strlen(library_path)-1] = '\0'; 940 } 941 942 // happens before argument parsing - can't use a trace flag 943 // tty->print_raw("init_system_properties_values: native lib path: "); 944 // tty->print_raw_cr(library_path); 945 946 // callee copies into its own buffer 947 Arguments::set_library_path(library_path); 948 949 free(common_path); 950 free(library_path); 951 free(info); 952 } 953 954 /* 955 * Extensions directories. 956 * 957 * Note that the space for the colon and the trailing null are provided 958 * by the nulls included by the sizeof operator (so actually one byte more 959 * than necessary is allocated). 960 */ 961 { 962 char *buf = (char *) malloc(strlen(Arguments::get_java_home()) + 963 sizeof(EXTENSIONS_DIR) + sizeof(COMMON_DIR) + 964 sizeof(EXTENSIONS_DIR)); 965 sprintf(buf, "%s" EXTENSIONS_DIR ":" COMMON_DIR EXTENSIONS_DIR, 966 Arguments::get_java_home()); 967 Arguments::set_ext_dirs(buf); 968 } 969 970 /* Endorsed standards default directory. 
*/
  {
    // Buffer holds "<java_home>" + ENDORSED_DIR; sizeof(ENDORSED_DIR)
    // includes the trailing NUL.
    char * buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
    sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
    Arguments::set_endorsed_dirs(buf);
  }
  }

// Undo the exit-checking allocation wrappers that were #define'd for the
// body of init_system_properties_values() only.
#undef malloc
#undef free
#undef getenv
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
#undef COMMON_DIR

}

// Trap into the debugger (BREAKPOINT expands to a platform-specific
// breakpoint instruction).
void os::breakpoint() {
  BREAKPOINT;
}

// Returns true for -X options that are obsolete and should be ignored.
// NOTE(review): the "-Xt" prefix test also matches "-Xtm", making the
// second branch unreachable; both return true, so the result is the same.
bool os::obsolete_option(const JavaVMOption *option)
{
  if (!strncmp(option->optionString, "-Xt", 3)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xtm", 4)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
    return true;
  }
  return false;
}

// Returns true if sp lies inside the thread's stack, i.e. within
// [stack_base - stack_size, stack_base).  The stack grows downward
// from stack_base().
bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
  address stackStart = (address)thread->stack_base();
  address stackEnd = (address)(stackStart - (address)thread->stack_size());
  if (sp < stackStart && sp >= stackEnd ) return true;
  return false;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

// Returns an estimate of the current stack pointer. Result must be guaranteed to
// point into the calling threads stack, and be no lower than the current stack
// pointer.
address os::current_stack_pointer() {
  volatile int dummy;
  address sp = (address)&dummy + 8; // %%%% need to confirm if this is right
  return sp;
}

// Thread id of the primordial thread; recorded at startup and used by
// os::create_main_thread().
static thread_t main_thread;

// Thread start routine for all new Java threads
extern "C" void* java_start(void* thread_addr) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  int prio;
  Thread* thread = (Thread*)thread_addr;
  OSThread* osthr = thread->osthread();

  osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
  thread->_schedctl = (void *) schedctl_init () ;

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // If the creator called set priority before we started,
  // we need to call set_native_priority now that we have an lwp.
  // We used to get the priority from thr_getprio (we called
  // thr_setprio way back in create_thread) and pass it to
  // set_native_priority, but Solaris scales the priority
  // in java_to_os_priority, so when we read it back here,
  // we pass trash to set_native_priority instead of what's
  // in java_to_os_priority. So we save the native priority
  // in the osThread and recall it here.

  if ( osthr->thread_id() != -1 ) {
    if ( UseThreadPriorities ) {
      int prio = osthr->native_priority();
      if (ThreadPriorityVerbose) {
        tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
                      INTPTR_FORMAT ", setting priority: %d\n",
                      osthr->thread_id(), osthr->lwp_id(), prio);
      }
      os::set_native_priority(thread, prio);
    }
  } else if (ThreadPriorityVerbose) {
    warning("Can't set priority in _start routine, thread id hasn't been set\n");
  }

  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  // initialize signal mask for this thread
  os::Solaris::hotspot_sigmask(thread);

  thread->run();

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::dec code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec(&os::Solaris::_os_thread_count);
  }

  if (UseDetachedThreads) {
    thr_exit(NULL);
    ShouldNotReachHere();
  }
  return NULL;
}

// Wraps an already-existing Solaris thread (identified by thread_id) in a
// new OSThread object.  Returns NULL on allocation failure.  The OSThread
// is left in the INITIALIZED state; callers promote it to RUNNABLE.
static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(thread_id);
  osthread->set_lwp_id(_lwp_self());
  thread->_schedctl = (void *) schedctl_init () ;

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  if ( ThreadPriorityVerbose ) {
    tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
                  osthread->thread_id(), osthread->lwp_id() );
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}

// Establishes the signal mask for the given thread: the caller's mask is
// saved on the OSThread (restored later in os::free_thread), then the
// signals the VM requires are unblocked.  Unless -Xrs is in effect,
// BREAK_SIGNAL is unblocked only for the VM thread and blocked everywhere
// else, so thread dumps are serviced by a single thread.
void os::Solaris::hotspot_sigmask(Thread* thread) {

  //Save caller's signal mask
  sigset_t sigmask;
  thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
  OSThread *osthread = thread->osthread();
  osthread->set_caller_sigmask(sigmask);

  thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
      thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Attaches an externally-created native thread (e.g. via JNI
// AttachCurrentThread) to the VM.  Returns false on failure.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  OSThread* osthread = create_os_thread(thread, thr_self());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);
  thread->set_osthread(osthread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// Binds the primordial (launcher) thread to the VM.  Idempotent with
// respect to _starting_thread: only the first call allocates it.
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primodial thread is runnable from the start
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// _T2_libthread is true if we believe we are running with the newer
//
// SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
bool os::Solaris::_T2_libthread = false;

// Creates a new native thread to run the given HotSpot Thread.  The new
// thread starts suspended (THR_SUSPENDED) in java_start(); it is resumed
// later via os::pd_start_thread().  Returns false on failure, in which
// case the OSThread has been cleaned up.
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  if ( ThreadPriorityVerbose ) {
    char *thrtyp;
    switch ( thr_type ) {
      case vm_thread:
        thrtyp = (char *)"vm";
        break;
      case cgc_thread:
        thrtyp = (char *)"cgc";
        break;
      case pgc_thread:
        thrtyp = (char *)"pgc";
        break;
      case java_thread:
        thrtyp = (char *)"java";
        break;
      case compiler_thread:
        thrtyp = (char *)"compiler";
        break;
      case watcher_thread:
        thrtyp = (char *)"watcher";
        break;
      default:
        thrtyp = (char *)"unknown";
        break;
    }
    tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  }

  // Calculate stack size if it's not specified by caller.
  if (stack_size == 0) {
    // The default stack size 1M (2M for LP64).
    // (BytesPerWord >> 2) is 1 on 32-bit and 2 on 64-bit.
    stack_size = (BytesPerWord >> 2) * K * K;

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }
  stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We got lots of threads. Check if we still have some address space left.
    // Need to be at least 5Mb of unreserved address space. We do check by
    // trying to reserve some.
    const size_t VirtualMemoryBangSize = 20*K*K;
    char* mem = os::reserve_memory(VirtualMemoryBangSize);
    if (mem == NULL) {
      delete osthread;
      return false;
    } else {
      // Release the memory again
      os::release_memory(mem, VirtualMemoryBangSize);
    }
  }

  // Setup osthread because the child thread may need it.
  thread->set_osthread(osthread);

  // Create the Solaris thread
  // explicit THR_BOUND for T2_libthread case in case
  // that assumption is not accurate, but our alternate signal stack
  // handling is based on it which must have bound threads
  thread_t tid = 0;
  long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
               | ((UseBoundThreads || os::Solaris::T2_libthread() ||
                   (thr_type == vm_thread) ||
                   (thr_type == cgc_thread) ||
                   (thr_type == pgc_thread) ||
                   (thr_type == compiler_thread && BackgroundCompilation)) ?
                  THR_BOUND : 0);
  int status;

  // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
  //
  // On multiprocessors systems, libthread sometimes under-provisions our
  // process with LWPs. On a 30-way systems, for instance, we could have
  // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
  // to our process. This can result in under utilization of PEs.
  // I suspect the problem is related to libthread's LWP
  // pool management and to the kernel's SIGBLOCKING "last LWP parked"
  // upcall policy.
  //
  // The following code is palliative -- it attempts to ensure that our
  // process has sufficient LWPs to take advantage of multiple PEs.
  // Proper long-term cures include using user-level threads bound to LWPs
  // (THR_BOUND) or using LWP-based synchronization. Note that there is a
  // slight timing window with respect to sampling _os_thread_count, but
  // the race is benign. Also, we should periodically recompute
  // _processors_online as the min of SC_NPROCESSORS_ONLN and the
  // the number of PEs in our partition. You might be tempted to use
  // THR_NEW_LWP here, but I'd recommend against it as that could
  // result in undesirable growth of the libthread's LWP pool.
  // The fix below isn't sufficient; for instance, it doesn't take into count
  // LWPs parked on IO. It does, however, help certain CPU-bound benchmarks.
  //
  // Some pathologies this scheme doesn't handle:
  // * Threads can block, releasing the LWPs. The LWPs can age out.
  //   When a large number of threads become ready again there aren't
  //   enough LWPs available to service them. This can occur when the
  //   number of ready threads oscillates.
  // * LWPs/Threads park on IO, thus taking the LWP out of circulation.
  //
  // Finally, we should call thr_setconcurrency() periodically to refresh
  // the LWP pool and thwart the LWP age-out mechanism.
  // The "+3" term provides a little slop -- we want to slightly overprovision.

  if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
    if (!(flags & THR_BOUND)) {
      thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
    }
  }
  // Although this doesn't hurt, we should warn of undefined behavior
  // when using unbound T1 threads with schedctl(). This should never
  // happen, as the compiler and VM threads are always created bound
  DEBUG_ONLY(
    if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
        (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
        ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
         (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
      warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
    }
  );


  // Mark that we don't have an lwp or thread id yet.
  // In case we attempt to set the priority before the thread starts.
  osthread->set_lwp_id(-1);
  osthread->set_thread_id(-1);

  status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  if (status != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("os::create_thread");
    }
    thread->set_osthread(NULL);
    // Need to clean up stuff we've allocated so far
    delete osthread;
    return false;
  }

  Atomic::inc(&os::Solaris::_os_thread_count);

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(tid);

  // Remember that we created this thread so we can set priority on it
  osthread->set_vm_created();

  // Set the default thread priority. If using bound threads, setting
  // lwp priority will be delayed until thread start.
  set_native_priority(thread,
                      DefaultThreadPriority == -1 ?
                        java_to_os_priority[NormPriority] :
                        DefaultThreadPriority);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}

/* defined for >= Solaris 10.
This allows builds on earlier versions 1380 * of Solaris to take advantage of the newly reserved Solaris JVM signals 1381 * With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2 1382 * and -XX:+UseAltSigs does nothing since these should have no conflict 1383 */ 1384 #if !defined(SIGJVM1) 1385 #define SIGJVM1 39 1386 #define SIGJVM2 40 1387 #endif 1388 1389 debug_only(static bool signal_sets_initialized = false); 1390 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs; 1391 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL; 1392 int os::Solaris::_SIGasync = ASYNC_SIGNAL; 1393 1394 bool os::Solaris::is_sig_ignored(int sig) { 1395 struct sigaction oact; 1396 sigaction(sig, (struct sigaction*)NULL, &oact); 1397 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction) 1398 : CAST_FROM_FN_PTR(void*, oact.sa_handler); 1399 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) 1400 return true; 1401 else 1402 return false; 1403 } 1404 1405 // Note: SIGRTMIN is a macro that calls sysconf() so it will 1406 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime 1407 static bool isJVM1available() { 1408 return SIGJVM1 < SIGRTMIN; 1409 } 1410 1411 void os::Solaris::signal_sets_init() { 1412 // Should also have an assertion stating we are still single-threaded. 1413 assert(!signal_sets_initialized, "Already initialized"); 1414 // Fill in signals that are necessarily unblocked for all threads in 1415 // the VM. Currently, we unblock the following signals: 1416 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden 1417 // by -Xrs (=ReduceSignalUsage)); 1418 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all 1419 // other threads. The "ReduceSignalUsage" boolean tells us not to alter 1420 // the dispositions or masks wrt these signals. 
1421 // Programs embedding the VM that want to use the above signals for their 1422 // own purposes must, at this time, use the "-Xrs" option to prevent 1423 // interference with shutdown hooks and BREAK_SIGNAL thread dumping. 1424 // (See bug 4345157, and other related bugs). 1425 // In reality, though, unblocking these signals is really a nop, since 1426 // these signals are not blocked by default. 1427 sigemptyset(&unblocked_sigs); 1428 sigemptyset(&allowdebug_blocked_sigs); 1429 sigaddset(&unblocked_sigs, SIGILL); 1430 sigaddset(&unblocked_sigs, SIGSEGV); 1431 sigaddset(&unblocked_sigs, SIGBUS); 1432 sigaddset(&unblocked_sigs, SIGFPE); 1433 1434 if (isJVM1available) { 1435 os::Solaris::set_SIGinterrupt(SIGJVM1); 1436 os::Solaris::set_SIGasync(SIGJVM2); 1437 } else if (UseAltSigs) { 1438 os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL); 1439 os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL); 1440 } else { 1441 os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL); 1442 os::Solaris::set_SIGasync(ASYNC_SIGNAL); 1443 } 1444 1445 sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt()); 1446 sigaddset(&unblocked_sigs, os::Solaris::SIGasync()); 1447 1448 if (!ReduceSignalUsage) { 1449 if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) { 1450 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL); 1451 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL); 1452 } 1453 if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) { 1454 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL); 1455 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL); 1456 } 1457 if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) { 1458 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL); 1459 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL); 1460 } 1461 } 1462 // Fill in signals that are blocked by all but the VM thread. 
1463 sigemptyset(&vm_sigs); 1464 if (!ReduceSignalUsage) 1465 sigaddset(&vm_sigs, BREAK_SIGNAL); 1466 debug_only(signal_sets_initialized = true); 1467 1468 // For diagnostics only used in run_periodic_checks 1469 sigemptyset(&check_signal_done); 1470 } 1471 1472 // These are signals that are unblocked while a thread is running Java. 1473 // (For some reason, they get blocked by default.) 1474 sigset_t* os::Solaris::unblocked_signals() { 1475 assert(signal_sets_initialized, "Not initialized"); 1476 return &unblocked_sigs; 1477 } 1478 1479 // These are the signals that are blocked while a (non-VM) thread is 1480 // running Java. Only the VM thread handles these signals. 1481 sigset_t* os::Solaris::vm_signals() { 1482 assert(signal_sets_initialized, "Not initialized"); 1483 return &vm_sigs; 1484 } 1485 1486 // These are signals that are blocked during cond_wait to allow debugger in 1487 sigset_t* os::Solaris::allowdebug_blocked_signals() { 1488 assert(signal_sets_initialized, "Not initialized"); 1489 return &allowdebug_blocked_sigs; 1490 } 1491 1492 1493 void _handle_uncaught_cxx_exception() { 1494 VMError err("An uncaught C++ exception"); 1495 err.report_and_die(); 1496 } 1497 1498 1499 // First crack at OS-specific initialization, from inside the new thread. 1500 void os::initialize_thread() { 1501 int r = thr_main() ; 1502 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 1503 if (r) { 1504 JavaThread* jt = (JavaThread *)Thread::current(); 1505 assert(jt != NULL,"Sanity check"); 1506 size_t stack_size; 1507 address base = jt->stack_base(); 1508 if (Arguments::created_by_java_launcher()) { 1509 // Use 2MB to allow for Solaris 7 64 bit mode. 1510 stack_size = JavaThread::stack_size_at_create() == 0 1511 ? 2048*K : JavaThread::stack_size_at_create(); 1512 1513 // There are rare cases when we may have already used more than 1514 // the basic stack size allotment before this method is invoked. 1515 // Attempt to allow for a normally sized java_stack. 
1516 size_t current_stack_offset = (size_t)(base - (address)&stack_size); 1517 stack_size += ReservedSpace::page_align_size_down(current_stack_offset); 1518 } else { 1519 // 6269555: If we were not created by a Java launcher, i.e. if we are 1520 // running embedded in a native application, treat the primordial thread 1521 // as much like a native attached thread as possible. This means using 1522 // the current stack size from thr_stksegment(), unless it is too large 1523 // to reliably setup guard pages. A reasonable max size is 8MB. 1524 size_t current_size = current_stack_size(); 1525 // This should never happen, but just in case.... 1526 if (current_size == 0) current_size = 2 * K * K; 1527 stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size; 1528 } 1529 address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());; 1530 stack_size = (size_t)(base - bottom); 1531 1532 assert(stack_size > 0, "Stack size calculation problem"); 1533 1534 if (stack_size > jt->stack_size()) { 1535 NOT_PRODUCT( 1536 struct rlimit limits; 1537 getrlimit(RLIMIT_STACK, &limits); 1538 size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur); 1539 assert(size >= jt->stack_size(), "Stack size problem in main thread"); 1540 ) 1541 tty->print_cr( 1542 "Stack size of %d Kb exceeds current limit of %d Kb.\n" 1543 "(Stack sizes are rounded up to a multiple of the system page size.)\n" 1544 "See limit(1) to increase the stack size limit.", 1545 stack_size / K, jt->stack_size() / K); 1546 vm_exit(1); 1547 } 1548 assert(jt->stack_size() >= stack_size, 1549 "Attempt to map more stack than was allocated"); 1550 jt->set_stack_size(stack_size); 1551 } 1552 1553 // 5/22/01: Right now alternate signal stacks do not handle 1554 // throwing stack overflow exceptions, see bug 4463178 1555 // Until a fix is found for this, T2 will NOT imply alternate signal 1556 // stacks. 1557 // If using T2 libthread threads, install an alternate signal stack. 
1558 // Because alternate stacks associate with LWPs on Solaris, 1559 // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads 1560 // we prefer to explicitly stack bang. 1561 // If not using T2 libthread, but using UseBoundThreads any threads 1562 // (primordial thread, jni_attachCurrentThread) we do not create, 1563 // probably are not bound, therefore they can not have an alternate 1564 // signal stack. Since our stack banging code is generated and 1565 // is shared across threads, all threads must be bound to allow 1566 // using alternate signal stacks. The alternative is to interpose 1567 // on _lwp_create to associate an alt sig stack with each LWP, 1568 // and this could be a problem when the JVM is embedded. 1569 // We would prefer to use alternate signal stacks with T2 1570 // Since there is currently no accurate way to detect T2 1571 // we do not. Assuming T2 when running T1 causes sig 11s or assertions 1572 // on installing alternate signal stacks 1573 1574 1575 // 05/09/03: removed alternate signal stack support for Solaris 1576 // The alternate signal stack mechanism is no longer needed to 1577 // handle stack overflow. This is now handled by allocating 1578 // guard pages (red zone) and stackbanging. 1579 // Initially the alternate signal stack mechanism was removed because 1580 // it did not work with T1 llibthread. Alternate 1581 // signal stacks MUST have all threads bound to lwps. Applications 1582 // can create their own threads and attach them without their being 1583 // bound under T1. This is frequently the case for the primordial thread. 1584 // If we were ever to reenable this mechanism we would need to 1585 // use the dynamic check for T2 libthread. 
1586 1587 os::Solaris::init_thread_fpu_state(); 1588 std::set_terminate(_handle_uncaught_cxx_exception); 1589 } 1590 1591 1592 1593 // Free Solaris resources related to the OSThread 1594 void os::free_thread(OSThread* osthread) { 1595 assert(osthread != NULL, "os::free_thread but osthread not set"); 1596 1597 1598 // We are told to free resources of the argument thread, 1599 // but we can only really operate on the current thread. 1600 // The main thread must take the VMThread down synchronously 1601 // before the main thread exits and frees up CodeHeap 1602 guarantee((Thread::current()->osthread() == osthread 1603 || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread"); 1604 if (Thread::current()->osthread() == osthread) { 1605 // Restore caller's signal mask 1606 sigset_t sigmask = osthread->caller_sigmask(); 1607 thr_sigsetmask(SIG_SETMASK, &sigmask, NULL); 1608 } 1609 delete osthread; 1610 } 1611 1612 void os::pd_start_thread(Thread* thread) { 1613 int status = thr_continue(thread->osthread()->thread_id()); 1614 assert_status(status == 0, status, "thr_continue failed"); 1615 } 1616 1617 1618 intx os::current_thread_id() { 1619 return (intx)thr_self(); 1620 } 1621 1622 static pid_t _initial_pid = 0; 1623 1624 int os::current_process_id() { 1625 return (int)(_initial_pid ? _initial_pid : getpid()); 1626 } 1627 1628 int os::allocate_thread_local_storage() { 1629 // %%% in Win32 this allocates a memory segment pointed to by a 1630 // register. Dan Stein can implement a similar feature in 1631 // Solaris. Alternatively, the VM can do the same thing 1632 // explicitly: malloc some storage and keep the pointer in a 1633 // register (which is part of the thread's context) (or keep it 1634 // in TLS). 1635 // %%% In current versions of Solaris, thr_self and TSD can 1636 // be accessed via short sequences of displaced indirections. 1637 // The value of thr_self is available as %g7(36). 
1638 // The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4), 1639 // assuming that the current thread already has a value bound to k. 1640 // It may be worth experimenting with such access patterns, 1641 // and later having the parameters formally exported from a Solaris 1642 // interface. I think, however, that it will be faster to 1643 // maintain the invariant that %g2 always contains the 1644 // JavaThread in Java code, and have stubs simply 1645 // treat %g2 as a caller-save register, preserving it in a %lN. 1646 thread_key_t tk; 1647 if (thr_keycreate( &tk, NULL ) ) 1648 fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed " 1649 "(%s)", strerror(errno))); 1650 return int(tk); 1651 } 1652 1653 void os::free_thread_local_storage(int index) { 1654 // %%% don't think we need anything here 1655 // if ( pthread_key_delete((pthread_key_t) tk) ) 1656 // fatal("os::free_thread_local_storage: pthread_key_delete failed"); 1657 } 1658 1659 #define SMALLINT 32 // libthread allocate for tsd_common is a version specific 1660 // small number - point is NO swap space available 1661 void os::thread_local_storage_at_put(int index, void* value) { 1662 // %%% this is used only in threadLocalStorage.cpp 1663 if (thr_setspecific((thread_key_t)index, value)) { 1664 if (errno == ENOMEM) { 1665 vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space"); 1666 } else { 1667 fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed " 1668 "(%s)", strerror(errno))); 1669 } 1670 } else { 1671 ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ; 1672 } 1673 } 1674 1675 // This function could be called before TLS is initialized, for example, when 1676 // VM receives an async signal or when VM causes a fatal error during 1677 // initialization. Return NULL if thr_getspecific() fails. 
1678 void* os::thread_local_storage_at(int index) { 1679 // %%% this is used only in threadLocalStorage.cpp 1680 void* r = NULL; 1681 return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r; 1682 } 1683 1684 1685 // gethrtime can move backwards if read from one cpu and then a different cpu 1686 // getTimeNanos is guaranteed to not move backward on Solaris 1687 // local spinloop created as faster for a CAS on an int than 1688 // a CAS on a 64bit jlong. Also Atomic::cmpxchg for jlong is not 1689 // supported on sparc v8 or pre supports_cx8 intel boxes. 1690 // oldgetTimeNanos for systems which do not support CAS on 64bit jlong 1691 // i.e. sparc v8 and pre supports_cx8 (i486) intel boxes 1692 inline hrtime_t oldgetTimeNanos() { 1693 int gotlock = LOCK_INVALID; 1694 hrtime_t newtime = gethrtime(); 1695 1696 for (;;) { 1697 // grab lock for max_hrtime 1698 int curlock = max_hrtime_lock; 1699 if (curlock & LOCK_BUSY) continue; 1700 if (gotlock = Atomic::cmpxchg(LOCK_BUSY, &max_hrtime_lock, LOCK_FREE) != LOCK_FREE) continue; 1701 if (newtime > max_hrtime) { 1702 max_hrtime = newtime; 1703 } else { 1704 newtime = max_hrtime; 1705 } 1706 // release lock 1707 max_hrtime_lock = LOCK_FREE; 1708 return newtime; 1709 } 1710 } 1711 // gethrtime can move backwards if read from one cpu and then a different cpu 1712 // getTimeNanos is guaranteed to not move backward on Solaris 1713 inline hrtime_t getTimeNanos() { 1714 if (VM_Version::supports_cx8()) { 1715 const hrtime_t now = gethrtime(); 1716 // Use atomic long load since 32-bit x86 uses 2 registers to keep long. 1717 const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime); 1718 if (now <= prev) return prev; // same or retrograde time; 1719 const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev); 1720 assert(obsv >= prev, "invariant"); // Monotonicity 1721 // If the CAS succeeded then we're done and return "now". 
1722 // If the CAS failed and the observed value "obs" is >= now then 1723 // we should return "obs". If the CAS failed and now > obs > prv then 1724 // some other thread raced this thread and installed a new value, in which case 1725 // we could either (a) retry the entire operation, (b) retry trying to install now 1726 // or (c) just return obs. We use (c). No loop is required although in some cases 1727 // we might discard a higher "now" value in deference to a slightly lower but freshly 1728 // installed obs value. That's entirely benign -- it admits no new orderings compared 1729 // to (a) or (b) -- and greatly reduces coherence traffic. 1730 // We might also condition (c) on the magnitude of the delta between obs and now. 1731 // Avoiding excessive CAS operations to hot RW locations is critical. 1732 // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate 1733 return (prev == obsv) ? now : obsv ; 1734 } else { 1735 return oldgetTimeNanos(); 1736 } 1737 } 1738 1739 // Time since start-up in seconds to a fine granularity. 1740 // Used by VMSelfDestructTimer and the MemProfiler. 1741 double os::elapsedTime() { 1742 return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz; 1743 } 1744 1745 jlong os::elapsed_counter() { 1746 return (jlong)(getTimeNanos() - first_hrtime); 1747 } 1748 1749 jlong os::elapsed_frequency() { 1750 return hrtime_hz; 1751 } 1752 1753 // Return the real, user, and system times in seconds from an 1754 // arbitrary fixed point in the past. 
// Fill in real/user/system process times (in seconds); returns false if
// times(2) fails.  Real time comes from getTimeNanos() for consistency
// with the VM's monotonic clock, not from times(2).
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    // For consistency return the real time from getTimeNanos()
    // converted to seconds.
    *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);

    return true;
  }
}

bool os::supports_vtime() { return true; }

// Enable micro-state accounting (PR_MSACCT) for this process by writing a
// PCSET control message to /proc/self/ctl.  Returns false on any failure.
bool os::enable_vtime() {
  int fd = ::open("/proc/self/ctl", O_WRONLY);
  if (fd == -1)
    return false;

  long cmd[] = { PCSET, PR_MSACCT };
  int res = ::write(fd, cmd, sizeof(long) * 2);
  ::close(fd);
  if (res != sizeof(long) * 2)
    return false;

  return true;
}

// Report whether micro-state accounting is currently enabled, by reading
// the pr_flags word out of /proc/self/status.
bool os::vtime_enabled() {
  int fd = ::open("/proc/self/status", O_RDONLY);
  if (fd == -1)
    return false;

  pstatus_t status;
  int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
  ::close(fd);
  if (res != sizeof(pstatus_t))
    return false;

  return status.pr_flags & PR_MSACCT;
}

// Per-LWP virtual (CPU) time in seconds, from gethrvtime().
double os::elapsedVTime() {
  return (double)gethrvtime() / (double)hrtime_hz;
}

// Used internally for comparisons only
// getTimeMillis guaranteed to not move backwards on Solaris
jlong getTimeMillis() {
  jlong nanotime = getTimeNanos();
  return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
}

// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
jlong os::javaTimeMillis() {
  timeval t;
  if (gettimeofday( &t, NULL) == -1)
    fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
  return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
}

// Monotonic nanosecond time for System.nanoTime().
jlong os::javaTimeNanos() {
  return (jlong)getTimeNanos();
}

// Describe the javaTimeNanos() clock for JVMTI: full 64-bit range,
// monotonic (never skips backward or forward), elapsed-time semantics.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
  info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
  info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
  info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
}

// Format the current local time as "YYYY-MM-DD HH:MM:SS" into buf.
char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
// Abort the VM: run shutdown hooks, then either dump core (debug builds
// print the current thread id first) or exit(1).
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core (for debugging)
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  _exit(-1);
}

// unused
void os::set_error_file(const char *logfile) {}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

// True if filename names an existing file (stat succeeds); NULL or empty
// names are treated as non-existent.
static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

// Build "lib<fname>.so" in buffer, optionally prefixed by pname.  When pname
// is a path-separator list, try each element and keep the first that exists.
void os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Quietly truncate on buffer overflow.  Should be an error.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0 ; i < n ; i++) {
      // really shouldn't be NULL but what the heck, check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        break;
      }
    }
    // release the storage
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
  }
}

const char* os::get_current_directory(char *buf, int buflen) {
  return getcwd(buf, buflen);
}

// check if addr is inside libjvm[_g].so
bool os::address_is_in_vm(address addr) {
  static address libjvm_base_addr;
  Dl_info dlinfo;

  // Resolve libjvm's own base address once, via a symbol we know lives here.
  if (libjvm_base_addr == NULL) {
    dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo);
    libjvm_base_addr = (address)dlinfo.dli_fbase;
    assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
  }

  if (dladdr((void *)addr, &dlinfo)) {
    if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
  }

  return false;
}

typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
static dladdr1_func_type dladdr1_func = NULL;

// Map a code address to its enclosing function name (demangled when
// possible) and the offset within that function.  Prefers dladdr1 (which
// yields the symbol size, so we can reject addresses past the symbol's end)
// and falls back to plain dladdr, then to the Decoder on the library file.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int * offset) {
  Dl_info dlinfo;

  // dladdr1_func was initialized in os::init()
  if (dladdr1_func){
      // yes, we have dladdr1

      // Support for dladdr1 is checked at runtime; it may be
      // available even if the vm is built on a machine that does
      // not have dladdr1 support.  Make sure there is a value for
      // RTLD_DL_SYMENT.
      #ifndef RTLD_DL_SYMENT
      #define RTLD_DL_SYMENT 1
      #endif
#ifdef _LP64
      Elf64_Sym * info;
#else
      Elf32_Sym * info;
#endif
      if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
                       RTLD_DL_SYMENT)) {
        // Only accept the symbol if addr actually falls inside it.
        if ((char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
          if (buf != NULL) {
            if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen))
              jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
          }
          if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
          return true;
        }
      }
      // NOTE(review): if dladdr1 failed, dlinfo may be unmodified here;
      // presumably dladdr1 fills it even on partial lookups — verify.
      if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
        if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
            buf, buflen, offset, dlinfo.dli_fname)) {
          return true;
        }
      }
      if (buf != NULL) buf[0] = '\0';
      if (offset != NULL) *offset = -1;
      return false;
  } else {
      // no, only dladdr is available
      if (dladdr((void *)addr, &dlinfo)) {
        if (buf != NULL) {
          // FIX: pass the symbol name as an argument, not as the format
          // string.  The previous jio_snprintf(buf, buflen, dlinfo.dli_sname)
          // would misinterpret any '%' in a symbol name as a conversion
          // (format-string bug); the dladdr1 branch already did this right.
          if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen))
            jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
        }
        if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
        return true;
      } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
        if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
            buf, buflen, offset, dlinfo.dli_fname)) {
          return true;
        }
      }
      if (buf != NULL) buf[0] = '\0';
      if (offset != NULL) *offset = -1;
      return false;
  }
}

// Map a code address to the containing shared library's name and the
// offset of addr from the library's base address.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  Dl_info dlinfo;

  if (dladdr((void*)addr, &dlinfo)){
    if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
    if (offset) *offset = addr - (address)dlinfo.dli_fbase;
    return true;
  } else {
    if (buf) buf[0] = '\0';
    if (offset) *offset = -1;
    return false;
  }
}

// Prints the names and full paths of all opened dynamic libraries
// for current process
void os::print_dll_info(outputStream * st) {
  Dl_info dli;
  void *handle;
  Link_map *map;
  Link_map *p;

  st->print_cr("Dynamic libraries:"); st->flush();

  if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) {
    st->print_cr("Error: Cannot print dynamic libraries.");
    return;
  }
  handle = dlopen(dli.dli_fname, RTLD_LAZY);
  if (handle == NULL) {
    st->print_cr("Error: Cannot print dynamic libraries.");
    return;
  }
  dlinfo(handle, RTLD_DI_LINKMAP, &map);
  if (map == NULL) {
    st->print_cr("Error: Cannot print dynamic libraries.");
    return;
  }

  // Rewind to the head of the link map, then walk it forward.
  while (map->l_prev != NULL)
    map = map->l_prev;

  while (map != NULL) {
    st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
    map = map->l_next;
  }

  dlclose(handle);
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on

void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
{
  void * result= ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Successful loading
    return result;
  }

  Elf32_Ehdr elf_head;

  // Read system error message into ebuf
  // It may or may not be overwritten below
  ::strncpy(ebuf, ::dlerror(), ebuflen-1);
  ebuf[ebuflen-1]='\0';
  int diag_msg_max_length=ebuflen-strlen(ebuf);
  char* diag_msg_buf=ebuf+strlen(ebuf);

  if (diag_msg_max_length==0) {
    // No more space in ebuf for additional diagnostics message
    return NULL;
  }


  int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);

  if (file_descriptor < 0) {
    // Can't open library, report dlerror() message
    return NULL;
  }

  bool failed_to_read_elf_head=
    (sizeof(elf_head)!=
        (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;

  ::close(file_descriptor);
  if (failed_to_read_elf_head) {
    // file i/o error - report dlerror() msg
    return NULL;
  }

  typedef struct {
    Elf32_Half    code;         // Actual value as defined in elf.h
    Elf32_Half    compat_class; // Compatibility of archs at VM's sense
    char          elf_class;    // 32 or 64 bit
    char          endianess;    // MSB or LSB
    char*         name;         // String representation
  } arch_t;

  static const arch_t arch_array[]={
    {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
    {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
    {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
    {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
    {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
  };

#if  (defined IA32)
    static  Elf32_Half running_arch_code=EM_386;
#elif   (defined AMD64)
    static  Elf32_Half running_arch_code=EM_X86_64;
#elif  (defined IA64)
    static  Elf32_Half running_arch_code=EM_IA_64;
#elif  (defined __sparc) && (defined _LP64)
    static  Elf32_Half running_arch_code=EM_SPARCV9;
#elif  (defined __sparc) && (!defined _LP64)
    static  Elf32_Half running_arch_code=EM_SPARC;
#elif  (defined __powerpc64__)
    static  Elf32_Half running_arch_code=EM_PPC64;
#elif  (defined __powerpc__)
    static  Elf32_Half running_arch_code=EM_PPC;
#elif  (defined ARM)
    static  Elf32_Half running_arch_code=EM_ARM;
#else
    #error Method os::dll_load requires that one of following is defined:\
         IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM
#endif

  // Identify compatability class for VM's architecture and library's architecture
  // Obtain string descriptions for architectures

  arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
  int running_arch_index=-1;

  for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
    if (running_arch_code == arch_array[i].code) {
      running_arch_index    = i;
    }
    if (lib_arch.code == arch_array[i].code) {
      lib_arch.compat_class = arch_array[i].compat_class;
      lib_arch.name         = arch_array[i].name;
    }
  }

  assert(running_arch_index != -1,
    "Didn't find running architecture code (running_arch_code) in arch_array");
  if (running_arch_index == -1) {
    // Even though running architecture detection failed
    // we may still continue with reporting dlerror() message
    return NULL;
  }

  if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
    return NULL;
  }

  if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
    return NULL;
  }

  if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
    if ( lib_arch.name!=NULL ) {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
        " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
        lib_arch.name, arch_array[running_arch_index].name);
    } else {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
      " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
      lib_arch.code,
      arch_array[running_arch_index].name);
    }
  }

  return NULL;
}

void* os::dll_lookup(void* handle, const char* name) {
  return dlsym(handle, name);
}

// stat(2) on a native-converted copy of path; fails with ENAMETOOLONG
// rather than overflowing the local path buffer.
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  return ::stat(pathbuf, sbuf);
}

// Stream the raw contents of filename to st; returns false if the file
// cannot be opened.
static bool _print_ascii_file(const char* filename, outputStream* st) {
  int fd = ::open(filename, O_RDONLY);
  if (fd == -1) {
    return false;
  }

  char buf[32];
  int bytes;
  while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
    st->print_raw(buf, bytes);
  }

  ::close(fd);

  return true;
}

// Print OS release, uname, libthread flavor, resource limits and load
// average for hs_err / jstack-style reports.
void os::print_os_info(outputStream* st) {
  st->print("OS:");

  if (!_print_ascii_file("/etc/release", st)) {
    st->print("Solaris");
  }
  st->cr();

  // kernel
  st->print("uname:");
  struct utsname name;
  uname(&name);
  st->print(name.sysname); st->print(" ");
  st->print(name.release); st->print(" ");
  st->print(name.version); st->print(" ");
  st->print(name.machine);

  // libthread
  if (os::Solaris::T2_libthread()) st->print("  (T2 libthread)");
  else st->print("  (T1 libthread)");
  st->cr();

  // rlimit
  st->print("rlimit:");
  struct rlimit rlim;

  st->print(" STACK ");
  getrlimit(RLIMIT_STACK, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", CORE ");
  getrlimit(RLIMIT_CORE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", NOFILE ");
  getrlimit(RLIMIT_NOFILE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%d", rlim.rlim_cur);

  st->print(", AS ");
  getrlimit(RLIMIT_AS, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);
  st->cr();

  // load average
  st->print("load average:");
  double loadavg[3];
  os::loadavg(loadavg, 3);
  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  st->cr();
}


// Scan /proc/self/map for any mapping at virtual address 0 and warn about
// it (a NULL-page mapping hides NULL-pointer bugs).  Returns true if one
// was found.
static bool check_addr0(outputStream* st) {
  jboolean status = false;
  int fd = ::open("/proc/self/map",O_RDONLY);
  if (fd >= 0) {
    prmap_t p;
    while(::read(fd, &p, sizeof(p)) > 0) {
      if (p.pr_vaddr == 0x0) {
        // FIX: dropped the stray trailing p.pr_mapname argument -- the
        // format string has only two conversions (%x and %d); the extra
        // argument was never consumed.
        st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024);
        st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
        st->print("Access:");
        st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
        st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
        st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
        st->cr();
        status = true;
      }
    }
    // FIX: close the descriptor once, after the read loop.  Previously
    // ::close(fd) was inside the loop body, so the fd was closed on the
    // first iteration and every subsequent read used a closed descriptor.
    ::close(fd);
  }
  return status;
}

void os::pd_print_cpu_info(outputStream* st) {
  // Nothing to do for now.
}

// Print page size plus physical/free memory, and warn about address-0
// mappings, for error reports.
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);
  st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
  st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
  st->cr();
  (void) check_addr0(st);
}

// Taken from /usr/include/sys/machsig.h  Supposed to be architecture specific
// but they're the same for all the solaris architectures that we support.
2347 const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR", 2348 "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG", 2349 "ILL_COPROC", "ILL_BADSTK" }; 2350 2351 const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV", 2352 "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES", 2353 "FPE_FLTINV", "FPE_FLTSUB" }; 2354 2355 const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" }; 2356 2357 const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" }; 2358 2359 void os::print_siginfo(outputStream* st, void* siginfo) { 2360 st->print("siginfo:"); 2361 2362 const int buflen = 100; 2363 char buf[buflen]; 2364 siginfo_t *si = (siginfo_t*)siginfo; 2365 st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen)); 2366 char *err = strerror(si->si_errno); 2367 if (si->si_errno != 0 && err != NULL) { 2368 st->print("si_errno=%s", err); 2369 } else { 2370 st->print("si_errno=%d", si->si_errno); 2371 } 2372 const int c = si->si_code; 2373 assert(c > 0, "unexpected si_code"); 2374 switch (si->si_signo) { 2375 case SIGILL: 2376 st->print(", si_code=%d (%s)", c, c > 8 ? "" : ill_names[c]); 2377 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2378 break; 2379 case SIGFPE: 2380 st->print(", si_code=%d (%s)", c, c > 9 ? "" : fpe_names[c]); 2381 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2382 break; 2383 case SIGSEGV: 2384 st->print(", si_code=%d (%s)", c, c > 2 ? "" : segv_names[c]); 2385 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2386 break; 2387 case SIGBUS: 2388 st->print(", si_code=%d (%s)", c, c > 3 ? 
"" : bus_names[c]); 2389 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2390 break; 2391 default: 2392 st->print(", si_code=%d", si->si_code); 2393 // no si_addr 2394 } 2395 2396 if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) && 2397 UseSharedSpaces) { 2398 FileMapInfo* mapinfo = FileMapInfo::current_info(); 2399 if (mapinfo->is_in_shared_space(si->si_addr)) { 2400 st->print("\n\nError accessing class data sharing archive." \ 2401 " Mapped file inaccessible during execution, " \ 2402 " possible disk/network problem."); 2403 } 2404 } 2405 st->cr(); 2406 } 2407 2408 // Moved from whole group, because we need them here for diagnostic 2409 // prints. 2410 #define OLDMAXSIGNUM 32 2411 static int Maxsignum = 0; 2412 static int *ourSigFlags = NULL; 2413 2414 extern "C" void sigINTRHandler(int, siginfo_t*, void*); 2415 2416 int os::Solaris::get_our_sigflags(int sig) { 2417 assert(ourSigFlags!=NULL, "signal data structure not initialized"); 2418 assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range"); 2419 return ourSigFlags[sig]; 2420 } 2421 2422 void os::Solaris::set_our_sigflags(int sig, int flags) { 2423 assert(ourSigFlags!=NULL, "signal data structure not initialized"); 2424 assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range"); 2425 ourSigFlags[sig] = flags; 2426 } 2427 2428 2429 static const char* get_signal_handler_name(address handler, 2430 char* buf, int buflen) { 2431 int offset; 2432 bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset); 2433 if (found) { 2434 // skip directory names 2435 const char *p1, *p2; 2436 p1 = buf; 2437 size_t len = strlen(os::file_separator()); 2438 while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len; 2439 jio_snprintf(buf, buflen, "%s+0x%x", p1, offset); 2440 } else { 2441 jio_snprintf(buf, buflen, PTR_FORMAT, handler); 2442 } 2443 return buf; 2444 } 2445 2446 static void print_signal_handler(outputStream* st, int sig, 2447 char* buf, size_t buflen) { 
2448 struct sigaction sa; 2449 2450 sigaction(sig, NULL, &sa); 2451 2452 st->print("%s: ", os::exception_name(sig, buf, buflen)); 2453 2454 address handler = (sa.sa_flags & SA_SIGINFO) 2455 ? CAST_FROM_FN_PTR(address, sa.sa_sigaction) 2456 : CAST_FROM_FN_PTR(address, sa.sa_handler); 2457 2458 if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) { 2459 st->print("SIG_DFL"); 2460 } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) { 2461 st->print("SIG_IGN"); 2462 } else { 2463 st->print("[%s]", get_signal_handler_name(handler, buf, buflen)); 2464 } 2465 2466 st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask); 2467 2468 address rh = VMError::get_resetted_sighandler(sig); 2469 // May be, handler was resetted by VMError? 2470 if(rh != NULL) { 2471 handler = rh; 2472 sa.sa_flags = VMError::get_resetted_sigflags(sig); 2473 } 2474 2475 st->print(", sa_flags=" PTR32_FORMAT, sa.sa_flags); 2476 2477 // Check: is it our handler? 2478 if(handler == CAST_FROM_FN_PTR(address, signalHandler) || 2479 handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) { 2480 // It is our signal handler 2481 // check for flags 2482 if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) { 2483 st->print( 2484 ", flags was changed from " PTR32_FORMAT ", consider using jsig library", 2485 os::Solaris::get_our_sigflags(sig)); 2486 } 2487 } 2488 st->cr(); 2489 } 2490 2491 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 2492 st->print_cr("Signal Handlers:"); 2493 print_signal_handler(st, SIGSEGV, buf, buflen); 2494 print_signal_handler(st, SIGBUS , buf, buflen); 2495 print_signal_handler(st, SIGFPE , buf, buflen); 2496 print_signal_handler(st, SIGPIPE, buf, buflen); 2497 print_signal_handler(st, SIGXFSZ, buf, buflen); 2498 print_signal_handler(st, SIGILL , buf, buflen); 2499 print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen); 2500 print_signal_handler(st, ASYNC_SIGNAL, buf, buflen); 2501 print_signal_handler(st, BREAK_SIGNAL, buf, buflen); 2502 
print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen); 2503 print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen); 2504 print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen); 2505 print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen); 2506 print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen); 2507 } 2508 2509 static char saved_jvm_path[MAXPATHLEN] = { 0 }; 2510 2511 // Find the full path to the current module, libjvm.so or libjvm_g.so 2512 void os::jvm_path(char *buf, jint buflen) { 2513 // Error checking. 2514 if (buflen < MAXPATHLEN) { 2515 assert(false, "must use a large-enough buffer"); 2516 buf[0] = '\0'; 2517 return; 2518 } 2519 // Lazy resolve the path to current module. 2520 if (saved_jvm_path[0] != 0) { 2521 strcpy(buf, saved_jvm_path); 2522 return; 2523 } 2524 2525 Dl_info dlinfo; 2526 int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo); 2527 assert(ret != 0, "cannot locate libjvm"); 2528 realpath((char *)dlinfo.dli_fname, buf); 2529 2530 if (Arguments::created_by_gamma_launcher()) { 2531 // Support for the gamma launcher. Typical value for buf is 2532 // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". If "/jre/lib/" appears at 2533 // the right place in the string, then assume we are installed in a JDK and 2534 // we're done. Otherwise, check for a JAVA_HOME environment variable and fix 2535 // up the path so it looks like libjvm.so is installed there (append a 2536 // fake suffix hotspot/libjvm.so). 2537 const char *p = buf + strlen(buf) - 1; 2538 for (int count = 0; p > buf && count < 5; ++count) { 2539 for (--p; p > buf && *p != '/'; --p) 2540 /* empty */ ; 2541 } 2542 2543 if (strncmp(p, "/jre/lib/", 9) != 0) { 2544 // Look for JAVA_HOME in the environment. 
2545 char* java_home_var = ::getenv("JAVA_HOME"); 2546 if (java_home_var != NULL && java_home_var[0] != 0) { 2547 char cpu_arch[12]; 2548 char* jrelib_p; 2549 int len; 2550 sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch)); 2551 #ifdef _LP64 2552 // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9. 2553 if (strcmp(cpu_arch, "sparc") == 0) { 2554 strcat(cpu_arch, "v9"); 2555 } else if (strcmp(cpu_arch, "i386") == 0) { 2556 strcpy(cpu_arch, "amd64"); 2557 } 2558 #endif 2559 // Check the current module name "libjvm.so" or "libjvm_g.so". 2560 p = strrchr(buf, '/'); 2561 assert(strstr(p, "/libjvm") == p, "invalid library name"); 2562 p = strstr(p, "_g") ? "_g" : ""; 2563 2564 realpath(java_home_var, buf); 2565 // determine if this is a legacy image or modules image 2566 // modules image doesn't have "jre" subdirectory 2567 len = strlen(buf); 2568 jrelib_p = buf + len; 2569 snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch); 2570 if (0 != access(buf, F_OK)) { 2571 snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch); 2572 } 2573 2574 if (0 == access(buf, F_OK)) { 2575 // Use current module name "libjvm[_g].so" instead of 2576 // "libjvm"debug_only("_g")".so" since for fastdebug version 2577 // we should have "libjvm.so" but debug_only("_g") adds "_g"! 
2578 len = strlen(buf); 2579 snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p); 2580 } else { 2581 // Go back to path of .so 2582 realpath((char *)dlinfo.dli_fname, buf); 2583 } 2584 } 2585 } 2586 } 2587 2588 strcpy(saved_jvm_path, buf); 2589 } 2590 2591 2592 void os::print_jni_name_prefix_on(outputStream* st, int args_size) { 2593 // no prefix required, not even "_" 2594 } 2595 2596 2597 void os::print_jni_name_suffix_on(outputStream* st, int args_size) { 2598 // no suffix required 2599 } 2600 2601 // This method is a copy of JDK's sysGetLastErrorString 2602 // from src/solaris/hpi/src/system_md.c 2603 2604 size_t os::lasterror(char *buf, size_t len) { 2605 2606 if (errno == 0) return 0; 2607 2608 const char *s = ::strerror(errno); 2609 size_t n = ::strlen(s); 2610 if (n >= len) { 2611 n = len - 1; 2612 } 2613 ::strncpy(buf, s, n); 2614 buf[n] = '\0'; 2615 return n; 2616 } 2617 2618 2619 // sun.misc.Signal 2620 2621 extern "C" { 2622 static void UserHandler(int sig, void *siginfo, void *context) { 2623 // Ctrl-C is pressed during error reporting, likely because the error 2624 // handler fails to abort. Let VM die immediately. 2625 if (sig == SIGINT && is_error_reported()) { 2626 os::die(); 2627 } 2628 2629 os::signal_notify(sig); 2630 // We do not need to reinstate the signal handler each time... 
2631 } 2632 } 2633 2634 void* os::user_handler() { 2635 return CAST_FROM_FN_PTR(void*, UserHandler); 2636 } 2637 2638 extern "C" { 2639 typedef void (*sa_handler_t)(int); 2640 typedef void (*sa_sigaction_t)(int, siginfo_t *, void *); 2641 } 2642 2643 void* os::signal(int signal_number, void* handler) { 2644 struct sigaction sigAct, oldSigAct; 2645 sigfillset(&(sigAct.sa_mask)); 2646 sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND; 2647 sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler); 2648 2649 if (sigaction(signal_number, &sigAct, &oldSigAct)) 2650 // -1 means registration failed 2651 return (void *)-1; 2652 2653 return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler); 2654 } 2655 2656 void os::signal_raise(int signal_number) { 2657 raise(signal_number); 2658 } 2659 2660 /* 2661 * The following code is moved from os.cpp for making this 2662 * code platform specific, which it is by its very nature. 2663 */ 2664 2665 // a counter for each possible signal value 2666 static int Sigexit = 0; 2667 static int Maxlibjsigsigs; 2668 static jint *pending_signals = NULL; 2669 static int *preinstalled_sigs = NULL; 2670 static struct sigaction *chainedsigactions = NULL; 2671 static sema_t sig_sem; 2672 typedef int (*version_getting_t)(); 2673 version_getting_t os::Solaris::get_libjsig_version = NULL; 2674 static int libjsigversion = NULL; 2675 2676 int os::sigexitnum_pd() { 2677 assert(Sigexit > 0, "signal memory not yet initialized"); 2678 return Sigexit; 2679 } 2680 2681 void os::Solaris::init_signal_mem() { 2682 // Initialize signal structures 2683 Maxsignum = SIGRTMAX; 2684 Sigexit = Maxsignum+1; 2685 assert(Maxsignum >0, "Unable to obtain max signal number"); 2686 2687 Maxlibjsigsigs = Maxsignum; 2688 2689 // pending_signals has one int per signal 2690 // The additional signal is for SIGEXIT - exit signal to signal_thread 2691 pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1)); 2692 memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1))); 2693 2694 if 
(UseSignalChaining) { 2695 chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction) 2696 * (Maxsignum + 1)); 2697 memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1))); 2698 preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1)); 2699 memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1))); 2700 } 2701 ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 )); 2702 memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1)); 2703 } 2704 2705 void os::signal_init_pd() { 2706 int ret; 2707 2708 ret = ::sema_init(&sig_sem, 0, NULL, NULL); 2709 assert(ret == 0, "sema_init() failed"); 2710 } 2711 2712 void os::signal_notify(int signal_number) { 2713 int ret; 2714 2715 Atomic::inc(&pending_signals[signal_number]); 2716 ret = ::sema_post(&sig_sem); 2717 assert(ret == 0, "sema_post() failed"); 2718 } 2719 2720 static int check_pending_signals(bool wait_for_signal) { 2721 int ret; 2722 while (true) { 2723 for (int i = 0; i < Sigexit + 1; i++) { 2724 jint n = pending_signals[i]; 2725 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { 2726 return i; 2727 } 2728 } 2729 if (!wait_for_signal) { 2730 return -1; 2731 } 2732 JavaThread *thread = JavaThread::current(); 2733 ThreadBlockInVM tbivm(thread); 2734 2735 bool threadIsSuspended; 2736 do { 2737 thread->set_suspend_equivalent(); 2738 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 2739 while((ret = ::sema_wait(&sig_sem)) == EINTR) 2740 ; 2741 assert(ret == 0, "sema_wait() failed"); 2742 2743 // were we externally suspended while we were waiting? 2744 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2745 if (threadIsSuspended) { 2746 // 2747 // The semaphore has been incremented, but while we were waiting 2748 // another thread suspended us. We don't want to continue running 2749 // while suspended because that would surprise the thread that 2750 // suspended us. 
        //
        // Re-post so the occurrence is not lost, then self-suspend.
        ret = ::sema_post(&sig_sem);
        assert(ret == 0, "sema_post() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

// Non-blocking probe for a pending signal; -1 if none.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Block until a signal is pending and return its number.
int os::signal_wait() {
  return check_pending_signals(true);
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

static int page_size = -1;   // set during os::init; -1 means uninitialized

// The mmap MAP_ALIGN flag is supported on Solaris 9 and later. init_2() will
// clear this var if support is not available.
static bool has_map_align = true;

int os::vm_page_size() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}

// Solaris allocates memory by pages.
int os::vm_allocation_granularity() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}

// Back a previously reserved range with read/write (and optionally exec)
// pages by re-mmapping it MAP_FIXED over /dev/zero.
// Returns true on success.
bool os::commit_memory(char* addr, size_t bytes, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  size_t size = bytes;
  char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
  if (res != NULL) {
    if (UseNUMAInterleaving) {
      numa_make_global(addr, bytes);
    }
    return true;
  }
  return false;
}

// Commit, then (best effort) advise the kernel of a preferred large page
// size for the range when MPSS is in use and the hint exceeds the base page.
bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint,
                       bool exec) {
  if (commit_memory(addr, bytes, exec)) {
    if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
      // If the large page size has been set and the VM
      // is using large pages, use the large page size
      // if it is smaller than the alignment hint. This is
      // a case where the VM wants to use a larger alignment size
      // for its own reasons but still want to use large pages
      // (which is what matters to setting the mpss range.
      size_t page_size = 0;    // shadows the file-static 'page_size' intentionally? NOTE(review): confirm
      if (large_page_size() < alignment_hint) {
        assert(UseLargePages, "Expected to be here for large page use only");
        page_size = large_page_size();
      } else {
        // If the alignment hint is less than the large page
        // size, the VM wants a particular alignment (thus the hint)
        // for internal reasons. Try to set the mpss range using
        // the alignment_hint.
        page_size = alignment_hint;
      }
      // Since this is a hint, ignore any failures.
      (void)Solaris::set_mpss_range(addr, bytes, page_size);
    }
    return true;
  }
  return false;
}

// Uncommit the pages in a specified region.
void os::free_memory(char* addr, size_t bytes, size_t alignment_hint) {
  if (madvise(addr, bytes, MADV_FREE) < 0) {
    debug_only(warning("MADV_FREE failed."));
    return;
  }
}

// Stack guard pages are ordinary committed pages on Solaris.
bool os::create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Change the page size in a given range.
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
  assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
  if (UseLargePages && UseMPSS) {
    Solaris::set_mpss_range(addr, bytes, alignment_hint);
  }
}

// Tell the OS to make the range local to the first-touching LWP
// (lgrp_hint is accepted for interface parity but not used here).
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
  assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
  if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
    debug_only(warning("MADV_ACCESS_LWP failed."));
  }
}

// Tell the OS that this range would be accessed from different LWPs.
2865 void os::numa_make_global(char *addr, size_t bytes) { 2866 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned."); 2867 if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) { 2868 debug_only(warning("MADV_ACCESS_MANY failed.")); 2869 } 2870 } 2871 2872 // Get the number of the locality groups. 2873 size_t os::numa_get_groups_num() { 2874 size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie()); 2875 return n != -1 ? n : 1; 2876 } 2877 2878 // Get a list of leaf locality groups. A leaf lgroup is group that 2879 // doesn't have any children. Typical leaf group is a CPU or a CPU/memory 2880 // board. An LWP is assigned to one of these groups upon creation. 2881 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 2882 if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) { 2883 ids[0] = 0; 2884 return 1; 2885 } 2886 int result_size = 0, top = 1, bottom = 0, cur = 0; 2887 for (int k = 0; k < size; k++) { 2888 int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur], 2889 (Solaris::lgrp_id_t*)&ids[top], size - top); 2890 if (r == -1) { 2891 ids[0] = 0; 2892 return 1; 2893 } 2894 if (!r) { 2895 // That's a leaf node. 2896 assert (bottom <= cur, "Sanity check"); 2897 // Check if the node has memory 2898 if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur], 2899 NULL, 0, LGRP_RSRC_MEM) > 0) { 2900 ids[bottom++] = ids[cur]; 2901 } 2902 } 2903 top += r; 2904 cur++; 2905 } 2906 if (bottom == 0) { 2907 // Handle a situation, when the OS reports no memory available. 2908 // Assume UMA architecture. 2909 ids[0] = 0; 2910 return 1; 2911 } 2912 return bottom; 2913 } 2914 2915 // Detect the topology change. Typically happens during CPU plugging-unplugging. 
2916 bool os::numa_topology_changed() { 2917 int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie()); 2918 if (is_stale != -1 && is_stale) { 2919 Solaris::lgrp_fini(Solaris::lgrp_cookie()); 2920 Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER); 2921 assert(c != 0, "Failure to initialize LGRP API"); 2922 Solaris::set_lgrp_cookie(c); 2923 return true; 2924 } 2925 return false; 2926 } 2927 2928 // Get the group id of the current LWP. 2929 int os::numa_get_group_id() { 2930 int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID); 2931 if (lgrp_id == -1) { 2932 return 0; 2933 } 2934 const int size = os::numa_get_groups_num(); 2935 int *ids = (int*)alloca(size * sizeof(int)); 2936 2937 // Get the ids of all lgroups with memory; r is the count. 2938 int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id, 2939 (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM); 2940 if (r <= 0) { 2941 return 0; 2942 } 2943 return ids[os::random() % r]; 2944 } 2945 2946 // Request information about the page. 2947 bool os::get_page_info(char *start, page_info* info) { 2948 const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE }; 2949 uint64_t addr = (uintptr_t)start; 2950 uint64_t outdata[2]; 2951 uint_t validity = 0; 2952 2953 if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) { 2954 return false; 2955 } 2956 2957 info->size = 0; 2958 info->lgrp_id = -1; 2959 2960 if ((validity & 1) != 0) { 2961 if ((validity & 2) != 0) { 2962 info->lgrp_id = outdata[0]; 2963 } 2964 if ((validity & 4) != 0) { 2965 info->size = outdata[1]; 2966 } 2967 return true; 2968 } 2969 return false; 2970 } 2971 2972 // Scan the pages from start to end until a page different than 2973 // the one described in the info parameter is encountered. 
// Walk [start, end) in page_size steps, batching up to MAX_MEMINFO_CNT
// addresses per meminfo() call. Returns the address of the first page whose
// lgroup/size differs from *page_expected (filling *page_found), 'end' if
// the whole range matches, or NULL on a meminfo error / invalid address.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  const size_t types = sizeof(info_types) / sizeof(info_types[0]);
  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT];
  uint_t validity[MAX_MEMINFO_CNT];

  size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
  uint64_t p = (uint64_t)start;
  while (p < (uint64_t)end) {
    addrs[0] = p;
    size_t addrs_count = 1;
    // Build one batch of sample addresses, page_size apart.
    while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] < (uint64_t)end) {
      addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
      addrs_count++;
    }

    if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
      return NULL;
    }

    size_t i = 0;
    for (; i < addrs_count; i++) {
      if ((validity[i] & 1) != 0) {
        // Bit 2 (value 4): page size info valid for this address.
        if ((validity[i] & 4) != 0) {
          if (outdata[types * i + 1] != page_expected->size) {
            break;
          }
        } else
          if (page_expected->size != 0) {
            break;
          }

        // Bit 1 (value 2): lgroup info valid; only compare when caller
        // cares about a specific lgroup (> 0).
        if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
          if (outdata[types * i] != page_expected->lgrp_id) {
            break;
          }
        }
      } else {
        return NULL;
      }
    }

    if (i != addrs_count) {
      // Mismatch found at addrs[i]; report what's actually there.
      if ((validity[i] & 2) != 0) {
        page_found->lgrp_id = outdata[types * i];
      } else {
        page_found->lgrp_id = -1;
      }
      if ((validity[i] & 4) != 0) {
        page_found->size = outdata[types * i + 1];
      } else {
        page_found->size = 0;
      }
      return (char*)addrs[i];
    }

    p = addrs[addrs_count - 1] + page_size;
  }
  return end;
}

// Uncommit: remap the range PROT_NONE over /dev/zero.
bool os::uncommit_memory(char* addr, size_t bytes) {
  size_t size = bytes;
  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page. Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
  return
    NULL != Solaris::mmap_chunk(addr, size,
                                MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
                                PROT_NONE);
}

// Thin mmap wrapper over the cached /dev/zero fd; NULL on failure.
char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
  char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);

  if (b == MAP_FAILED) {
    return NULL;
  }
  return b;
}

// Anonymous reservation. 'fixed' forces MAP_FIXED at requested_addr;
// otherwise an alignment hint (when MAP_ALIGN is available) is passed via
// the addr argument, per Solaris mmap(2) MAP_ALIGN semantics.
char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
  char* addr = requested_addr;
  int flags = MAP_PRIVATE | MAP_NORESERVE;

  assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");

  if (fixed) {
    flags |= MAP_FIXED;
  } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
    flags |= MAP_ALIGN;
    addr = (char*) alignment_hint;
  }

  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page. Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
  return mmap_chunk(addr, bytes, flags, PROT_NONE);
}

// Reserve 'bytes' of address space; a non-NULL requested_addr is mandatory
// (guarantee fails if the OS returns anything else).
char* os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
  char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));

  guarantee(requested_addr == NULL || requested_addr == addr,
            "OS failed to return requested mmap address.");
  return addr;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  const int max_tries = 10;
  char* base[max_tries];
  size_t size[max_tries];

  // Solaris adds a gap between mmap'ed regions. The size of the gap
  // is dependent on the requested size and the MMU. Our initial gap
  // value here is just a guess and will be corrected later.
  bool had_top_overlap = false;
  bool have_adjusted_gap = false;
  size_t gap = 0x400000;

  // Assert only that the size is a multiple of the page size, since
  // that's all that mmap requires, and since that's all we really know
  // about at this low abstraction level. If we need higher alignment,
  // we can either pass an alignment to this method or verify alignment
  // in one of the methods further up the call chain. See bug 5044738.
  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");

  // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
  // Give it a try, if the kernel honors the hint we can return immediately.
  char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
  volatile int err = errno;
  if (addr == requested_addr) {
    return addr;
  } else if (addr != NULL) {
    unmap_memory(addr, bytes);
  }

  if (PrintMiscellaneous && Verbose) {
    char buf[256];
    buf[0] = '\0';
    if (addr == NULL) {
      jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
    }
    warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
            PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
            "%s", bytes, requested_addr, addr, buf);
  }

  // Address hint method didn't work. Fall back to the old method.
  // In theory, once SNV becomes our oldest supported platform, this
  // code will no longer be needed.
  //
  // Repeatedly allocate blocks until the block is allocated at the
  // right spot. Give up after max_tries.
  int i;
  for (i = 0; i < max_tries; ++i) {
    base[i] = reserve_memory(bytes);

    if (base[i] != NULL) {
      // Is this the block we wanted?
      if (base[i] == requested_addr) {
        size[i] = bytes;
        break;
      }

      // check that the gap value is right
      if (had_top_overlap && !have_adjusted_gap) {
        size_t actual_gap = base[i-1] - base[i] - bytes;
        if (gap != actual_gap) {
          // adjust the gap value and retry the last 2 allocations
          assert(i > 0, "gap adjustment code problem");
          have_adjusted_gap = true;  // adjust the gap only once, just in case
          gap = actual_gap;
          if (PrintMiscellaneous && Verbose) {
            warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
          }
          unmap_memory(base[i], bytes);
          unmap_memory(base[i-1], size[i-1]);
          i-=2;
          continue;
        }
      }

      // Does this overlap the block we wanted? Give back the overlapped
      // parts and try again.
      //
      // There is still a bug in this code: if top_overlap == bytes,
      // the overlap is offset from requested region by the value of gap.
      // In this case giving back the overlapped part will not work,
      // because we'll give back the entire block at base[i] and
      // therefore the subsequent allocation will not generate a new gap.
      // This could be fixed with a new algorithm that used larger
      // or variable size chunks to find the requested region -
      // but such a change would introduce additional complications.
      // It's rare enough that the planets align for this bug,
      // so we'll just wait for a fix for 6204603/5003415 which
      // will provide a mmap flag to allow us to avoid this business.

      // NOTE(review): top_overlap/bottom_overlap are size_t, so the
      // '>= 0' tests below are tautologies; underflow wraps to a huge
      // value that the '< bytes' test rejects, so behavior is still
      // correct, but the '>= 0' is dead and may draw compiler warnings.
      size_t top_overlap = requested_addr + (bytes + gap) - base[i];
      if (top_overlap >= 0 && top_overlap < bytes) {
        had_top_overlap = true;
        unmap_memory(base[i], top_overlap);
        base[i] += top_overlap;
        size[i] = bytes - top_overlap;
      } else {
        size_t bottom_overlap = base[i] + bytes - requested_addr;
        if (bottom_overlap >= 0 && bottom_overlap < bytes) {
          if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
            warning("attempt_reserve_memory_at: possible alignment bug");
          }
          unmap_memory(requested_addr, bottom_overlap);
          size[i] = bytes - bottom_overlap;
        } else {
          size[i] = bytes;
        }
      }
    }
  }

  // Give back the unused reserved pieces.

  for (int j = 0; j < i; ++j) {
    if (base[j] != NULL) {
      unmap_memory(base[j], size[j]);
    }
  }

  return (i < max_tries) ? requested_addr : NULL;
}

// Release (munmap) a reserved range.
bool os::release_memory(char* addr, size_t bytes) {
  size_t size = bytes;
  return munmap(addr, size) == 0;
}

// mprotect wrapper; addr must be page aligned. Returns true on success.
static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
  assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
         "addr must be page aligned");
  int retVal = mprotect(addr, bytes, prot);
  return retVal == 0;
}

// Protect memory (Used to pass readonly pages through
// JNI GetArray<type>Elements with empty arrays.)
// Also, used for serialization page and for compressed oops null pointer
// checking.
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return solaris_mprotect(addr, bytes, p);
}

// guard_memory and unguard_memory only happens within stack guard pages.
// Since ISM pertains only to the heap, guard and unguard memory should not
// happen with an ISM region.
bool os::guard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_NONE);
}

bool os::unguard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
}

// Large page support

// UseLargePages is the master flag to enable/disable large page memory.
// UseMPSS and UseISM are supported for compatibility reasons. Their combined
// effects can be described in the following table:
//
// UseLargePages UseMPSS UseISM
//    false         *       *   => UseLargePages is the master switch, turning
//                                 it off will turn off both UseMPSS and
//                                 UseISM. VM will not use large page memory
//                                 regardless the settings of UseMPSS/UseISM.
//     true      false    false => Unless future Solaris provides other
//                                 mechanism to use large page memory, this
//                                 combination is equivalent to -UseLargePages,
//                                 VM will not use large page memory
//     true      true     false => JVM will use MPSS for large page memory.
//                                 This is the default behavior.
//     true      false    true  => JVM will use ISM for large page memory.
//     true      true     true  => JVM will use ISM if it is available.
//                                 Otherwise, JVM will fall back to MPSS.
//                                 Becaues ISM is now available on all
//                                 supported Solaris versions, this combination
//                                 is equivalent to +UseISM -UseMPSS.

static size_t _large_page_size = 0;

// Pick a default ISM page size (command-line override wins); ISM itself is
// assumed present on all supported Solaris versions, so always returns true.
bool os::Solaris::ism_sanity_check(bool warn, size_t * page_size) {
  // x86 uses either 2M or 4M page, depending on whether PAE (Physical Address
  // Extensions) mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. Sparc
  // can support multiple page sizes.

  // Don't bother to probe page size because getpagesizes() comes with MPSS.
  // ISM is only recommended on old Solaris where there is no MPSS support.
  // Simply choose a conservative value as default.
  *page_size = LargePageSizeInBytes ? LargePageSizeInBytes :
               SPARC_ONLY(4 * M) IA32_ONLY(4 * M) AMD64_ONLY(2 * M)
               ARM_ONLY(2 * M);

  // ISM is available on all supported Solaris versions
  return true;
}

// Insertion sort for small arrays (descending order).
static void insertion_sort_descending(size_t* array, int len) {
  for (int i = 0; i < len; i++) {
    size_t val = array[i];
    // Shift smaller predecessors right until val's slot is found.
    for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
      size_t tmp = array[key];
      array[key] = array[key - 1];
      array[key - 1] = tmp;
    }
  }
}

// Probe the OS for supported MPSS page sizes, filter them to at most
// VM_Version::page_size_count() usable sizes (<= 4M or LargePageSizeInBytes),
// leave them in _page_sizes sorted descending with a 0 sentinel, and report
// the chosen large page size via *page_size. Returns false if MPSS is
// unavailable or only one page size exists.
bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) {
  const unsigned int usable_count = VM_Version::page_size_count();
  if (usable_count == 1) {
    return false;
  }

  // Find the right getpagesizes interface. When solaris 11 is the minimum
  // build platform, getpagesizes() (without the '2') can be called directly.
  typedef int (*gps_t)(size_t[], int);
  gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
  if (gps_func == NULL) {
    gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
    if (gps_func == NULL) {
      if (warn) {
        warning("MPSS is not supported by the operating system.");
      }
      return false;
    }
  }

  // Fill the array of page sizes.
  int n = (*gps_func)(_page_sizes, page_sizes_max);
  assert(n > 0, "Solaris bug?");

  if (n == page_sizes_max) {
    // Add a sentinel value (necessary only if the array was completely filled
    // since it is static (zeroed at initialization)).
    _page_sizes[--n] = 0;
    DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
  }
  assert(_page_sizes[n] == 0, "missing sentinel");
  trace_page_sizes("available page sizes", _page_sizes, n);

  if (n == 1) return false;     // Only one page size available.

  // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
  // select up to usable_count elements. First sort the array, find the first
  // acceptable value, then copy the usable sizes to the top of the array and
  // trim the rest. Make sure to include the default page size :-).
  //
  // A better policy could get rid of the 4M limit by taking the sizes of the
  // important VM memory regions (java heap and possibly the code cache) into
  // account.
  insertion_sort_descending(_page_sizes, n);
  const size_t size_limit =
    FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
  int beg;
  for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
  const int end = MIN2((int)usable_count, n) - 1;
  for (int cur = 0; cur < end; ++cur, ++beg) {
    _page_sizes[cur] = _page_sizes[beg];
  }
  _page_sizes[end] = vm_page_size();
  _page_sizes[end + 1] = 0;

  if (_page_sizes[end] > _page_sizes[end - 1]) {
    // Default page size is not the smallest; sort again.
    insertion_sort_descending(_page_sizes, end + 1);
  }
  *page_size = _page_sizes[0];

  trace_page_sizes("usable page sizes", _page_sizes, end + 1);
  return true;
}

// Decide between ISM and MPSS large-page support and set the global flags.
// ISM, when selected, disables MPSS for old-JDK compatibility.
void os::large_page_init() {
  if (!UseLargePages) {
    UseISM = false;
    UseMPSS = false;
    return;
  }

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(UseISM)        ||
                         !FLAG_IS_DEFAULT(UseMPSS)       ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  UseISM = UseISM &&
           Solaris::ism_sanity_check(warn_on_failure, &_large_page_size);
  if (UseISM) {
    // ISM disables MPSS to be compatible with old JDK behavior
    UseMPSS = false;
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = vm_page_size();
  }

  UseMPSS = UseMPSS &&
            Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);

  UseLargePages = UseISM || UseMPSS;
}

// Advise the kernel (memcntl MC_HAT_ADVISE / MHA_MAPSIZE_VA) to back
// [start, start+bytes) with pages of size 'align'. Returns false if the
// advice was rejected; callers treat this as a hint.
bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
  // Signal to OS that we want large pages for addresses
  // from addr, addr + bytes
  struct memcntl_mha mpss_struct;
  mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
  mpss_struct.mha_pagesize = align;
  mpss_struct.mha_flags = 0;
  if (memcntl(start, bytes, MC_HAT_ADVISE,
              (caddr_t) &mpss_struct, 0, 0) < 0) {
    debug_only(warning("Attempt to use MPSS failed."));
    return false;
  }
  return true;
}

// Reserve an ISM (intimate shared memory) segment of 'size' bytes via
// SysV shm and attach it with SHM_SHARE_MMU. The 'addr' parameter is not
// used for placement here; returns the attach address or NULL on failure.
char* os::reserve_memory_special(size_t size, char* addr, bool exec) {
  // "exec" is passed in but not used. Creating the shared image for
  // the code cache doesn't have an SHM_X executable permission to check.
  assert(UseLargePages && UseISM, "only for ISM large pages");

  char* retAddr = NULL;
  int shmid;
  key_t ismKey;

  bool warn_on_failure = UseISM &&
                        (!FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(UseISM) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes)
                        );
  char msg[128];

  ismKey = IPC_PRIVATE;

  // Create a large shared memory region to attach to based on size.
  // Currently, size is the total size of the heap
  shmid = shmget(ismKey, size, SHM_R | SHM_W | IPC_CREAT);
  if (shmid == -1){
    if (warn_on_failure) {
      jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
      // NOTE(review): 'msg' is passed as the format string; safe here since
      // its contents are fully VM-generated, but warning("%s", msg) would be
      // the format-string-clean form.
      warning(msg);
    }
    return NULL;
  }

  // Attach to the region
  retAddr = (char *) shmat(shmid, 0, SHM_SHARE_MMU | SHM_R | SHM_W);
  int err = errno;   // captured immediately; shmctl below may clobber errno

  // Remove shmid. If shmat() is successful, the actual shared memory segment
  // will be deleted when it's detached by shmdt() or when the process
  // terminates. If shmat() is not successful this will remove the shared
  // segment immediately.
  shmctl(shmid, IPC_RMID, NULL);

  if (retAddr == (char *) -1) {
    if (warn_on_failure) {
      jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
      // NOTE(review): same non-literal format-string pattern as above.
      warning(msg);
    }
    return NULL;
  }
  if ((retAddr != NULL) && UseNUMAInterleaving) {
    numa_make_global(retAddr, size);
  }
  return retAddr;
}

bool os::release_memory_special(char* base, size_t bytes) {
  // detaching the SHM segment will also delete it, see reserve_memory_special()
  int rslt = shmdt(base);
  return rslt == 0;
}

size_t os::large_page_size() {
  return _large_page_size;
}

// MPSS allows application to commit large page memory on demand; with ISM
// the entire memory region must be allocated as shared memory.
bool os::can_commit_large_page_memory() {
  return UseISM ? false : true;
}

bool os::can_execute_large_page_memory() {
  return UseISM ? false : true;
}

// Sleep for 'millis' milliseconds using poll(), restarting after spurious
// or early wakeups until the full delay has elapsed. Sleeps longer than
// INT_MAX ms are performed in INT_MAX-sized slices (poll takes an int).
// Returns OS_OK, or the first non-OK result (e.g. OS_INTRPT) encountered.
static int os_sleep(jlong millis, bool interruptible) {
  const jlong limit = INT_MAX;
  jlong prevtime;
  int res;

  while (millis > limit) {
    if ((res = os_sleep(limit, interruptible)) != OS_OK)
      return res;
    millis -= limit;
  }

  // Restart interrupted polls with new parameters until the proper delay
  // has been completed.

  prevtime = getTimeMillis();

  while (millis > 0) {
    jlong newtime;

    if (!interruptible) {
      // Following assert fails for os::yield_all:
      // assert(!thread->is_Java_thread(), "must not be java thread");
      res = poll(NULL, 0, millis);
    } else {
      JavaThread *jt = JavaThread::current();

      INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
        os::Solaris::clear_interrupted);
    }

    // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
    // thread.Interrupt.

    // See c/r 6751923. Poll can return 0 before time
    // has elapsed if time is set via clock_settime (as NTP does).
    // res == 0 if poll timed out (see man poll RETURN VALUES)
    // using the logic below checks that we really did
    // sleep at least "millis" if not we'll sleep again.
    if( ( res == 0 ) || ((res == OS_ERR) && (errno == EINTR))) {
      newtime = getTimeMillis();
      assert(newtime >= prevtime, "time moving backwards");
    /* Doing prevtime and newtime in microseconds doesn't help precision,
       and trying to round up to avoid lost milliseconds can result in a
       too-short delay. */
      millis -= newtime - prevtime;
      if(millis <= 0)
        return OS_OK;
      prevtime = newtime;
    } else
      return res;
  }

  return OS_OK;
}

// Read calls from inside the vm need to perform state transitions
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}

// Like os::read but restarts on EINTR without a VM state transition.
size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}

// Sleep entry point for VM threads. For JavaThreads, honors the
// _thread_blocked and suspend-equivalent protocols around the actual sleep;
// millis <= 0 degenerates to a yield. Returns 0 or the os_sleep result.
int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(), "thread consistency check");

  // TODO-FIXME: this should be removed.
  // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock
  // situation with a JavaThread being starved out of a lwp. The kernel doesn't seem to generate
  // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving
  // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
  // is fooled into believing that the system is making progress. In the code below we block the
  // the watcher thread while safepoint is in progress so that it would not appear as though the
  // system is making progress.
  if (!Solaris::T2_libthread() &&
      thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
    // We now try to acquire the threads lock. Since this lock is held by the VM thread during
    // the entire safepoint, the watcher thread will line up here during the safepoint.
    Threads_lock->lock_without_safepoint_check();
    Threads_lock->unlock();
  }

  if (thread->is_Java_thread()) {
    // This is a JavaThread so we honor the _thread_blocked protocol
    // even for sleeps of 0 milliseconds. This was originally done
    // as a workaround for bug 4338139. However, now we also do it
    // to honor the suspend-equivalent protocol.

    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    int ret_code;
    if (millis <= 0) {
      thr_yield();
      ret_code = 0;
    } else {
      // The original sleep() implementation did not create an
      // OSThreadWaitState helper for sleeps of 0 milliseconds.
      // I'm preserving that decision for now.
      OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

      ret_code = os_sleep(millis, interruptible);
    }

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();

    return ret_code;
  }

  // non-JavaThread from this point on:

  if (millis <= 0) {
    thr_yield();
    return 0;
  }

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);

  return os_sleep(millis, interruptible);
}

// Minimal non-interruptible 1 ms sleep.
int os::naked_sleep() {
  // %% make the sleep time an integer flag. for now use 1 millisec.
  return os_sleep(1, false);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {      // sleep forever ...
    ::sleep(100);     // ... 100 seconds at a time
  }
}

// Used to convert frequent JVM_Yield() to nops
// Returns true (i.e. suppress the yield) when the last yield was less than
// DontYieldALotInterval ms ago.
bool os::dont_yield() {
  if (DontYieldALot) {
    static hrtime_t last_time = 0;
    hrtime_t diff = getTimeNanos() - last_time;

    if (diff < DontYieldALotInterval * 1000000)
      return true;

    last_time += diff;

    return false;
  }
  else {
    return false;
  }
}

// Caveat: Solaris os::yield() causes a thread-state transition whereas
// the linux and win32 implementations do not. This should be checked.

void os::yield() {
  // Yields to all threads with same or greater priority
  os::sleep(Thread::current(), 0, false);
}

// Note that yield semantics are defined by the scheduling class to which
// the thread currently belongs. Typically, yield will _not yield to
// other equal or higher priority threads that reside on the dispatch queues
// of other CPUs.

os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }


// On Solaris we found that yield_all doesn't always yield to all other threads.
// There have been cases where there is a thread ready to execute but it doesn't
// get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
// The 1 millisecond wait doesn't seem long enough for the kernel to issue a
// SIGWAITING signal which will cause a new lwp to be created. So we count the
// number of times yield_all is called in the one loop and increase the sleep
// time after 8 attempts. If this fails too we increase the concurrency level
// so that the starving thread would get an lwp

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  if (attempts == 0) {
    os::sleep(Thread::current(), 1, false);
  } else {
    int iterations = attempts % 30;
    if (iterations == 0 && !os::Solaris::T2_libthread()) {
      // thr_setconcurrency and _getconcurrency make sense only under T1.
      // Bump the LWP pool if it is smaller than the thread count + slack.
      int noofLWPS = thr_getconcurrency();
      if (noofLWPS < (Threads::number_of_threads() + 2)) {
        thr_setconcurrency(thr_getconcurrency() + 1);
      }
    } else if (iterations < 25) {
      os::sleep(Thread::current(), 1, false);
    } else {
      os::sleep(Thread::current(), 10, false);
    }
  }
}

// Called from the tight loops to possibly influence time-sharing heuristics
void os::loop_breaker(int attempts) {
  os::yield_all(attempts);
}


// Interface for setting lwp priorities. If we are using T2 libthread,
// which forces the use of BoundThreads or we manually set UseBoundThreads,
// all of our threads will be assigned to real lwp's. Using the thr_setprio
// function is meaningless in this mode so we must adjust the real lwp's priority
// The routines below implement the getting and setting of lwp priorities.
//
// Note: There are three priority scales used on Solaris. Java priotities
// which range from 1 to 10, libthread "thr_setprio" scale which range
// from 0 to 127, and the current scheduling class of the process we
// are running in. This is typically from -60 to +60.
// The setting of the lwp priorities in done after a call to thr_setprio
// so Java priorities are mapped to libthread priorities and we map from
// the latter to lwp priorities. We don't keep priorities stored in
// Java priorities since some of our worker threads want to set priorities
// higher than all Java threads.
//
// For related information:
// (1) man -s 2 priocntl
// (2) man -s 4 priocntl
// (3) man dispadmin
// =  librt.so
// =  libthread/common/rtsched.c - thrp_setlwpprio().
// =  ps -cL <pid> ... to validate priority.
// =  sched_get_priority_min and _max
//              pthread_create
//              sched_setparam
//              pthread_setschedparam
//
// Assumptions:
// +    We assume that all threads in the process belong to the same
//              scheduling class.   IE. an homogenous process.
// +    Must be root or in IA group to change change "interactive" attribute.
//              Priocntl() will fail silently. The only indication of failure is when
//              we read-back the value and notice that it hasn't changed.
// +    Interactive threads enter the runq at the head, non-interactive at the tail.
// +    For RT, change timeslice as well. Invariant:
//              constant "priority integral"
//              Konst == TimeSlice * (60-Priority)
//              Given a priority, compute appropriate timeslice.
// +    Higher numerical values have higher priority.

// sched class attributes
typedef struct {
  int   schedPolicy;              // classID
  int   maxPrio;
  int   minPrio;
} SchedInfo;


static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;

#ifdef ASSERT
static int  ReadBackValidate = 1;
#endif
static int  myClass     = 0;     // scheduling class of this process
static int  myMin       = 0;     // min/max/current priority within that class
static int  myMax       = 0;
static int  myCur       = 0;
static bool priocntl_enable = false;

static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
static int java_MaxPriority_to_os_priority = 0; // Saved mapping

// Call the version of priocntl suitable for all supported versions
// of Solaris.
We need to call through this wrapper so that we can 3755 // build on Solaris 9 and run on Solaris 8, 9 and 10. 3756 // 3757 // This code should be removed if we ever stop supporting Solaris 8 3758 // and earlier releases. 3759 3760 static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg); 3761 typedef long (*priocntl_type)(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg); 3762 static priocntl_type priocntl_ptr = priocntl_stub; 3763 3764 // Stub to set the value of the real pointer, and then call the real 3765 // function. 3766 3767 static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg) { 3768 // Try Solaris 8- name only. 3769 priocntl_type tmp = (priocntl_type)dlsym(RTLD_DEFAULT, "__priocntl"); 3770 guarantee(tmp != NULL, "priocntl function not found."); 3771 priocntl_ptr = tmp; 3772 return (*priocntl_ptr)(PC_VERSION, idtype, id, cmd, arg); 3773 } 3774 3775 3776 // lwp_priocntl_init 3777 // 3778 // Try to determine the priority scale for our process. 3779 // 3780 // Return errno or 0 if OK. 3781 // 3782 static 3783 int lwp_priocntl_init () 3784 { 3785 int rslt; 3786 pcinfo_t ClassInfo; 3787 pcparms_t ParmInfo; 3788 int i; 3789 3790 if (!UseThreadPriorities) return 0; 3791 3792 // We are using Bound threads, we need to determine our priority ranges 3793 if (os::Solaris::T2_libthread() || UseBoundThreads) { 3794 // If ThreadPriorityPolicy is 1, switch tables 3795 if (ThreadPriorityPolicy == 1) { 3796 for (i = 0 ; i < CriticalPriority+1; i++) 3797 os::java_to_os_priority[i] = prio_policy1[i]; 3798 } 3799 if (UseCriticalJavaThreadPriority) { 3800 // MaxPriority always maps to the FX scheduling class and criticalPrio. 3801 // See set_native_priority() and set_lwp_class_and_priority(). 3802 // Save original MaxPriority mapping in case attempt to 3803 // use critical priority fails. 
3804 java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority]; 3805 // Set negative to distinguish from other priorities 3806 os::java_to_os_priority[MaxPriority] = -criticalPrio; 3807 } 3808 } 3809 // Not using Bound Threads, set to ThreadPolicy 1 3810 else { 3811 for ( i = 0 ; i < CriticalPriority+1; i++ ) { 3812 os::java_to_os_priority[i] = prio_policy1[i]; 3813 } 3814 return 0; 3815 } 3816 3817 // Get IDs for a set of well-known scheduling classes. 3818 // TODO-FIXME: GETCLINFO returns the current # of classes in the 3819 // the system. We should have a loop that iterates over the 3820 // classID values, which are known to be "small" integers. 3821 3822 strcpy(ClassInfo.pc_clname, "TS"); 3823 ClassInfo.pc_cid = -1; 3824 rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3825 if (rslt < 0) return errno; 3826 assert(ClassInfo.pc_cid != -1, "cid for TS class is -1"); 3827 tsLimits.schedPolicy = ClassInfo.pc_cid; 3828 tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri; 3829 tsLimits.minPrio = -tsLimits.maxPrio; 3830 3831 strcpy(ClassInfo.pc_clname, "IA"); 3832 ClassInfo.pc_cid = -1; 3833 rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3834 if (rslt < 0) return errno; 3835 assert(ClassInfo.pc_cid != -1, "cid for IA class is -1"); 3836 iaLimits.schedPolicy = ClassInfo.pc_cid; 3837 iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri; 3838 iaLimits.minPrio = -iaLimits.maxPrio; 3839 3840 strcpy(ClassInfo.pc_clname, "RT"); 3841 ClassInfo.pc_cid = -1; 3842 rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3843 if (rslt < 0) return errno; 3844 assert(ClassInfo.pc_cid != -1, "cid for RT class is -1"); 3845 rtLimits.schedPolicy = ClassInfo.pc_cid; 3846 rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri; 3847 rtLimits.minPrio = 0; 3848 3849 strcpy(ClassInfo.pc_clname, "FX"); 3850 ClassInfo.pc_cid = -1; 3851 rslt = (*priocntl_ptr)(PC_VERSION, 
P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3852 if (rslt < 0) return errno; 3853 assert(ClassInfo.pc_cid != -1, "cid for FX class is -1"); 3854 fxLimits.schedPolicy = ClassInfo.pc_cid; 3855 fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri; 3856 fxLimits.minPrio = 0; 3857 3858 // Query our "current" scheduling class. 3859 // This will normally be IA, TS or, rarely, FX or RT. 3860 memset(&ParmInfo, 0, sizeof(ParmInfo)); 3861 ParmInfo.pc_cid = PC_CLNULL; 3862 rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo); 3863 if (rslt < 0) return errno; 3864 myClass = ParmInfo.pc_cid; 3865 3866 // We now know our scheduling classId, get specific information 3867 // about the class. 3868 ClassInfo.pc_cid = myClass; 3869 ClassInfo.pc_clname[0] = 0; 3870 rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo); 3871 if (rslt < 0) return errno; 3872 3873 if (ThreadPriorityVerbose) { 3874 tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname); 3875 } 3876 3877 memset(&ParmInfo, 0, sizeof(pcparms_t)); 3878 ParmInfo.pc_cid = PC_CLNULL; 3879 rslt = (*priocntl_ptr)(PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo); 3880 if (rslt < 0) return errno; 3881 3882 if (ParmInfo.pc_cid == rtLimits.schedPolicy) { 3883 myMin = rtLimits.minPrio; 3884 myMax = rtLimits.maxPrio; 3885 } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) { 3886 iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms; 3887 myMin = iaLimits.minPrio; 3888 myMax = iaLimits.maxPrio; 3889 myMax = MIN2(myMax, (int)iaInfo->ia_uprilim); // clamp - restrict 3890 } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) { 3891 tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms; 3892 myMin = tsLimits.minPrio; 3893 myMax = tsLimits.maxPrio; 3894 myMax = MIN2(myMax, (int)tsInfo->ts_uprilim); // clamp - restrict 3895 } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) { 3896 fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms; 3897 
myMin = fxLimits.minPrio; 3898 myMax = fxLimits.maxPrio; 3899 myMax = MIN2(myMax, (int)fxInfo->fx_uprilim); // clamp - restrict 3900 } else { 3901 // No clue - punt 3902 if (ThreadPriorityVerbose) 3903 tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname); 3904 return EINVAL; // no clue, punt 3905 } 3906 3907 if (ThreadPriorityVerbose) { 3908 tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax); 3909 } 3910 3911 priocntl_enable = true; // Enable changing priorities 3912 return 0; 3913 } 3914 3915 #define IAPRI(x) ((iaparms_t *)((x).pc_clparms)) 3916 #define RTPRI(x) ((rtparms_t *)((x).pc_clparms)) 3917 #define TSPRI(x) ((tsparms_t *)((x).pc_clparms)) 3918 #define FXPRI(x) ((fxparms_t *)((x).pc_clparms)) 3919 3920 3921 // scale_to_lwp_priority 3922 // 3923 // Convert from the libthread "thr_setprio" scale to our current 3924 // lwp scheduling class scale. 3925 // 3926 static 3927 int scale_to_lwp_priority (int rMin, int rMax, int x) 3928 { 3929 int v; 3930 3931 if (x == 127) return rMax; // avoid round-down 3932 v = (((x*(rMax-rMin)))/128)+rMin; 3933 return v; 3934 } 3935 3936 3937 // set_lwp_class_and_priority 3938 // 3939 // Set the class and priority of the lwp. This call should only 3940 // be made when using bound threads (T2 threads are bound by default). 3941 // 3942 int set_lwp_class_and_priority(int ThreadID, int lwpid, 3943 int newPrio, int new_class, bool scale) { 3944 int rslt; 3945 int Actual, Expected, prv; 3946 pcparms_t ParmInfo; // for GET-SET 3947 #ifdef ASSERT 3948 pcparms_t ReadBack; // for readback 3949 #endif 3950 3951 // Set priority via PC_GETPARMS, update, PC_SETPARMS 3952 // Query current values. 3953 // TODO: accelerate this by eliminating the PC_GETPARMS call. 3954 // Cache "pcparms_t" in global ParmCache. 3955 // TODO: elide set-to-same-value 3956 3957 // If something went wrong on init, don't change priorities. 
3958 if ( !priocntl_enable ) { 3959 if (ThreadPriorityVerbose) 3960 tty->print_cr("Trying to set priority but init failed, ignoring"); 3961 return EINVAL; 3962 } 3963 3964 // If lwp hasn't started yet, just return 3965 // the _start routine will call us again. 3966 if ( lwpid <= 0 ) { 3967 if (ThreadPriorityVerbose) { 3968 tty->print_cr ("deferring the set_lwp_class_and_priority of thread " 3969 INTPTR_FORMAT " to %d, lwpid not set", 3970 ThreadID, newPrio); 3971 } 3972 return 0; 3973 } 3974 3975 if (ThreadPriorityVerbose) { 3976 tty->print_cr ("set_lwp_class_and_priority(" 3977 INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ", 3978 ThreadID, lwpid, newPrio); 3979 } 3980 3981 memset(&ParmInfo, 0, sizeof(pcparms_t)); 3982 ParmInfo.pc_cid = PC_CLNULL; 3983 rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo); 3984 if (rslt < 0) return errno; 3985 3986 int cur_class = ParmInfo.pc_cid; 3987 ParmInfo.pc_cid = (id_t)new_class; 3988 3989 if (new_class == rtLimits.schedPolicy) { 3990 rtparms_t *rtInfo = (rtparms_t*)ParmInfo.pc_clparms; 3991 rtInfo->rt_pri = scale ? scale_to_lwp_priority(rtLimits.minPrio, 3992 rtLimits.maxPrio, newPrio) 3993 : newPrio; 3994 rtInfo->rt_tqsecs = RT_NOCHANGE; 3995 rtInfo->rt_tqnsecs = RT_NOCHANGE; 3996 if (ThreadPriorityVerbose) { 3997 tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri); 3998 } 3999 } else if (new_class == iaLimits.schedPolicy) { 4000 iaparms_t* iaInfo = (iaparms_t*)ParmInfo.pc_clparms; 4001 int maxClamped = MIN2(iaLimits.maxPrio, 4002 cur_class == new_class 4003 ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio); 4004 iaInfo->ia_upri = scale ? scale_to_lwp_priority(iaLimits.minPrio, 4005 maxClamped, newPrio) 4006 : newPrio; 4007 iaInfo->ia_uprilim = cur_class == new_class 4008 ? 
IA_NOCHANGE : (pri_t)iaLimits.maxPrio; 4009 iaInfo->ia_mode = IA_NOCHANGE; 4010 if (ThreadPriorityVerbose) { 4011 tty->print_cr("IA: [%d...%d] %d->%d\n", 4012 iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri); 4013 } 4014 } else if (new_class == tsLimits.schedPolicy) { 4015 tsparms_t* tsInfo = (tsparms_t*)ParmInfo.pc_clparms; 4016 int maxClamped = MIN2(tsLimits.maxPrio, 4017 cur_class == new_class 4018 ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio); 4019 tsInfo->ts_upri = scale ? scale_to_lwp_priority(tsLimits.minPrio, 4020 maxClamped, newPrio) 4021 : newPrio; 4022 tsInfo->ts_uprilim = cur_class == new_class 4023 ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio; 4024 if (ThreadPriorityVerbose) { 4025 tty->print_cr("TS: [%d...%d] %d->%d\n", 4026 tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri); 4027 } 4028 } else if (new_class == fxLimits.schedPolicy) { 4029 fxparms_t* fxInfo = (fxparms_t*)ParmInfo.pc_clparms; 4030 int maxClamped = MIN2(fxLimits.maxPrio, 4031 cur_class == new_class 4032 ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio); 4033 fxInfo->fx_upri = scale ? scale_to_lwp_priority(fxLimits.minPrio, 4034 maxClamped, newPrio) 4035 : newPrio; 4036 fxInfo->fx_uprilim = cur_class == new_class 4037 ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio; 4038 fxInfo->fx_tqsecs = FX_NOCHANGE; 4039 fxInfo->fx_tqnsecs = FX_NOCHANGE; 4040 if (ThreadPriorityVerbose) { 4041 tty->print_cr("FX: [%d...%d] %d->%d\n", 4042 fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri); 4043 } 4044 } else { 4045 if (ThreadPriorityVerbose) { 4046 tty->print_cr("Unknown new scheduling class %d\n", new_class); 4047 } 4048 return EINVAL; // no clue, punt 4049 } 4050 4051 rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo); 4052 if (ThreadPriorityVerbose && rslt) { 4053 tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno); 4054 } 4055 if (rslt < 0) return errno; 4056 4057 #ifdef ASSERT 4058 // Sanity check: read back what we just attempted to set. 
4059 // In theory it could have changed in the interim ... 4060 // 4061 // The priocntl system call is tricky. 4062 // Sometimes it'll validate the priority value argument and 4063 // return EINVAL if unhappy. At other times it fails silently. 4064 // Readbacks are prudent. 4065 4066 if (!ReadBackValidate) return 0; 4067 4068 memset(&ReadBack, 0, sizeof(pcparms_t)); 4069 ReadBack.pc_cid = PC_CLNULL; 4070 rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack); 4071 assert(rslt >= 0, "priocntl failed"); 4072 Actual = Expected = 0xBAD; 4073 assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match"); 4074 if (ParmInfo.pc_cid == rtLimits.schedPolicy) { 4075 Actual = RTPRI(ReadBack)->rt_pri; 4076 Expected = RTPRI(ParmInfo)->rt_pri; 4077 } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) { 4078 Actual = IAPRI(ReadBack)->ia_upri; 4079 Expected = IAPRI(ParmInfo)->ia_upri; 4080 } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) { 4081 Actual = TSPRI(ReadBack)->ts_upri; 4082 Expected = TSPRI(ParmInfo)->ts_upri; 4083 } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) { 4084 Actual = FXPRI(ReadBack)->fx_upri; 4085 Expected = FXPRI(ParmInfo)->fx_upri; 4086 } else { 4087 if (ThreadPriorityVerbose) { 4088 tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n", 4089 ParmInfo.pc_cid); 4090 } 4091 } 4092 4093 if (Actual != Expected) { 4094 if (ThreadPriorityVerbose) { 4095 tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n", 4096 lwpid, newPrio, ReadBack.pc_cid, Actual, Expected); 4097 } 4098 } 4099 #endif 4100 4101 return 0; 4102 } 4103 4104 // Solaris only gives access to 128 real priorities at a time, 4105 // so we expand Java's ten to fill this range. This would be better 4106 // if we dynamically adjusted relative priorities. 4107 // 4108 // The ThreadPriorityPolicy option allows us to select 2 different 4109 // priority scales. 
4110 // 4111 // ThreadPriorityPolicy=0 4112 // Since the Solaris' default priority is MaximumPriority, we do not 4113 // set a priority lower than Max unless a priority lower than 4114 // NormPriority is requested. 4115 // 4116 // ThreadPriorityPolicy=1 4117 // This mode causes the priority table to get filled with 4118 // linear values. NormPriority get's mapped to 50% of the 4119 // Maximum priority an so on. This will cause VM threads 4120 // to get unfair treatment against other Solaris processes 4121 // which do not explicitly alter their thread priorities. 4122 // 4123 4124 int os::java_to_os_priority[CriticalPriority + 1] = { 4125 -99999, // 0 Entry should never be used 4126 4127 0, // 1 MinPriority 4128 32, // 2 4129 64, // 3 4130 4131 96, // 4 4132 127, // 5 NormPriority 4133 127, // 6 4134 4135 127, // 7 4136 127, // 8 4137 127, // 9 NearMaxPriority 4138 4139 127, // 10 MaxPriority 4140 4141 -criticalPrio // 11 CriticalPriority 4142 }; 4143 4144 OSReturn os::set_native_priority(Thread* thread, int newpri) { 4145 OSThread* osthread = thread->osthread(); 4146 4147 // Save requested priority in case the thread hasn't been started 4148 osthread->set_native_priority(newpri); 4149 4150 // Check for critical priority request 4151 bool fxcritical = false; 4152 if (newpri == -criticalPrio) { 4153 fxcritical = true; 4154 newpri = criticalPrio; 4155 } 4156 4157 assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping"); 4158 if (!UseThreadPriorities) return OS_OK; 4159 4160 int status = 0; 4161 4162 if (!fxcritical) { 4163 // Use thr_setprio only if we have a priority that thr_setprio understands 4164 status = thr_setprio(thread->osthread()->thread_id(), newpri); 4165 } 4166 4167 if (os::Solaris::T2_libthread() || 4168 (UseBoundThreads && osthread->is_vm_created())) { 4169 int lwp_status = 4170 set_lwp_class_and_priority(osthread->thread_id(), 4171 osthread->lwp_id(), 4172 newpri, 4173 fxcritical ? 
fxLimits.schedPolicy : myClass, 4174 !fxcritical); 4175 if (lwp_status != 0 && fxcritical) { 4176 // Try again, this time without changing the scheduling class 4177 newpri = java_MaxPriority_to_os_priority; 4178 lwp_status = set_lwp_class_and_priority(osthread->thread_id(), 4179 osthread->lwp_id(), 4180 newpri, myClass, false); 4181 } 4182 status |= lwp_status; 4183 } 4184 return (status == 0) ? OS_OK : OS_ERR; 4185 } 4186 4187 4188 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) { 4189 int p; 4190 if ( !UseThreadPriorities ) { 4191 *priority_ptr = NormalPriority; 4192 return OS_OK; 4193 } 4194 int status = thr_getprio(thread->osthread()->thread_id(), &p); 4195 if (status != 0) { 4196 return OS_ERR; 4197 } 4198 *priority_ptr = p; 4199 return OS_OK; 4200 } 4201 4202 4203 // Hint to the underlying OS that a task switch would not be good. 4204 // Void return because it's a hint and can fail. 4205 void os::hint_no_preempt() { 4206 schedctl_start(schedctl_init()); 4207 } 4208 4209 void os::interrupt(Thread* thread) { 4210 assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer"); 4211 4212 OSThread* osthread = thread->osthread(); 4213 4214 int isInterrupted = osthread->interrupted(); 4215 if (!isInterrupted) { 4216 osthread->set_interrupted(true); 4217 OrderAccess::fence(); 4218 // os::sleep() is implemented with either poll (NULL,0,timeout) or 4219 // by parking on _SleepEvent. If the former, thr_kill will unwedge 4220 // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper. 4221 ParkEvent * const slp = thread->_SleepEvent ; 4222 if (slp != NULL) slp->unpark() ; 4223 } 4224 4225 // For JSR166: unpark after setting status but before thr_kill -dl 4226 if (thread->is_Java_thread()) { 4227 ((JavaThread*)thread)->parker()->unpark(); 4228 } 4229 4230 // Handle interruptible wait() ... 
4231 ParkEvent * const ev = thread->_ParkEvent ; 4232 if (ev != NULL) ev->unpark() ; 4233 4234 // When events are used everywhere for os::sleep, then this thr_kill 4235 // will only be needed if UseVMInterruptibleIO is true. 4236 4237 if (!isInterrupted) { 4238 int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt()); 4239 assert_status(status == 0, status, "thr_kill"); 4240 4241 // Bump thread interruption counter 4242 RuntimeService::record_thread_interrupt_signaled_count(); 4243 } 4244 } 4245 4246 4247 bool os::is_interrupted(Thread* thread, bool clear_interrupted) { 4248 assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer"); 4249 4250 OSThread* osthread = thread->osthread(); 4251 4252 bool res = osthread->interrupted(); 4253 4254 // NOTE that since there is no "lock" around these two operations, 4255 // there is the possibility that the interrupted flag will be 4256 // "false" but that the interrupt event will be set. This is 4257 // intentional. The effect of this is that Object.wait() will appear 4258 // to have a spurious wakeup, which is not harmful, and the 4259 // possibility is so rare that it is not worth the added complexity 4260 // to add yet another lock. It has also been recommended not to put 4261 // the interrupted flag into the os::Solaris::Event structure, 4262 // because it hides the issue. 
4263 if (res && clear_interrupted) { 4264 osthread->set_interrupted(false); 4265 } 4266 return res; 4267 } 4268 4269 4270 void os::print_statistics() { 4271 } 4272 4273 int os::message_box(const char* title, const char* message) { 4274 int i; 4275 fdStream err(defaultStream::error_fd()); 4276 for (i = 0; i < 78; i++) err.print_raw("="); 4277 err.cr(); 4278 err.print_raw_cr(title); 4279 for (i = 0; i < 78; i++) err.print_raw("-"); 4280 err.cr(); 4281 err.print_raw_cr(message); 4282 for (i = 0; i < 78; i++) err.print_raw("="); 4283 err.cr(); 4284 4285 char buf[16]; 4286 // Prevent process from exiting upon "read error" without consuming all CPU 4287 while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); } 4288 4289 return buf[0] == 'y' || buf[0] == 'Y'; 4290 } 4291 4292 // A lightweight implementation that does not suspend the target thread and 4293 // thus returns only a hint. Used for profiling only! 4294 ExtendedPC os::get_thread_pc(Thread* thread) { 4295 // Make sure that it is called by the watcher and the Threads lock is owned. 4296 assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock"); 4297 // For now, is only used to profile the VM Thread 4298 assert(thread->is_VM_thread(), "Can only be called for VMThread"); 4299 ExtendedPC epc; 4300 4301 GetThreadPC_Callback cb(ProfileVM_lock); 4302 OSThread *osthread = thread->osthread(); 4303 const int time_to_wait = 400; // 400ms wait for initial response 4304 int status = cb.interrupt(thread, time_to_wait); 4305 4306 if (cb.is_done() ) { 4307 epc = cb.addr(); 4308 } else { 4309 DEBUG_ONLY(tty->print_cr("Failed to get pc for thread: %d got %d status", 4310 osthread->thread_id(), status);); 4311 // epc is already NULL 4312 } 4313 return epc; 4314 } 4315 4316 4317 // This does not do anything on Solaris. This is basically a hook for being 4318 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32. 
4319 void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) { 4320 f(value, method, args, thread); 4321 } 4322 4323 // This routine may be used by user applications as a "hook" to catch signals. 4324 // The user-defined signal handler must pass unrecognized signals to this 4325 // routine, and if it returns true (non-zero), then the signal handler must 4326 // return immediately. If the flag "abort_if_unrecognized" is true, then this 4327 // routine will never retun false (zero), but instead will execute a VM panic 4328 // routine kill the process. 4329 // 4330 // If this routine returns false, it is OK to call it again. This allows 4331 // the user-defined signal handler to perform checks either before or after 4332 // the VM performs its own checks. Naturally, the user code would be making 4333 // a serious error if it tried to handle an exception (such as a null check 4334 // or breakpoint) that the VM was generating for its own correct operation. 4335 // 4336 // This routine may recognize any of the following kinds of signals: 4337 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ, 4338 // os::Solaris::SIGasync 4339 // It should be consulted by handlers for any of those signals. 4340 // It explicitly does not recognize os::Solaris::SIGinterrupt 4341 // 4342 // The caller of this routine must pass in the three arguments supplied 4343 // to the function referred to in the "sa_sigaction" (not the "sa_handler") 4344 // field of the structure passed to sigaction(). This routine assumes that 4345 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART. 4346 // 4347 // Note that the VM will print warnings if it detects conflicting signal 4348 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers". 
4349 // 4350 extern "C" JNIEXPORT int 4351 JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext, 4352 int abort_if_unrecognized); 4353 4354 4355 void signalHandler(int sig, siginfo_t* info, void* ucVoid) { 4356 JVM_handle_solaris_signal(sig, info, ucVoid, true); 4357 } 4358 4359 /* Do not delete - if guarantee is ever removed, a signal handler (even empty) 4360 is needed to provoke threads blocked on IO to return an EINTR 4361 Note: this explicitly does NOT call JVM_handle_solaris_signal and 4362 does NOT participate in signal chaining due to requirement for 4363 NOT setting SA_RESTART to make EINTR work. */ 4364 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) { 4365 if (UseSignalChaining) { 4366 struct sigaction *actp = os::Solaris::get_chained_signal_action(sig); 4367 if (actp && actp->sa_handler) { 4368 vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs"); 4369 } 4370 } 4371 } 4372 4373 // This boolean allows users to forward their own non-matching signals 4374 // to JVM_handle_solaris_signal, harmlessly. 
4375 bool os::Solaris::signal_handlers_are_installed = false; 4376 4377 // For signal-chaining 4378 bool os::Solaris::libjsig_is_loaded = false; 4379 typedef struct sigaction *(*get_signal_t)(int); 4380 get_signal_t os::Solaris::get_signal_action = NULL; 4381 4382 struct sigaction* os::Solaris::get_chained_signal_action(int sig) { 4383 struct sigaction *actp = NULL; 4384 4385 if ((libjsig_is_loaded) && (sig <= Maxlibjsigsigs)) { 4386 // Retrieve the old signal handler from libjsig 4387 actp = (*get_signal_action)(sig); 4388 } 4389 if (actp == NULL) { 4390 // Retrieve the preinstalled signal handler from jvm 4391 actp = get_preinstalled_handler(sig); 4392 } 4393 4394 return actp; 4395 } 4396 4397 static bool call_chained_handler(struct sigaction *actp, int sig, 4398 siginfo_t *siginfo, void *context) { 4399 // Call the old signal handler 4400 if (actp->sa_handler == SIG_DFL) { 4401 // It's more reasonable to let jvm treat it as an unexpected exception 4402 // instead of taking the default action. 4403 return false; 4404 } else if (actp->sa_handler != SIG_IGN) { 4405 if ((actp->sa_flags & SA_NODEFER) == 0) { 4406 // automaticlly block the signal 4407 sigaddset(&(actp->sa_mask), sig); 4408 } 4409 4410 sa_handler_t hand; 4411 sa_sigaction_t sa; 4412 bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0; 4413 // retrieve the chained handler 4414 if (siginfo_flag_set) { 4415 sa = actp->sa_sigaction; 4416 } else { 4417 hand = actp->sa_handler; 4418 } 4419 4420 if ((actp->sa_flags & SA_RESETHAND) != 0) { 4421 actp->sa_handler = SIG_DFL; 4422 } 4423 4424 // try to honor the signal mask 4425 sigset_t oset; 4426 thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset); 4427 4428 // call into the chained handler 4429 if (siginfo_flag_set) { 4430 (*sa)(sig, siginfo, context); 4431 } else { 4432 (*hand)(sig); 4433 } 4434 4435 // restore the signal mask 4436 thr_sigsetmask(SIG_SETMASK, &oset, 0); 4437 } 4438 // Tell jvm's signal handler the signal is taken care of. 
  return true;
}

// Forward 'sig' to a handler that was installed before the VM's own
// (signal chaining).  Returns true iff a chained handler was found and
// invoked; false means the caller must handle the signal itself.
bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  bool chained = false;
  // signal-chaining
  if (UseSignalChaining) {
    struct sigaction *actp = get_chained_signal_action(sig);
    if (actp != NULL) {
      chained = call_chained_handler(actp, sig, siginfo, context);
    }
  }
  return chained;
}

// Return the sigaction saved for 'sig' before the VM overwrote it,
// or NULL if no handler was saved for that signal.
struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  if (preinstalled_sigs[sig] != 0) {
    return &chainedsigactions[sig];
  }
  return NULL;
}

// Record the pre-existing handler for 'sig' so chained_handler() can
// forward to it later.  chainedsigactions/preinstalled_sigs are indexed
// by signal number, hence the Maxsignum range assert.
void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {

  assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  chainedsigactions[sig] = oldAct;
  preinstalled_sigs[sig] = 1;
}

// Install the VM's handler for 'sig'.  A pre-existing foreign handler is
// either left in place (AllowUserSignalHandlers or !set_installed), saved
// for chaining (UseSignalChaining + oktochain), or treated as fatal.
void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);
  // Pick whichever handler field is populated; either may hold the address.
  void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                      : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      if (oktochain) {
        // save the old handler in jvm
        save_preinstalled_handler(sig, oldAct);
      } else {
        vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
      }
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;

  sigAct.sa_sigaction = signalHandler;
  // Handle SIGSEGV on alternate signal stack if
  // not using stack banging
  if (!UseStackBanging && sig == SIGSEGV) {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
  // Interruptible i/o requires SA_RESTART cleared so EINTR
  // is returned instead of restarting system calls
  } else if (sig == os::Solaris::SIGinterrupt()) {
    sigemptyset(&sigAct.sa_mask);
    sigAct.sa_handler = NULL;
    sigAct.sa_flags = SA_SIGINFO;
    sigAct.sa_sigaction = sigINTRHandler;
  } else {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  }
  // Remember the flags we installed so check_signal_handler() can detect
  // later tampering by user/JNI code.
  os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);

  sigaction(sig, &sigAct, &oldAct);

  void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                       : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}


// Run check_signal_handler(sig) once per signal; check_signal_done marks
// signals already reported so warnings are not repeated.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Solaris::check_signal_handler(sig)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {
  // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  // thereby preventing a NULL checks.
  if(!check_addr0_done) check_addr0_done = check_addr0(tty);

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check for the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // See comments above for using JVM1/JVM2 and UseAltSigs
  DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  DO_SIGNAL_CHECK(os::Solaris::SIGasync());

}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// Cached libc sigaction, resolved via dlsym to bypass any interposer
// (e.g. libjsig) that may wrap the default symbol.
static os_sigaction_t os_sigaction = NULL;

// Verify that the handler currently installed for 'sig' (and its flags)
// is still the one the VM expects.  On mismatch print a one-time warning,
// mark the signal in check_signal_done, and dump all handler state.
void os::Solaris::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler) ;


  // Determine which handler the VM should have installed for this signal.
  switch(sig) {
    case SIGSEGV:
    case SIGBUS:
    case SIGFPE:
    case SIGPIPE:
    case SIGXFSZ:
    case SIGILL:
      jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      break;

    case SHUTDOWN1_SIGNAL:
    case SHUTDOWN2_SIGNAL:
    case SHUTDOWN3_SIGNAL:
    case BREAK_SIGNAL:
      jvmHandler = (address)user_handler();
      break;

    default:
      int intrsig = os::Solaris::SIGinterrupt();
      int asynsig = os::Solaris::SIGasync();

      if (sig == intrsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
      } else if (sig == asynsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      } else {
        // Not a signal the VM manages - nothing to check.
        return;
      }
      break;
  }


  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Print all the signal handler state
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }

}

// Install all of the VM's signal handlers, coordinating with libjsig
// (if loaded) via its JVM_begin/end_signal_setting protocol so that
// handlers installed inside the bracket are recorded for chaining.
void os::Solaris::install_signal_handlers() {
  bool libjsigdone = false;
  signal_handlers_are_installed = true;

  // signal-chaining
  typedef void (*signal_setting_t)();
  signal_setting_t begin_signal_setting = NULL;
  signal_setting_t end_signal_setting = NULL;
  // Presence of JVM_begin_signal_setting indicates libjsig is loaded.
  begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  if (begin_signal_setting != NULL) {
    end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
    get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                       dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
    get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
    libjsig_is_loaded = true;
    if (os::Solaris::get_libjsig_version != NULL) {
      libjsigversion = (*os::Solaris::get_libjsig_version)();
    }
    assert(UseSignalChaining, "should enable signal-chaining");
  }
  if (libjsig_is_loaded) {
    // Tell libjsig jvm is setting signal handlers
    (*begin_signal_setting)();
  }

  set_signal_handler(SIGSEGV, true, true);
  set_signal_handler(SIGPIPE, true, true);
  set_signal_handler(SIGXFSZ, true, true);
  set_signal_handler(SIGBUS, true, true);
  set_signal_handler(SIGILL, true, true);
  set_signal_handler(SIGFPE, true, true);


  if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {

    // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
    // can not register overridable signals which might be > 32
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
      // Tell libjsig jvm has finished setting signal handlers
      (*end_signal_setting)();
      libjsigdone = true;
    }
  }

  // Never ok to chain our SIGinterrupt
  set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
  set_signal_handler(os::Solaris::SIGasync(), true, true);

  if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig jvm finishes setting signal handlers
    (*end_signal_setting)();
  }

  // We don't activate signal checker if libjsig is in place, we trust ourselves
  // and if UserSignalHandler is installed all bets are off.
  // Log that signal checking is off only if -verbose:jni is specified.
  if (CheckJNICalls) {
    if (libjsig_is_loaded) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
    if (AllowUserSignalHandlers) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
  }
}


void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);

// Names indexed by signal number; entry 0 is a placeholder so that
// signames[sig] works directly for 1-based signal numbers.
const char * signames[] = {
  "SIG0",
  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  "SIGCANCEL", "SIGLOST"
};

// Format a human-readable name for 'exception_code' into 'buf'.
// Known signals get their symbolic name; others in (0, SIGRTMAX] get
// "SIG<n>".  Returns buf, or NULL if the code is out of range.
const char* os::exception_name(int exception_code, char* buf, size_t size) {
  if (0 < exception_code && exception_code <= SIGRTMAX) {
    // signal
    if (exception_code < sizeof(signames)/sizeof(const char*)) {
      jio_snprintf(buf, size, "%s", signames[exception_code]);
    } else {
      jio_snprintf(buf, size, "SIG%d", exception_code);
    }
    return buf;
  } else {
    return NULL;
  }
}

// (Static) wrappers for the new libthread API
int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
int_fnP_thread_t_i os::Solaris::_thr_setmutator;
int_fnP_thread_t
os::Solaris::_thr_suspend_mutator;
int_fnP_thread_t os::Solaris::_thr_continue_mutator;

// (Static) wrapper for getisax(2) call.
os::Solaris::getisax_func_t os::Solaris::_getisax = 0;

// (Static) wrappers for the liblgrp API
os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;

// (Static) wrapper for meminfo() call.
os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;

// Look up 'name' with dlsym, first in the default search order and then
// among objects loaded after libjvm.  Returns NULL if not found.
static address resolve_symbol_lazy(const char* name) {
  address addr = (address) dlsym(RTLD_DEFAULT, name);
  if(addr == NULL) {
    // RTLD_DEFAULT was not defined on some early versions of 2.5.1
    addr = (address) dlsym(RTLD_NEXT, name);
  }
  return addr;
}

// Like resolve_symbol_lazy(), but treats a missing symbol as fatal.
static address resolve_symbol(const char* name) {
  address addr = resolve_symbol_lazy(name);
  if(addr == NULL) {
    fatal(dlerror());
  }
  return addr;
}



// isT2_libthread()
//
// Routine to determine if we are currently using the new T2 libthread.
//
// We determine if we are using T2 by reading /proc/self/lstatus and
// looking for a thread with the ASLWP bit set. If we find this status
// bit set, we must assume that we are NOT using T2. The T2 team
// has approved this algorithm.
//
// We need to determine if we are running with the new T2 libthread
// since setting native thread priorities is handled differently
// when using this library. All threads created using T2 are bound
// threads. Calling thr_setprio is meaningless in this case.
//
bool isT2_libthread() {
  static prheader_t * lwpArray = NULL;
  static int lwpSize = 0;
  static int lwpFile = -1;
  lwpstatus_t * that;
  char lwpName [128];  // NOTE(review): appears unused - candidate for removal
  bool isT2 = false;

#define ADR(x)  ((uintptr_t)(x))
// Address of the i-th lwpstatus_t entry; entries of pr_entsize bytes
// follow immediately after the prheader_t.
#define LWPINDEX(ary,ix)   ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))

  lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
  if (lwpFile < 0) {
    if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
    return false;
  }
  // Start with a 16K buffer and grow it until the whole lwp status
  // snapshot fits (the process may gain lwps between attempts).
  lwpSize = 16*1024;
  for (;;) {
    ::lseek64 (lwpFile, 0, SEEK_SET);
    lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize);
    if (::read(lwpFile, lwpArray, lwpSize) < 0) {
      if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
      break;
    }
    if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
      // We got a good snapshot - now iterate over the list.
      int aslwpcount = 0;
      for (int i = 0; i < lwpArray->pr_nent; i++ ) {
        that = LWPINDEX(lwpArray,i);
        if (that->pr_flags & PR_ASLWP) {
          aslwpcount++;
        }
      }
      // No ASLWP present => T2 libthread (see header comment).
      if (aslwpcount == 0) isT2 = true;
      break;
    }
    lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
    FREE_C_HEAP_ARRAY(char, lwpArray);  // retry.
  }

  FREE_C_HEAP_ARRAY(char, lwpArray);
  ::close (lwpFile);
  if (ThreadPriorityVerbose) {
    if (isT2) tty->print_cr("We are running with a T2 libthread\n");
    else tty->print_cr("We are not running with a T2 libthread\n");
  }
  return isT2;
}


// Resolve the libthread entry points the VM uses and record whether the
// T2 libthread is in use.  Fatal if the OS/libthread is too old (pre-5.6).
void os::Solaris::libthread_init() {
  address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");

  // Determine if we are running with the new T2 libthread
  os::Solaris::set_T2_libthread(isT2_libthread());

  lwp_priocntl_init();

  // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  if(func == NULL) {
    func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on an new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
    guarantee(func != NULL, "libthread.so is too old.");
  }

  // Initialize the new libthread getstate API wrappers
  func = resolve_symbol("thr_getstate");
  os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));

  func = resolve_symbol("thr_setstate");
  os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));

  func = resolve_symbol("thr_setmutator");
  os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));

  func = resolve_symbol("thr_suspend_mutator");
  os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  func = resolve_symbol("thr_continue_mutator");
  os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  // Record the address range of libthread's signal handler trampoline
  // (used elsewhere to recognize frames inside the handler).
  int size;
  void (*handler_info_func)(address *, int *);
  handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  handler_info_func(&handler_start, &size);
  handler_end = handler_start + size;
}


int_fnP_mutex_tP
os::Solaris::_mutex_lock;
int_fnP_mutex_tP os::Solaris::_mutex_trylock;
int_fnP_mutex_tP os::Solaris::_mutex_unlock;
int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
int_fnP_mutex_tP os::Solaris::_mutex_destroy;
int os::Solaris::_mutex_scope = USYNC_THREAD;

int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
int_fnP_cond_tP os::Solaris::_cond_signal;
int_fnP_cond_tP os::Solaris::_cond_broadcast;
int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
int_fnP_cond_tP os::Solaris::_cond_destroy;
int os::Solaris::_cond_scope = USYNC_THREAD;

// Bind the VM's mutex/condvar function pointers to one of three
// implementations: _lwp_* (UseLWPSynchronization), pthread_*
// (UsePthreads), or the default Solaris thr/mutex_* API.
void os::Solaris::synchronization_init() {
  if(UseLWPSynchronization) {
    os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
    os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
    os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
    os::Solaris::set_mutex_init(lwp_mutex_init);
    os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
    os::Solaris::set_mutex_scope(USYNC_THREAD);

    os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
    os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
    os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
    os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
    os::Solaris::set_cond_init(lwp_cond_init);
    os::Solaris::set_cond_destroy(lwp_cond_destroy);
    os::Solaris::set_cond_scope(USYNC_THREAD);
  }
  else {
    os::Solaris::set_mutex_scope(USYNC_THREAD);
    os::Solaris::set_cond_scope(USYNC_THREAD);

    if(UsePthreads) {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
      os::Solaris::set_mutex_init(pthread_mutex_default_init);
      os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
      os::Solaris::set_cond_init(pthread_cond_default_init);
      os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
    }
    else {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
      os::Solaris::set_mutex_init(::mutex_init);
      os::Solaris::set_mutex_destroy(::mutex_destroy);

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
      os::Solaris::set_cond_init(::cond_init);
      os::Solaris::set_cond_destroy(::cond_destroy);
    }
  }
}

// Load liblgrp.so.1 and resolve the locality-group API used by the NUMA
// allocator.  Returns true on success (cookie initialized), false if the
// library is unavailable.
bool os::Solaris::liblgrp_init() {
  void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
  if (handle != NULL) {
    os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
    os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
    os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
    os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
    os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
    os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
    os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
    os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
                                       dlsym(handle, "lgrp_cookie_stale")));

    lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
    set_lgrp_cookie(c);
    return true;
  }
  return false;
}

// Resolve optional libc entry points (getisax, meminfo) that may not
// exist on older Solaris releases; missing symbols are simply left NULL.
void os::Solaris::misc_sym_init() {
  address func;

  // getisax
  func = resolve_symbol_lazy("getisax");
  if (func != NULL) {
    os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
  }

  // meminfo
  func = resolve_symbol_lazy("meminfo");
  if (func != NULL) {
    os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
  }
}

// Thin wrapper over getisax(2); only valid after misc_sym_init() found it.
uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  assert(_getisax != NULL, "_getisax not set");
  return _getisax(array, n);
}

// Symbol doesn't exist in Solaris 8 pset.h
#ifndef PS_MYID
#define PS_MYID -3
#endif

// int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
typedef
long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
// Resolved lazily in init_pset_getloadavg_ptr(); NULL if the libc on
// this release does not provide pset_getloadavg.
static pset_getloadavg_type pset_getloadavg_ptr = NULL;

void init_pset_getloadavg_ptr(void) {
  pset_getloadavg_ptr =
    (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
  if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
    warning("pset_getloadavg function not found");
  }
}

int os::Solaris::_dev_zero_fd = -1;

// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = getpid();

  max_hrtime = first_hrtime = gethrtime();

  init_random(1234567);

  page_size = sysconf(_SC_PAGESIZE);
  if (page_size == -1)
    fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
                  strerror(errno)));
  init_page_sizes((size_t) page_size);

  Solaris::initialize_system_info();

  // Initialize misc. symbols as soon as possible, so we can use them
  // if we need them.
  Solaris::misc_sym_init();

  // Keep a shared /dev/zero descriptor open for anonymous mappings.
  int fd = ::open("/dev/zero", O_RDWR);
  if (fd < 0) {
    fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
  } else {
    Solaris::set_dev_zero_fd(fd);

    // Close on exec, child won't inherit.
    fcntl(fd, F_SETFD, FD_CLOEXEC);
  }

  clock_tics_per_sec = CLK_TCK;

  // check if dladdr1() exists; dladdr1 can provide more information than
  // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  // and is available on linker patches for 5.7 and 5.8.
  // libdl.so must have been loaded, this call is just an entry lookup
  void * hdl = dlopen("libdl.so", RTLD_NOW);
  if (hdl)
    dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));

  // (Solaris only) this switches to calls that actually do locking.
  ThreadCritical::initialize();

  main_thread = thr_self();

  // Constant minimum stack size allowed. It must be at least
  // the minimum of what the OS supports (thr_min_stack()), and
  // enough to allow the thread to get to user bytecode execution.
  Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages. The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > 8*K) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
  }
}

// To install functions for atexit system call
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // try to enable extended file IO ASAP, see 6431278
  os::Solaris::try_enable_extended_io();

  // Allocate a single page and mark it as readable for safepoint polling. Also
  // use this first mmap call to check support for MAP_ALIGN.
  address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
                                                      page_size,
                                                      MAP_PRIVATE | MAP_ALIGN,
                                                      PROT_READ);
  if (polling_page == NULL) {
    // MAP_ALIGN unsupported on this release; retry without it.
    has_map_align = false;
    polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
                                                PROT_READ);
  }

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  os::large_page_init();

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
                                        (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
                                        2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
      threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
                  os::Solaris::min_stack_allowed/K);
    return JNI_ERR;
  }

  // For 64kbps there will be a 64kb page size, which makes
  // the usable default stack size quite a bit less.  Increase the
  // stack for 64kb (or any > than 8kb) pages, this increases
  // virtual memory fragmentation (since we're not creating the
  // stack on a power of 2 boundary.  The real fix for this
  // should be to fix the guard page mechanism.

  if (vm_page_size() > 8*K) {
    threadStackSizeInBytes = (threadStackSizeInBytes != 0)
       ? threadStackSizeInBytes +
         ((StackYellowPages + StackRedPages) * vm_page_size())
       : 0;
    ThreadStackSize = threadStackSizeInBytes/K;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
                                                vm_page_size()));

  Solaris::libthread_init();

  if (UseNUMA) {
    if (!Solaris::liblgrp_init()) {
      UseNUMA = false;
    } else {
      size_t lgrp_limit = os::numa_get_groups_num();
      int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
      size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
      FREE_C_HEAP_ARRAY(int, lgrp_ids);
      if (lgrp_num < 2) {
        // There's only one locality group, disable NUMA.
        UseNUMA = false;
      }
    }
    // ISM is not compatible with the NUMA allocator - it always allocates
    // pages round-robin across the lgroups.
    if (UseNUMA && UseLargePages && UseISM) {
      if (!FLAG_IS_DEFAULT(UseNUMA)) {
        if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseISM)) {
          UseLargePages = false;
        } else {
          warning("UseNUMA is not compatible with ISM large pages, disabling NUMA allocator");
          UseNUMA = false;
        }
      } else {
        UseNUMA = false;
      }
    }
    if (!UseNUMA && ForceNUMA) {
      UseNUMA = true;
    }
  }

  Solaris::signal_sets_init();
  Solaris::init_signal_mem();
  Solaris::install_signal_handlers();

  // Old libjsig cannot chain signals above the pre-1.4.1 maximum.
  if (libjsigversion < JSIG_VERSION_1_4_1) {
    Maxlibjsigsigs = OLDMAXSIGNUM;
  }

  // initialize synchronization primitives to use either thread or
  // lwp synchronization (controlled by UseLWPSynchronization)
  Solaris::synchronization_init();

  if (MaxFDLimit) {
    // set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode))
        perror("os::init_2 getrlimit failed");
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode))
          perror("os::init_2 setrlimit failed");
      }
    }
  }

  // Calculate theoretical max. size of Threads to guard gainst
  // artifical out-of-memory situations, where all available address-
  // space has been reserved by thread stacks. Default stack size is 1Mb.
  size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
    JavaThread::stack_size_at_create() : (1*K*K);
  assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris has a maximum of 4Gb of user programs. Calculate the thread limit when
  // we should start doing Virtual Memory banging. Currently when the threads will
  // have used all but 200Mb of space.
  size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
  Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;

  // at-exit methods are called in the reverse order of their registration.
  // In Solaris 7 and earlier, atexit functions are called on return from
  // main or as a result of a call to exit(3C). There can be only 32 of
  // these functions registered and atexit() does not set errno. In Solaris
  // 8 and later, there is no limit to the number of functions registered
  // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
  // functions are called upon dlclose(3DL) in addition to return from main
  // and exit(3C).

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // Init pset_loadavg function pointer
  init_pset_getloadavg_ptr();

  return JNI_OK;
}

void os::init_3(void) {
  return;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
    fatal("Could not disable polling page");
};

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
    fatal("Could not enable polling page");
};

// OS interface.

// No C-heap consistency check is implemented on Solaris; always reports OK.
bool os::check_heap(bool force) { return true; }

typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
static vsnprintf_t sol_vsnprintf = NULL;

// Resolve and call a vsnprintf implementation (__vsnprintf preferred),
// searching objects loaded after libjvm first, then before it.  The
// resolved pointer is cached in sol_vsnprintf.
int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
  if (!sol_vsnprintf) {
    //search for the named symbol in the objects that were loaded after libjvm
    void* where = RTLD_NEXT;
    if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
      sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
    if (!sol_vsnprintf){
      //search for the named symbol in the objects that were loaded before libjvm
      where = RTLD_DEFAULT;
      if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
        sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
      assert(sol_vsnprintf != NULL, "vsnprintf not found");
    }
  }
  return (*sol_vsnprintf)(buf, count, fmt, argptr);
}


// Is a (classpath) directory empty?
5306 bool os::dir_is_empty(const char* path) { 5307 DIR *dir = NULL; 5308 struct dirent *ptr; 5309 5310 dir = opendir(path); 5311 if (dir == NULL) return true; 5312 5313 /* Scan the directory */ 5314 bool result = true; 5315 char buf[sizeof(struct dirent) + MAX_PATH]; 5316 struct dirent *dbuf = (struct dirent *) buf; 5317 while (result && (ptr = readdir(dir, dbuf)) != NULL) { 5318 if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) { 5319 result = false; 5320 } 5321 } 5322 closedir(dir); 5323 return result; 5324 } 5325 5326 // This code originates from JDK's sysOpen and open64_w 5327 // from src/solaris/hpi/src/system_md.c 5328 5329 #ifndef O_DELETE 5330 #define O_DELETE 0x10000 5331 #endif 5332 5333 // Open a file. Unlink the file immediately after open returns 5334 // if the specified oflag has the O_DELETE flag set. 5335 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c 5336 5337 int os::open(const char *path, int oflag, int mode) { 5338 if (strlen(path) > MAX_PATH - 1) { 5339 errno = ENAMETOOLONG; 5340 return -1; 5341 } 5342 int fd; 5343 int o_delete = (oflag & O_DELETE); 5344 oflag = oflag & ~O_DELETE; 5345 5346 fd = ::open64(path, oflag, mode); 5347 if (fd == -1) return -1; 5348 5349 //If the open succeeded, the file might still be a directory 5350 { 5351 struct stat64 buf64; 5352 int ret = ::fstat64(fd, &buf64); 5353 int st_mode = buf64.st_mode; 5354 5355 if (ret != -1) { 5356 if ((st_mode & S_IFMT) == S_IFDIR) { 5357 errno = EISDIR; 5358 ::close(fd); 5359 return -1; 5360 } 5361 } else { 5362 ::close(fd); 5363 return -1; 5364 } 5365 } 5366 /* 5367 * 32-bit Solaris systems suffer from: 5368 * 5369 * - an historical default soft limit of 256 per-process file 5370 * descriptors that is too low for many Java programs. 5371 * 5372 * - a design flaw where file descriptors created using stdio 5373 * fopen must be less than 256, _even_ when the first limit above 5374 * has been raised. 
This can cause calls to fopen (but not calls to 5375 * open, for example) to fail mysteriously, perhaps in 3rd party 5376 * native code (although the JDK itself uses fopen). One can hardly 5377 * criticize them for using this most standard of all functions. 5378 * 5379 * We attempt to make everything work anyways by: 5380 * 5381 * - raising the soft limit on per-process file descriptors beyond 5382 * 256 5383 * 5384 * - As of Solaris 10u4, we can request that Solaris raise the 256 5385 * stdio fopen limit by calling function enable_extended_FILE_stdio. 5386 * This is done in init_2 and recorded in enabled_extended_FILE_stdio 5387 * 5388 * - If we are stuck on an old (pre 10u4) Solaris system, we can 5389 * workaround the bug by remapping non-stdio file descriptors below 5390 * 256 to ones beyond 256, which is done below. 5391 * 5392 * See: 5393 * 1085341: 32-bit stdio routines should support file descriptors >255 5394 * 6533291: Work around 32-bit Solaris stdio limit of 256 open files 5395 * 6431278: Netbeans crash on 32 bit Solaris: need to call 5396 * enable_extended_FILE_stdio() in VM initialisation 5397 * Giri Mandalika's blog 5398 * http://technopark02.blogspot.com/2005_05_01_archive.html 5399 */ 5400 #ifndef _LP64 5401 if ((!enabled_extended_FILE_stdio) && fd < 256) { 5402 int newfd = ::fcntl(fd, F_DUPFD, 256); 5403 if (newfd != -1) { 5404 ::close(fd); 5405 fd = newfd; 5406 } 5407 } 5408 #endif // 32-bit Solaris 5409 /* 5410 * All file descriptors that are opened in the JVM and not 5411 * specifically destined for a subprocess should have the 5412 * close-on-exec flag set. If we don't set it, then careless 3rd 5413 * party native code might fork and exec without closing all 5414 * appropriate file descriptors (e.g. 
as we do in closeDescriptors in 5415 * UNIXProcess.c), and this in turn might: 5416 * 5417 * - cause end-of-file to fail to be detected on some file 5418 * descriptors, resulting in mysterious hangs, or 5419 * 5420 * - might cause an fopen in the subprocess to fail on a system 5421 * suffering from bug 1085341. 5422 * 5423 * (Yes, the default setting of the close-on-exec flag is a Unix 5424 * design flaw) 5425 * 5426 * See: 5427 * 1085341: 32-bit stdio routines should support file descriptors >255 5428 * 4843136: (process) pipe file descriptor from Runtime.exec not being closed 5429 * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9 5430 */ 5431 #ifdef FD_CLOEXEC 5432 { 5433 int flags = ::fcntl(fd, F_GETFD); 5434 if (flags != -1) 5435 ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC); 5436 } 5437 #endif 5438 5439 if (o_delete != 0) { 5440 ::unlink(path); 5441 } 5442 return fd; 5443 } 5444 5445 // create binary file, rewriting existing file if required 5446 int os::create_binary_file(const char* path, bool rewrite_existing) { 5447 int oflags = O_WRONLY | O_CREAT; 5448 if (!rewrite_existing) { 5449 oflags |= O_EXCL; 5450 } 5451 return ::open64(path, oflags, S_IREAD | S_IWRITE); 5452 } 5453 5454 // return current position of file pointer 5455 jlong os::current_file_offset(int fd) { 5456 return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR); 5457 } 5458 5459 // move file pointer to the specified offset 5460 jlong os::seek_to_file_offset(int fd, jlong offset) { 5461 return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET); 5462 } 5463 5464 jlong os::lseek(int fd, jlong offset, int whence) { 5465 return (jlong) ::lseek64(fd, offset, whence); 5466 } 5467 5468 char * os::native_path(char *path) { 5469 return path; 5470 } 5471 5472 int os::ftruncate(int fd, jlong length) { 5473 return ::ftruncate64(fd, length); 5474 } 5475 5476 int os::fsync(int fd) { 5477 RESTARTABLE_RETURN_INT(::fsync(fd)); 5478 } 5479 5480 int os::available(int fd, jlong *bytes) { 5481 
jlong cur, end; 5482 int mode; 5483 struct stat64 buf64; 5484 5485 if (::fstat64(fd, &buf64) >= 0) { 5486 mode = buf64.st_mode; 5487 if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) { 5488 /* 5489 * XXX: is the following call interruptible? If so, this might 5490 * need to go through the INTERRUPT_IO() wrapper as for other 5491 * blocking, interruptible calls in this file. 5492 */ 5493 int n,ioctl_return; 5494 5495 INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n),ioctl_return,os::Solaris::clear_interrupted); 5496 if (ioctl_return>= 0) { 5497 *bytes = n; 5498 return 1; 5499 } 5500 } 5501 } 5502 if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) { 5503 return 0; 5504 } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) { 5505 return 0; 5506 } else if (::lseek64(fd, cur, SEEK_SET) == -1) { 5507 return 0; 5508 } 5509 *bytes = end - cur; 5510 return 1; 5511 } 5512 5513 // Map a block of memory. 5514 char* os::map_memory(int fd, const char* file_name, size_t file_offset, 5515 char *addr, size_t bytes, bool read_only, 5516 bool allow_exec) { 5517 int prot; 5518 int flags; 5519 5520 if (read_only) { 5521 prot = PROT_READ; 5522 flags = MAP_SHARED; 5523 } else { 5524 prot = PROT_READ | PROT_WRITE; 5525 flags = MAP_PRIVATE; 5526 } 5527 5528 if (allow_exec) { 5529 prot |= PROT_EXEC; 5530 } 5531 5532 if (addr != NULL) { 5533 flags |= MAP_FIXED; 5534 } 5535 5536 char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags, 5537 fd, file_offset); 5538 if (mapped_address == MAP_FAILED) { 5539 return NULL; 5540 } 5541 return mapped_address; 5542 } 5543 5544 5545 // Remap a block of memory. 5546 char* os::remap_memory(int fd, const char* file_name, size_t file_offset, 5547 char *addr, size_t bytes, bool read_only, 5548 bool allow_exec) { 5549 // same as map_memory() on this OS 5550 return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only, 5551 allow_exec); 5552 } 5553 5554 5555 // Unmap a block of memory. 
5556 bool os::unmap_memory(char* addr, size_t bytes) { 5557 return munmap(addr, bytes) == 0; 5558 } 5559 5560 void os::pause() { 5561 char filename[MAX_PATH]; 5562 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 5563 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 5564 } else { 5565 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 5566 } 5567 5568 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 5569 if (fd != -1) { 5570 struct stat buf; 5571 ::close(fd); 5572 while (::stat(filename, &buf) == 0) { 5573 (void)::poll(NULL, 0, 100); 5574 } 5575 } else { 5576 jio_fprintf(stderr, 5577 "Could not open pause file '%s', continuing immediately.\n", filename); 5578 } 5579 } 5580 5581 #ifndef PRODUCT 5582 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS 5583 // Turn this on if you need to trace synch operations. 5584 // Set RECORD_SYNCH_LIMIT to a large-enough value, 5585 // and call record_synch_enable and record_synch_disable 5586 // around the computation of interest. 
5587 5588 void record_synch(char* name, bool returning); // defined below 5589 5590 class RecordSynch { 5591 char* _name; 5592 public: 5593 RecordSynch(char* name) :_name(name) 5594 { record_synch(_name, false); } 5595 ~RecordSynch() { record_synch(_name, true); } 5596 }; 5597 5598 #define CHECK_SYNCH_OP(ret, name, params, args, inner) \ 5599 extern "C" ret name params { \ 5600 typedef ret name##_t params; \ 5601 static name##_t* implem = NULL; \ 5602 static int callcount = 0; \ 5603 if (implem == NULL) { \ 5604 implem = (name##_t*) dlsym(RTLD_NEXT, #name); \ 5605 if (implem == NULL) fatal(dlerror()); \ 5606 } \ 5607 ++callcount; \ 5608 RecordSynch _rs(#name); \ 5609 inner; \ 5610 return implem args; \ 5611 } 5612 // in dbx, examine callcounts this way: 5613 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done 5614 5615 #define CHECK_POINTER_OK(p) \ 5616 (Universe::perm_gen() == NULL || !Universe::is_reserved_heap((oop)(p))) 5617 #define CHECK_MU \ 5618 if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only."); 5619 #define CHECK_CV \ 5620 if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only."); 5621 #define CHECK_P(p) \ 5622 if (!CHECK_POINTER_OK(p)) fatal(false, "Pointer must be in C heap only."); 5623 5624 #define CHECK_MUTEX(mutex_op) \ 5625 CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU); 5626 5627 CHECK_MUTEX( mutex_lock) 5628 CHECK_MUTEX( _mutex_lock) 5629 CHECK_MUTEX( mutex_unlock) 5630 CHECK_MUTEX(_mutex_unlock) 5631 CHECK_MUTEX( mutex_trylock) 5632 CHECK_MUTEX(_mutex_trylock) 5633 5634 #define CHECK_COND(cond_op) \ 5635 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV); 5636 5637 CHECK_COND( cond_wait); 5638 CHECK_COND(_cond_wait); 5639 CHECK_COND(_cond_wait_cancel); 5640 5641 #define CHECK_COND2(cond_op) \ 5642 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV); 5643 5644 CHECK_COND2( cond_timedwait); 5645 
CHECK_COND2(_cond_timedwait); 5646 CHECK_COND2(_cond_timedwait_cancel); 5647 5648 // do the _lwp_* versions too 5649 #define mutex_t lwp_mutex_t 5650 #define cond_t lwp_cond_t 5651 CHECK_MUTEX( _lwp_mutex_lock) 5652 CHECK_MUTEX( _lwp_mutex_unlock) 5653 CHECK_MUTEX( _lwp_mutex_trylock) 5654 CHECK_MUTEX( __lwp_mutex_lock) 5655 CHECK_MUTEX( __lwp_mutex_unlock) 5656 CHECK_MUTEX( __lwp_mutex_trylock) 5657 CHECK_MUTEX(___lwp_mutex_lock) 5658 CHECK_MUTEX(___lwp_mutex_unlock) 5659 5660 CHECK_COND( _lwp_cond_wait); 5661 CHECK_COND( __lwp_cond_wait); 5662 CHECK_COND(___lwp_cond_wait); 5663 5664 CHECK_COND2( _lwp_cond_timedwait); 5665 CHECK_COND2( __lwp_cond_timedwait); 5666 #undef mutex_t 5667 #undef cond_t 5668 5669 CHECK_SYNCH_OP(int, _lwp_suspend2, (int lwp, int *n), (lwp, n), 0); 5670 CHECK_SYNCH_OP(int,__lwp_suspend2, (int lwp, int *n), (lwp, n), 0); 5671 CHECK_SYNCH_OP(int, _lwp_kill, (int lwp, int n), (lwp, n), 0); 5672 CHECK_SYNCH_OP(int,__lwp_kill, (int lwp, int n), (lwp, n), 0); 5673 CHECK_SYNCH_OP(int, _lwp_sema_wait, (lwp_sema_t* p), (p), CHECK_P(p)); 5674 CHECK_SYNCH_OP(int,__lwp_sema_wait, (lwp_sema_t* p), (p), CHECK_P(p)); 5675 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv), (cv), CHECK_CV); 5676 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv), (cv), CHECK_CV); 5677 5678 5679 // recording machinery: 5680 5681 enum { RECORD_SYNCH_LIMIT = 200 }; 5682 char* record_synch_name[RECORD_SYNCH_LIMIT]; 5683 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT]; 5684 bool record_synch_returning[RECORD_SYNCH_LIMIT]; 5685 thread_t record_synch_thread[RECORD_SYNCH_LIMIT]; 5686 int record_synch_count = 0; 5687 bool record_synch_enabled = false; 5688 5689 // in dbx, examine recorded data this way: 5690 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done 5691 5692 void record_synch(char* name, bool returning) { 5693 if (record_synch_enabled) { 5694 if (record_synch_count < RECORD_SYNCH_LIMIT) { 5695 
record_synch_name[record_synch_count] = name; 5696 record_synch_returning[record_synch_count] = returning; 5697 record_synch_thread[record_synch_count] = thr_self(); 5698 record_synch_arg0ptr[record_synch_count] = &name; 5699 record_synch_count++; 5700 } 5701 // put more checking code here: 5702 // ... 5703 } 5704 } 5705 5706 void record_synch_enable() { 5707 // start collecting trace data, if not already doing so 5708 if (!record_synch_enabled) record_synch_count = 0; 5709 record_synch_enabled = true; 5710 } 5711 5712 void record_synch_disable() { 5713 // stop collecting trace data 5714 record_synch_enabled = false; 5715 } 5716 5717 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS 5718 #endif // PRODUCT 5719 5720 const intptr_t thr_time_off = (intptr_t)(&((prusage_t *)(NULL))->pr_utime); 5721 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) - 5722 (intptr_t)(&((prusage_t *)(NULL))->pr_utime); 5723 5724 5725 // JVMTI & JVM monitoring and management support 5726 // The thread_cpu_time() and current_thread_cpu_time() are only 5727 // supported if is_thread_cpu_time_supported() returns true. 5728 // They are not supported on Solaris T1. 5729 5730 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 5731 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 5732 // of a thread. 5733 // 5734 // current_thread_cpu_time() and thread_cpu_time(Thread *) 5735 // returns the fast estimate available on the platform. 5736 5737 // hrtime_t gethrvtime() return value includes 5738 // user time but does not include system time 5739 jlong os::current_thread_cpu_time() { 5740 return (jlong) gethrvtime(); 5741 } 5742 5743 jlong os::thread_cpu_time(Thread *thread) { 5744 // return user level CPU time only to be consistent with 5745 // what current_thread_cpu_time returns. 
5746 // thread_cpu_time_info() must be changed if this changes 5747 return os::thread_cpu_time(thread, false /* user time only */); 5748 } 5749 5750 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 5751 if (user_sys_cpu_time) { 5752 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 5753 } else { 5754 return os::current_thread_cpu_time(); 5755 } 5756 } 5757 5758 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) { 5759 char proc_name[64]; 5760 int count; 5761 prusage_t prusage; 5762 jlong lwp_time; 5763 int fd; 5764 5765 sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage", 5766 getpid(), 5767 thread->osthread()->lwp_id()); 5768 fd = ::open(proc_name, O_RDONLY); 5769 if ( fd == -1 ) return -1; 5770 5771 do { 5772 count = ::pread(fd, 5773 (void *)&prusage.pr_utime, 5774 thr_time_size, 5775 thr_time_off); 5776 } while (count < 0 && errno == EINTR); 5777 ::close(fd); 5778 if ( count < 0 ) return -1; 5779 5780 if (user_sys_cpu_time) { 5781 // user + system CPU time 5782 lwp_time = (((jlong)prusage.pr_stime.tv_sec + 5783 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) + 5784 (jlong)prusage.pr_stime.tv_nsec + 5785 (jlong)prusage.pr_utime.tv_nsec; 5786 } else { 5787 // user level CPU time only 5788 lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) + 5789 (jlong)prusage.pr_utime.tv_nsec; 5790 } 5791 5792 return(lwp_time); 5793 } 5794 5795 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 5796 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits 5797 info_ptr->may_skip_backward = false; // elapsed time not wall time 5798 info_ptr->may_skip_forward = false; // elapsed time not wall time 5799 info_ptr->kind = JVMTI_TIMER_USER_CPU; // only user time is returned 5800 } 5801 5802 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 5803 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits 5804 info_ptr->may_skip_backward = false; // elapsed time not wall time 5805 
info_ptr->may_skip_forward = false; // elapsed time not wall time 5806 info_ptr->kind = JVMTI_TIMER_USER_CPU; // only user time is returned 5807 } 5808 5809 bool os::is_thread_cpu_time_supported() { 5810 if ( os::Solaris::T2_libthread() || UseBoundThreads ) { 5811 return true; 5812 } else { 5813 return false; 5814 } 5815 } 5816 5817 // System loadavg support. Returns -1 if load average cannot be obtained. 5818 // Return the load average for our processor set if the primitive exists 5819 // (Solaris 9 and later). Otherwise just return system wide loadavg. 5820 int os::loadavg(double loadavg[], int nelem) { 5821 if (pset_getloadavg_ptr != NULL) { 5822 return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem); 5823 } else { 5824 return ::getloadavg(loadavg, nelem); 5825 } 5826 } 5827 5828 //--------------------------------------------------------------------------------- 5829 5830 static address same_page(address x, address y) { 5831 intptr_t page_bits = -os::vm_page_size(); 5832 if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits)) 5833 return x; 5834 else if (x > y) 5835 return (address)(intptr_t(y) | ~page_bits) + 1; 5836 else 5837 return (address)(intptr_t(y) & page_bits); 5838 } 5839 5840 bool os::find(address addr, outputStream* st) { 5841 Dl_info dlinfo; 5842 memset(&dlinfo, 0, sizeof(dlinfo)); 5843 if (dladdr(addr, &dlinfo)) { 5844 #ifdef _LP64 5845 st->print("0x%016lx: ", addr); 5846 #else 5847 st->print("0x%08x: ", addr); 5848 #endif 5849 if (dlinfo.dli_sname != NULL) 5850 st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr); 5851 else if (dlinfo.dli_fname) 5852 st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase); 5853 else 5854 st->print("<absolute address>"); 5855 if (dlinfo.dli_fname) st->print(" in %s", dlinfo.dli_fname); 5856 #ifdef _LP64 5857 if (dlinfo.dli_fbase) st->print(" at 0x%016lx", dlinfo.dli_fbase); 5858 #else 5859 if (dlinfo.dli_fbase) st->print(" at 0x%08x", dlinfo.dli_fbase); 5860 #endif 5861 st->cr(); 5862 
5863 if (Verbose) { 5864 // decode some bytes around the PC 5865 address begin = same_page(addr-40, addr); 5866 address end = same_page(addr+40, addr); 5867 address lowest = (address) dlinfo.dli_sname; 5868 if (!lowest) lowest = (address) dlinfo.dli_fbase; 5869 if (begin < lowest) begin = lowest; 5870 Dl_info dlinfo2; 5871 if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr 5872 && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) 5873 end = (address) dlinfo2.dli_saddr; 5874 Disassembler::decode(begin, end, st); 5875 } 5876 return true; 5877 } 5878 return false; 5879 } 5880 5881 // Following function has been added to support HotSparc's libjvm.so running 5882 // under Solaris production JDK 1.2.2 / 1.3.0. These came from 5883 // src/solaris/hpi/native_threads in the EVM codebase. 5884 // 5885 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release 5886 // libraries and should thus be removed. We will leave it behind for a while 5887 // until we no longer want to able to run on top of 1.3.0 Solaris production 5888 // JDK. See 4341971. 5889 5890 #define STACK_SLACK 0x800 5891 5892 extern "C" { 5893 intptr_t sysThreadAvailableStackWithSlack() { 5894 stack_t st; 5895 intptr_t retval, stack_top; 5896 retval = thr_stksegment(&st); 5897 assert(retval == 0, "incorrect return value from thr_stksegment"); 5898 assert((address)&st < (address)st.ss_sp, "Invalid stack base returned"); 5899 assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned"); 5900 stack_top=(intptr_t)st.ss_sp-st.ss_size; 5901 return ((intptr_t)&stack_top - stack_top - STACK_SLACK); 5902 } 5903 } 5904 5905 // Just to get the Kernel build to link on solaris for testing. 5906 5907 extern "C" { 5908 class ASGCT_CallTrace; 5909 void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext) 5910 KERNEL_RETURN; 5911 } 5912 5913 5914 // ObjectMonitor park-unpark infrastructure ... 
//
// We implement Solaris and Linux PlatformEvents with the
// obvious condvar-mutex-flag triple.
// Another alternative that works quite well is pipes:
// Each PlatformEvent consists of a pipe-pair.
// The thread associated with the PlatformEvent
// calls park(), which reads from the input end of the pipe.
// Unpark() writes into the other end of the pipe.
// The write-side of the pipe must be set NDELAY.
// Unfortunately pipes consume a large # of handles.
// Native solaris lwp_park() and lwp_unpark() work nicely, too.
// Using pipes for the 1st few threads might be workable, however.
//
// park() is permitted to return spuriously.
// Callers of park() should wrap the call to park() in
// an appropriate loop.  A litmus test for the correct
// usage of park is the following: if park() were modified
// to immediately return 0 your code should still work,
// albeit degenerating to a spin loop.
//
// An interesting optimization for park() is to use a trylock()
// to attempt to acquire the mutex.  If the trylock() fails
// then we know that a concurrent unpark() operation is in-progress.
// in that case the park() code could simply set _count to 0
// and return immediately.  The subsequent park() operation *might*
// return immediately.  That's harmless as the caller of park() is
// expected to loop.  By using trylock() we will have
// avoided a context switch caused by contention on the per-thread mutex.
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
//     objectmonitor implementation.
// 2.  Collapse the JSR166 parker event, and the
//     objectmonitor ParkEvent into a single "Event" construct.
// 3.  In park() and unpark() add:
//     assert (Thread::current() == AssociatedWith).
// 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
//     1-out-of-N park() operations will return immediately.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block
//
// _Event serves as a restricted-range semaphore.
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//
// TODO-FIXME: add DTRACE probes for:
// 1.   Tx parks
// 2.   Ty unparks Tx
// 3.   Tx resumes from park
//


// value determined through experimentation
#define ROUNDINGFIX 11

// utility to compute the abstime argument to timedwait.
// Converts a relative timeout in milliseconds into the absolute
// timestruc_t that cond_timedwait(3T) expects, clamping overly large
// timeouts (see man cond_timedwait(3T)) and compensating for the old
// Solaris _lwp_cond_timedwait round-down bug (4275818).
// TODO-FIXME: switch from compute_abstime() to unpackTime().

static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  // millis is the relative timeout time
  // abstime will be the absolute timeout time
  if (millis < 0)  millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  jlong max_wait_period;

  if (UseLWPSynchronization) {
    // forward port of fix for 4275818 (not sleeping long enough)
    // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
    // _lwp_cond_timedwait() used a round_down algorithm rather
    // than a round_up. For millis less than our roundfactor
    // it rounded down to 0 which doesn't meet the spec.
    // For millis > roundfactor we may return a bit sooner, but
    // since we can not accurately identify the patch level and
    // this has already been fixed in Solaris 9 and 8 we will
    // leave it alone rather than always rounding down.

    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
  } else {
    max_wait_period = 50000000;
  }
  millis %= 1000;
  if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
    seconds = max_wait_period;
  }
  abstime->tv_sec = now.tv_sec + seconds;
  long usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}

// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).

int os::PlatformEvent::TryPark() {
  // CAS loop: atomically swap _Event to 0 and report the prior value
  // (0 = no permit was available, 1 = a permit was consumed).
  for (;;) {
    const int v = _Event ;
    guarantee ((v == 0) || (v == 1), "invariant") ;
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v ;
  }
}

void os::PlatformEvent::park() {           // AKA: down()
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Atomically decrement _Event; if a permit was available (v == 1) we
  // pass through without blocking, otherwise (v == 0) we must block.
  int v ;
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v == 0) {
    // Do this the hard way by blocking ...
    // See http://monaco.sfbay/detail.jsf?cr=5094058.
    // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
    // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
    if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
    int status = os::Solaris::mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee (_nParked == 0, "invariant") ;
    ++ _nParked ;
    while (_Event < 0) {
      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
      // Treat this the same as if the wait was interrupted
      // With usr/lib/lwp going to kernel, always handle ETIME
      status = os::Solaris::cond_wait(_cond, _mutex);
      if (status == ETIME) status = EINTR ;
      assert_status(status == 0 || status == EINTR, status, "cond_wait");
    }
    -- _nParked ;
    _Event = 0 ;
    status = os::Solaris::mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }
}

// Timed park: returns OS_OK if a permit was consumed (or we were woken),
// OS_TIMEOUT if the wait expired without an unpark.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant") ;
  int v ;
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v != 0) return OS_OK ;

  int ret = OS_TIMEOUT;
  timestruc_t abst;
  compute_abstime (&abst, millis);

  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // For Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant") ;
  ++ _nParked ;
  while (_Event < 0) {
    // NOTE(review): this inner 'status' intentionally shadows the outer
    // declaration; the outer value is reused after the loop.
    int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == EINTR ||
                  status == ETIME || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break ;   // previous semantics
    if (status == ETIME || status == ETIMEDOUT) break ;
    // We consume and ignore EINTR and spurious wakeups.
  }
  -- _nParked ;
  if (_Event >= 0) ret = OS_OK ;
  _Event = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  return ret;
}

void os::PlatformEvent::unpark() {
  int v, AnyWaiters;

  // Increment _Event.
  // Another acceptable implementation would be to simply swap 1
  // into _Event:
  //   if (Swap (&_Event, 1) < 0) {
  //      mutex_lock (_mutex) ; AnyWaiters = nParked; mutex_unlock (_mutex) ;
  //      if (AnyWaiters) cond_signal (_cond) ;
  //   }

  for (;;) {
    v = _Event ;
    if (v > 0) {
      // The LD of _Event could have reordered or be satisfied
      // by a read-aside from this processor's write buffer.
      // To avoid problems execute a barrier and then
      // ratify the value.  A degenerate CAS() would also work.
      // Viz., CAS (v+0, &_Event, v) == v).
      OrderAccess::fence() ;
      if (_Event == v) return ;
      continue ;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
  }

  // If the thread associated with the event was parked, wake it.
  if (v < 0) {
    int status ;
    // Wait for the thread assoc with the PlatformEvent to vacate.
    status = os::Solaris::mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    AnyWaiters = _nParked ;
    status = os::Solaris::mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    guarantee (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
    if (AnyWaiters != 0) {
      // We intentional signal *after* dropping the lock
      // to avoid a common class of futile wakeups.
      status = os::Solaris::cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
  }
}

// JSR166
// -------------------------------------------------------

/*
 * The solaris and linux implementations of park/unpark are fairly
 * conservative for now, but can be improved. They currently use a
 * mutex/condvar pair, plus _counter.
 * Park decrements _counter if > 0, else does a condvar wait. Unpark
 * sets count to 1 and signals condvar. Only one thread ever waits
 * on the condvar.
Contention seen when trying to park implies that someone
 * is unparking you, so don't wait. And spurious returns are fine, so there
 * is no need to track notifications.
 */

#define MAX_SECS 100000000
/*
 * This code is common to linux and solaris and will be moved to a
 * common place in dolphin.
 *
 * The passed in time value is either a relative time in nanoseconds
 * or an absolute time in milliseconds. Either way it has to be unpacked
 * into suitable seconds and nanoseconds components and stored in the
 * given timespec structure.
 * Given time is a 64-bit value and the time_t used in the timespec is only
 * a signed-32-bit value (except on 64-bit Linux) we have to watch for
 * overflow if times way in the future are given. Further on Solaris versions
 * prior to 10 there is a restriction (see cond_timedwait) that the specified
 * number of seconds, in abstime, is less than current_time + 100,000,000.
 * As it will be 28 years before "now + 100000000" will overflow we can
 * ignore overflow and just impose a hard-limit on seconds using the value
 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
 * years from "now".
 */
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  // Hard upper bound on tv_sec (see block comment above).
  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    // 'time' is an absolute deadline in milliseconds since the epoch.
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    // 'time' is a relative timeout in nanoseconds from now.
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}

// JSR166 LockSupport.park: block the current JavaThread until a permit
// (_counter > 0) is available, the given time elapses, or the thread is
// interrupted/unparked.  'time' is interpreted per unpackTime() above;
// time < 0 (or isAbsolute with time == 0) means return immediately.
// Spurious returns are permitted.
void Parker::park(bool isAbsolute, jlong time) {

  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
      _counter = 0 ;
      OrderAccess::fence();
      return ;
  }

  // Optional fast-exit: Check interrupt before trying to wait
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // First, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
    return;
  }
  if (time > 0) {
    // Warning: this code might be exposed to the old Solaris time
    // round-down bugs.  Grep "roundingFix" for details.
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: _mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex.  If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking.  Also. check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) ||
      os::Solaris::mutex_trylock(_mutex) != 0) {
    return;
  }

  int status ;

  if (_counter > 0)  { // no wait needed
    _counter = 0;
    status = os::Solaris::mutex_unlock(_mutex);
    assert (status == 0, "invariant") ;
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // Do this the hard way by blocking ...
  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif

  if (time == 0) {
    status = os::Solaris::cond_wait (_cond, _mutex) ;
  } else {
    status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  }
  // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of the Solaris.
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
#endif
  _counter = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock") ;

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
  OrderAccess::fence();
}

// JSR166 LockSupport.unpark: make a permit available and, if the
// associated thread was waiting on the condvar, signal it (after
// dropping the lock, to avoid futile wakeups).
void Parker::unpark() {
  int s, status ;
  status = os::Solaris::mutex_lock (_mutex) ;
  assert (status == 0, "invariant") ;
  s = _counter;
  _counter = 1;
  status = os::Solaris::mutex_unlock (_mutex) ;
  assert (status == 0, "invariant") ;

  if (s < 1) {
    status = os::Solaris::cond_signal (_cond) ;
    assert (status == 0, "invariant") ;
  }
}

extern char** environ;

// Run the specified command
in a separate process. Return its exit value, 6341 // or -1 on failure (e.g. can't fork a new process). 6342 // Unlike system(), this function can be called from signal handler. It 6343 // doesn't block SIGINT et al. 6344 int os::fork_and_exec(char* cmd) { 6345 char * argv[4]; 6346 argv[0] = (char *)"sh"; 6347 argv[1] = (char *)"-c"; 6348 argv[2] = cmd; 6349 argv[3] = NULL; 6350 6351 // fork is async-safe, fork1 is not so can't use in signal handler 6352 pid_t pid; 6353 Thread* t = ThreadLocalStorage::get_thread_slow(); 6354 if (t != NULL && t->is_inside_signal_handler()) { 6355 pid = fork(); 6356 } else { 6357 pid = fork1(); 6358 } 6359 6360 if (pid < 0) { 6361 // fork failed 6362 warning("fork failed: %s", strerror(errno)); 6363 return -1; 6364 6365 } else if (pid == 0) { 6366 // child process 6367 6368 // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris 6369 execve("/usr/bin/sh", argv, environ); 6370 6371 // execve failed 6372 _exit(-1); 6373 6374 } else { 6375 // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't 6376 // care about the actual exit code, for now. 6377 6378 int status; 6379 6380 // Wait for the child process to exit. This returns immediately if 6381 // the child has already exited. */ 6382 while (waitpid(pid, &status, 0) < 0) { 6383 switch (errno) { 6384 case ECHILD: return 0; 6385 case EINTR: break; 6386 default: return -1; 6387 } 6388 } 6389 6390 if (WIFEXITED(status)) { 6391 // The child exited normally; get its exit code. 6392 return WEXITSTATUS(status); 6393 } else if (WIFSIGNALED(status)) { 6394 // The child exited because of a signal 6395 // The best value to return is 0x80 + signal number, 6396 // because that is what all Unix shells do, and because 6397 // it allows callers to distinguish between process exit and 6398 // process death by signal. 
6399 return 0x80 + WTERMSIG(status); 6400 } else { 6401 // Unknown exit code; pass it through 6402 return status; 6403 } 6404 } 6405 } 6406 6407 // is_headless_jre() 6408 // 6409 // Test for the existence of xawt/libmawt.so or libawt_xawt.so 6410 // in order to report if we are running in a headless jre 6411 // 6412 // Since JDK8 xawt/libmawt.so was moved into the same directory 6413 // as libawt.so, and renamed libawt_xawt.so 6414 // 6415 bool os::is_headless_jre() { 6416 struct stat statbuf; 6417 char buf[MAXPATHLEN]; 6418 char libmawtpath[MAXPATHLEN]; 6419 const char *xawtstr = "/xawt/libmawt.so"; 6420 const char *new_xawtstr = "/libawt_xawt.so"; 6421 char *p; 6422 6423 // Get path to libjvm.so 6424 os::jvm_path(buf, sizeof(buf)); 6425 6426 // Get rid of libjvm.so 6427 p = strrchr(buf, '/'); 6428 if (p == NULL) return false; 6429 else *p = '\0'; 6430 6431 // Get rid of client or server 6432 p = strrchr(buf, '/'); 6433 if (p == NULL) return false; 6434 else *p = '\0'; 6435 6436 // check xawt/libmawt.so 6437 strcpy(libmawtpath, buf); 6438 strcat(libmawtpath, xawtstr); 6439 if (::stat(libmawtpath, &statbuf) == 0) return false; 6440 6441 // check libawt_xawt.so 6442 strcpy(libmawtpath, buf); 6443 strcat(libmawtpath, new_xawtstr); 6444 if (::stat(libmawtpath, &statbuf) == 0) return false; 6445 6446 return true; 6447 } 6448 6449 size_t os::write(int fd, const void *buf, unsigned int nBytes) { 6450 INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted); 6451 } 6452 6453 int os::close(int fd) { 6454 RESTARTABLE_RETURN_INT(::close(fd)); 6455 } 6456 6457 int os::socket_close(int fd) { 6458 RESTARTABLE_RETURN_INT(::close(fd)); 6459 } 6460 6461 int os::recv(int fd, char* buf, size_t nBytes, uint flags) { 6462 INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted); 6463 } 6464 6465 int os::send(int fd, char* buf, size_t nBytes, uint flags) { 6466 INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), 
os::Solaris::clear_interrupted); 6467 } 6468 6469 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { 6470 RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags)); 6471 } 6472 6473 // As both poll and select can be interrupted by signals, we have to be 6474 // prepared to restart the system call after updating the timeout, unless 6475 // a poll() is done with timeout == -1, in which case we repeat with this 6476 // "wait forever" value. 6477 6478 int os::timeout(int fd, long timeout) { 6479 int res; 6480 struct timeval t; 6481 julong prevtime, newtime; 6482 static const char* aNull = 0; 6483 struct pollfd pfd; 6484 pfd.fd = fd; 6485 pfd.events = POLLIN; 6486 6487 gettimeofday(&t, &aNull); 6488 prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000; 6489 6490 for(;;) { 6491 INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted); 6492 if(res == OS_ERR && errno == EINTR) { 6493 if(timeout != -1) { 6494 gettimeofday(&t, &aNull); 6495 newtime = ((julong)t.tv_sec * 1000) + t.tv_usec /1000; 6496 timeout -= newtime - prevtime; 6497 if(timeout <= 0) 6498 return OS_OK; 6499 prevtime = newtime; 6500 } 6501 } else return res; 6502 } 6503 } 6504 6505 int os::connect(int fd, struct sockaddr *him, socklen_t len) { 6506 int _result; 6507 INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\ 6508 os::Solaris::clear_interrupted); 6509 6510 // Depending on when thread interruption is reset, _result could be 6511 // one of two values when errno == EINTR 6512 6513 if (((_result == OS_INTRPT) || (_result == OS_ERR)) 6514 && (errno == EINTR)) { 6515 /* restarting a connect() changes its errno semantics */ 6516 INTERRUPTIBLE(::connect(fd, him, len), _result,\ 6517 os::Solaris::clear_interrupted); 6518 /* undo these changes */ 6519 if (_result == OS_ERR) { 6520 if (errno == EALREADY) { 6521 errno = EINPROGRESS; /* fall through */ 6522 } else if (errno == EISCONN) { 6523 errno = 0; 6524 return OS_OK; 6525 } 6526 } 6527 } 6528 return 
_result; 6529 } 6530 6531 int os::accept(int fd, struct sockaddr* him, socklen_t* len) { 6532 if (fd < 0) { 6533 return OS_ERR; 6534 } 6535 INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\ 6536 os::Solaris::clear_interrupted); 6537 } 6538 6539 int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags, 6540 sockaddr* from, socklen_t* fromlen) { 6541 INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\ 6542 os::Solaris::clear_interrupted); 6543 } 6544 6545 int os::sendto(int fd, char* buf, size_t len, uint flags, 6546 struct sockaddr* to, socklen_t tolen) { 6547 INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\ 6548 os::Solaris::clear_interrupted); 6549 } 6550 6551 int os::socket_available(int fd, jint *pbytes) { 6552 if (fd < 0) { 6553 return OS_OK; 6554 } 6555 int ret; 6556 RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret); 6557 // note: ioctl can return 0 when successful, JVM_SocketAvailable 6558 // is expected to return 0 on failure and 1 on success to the jdk. 6559 return (ret == OS_ERR) ? 0 : 1; 6560 } 6561 6562 int os::bind(int fd, struct sockaddr* him, socklen_t len) { 6563 INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\ 6564 os::Solaris::clear_interrupted); 6565 }