1 /* 2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // no precompiled headers 26 #include "classfile/classLoader.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "classfile/vmSymbols.hpp" 29 #include "code/icBuffer.hpp" 30 #include "code/vtableStubs.hpp" 31 #include "compiler/compileBroker.hpp" 32 #include "interpreter/interpreter.hpp" 33 #include "jvm_solaris.h" 34 #include "memory/allocation.inline.hpp" 35 #include "memory/filemap.hpp" 36 #include "mutex_solaris.inline.hpp" 37 #include "oops/oop.inline.hpp" 38 #include "os_share_solaris.hpp" 39 #include "prims/jniFastGetField.hpp" 40 #include "prims/jvm.h" 41 #include "prims/jvm_misc.hpp" 42 #include "runtime/arguments.hpp" 43 #include "runtime/extendedPC.hpp" 44 #include "runtime/globals.hpp" 45 #include "runtime/interfaceSupport.hpp" 46 #include "runtime/java.hpp" 47 #include "runtime/javaCalls.hpp" 48 #include "runtime/mutexLocker.hpp" 49 #include "runtime/objectMonitor.hpp" 50 #include "runtime/osThread.hpp" 51 #include "runtime/perfMemory.hpp" 52 #include "runtime/sharedRuntime.hpp" 53 #include "runtime/statSampler.hpp" 54 #include "runtime/stubRoutines.hpp" 55 #include "runtime/threadCritical.hpp" 56 #include "runtime/timer.hpp" 57 #include "services/attachListener.hpp" 58 #include "services/runtimeService.hpp" 59 #include "thread_solaris.inline.hpp" 60 #include "utilities/decoder.hpp" 61 #include "utilities/defaultStream.hpp" 62 #include "utilities/events.hpp" 63 #include "utilities/growableArray.hpp" 64 #include "utilities/vmError.hpp" 65 #ifdef TARGET_ARCH_x86 66 # include "assembler_x86.inline.hpp" 67 # include "nativeInst_x86.hpp" 68 #endif 69 #ifdef TARGET_ARCH_sparc 70 # include "assembler_sparc.inline.hpp" 71 # include "nativeInst_sparc.hpp" 72 #endif 73 74 // put OS-includes here 75 # include <dlfcn.h> 76 # include <errno.h> 77 # include <exception> 78 # include <link.h> 79 # include <poll.h> 80 # include <pthread.h> 81 # include <pwd.h> 82 # include <schedctl.h> 83 # include <setjmp.h> 84 # include <signal.h> 85 # 
include <stdio.h> 86 # include <alloca.h> 87 # include <sys/filio.h> 88 # include <sys/ipc.h> 89 # include <sys/lwp.h> 90 # include <sys/machelf.h> // for elf Sym structure used by dladdr1 91 # include <sys/mman.h> 92 # include <sys/processor.h> 93 # include <sys/procset.h> 94 # include <sys/pset.h> 95 # include <sys/resource.h> 96 # include <sys/shm.h> 97 # include <sys/socket.h> 98 # include <sys/stat.h> 99 # include <sys/systeminfo.h> 100 # include <sys/time.h> 101 # include <sys/times.h> 102 # include <sys/types.h> 103 # include <sys/wait.h> 104 # include <sys/utsname.h> 105 # include <thread.h> 106 # include <unistd.h> 107 # include <sys/priocntl.h> 108 # include <sys/rtpriocntl.h> 109 # include <sys/tspriocntl.h> 110 # include <sys/iapriocntl.h> 111 # include <sys/fxpriocntl.h> 112 # include <sys/loadavg.h> 113 # include <string.h> 114 # include <stdio.h> 115 116 # define _STRUCTURED_PROC 1 // this gets us the new structured proc interfaces of 5.6 & later 117 # include <sys/procfs.h> // see comment in <sys/procfs.h> 118 119 #define MAX_PATH (2 * K) 120 121 // for timer info max values which include all bits 122 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 123 124 #ifdef _GNU_SOURCE 125 // See bug #6514594 126 extern "C" int madvise(caddr_t, size_t, int); 127 extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg, 128 int attr, int mask); 129 #endif //_GNU_SOURCE 130 131 /* 132 MPSS Changes Start. 133 The JVM binary needs to be built and run on pre-Solaris 9 134 systems, but the constants needed by MPSS are only in Solaris 9 135 header files. They are textually replicated here to allow 136 building on earlier systems. Once building on Solaris 8 is 137 no longer a requirement, these #defines can be replaced by ordinary 138 system .h inclusion. 139 140 In earlier versions of the JDK and Solaris, we used ISM for large pages. 141 But ISM requires shared memory to achieve this and thus has many caveats. 
142 MPSS is a fully transparent and is a cleaner way to get large pages. 143 Although we still require keeping ISM for backward compatiblitiy as well as 144 giving the opportunity to use large pages on older systems it is 145 recommended that MPSS be used for Solaris 9 and above. 146 147 */ 148 149 #ifndef MC_HAT_ADVISE 150 151 struct memcntl_mha { 152 uint_t mha_cmd; /* command(s) */ 153 uint_t mha_flags; 154 size_t mha_pagesize; 155 }; 156 #define MC_HAT_ADVISE 7 /* advise hat map size */ 157 #define MHA_MAPSIZE_VA 0x1 /* set preferred page size */ 158 #define MAP_ALIGN 0x200 /* addr specifies alignment */ 159 160 #endif 161 // MPSS Changes End. 162 163 164 // Here are some liblgrp types from sys/lgrp_user.h to be able to 165 // compile on older systems without this header file. 166 167 #ifndef MADV_ACCESS_LWP 168 # define MADV_ACCESS_LWP 7 /* next LWP to access heavily */ 169 #endif 170 #ifndef MADV_ACCESS_MANY 171 # define MADV_ACCESS_MANY 8 /* many processes to access heavily */ 172 #endif 173 174 #ifndef LGRP_RSRC_CPU 175 # define LGRP_RSRC_CPU 0 /* CPU resources */ 176 #endif 177 #ifndef LGRP_RSRC_MEM 178 # define LGRP_RSRC_MEM 1 /* memory resources */ 179 #endif 180 181 // Some more macros from sys/mman.h that are not present in Solaris 8. 182 183 #ifndef MAX_MEMINFO_CNT 184 /* 185 * info_req request type definitions for meminfo 186 * request types starting with MEMINFO_V are used for Virtual addresses 187 * and should not be mixed with MEMINFO_PLGRP which is targeted for Physical 188 * addresses 189 */ 190 # define MEMINFO_SHIFT 16 191 # define MEMINFO_MASK (0xFF << MEMINFO_SHIFT) 192 # define MEMINFO_VPHYSICAL (0x01 << MEMINFO_SHIFT) /* get physical addr */ 193 # define MEMINFO_VLGRP (0x02 << MEMINFO_SHIFT) /* get lgroup */ 194 # define MEMINFO_VPAGESIZE (0x03 << MEMINFO_SHIFT) /* size of phys page */ 195 # define MEMINFO_VREPLCNT (0x04 << MEMINFO_SHIFT) /* no. 
of replica */ 196 # define MEMINFO_VREPL (0x05 << MEMINFO_SHIFT) /* physical replica */ 197 # define MEMINFO_VREPL_LGRP (0x06 << MEMINFO_SHIFT) /* lgrp of replica */ 198 # define MEMINFO_PLGRP (0x07 << MEMINFO_SHIFT) /* lgroup for paddr */ 199 200 /* maximum number of addresses meminfo() can process at a time */ 201 # define MAX_MEMINFO_CNT 256 202 203 /* maximum number of request types */ 204 # define MAX_MEMINFO_REQ 31 205 #endif 206 207 // see thr_setprio(3T) for the basis of these numbers 208 #define MinimumPriority 0 209 #define NormalPriority 64 210 #define MaximumPriority 127 211 212 // Values for ThreadPriorityPolicy == 1 213 int prio_policy1[CriticalPriority+1] = { 214 -99999, 0, 16, 32, 48, 64, 215 80, 96, 112, 124, 127, 127 }; 216 217 // System parameters used internally 218 static clock_t clock_tics_per_sec = 100; 219 220 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+) 221 static bool enabled_extended_FILE_stdio = false; 222 223 // For diagnostics to print a message once. 
see run_periodic_checks 224 static bool check_addr0_done = false; 225 static sigset_t check_signal_done; 226 static bool check_signals = true; 227 228 address os::Solaris::handler_start; // start pc of thr_sighndlrinfo 229 address os::Solaris::handler_end; // end pc of thr_sighndlrinfo 230 231 address os::Solaris::_main_stack_base = NULL; // 4352906 workaround 232 233 234 // "default" initializers for missing libc APIs 235 extern "C" { 236 static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; } 237 static int lwp_mutex_destroy(mutex_t *mx) { return 0; } 238 239 static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; } 240 static int lwp_cond_destroy(cond_t *cv) { return 0; } 241 } 242 243 // "default" initializers for pthread-based synchronization 244 extern "C" { 245 static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; } 246 static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; } 247 } 248 249 // Thread Local Storage 250 // This is common to all Solaris platforms so it is defined here, 251 // in this common file. 252 // The declarations are in the os_cpu threadLS*.hpp files. 
253 // 254 // Static member initialization for TLS 255 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL}; 256 257 #ifndef PRODUCT 258 #define _PCT(n,d) ((100.0*(double)(n))/(double)(d)) 259 260 int ThreadLocalStorage::_tcacheHit = 0; 261 int ThreadLocalStorage::_tcacheMiss = 0; 262 263 void ThreadLocalStorage::print_statistics() { 264 int total = _tcacheMiss+_tcacheHit; 265 tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n", 266 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total)); 267 } 268 #undef _PCT 269 #endif // PRODUCT 270 271 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id, 272 int index) { 273 Thread *thread = get_thread_slow(); 274 if (thread != NULL) { 275 address sp = os::current_stack_pointer(); 276 guarantee(thread->_stack_base == NULL || 277 (sp <= thread->_stack_base && 278 sp >= thread->_stack_base - thread->_stack_size) || 279 is_error_reported(), 280 "sp must be inside of selected thread stack"); 281 282 thread->set_self_raw_id(raw_id); // mark for quick retrieval 283 _get_thread_cache[ index ] = thread; 284 } 285 return thread; 286 } 287 288 289 static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0}; 290 #define NO_CACHED_THREAD ((Thread*)all_zero) 291 292 void ThreadLocalStorage::pd_set_thread(Thread* thread) { 293 294 // Store the new value before updating the cache to prevent a race 295 // between get_thread_via_cache_slowly() and this store operation. 296 os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread); 297 298 // Update thread cache with new thread if setting on thread create, 299 // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit. 300 uintptr_t raw = pd_raw_thread_id(); 301 int ix = pd_cache_index(raw); 302 _get_thread_cache[ix] = thread == NULL ? 
NO_CACHED_THREAD : thread; 303 } 304 305 void ThreadLocalStorage::pd_init() { 306 for (int i = 0; i < _pd_cache_size; i++) { 307 _get_thread_cache[i] = NO_CACHED_THREAD; 308 } 309 } 310 311 // Invalidate all the caches (happens to be the same as pd_init). 312 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); } 313 314 #undef NO_CACHED_THREAD 315 316 // END Thread Local Storage 317 318 static inline size_t adjust_stack_size(address base, size_t size) { 319 if ((ssize_t)size < 0) { 320 // 4759953: Compensate for ridiculous stack size. 321 size = max_intx; 322 } 323 if (size > (size_t)base) { 324 // 4812466: Make sure size doesn't allow the stack to wrap the address space. 325 size = (size_t)base; 326 } 327 return size; 328 } 329 330 static inline stack_t get_stack_info() { 331 stack_t st; 332 int retval = thr_stksegment(&st); 333 st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size); 334 assert(retval == 0, "incorrect return value from thr_stksegment"); 335 assert((address)&st < (address)st.ss_sp, "Invalid stack base returned"); 336 assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned"); 337 return st; 338 } 339 340 address os::current_stack_base() { 341 int r = thr_main() ; 342 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 343 bool is_primordial_thread = r; 344 345 // Workaround 4352906, avoid calls to thr_stksegment by 346 // thr_main after the first one (it looks like we trash 347 // some data, causing the value for ss_sp to be incorrect). 
348 if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) { 349 stack_t st = get_stack_info(); 350 if (is_primordial_thread) { 351 // cache initial value of stack base 352 os::Solaris::_main_stack_base = (address)st.ss_sp; 353 } 354 return (address)st.ss_sp; 355 } else { 356 guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base"); 357 return os::Solaris::_main_stack_base; 358 } 359 } 360 361 size_t os::current_stack_size() { 362 size_t size; 363 364 int r = thr_main() ; 365 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 366 if(!r) { 367 size = get_stack_info().ss_size; 368 } else { 369 struct rlimit limits; 370 getrlimit(RLIMIT_STACK, &limits); 371 size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur); 372 } 373 // base may not be page aligned 374 address base = current_stack_base(); 375 address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());; 376 return (size_t)(base - bottom); 377 } 378 379 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 380 return localtime_r(clock, res); 381 } 382 383 // interruptible infrastructure 384 385 // setup_interruptible saves the thread state before going into an 386 // interruptible system call. 387 // The saved state is used to restore the thread to 388 // its former state whether or not an interrupt is received. 
389 // Used by classloader os::read 390 // os::restartable_read calls skip this layer and stay in _thread_in_native 391 392 void os::Solaris::setup_interruptible(JavaThread* thread) { 393 394 JavaThreadState thread_state = thread->thread_state(); 395 396 assert(thread_state != _thread_blocked, "Coming from the wrong thread"); 397 assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible"); 398 OSThread* osthread = thread->osthread(); 399 osthread->set_saved_interrupt_thread_state(thread_state); 400 thread->frame_anchor()->make_walkable(thread); 401 ThreadStateTransition::transition(thread, thread_state, _thread_blocked); 402 } 403 404 // Version of setup_interruptible() for threads that are already in 405 // _thread_blocked. Used by os_sleep(). 406 void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) { 407 thread->frame_anchor()->make_walkable(thread); 408 } 409 410 JavaThread* os::Solaris::setup_interruptible() { 411 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread(); 412 setup_interruptible(thread); 413 return thread; 414 } 415 416 void os::Solaris::try_enable_extended_io() { 417 typedef int (*enable_extended_FILE_stdio_t)(int, int); 418 419 if (!UseExtendedFileIO) { 420 return; 421 } 422 423 enable_extended_FILE_stdio_t enabler = 424 (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT, 425 "enable_extended_FILE_stdio"); 426 if (enabler) { 427 enabler(-1, -1); 428 } 429 } 430 431 432 #ifdef ASSERT 433 434 JavaThread* os::Solaris::setup_interruptible_native() { 435 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread(); 436 JavaThreadState thread_state = thread->thread_state(); 437 assert(thread_state == _thread_in_native, "Assumed thread_in_native"); 438 return thread; 439 } 440 441 void os::Solaris::cleanup_interruptible_native(JavaThread* thread) { 442 JavaThreadState thread_state = thread->thread_state(); 443 assert(thread_state == _thread_in_native, "Assumed thread_in_native"); 444 } 445 #endif 
446 447 // cleanup_interruptible reverses the effects of setup_interruptible 448 // setup_interruptible_already_blocked() does not need any cleanup. 449 450 void os::Solaris::cleanup_interruptible(JavaThread* thread) { 451 OSThread* osthread = thread->osthread(); 452 453 ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state()); 454 } 455 456 // I/O interruption related counters called in _INTERRUPTIBLE 457 458 void os::Solaris::bump_interrupted_before_count() { 459 RuntimeService::record_interrupted_before_count(); 460 } 461 462 void os::Solaris::bump_interrupted_during_count() { 463 RuntimeService::record_interrupted_during_count(); 464 } 465 466 static int _processors_online = 0; 467 468 jint os::Solaris::_os_thread_limit = 0; 469 volatile jint os::Solaris::_os_thread_count = 0; 470 471 julong os::available_memory() { 472 return Solaris::available_memory(); 473 } 474 475 julong os::Solaris::available_memory() { 476 return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size(); 477 } 478 479 julong os::Solaris::_physical_memory = 0; 480 481 julong os::physical_memory() { 482 return Solaris::physical_memory(); 483 } 484 485 julong os::allocatable_physical_memory(julong size) { 486 #ifdef _LP64 487 return size; 488 #else 489 julong result = MIN2(size, (julong)3835*M); 490 if (!is_allocatable(result)) { 491 // Memory allocations will be aligned but the alignment 492 // is not known at this point. Alignments will 493 // be at most to LargePageSizeInBytes. Protect 494 // allocations from alignments up to illegal 495 // values. If at this point 2G is illegal. 
496 julong reasonable_size = (julong)2*G - 2 * LargePageSizeInBytes; 497 result = MIN2(size, reasonable_size); 498 } 499 return result; 500 #endif 501 } 502 503 static hrtime_t first_hrtime = 0; 504 static const hrtime_t hrtime_hz = 1000*1000*1000; 505 const int LOCK_BUSY = 1; 506 const int LOCK_FREE = 0; 507 const int LOCK_INVALID = -1; 508 static volatile hrtime_t max_hrtime = 0; 509 static volatile int max_hrtime_lock = LOCK_FREE; // Update counter with LSB as lock-in-progress 510 511 512 void os::Solaris::initialize_system_info() { 513 set_processor_count(sysconf(_SC_NPROCESSORS_CONF)); 514 _processors_online = sysconf (_SC_NPROCESSORS_ONLN); 515 _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE); 516 } 517 518 int os::active_processor_count() { 519 int online_cpus = sysconf(_SC_NPROCESSORS_ONLN); 520 pid_t pid = getpid(); 521 psetid_t pset = PS_NONE; 522 // Are we running in a processor set or is there any processor set around? 523 if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) { 524 uint_t pset_cpus; 525 // Query the number of cpus available to us. 526 if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) { 527 assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check"); 528 _processors_online = pset_cpus; 529 return pset_cpus; 530 } 531 } 532 // Otherwise return number of online cpus 533 return online_cpus; 534 } 535 536 static bool find_processors_in_pset(psetid_t pset, 537 processorid_t** id_array, 538 uint_t* id_length) { 539 bool result = false; 540 // Find the number of processors in the processor set. 541 if (pset_info(pset, NULL, id_length, NULL) == 0) { 542 // Make up an array to hold their ids. 543 *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal); 544 // Fill in the array with their processor ids. 
545 if (pset_info(pset, NULL, id_length, *id_array) == 0) { 546 result = true; 547 } 548 } 549 return result; 550 } 551 552 // Callers of find_processors_online() must tolerate imprecise results -- 553 // the system configuration can change asynchronously because of DR 554 // or explicit psradm operations. 555 // 556 // We also need to take care that the loop (below) terminates as the 557 // number of processors online can change between the _SC_NPROCESSORS_ONLN 558 // request and the loop that builds the list of processor ids. Unfortunately 559 // there's no reliable way to determine the maximum valid processor id, 560 // so we use a manifest constant, MAX_PROCESSOR_ID, instead. See p_online 561 // man pages, which claim the processor id set is "sparse, but 562 // not too sparse". MAX_PROCESSOR_ID is used to ensure that we eventually 563 // exit the loop. 564 // 565 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's 566 // not available on S8.0. 567 568 static bool find_processors_online(processorid_t** id_array, 569 uint* id_length) { 570 const processorid_t MAX_PROCESSOR_ID = 100000 ; 571 // Find the number of processors online. 572 *id_length = sysconf(_SC_NPROCESSORS_ONLN); 573 // Make up an array to hold their ids. 574 *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal); 575 // Processors need not be numbered consecutively. 576 long found = 0; 577 processorid_t next = 0; 578 while (found < *id_length && next < MAX_PROCESSOR_ID) { 579 processor_info_t info; 580 if (processor_info(next, &info) == 0) { 581 // NB, PI_NOINTR processors are effectively online ... 582 if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) { 583 (*id_array)[found] = next; 584 found += 1; 585 } 586 } 587 next += 1; 588 } 589 if (found < *id_length) { 590 // The loop above didn't identify the expected number of processors. 
591 // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN) 592 // and re-running the loop, above, but there's no guarantee of progress 593 // if the system configuration is in flux. Instead, we just return what 594 // we've got. Note that in the worst case find_processors_online() could 595 // return an empty set. (As a fall-back in the case of the empty set we 596 // could just return the ID of the current processor). 597 *id_length = found ; 598 } 599 600 return true; 601 } 602 603 static bool assign_distribution(processorid_t* id_array, 604 uint id_length, 605 uint* distribution, 606 uint distribution_length) { 607 // We assume we can assign processorid_t's to uint's. 608 assert(sizeof(processorid_t) == sizeof(uint), 609 "can't convert processorid_t to uint"); 610 // Quick check to see if we won't succeed. 611 if (id_length < distribution_length) { 612 return false; 613 } 614 // Assign processor ids to the distribution. 615 // Try to shuffle processors to distribute work across boards, 616 // assuming 4 processors per board. 617 const uint processors_per_board = ProcessDistributionStride; 618 // Find the maximum processor id. 619 processorid_t max_id = 0; 620 for (uint m = 0; m < id_length; m += 1) { 621 max_id = MAX2(max_id, id_array[m]); 622 } 623 // The next id, to limit loops. 624 const processorid_t limit_id = max_id + 1; 625 // Make up markers for available processors. 626 bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal); 627 for (uint c = 0; c < limit_id; c += 1) { 628 available_id[c] = false; 629 } 630 for (uint a = 0; a < id_length; a += 1) { 631 available_id[id_array[a]] = true; 632 } 633 // Step by "boards", then by "slot", copying to "assigned". 634 // NEEDS_CLEANUP: The assignment of processors should be stateful, 635 // remembering which processors have been assigned by 636 // previous calls, etc., so as to distribute several 637 // independent calls of this method. 
What we'd like is 638 // It would be nice to have an API that let us ask 639 // how many processes are bound to a processor, 640 // but we don't have that, either. 641 // In the short term, "board" is static so that 642 // subsequent distributions don't all start at board 0. 643 static uint board = 0; 644 uint assigned = 0; 645 // Until we've found enough processors .... 646 while (assigned < distribution_length) { 647 // ... find the next available processor in the board. 648 for (uint slot = 0; slot < processors_per_board; slot += 1) { 649 uint try_id = board * processors_per_board + slot; 650 if ((try_id < limit_id) && (available_id[try_id] == true)) { 651 distribution[assigned] = try_id; 652 available_id[try_id] = false; 653 assigned += 1; 654 break; 655 } 656 } 657 board += 1; 658 if (board * processors_per_board + 0 >= limit_id) { 659 board = 0; 660 } 661 } 662 if (available_id != NULL) { 663 FREE_C_HEAP_ARRAY(bool, available_id, mtInternal); 664 } 665 return true; 666 } 667 668 void os::set_native_thread_name(const char *name) { 669 // Not yet implemented. 670 return; 671 } 672 673 bool os::distribute_processes(uint length, uint* distribution) { 674 bool result = false; 675 // Find the processor id's of all the available CPUs. 676 processorid_t* id_array = NULL; 677 uint id_length = 0; 678 // There are some races between querying information and using it, 679 // since processor sets can change dynamically. 680 psetid_t pset = PS_NONE; 681 // Are we running in a processor set? 
682 if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) { 683 result = find_processors_in_pset(pset, &id_array, &id_length); 684 } else { 685 result = find_processors_online(&id_array, &id_length); 686 } 687 if (result == true) { 688 if (id_length >= length) { 689 result = assign_distribution(id_array, id_length, distribution, length); 690 } else { 691 result = false; 692 } 693 } 694 if (id_array != NULL) { 695 FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal); 696 } 697 return result; 698 } 699 700 bool os::bind_to_processor(uint processor_id) { 701 // We assume that a processorid_t can be stored in a uint. 702 assert(sizeof(uint) == sizeof(processorid_t), 703 "can't convert uint to processorid_t"); 704 int bind_result = 705 processor_bind(P_LWPID, // bind LWP. 706 P_MYID, // bind current LWP. 707 (processorid_t) processor_id, // id. 708 NULL); // don't return old binding. 709 return (bind_result == 0); 710 } 711 712 bool os::getenv(const char* name, char* buffer, int len) { 713 char* val = ::getenv( name ); 714 if ( val == NULL 715 || strlen(val) + 1 > len ) { 716 if (len > 0) buffer[0] = 0; // return a null string 717 return false; 718 } 719 strcpy( buffer, val ); 720 return true; 721 } 722 723 724 // Return true if user is running as root. 725 726 bool os::have_special_privileges() { 727 static bool init = false; 728 static bool privileges = false; 729 if (!init) { 730 privileges = (getuid() != geteuid()) || (getgid() != getegid()); 731 init = true; 732 } 733 return privileges; 734 } 735 736 737 void os::init_system_properties_values() { 738 char arch[12]; 739 sysinfo(SI_ARCHITECTURE, arch, sizeof(arch)); 740 741 // The next steps are taken in the product version: 742 // 743 // Obtain the JAVA_HOME value from the location of libjvm[_g].so. 744 // This library should be located at: 745 // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm[_g].so. 
746 // 747 // If "/jre/lib/" appears at the right place in the path, then we 748 // assume libjvm[_g].so is installed in a JDK and we use this path. 749 // 750 // Otherwise exit with message: "Could not create the Java virtual machine." 751 // 752 // The following extra steps are taken in the debugging version: 753 // 754 // If "/jre/lib/" does NOT appear at the right place in the path 755 // instead of exit check for $JAVA_HOME environment variable. 756 // 757 // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>, 758 // then we append a fake suffix "hotspot/libjvm[_g].so" to this path so 759 // it looks like libjvm[_g].so is installed there 760 // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm[_g].so. 761 // 762 // Otherwise exit. 763 // 764 // Important note: if the location of libjvm.so changes this 765 // code needs to be changed accordingly. 766 767 // The next few definitions allow the code to be verbatim: 768 #define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal) 769 #define free(p) FREE_C_HEAP_ARRAY(char, p, mtInternal) 770 #define getenv(n) ::getenv(n) 771 772 #define EXTENSIONS_DIR "/lib/ext" 773 #define ENDORSED_DIR "/lib/endorsed" 774 #define COMMON_DIR "/usr/jdk/packages" 775 776 { 777 /* sysclasspath, java_home, dll_dir */ 778 { 779 char *home_path; 780 char *dll_path; 781 char *pslash; 782 char buf[MAXPATHLEN]; 783 os::jvm_path(buf, sizeof(buf)); 784 785 // Found the full path to libjvm.so. 786 // Now cut the path to <java_home>/jre if we can. 
787 *(strrchr(buf, '/')) = '\0'; /* get rid of /libjvm.so */ 788 pslash = strrchr(buf, '/'); 789 if (pslash != NULL) 790 *pslash = '\0'; /* get rid of /{client|server|hotspot} */ 791 dll_path = malloc(strlen(buf) + 1); 792 if (dll_path == NULL) 793 return; 794 strcpy(dll_path, buf); 795 Arguments::set_dll_dir(dll_path); 796 797 if (pslash != NULL) { 798 pslash = strrchr(buf, '/'); 799 if (pslash != NULL) { 800 *pslash = '\0'; /* get rid of /<arch> */ 801 pslash = strrchr(buf, '/'); 802 if (pslash != NULL) 803 *pslash = '\0'; /* get rid of /lib */ 804 } 805 } 806 807 home_path = malloc(strlen(buf) + 1); 808 if (home_path == NULL) 809 return; 810 strcpy(home_path, buf); 811 Arguments::set_java_home(home_path); 812 813 if (!set_boot_path('/', ':')) 814 return; 815 } 816 817 /* 818 * Where to look for native libraries 819 */ 820 { 821 // Use dlinfo() to determine the correct java.library.path. 822 // 823 // If we're launched by the Java launcher, and the user 824 // does not set java.library.path explicitly on the commandline, 825 // the Java launcher sets LD_LIBRARY_PATH for us and unsets 826 // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case 827 // dlinfo returns LD_LIBRARY_PATH + crle settings (including 828 // /usr/lib), which is exactly what we want. 829 // 830 // If the user does set java.library.path, it completely 831 // overwrites this setting, and always has. 832 // 833 // If we're not launched by the Java launcher, we may 834 // get here with any/all of the LD_LIBRARY_PATH[_32|64] 835 // settings. Again, dlinfo does exactly what we want. 
  // (Interior of os::init_system_properties_values: derives the invariant
  // part of java.library.path from the runtime linker's search path,
  // obtained via dlinfo(3C).)
  Dl_serinfo _info, *info = &_info;
  Dl_serpath *path;
  char* library_path;
  char *common_path;
  int i;

  // determine search path count and required buffer size
  if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
    vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
  }

  // allocate new buffer and initialize
  // NOTE: malloc/free/getenv are macros remapped earlier in this file
  // (see the #undef block at the end of the function).
  info = (Dl_serinfo*)malloc(_info.dls_size);
  if (info == NULL) {
    vm_exit_out_of_memory(_info.dls_size,
                          "init_system_properties_values info");
  }
  info->dls_size = _info.dls_size;
  info->dls_cnt = _info.dls_cnt;

  // obtain search path information
  if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
    free(info);
    vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
  }

  path = &info->dls_serpath[0];

  // Note: Due to a legacy implementation, most of the library path
  // is set in the launcher. This was to accomodate linking restrictions
  // on legacy Solaris implementations (which are no longer supported).
  // Eventually, all the library path setting will be done here.
  //
  // However, to prevent the proliferation of improperly built native
  // libraries, the new path component /usr/jdk/packages is added here.

  // Determine the actual CPU architecture.
  // NOTE(review): 12 bytes covers every name handled below ("sparc"+"v9",
  // "amd64") including the terminating null -- confirm if new arches appear.
  char cpu_arch[12];
  sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
  // If we are a 64-bit vm, perform the following translations:
  //   sparc -> sparcv9
  //   i386  -> amd64
  if (strcmp(cpu_arch, "sparc") == 0)
    strcat(cpu_arch, "v9");
  else if (strcmp(cpu_arch, "i386") == 0)
    strcpy(cpu_arch, "amd64");
#endif

  // Construct the invariant part of ld_library_path. Note that the
  // space for the colon and the trailing null are provided by the
  // nulls included by the sizeof operator.
  size_t bufsize = sizeof(COMMON_DIR) + sizeof("/lib/") + strlen(cpu_arch);
  common_path = malloc(bufsize);
  if (common_path == NULL) {
    free(info);   // release the dlinfo buffer before exiting
    vm_exit_out_of_memory(bufsize,
                          "init_system_properties_values common_path");
  }
  sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch);

  // struct size is more than sufficient for the path components obtained
  // through the dlinfo() call, so only add additional space for the path
  // components explicitly added here.
  bufsize = info->dls_size + strlen(common_path);
  library_path = malloc(bufsize);
  if (library_path == NULL) {
    free(info);
    free(common_path);
    vm_exit_out_of_memory(bufsize,
                          "init_system_properties_values library_path");
  }
  library_path[0] = '\0';

  // Construct the desired Java library path from the linker's library
  // search path.
  //
  // For compatibility, it is optimal that we insert the additional path
  // components specific to the Java VM after those components specified
  // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
  // infrastructure.
*/ 965 { 966 char * buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR)); 967 sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home()); 968 Arguments::set_endorsed_dirs(buf); 969 } 970 } 971 972 #undef malloc 973 #undef free 974 #undef getenv 975 #undef EXTENSIONS_DIR 976 #undef ENDORSED_DIR 977 #undef COMMON_DIR 978 979 } 980 981 void os::breakpoint() { 982 BREAKPOINT; 983 } 984 985 bool os::obsolete_option(const JavaVMOption *option) 986 { 987 if (!strncmp(option->optionString, "-Xt", 3)) { 988 return true; 989 } else if (!strncmp(option->optionString, "-Xtm", 4)) { 990 return true; 991 } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) { 992 return true; 993 } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) { 994 return true; 995 } 996 return false; 997 } 998 999 bool os::Solaris::valid_stack_address(Thread* thread, address sp) { 1000 address stackStart = (address)thread->stack_base(); 1001 address stackEnd = (address)(stackStart - (address)thread->stack_size()); 1002 if (sp < stackStart && sp >= stackEnd ) return true; 1003 return false; 1004 } 1005 1006 extern "C" void breakpoint() { 1007 // use debugger to set breakpoint here 1008 } 1009 1010 static thread_t main_thread; 1011 1012 // Thread start routine for all new Java threads 1013 extern "C" void* java_start(void* thread_addr) { 1014 // Try to randomize the cache line index of hot stack frames. 1015 // This helps when threads of the same stack traces evict each other's 1016 // cache lines. The threads can be either from the same JVM instance, or 1017 // from different JVM instances. The benefit is especially true for 1018 // processors with hyperthreading technology. 
1019 static int counter = 0; 1020 int pid = os::current_process_id(); 1021 alloca(((pid ^ counter++) & 7) * 128); 1022 1023 int prio; 1024 Thread* thread = (Thread*)thread_addr; 1025 OSThread* osthr = thread->osthread(); 1026 1027 osthr->set_lwp_id( _lwp_self() ); // Store lwp in case we are bound 1028 thread->_schedctl = (void *) schedctl_init () ; 1029 1030 if (UseNUMA) { 1031 int lgrp_id = os::numa_get_group_id(); 1032 if (lgrp_id != -1) { 1033 thread->set_lgrp_id(lgrp_id); 1034 } 1035 } 1036 1037 // If the creator called set priority before we started, 1038 // we need to call set_native_priority now that we have an lwp. 1039 // We used to get the priority from thr_getprio (we called 1040 // thr_setprio way back in create_thread) and pass it to 1041 // set_native_priority, but Solaris scales the priority 1042 // in java_to_os_priority, so when we read it back here, 1043 // we pass trash to set_native_priority instead of what's 1044 // in java_to_os_priority. So we save the native priority 1045 // in the osThread and recall it here. 
1046 1047 if ( osthr->thread_id() != -1 ) { 1048 if ( UseThreadPriorities ) { 1049 int prio = osthr->native_priority(); 1050 if (ThreadPriorityVerbose) { 1051 tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is " 1052 INTPTR_FORMAT ", setting priority: %d\n", 1053 osthr->thread_id(), osthr->lwp_id(), prio); 1054 } 1055 os::set_native_priority(thread, prio); 1056 } 1057 } else if (ThreadPriorityVerbose) { 1058 warning("Can't set priority in _start routine, thread id hasn't been set\n"); 1059 } 1060 1061 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 1062 1063 // initialize signal mask for this thread 1064 os::Solaris::hotspot_sigmask(thread); 1065 1066 thread->run(); 1067 1068 // One less thread is executing 1069 // When the VMThread gets here, the main thread may have already exited 1070 // which frees the CodeHeap containing the Atomic::dec code 1071 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 1072 Atomic::dec(&os::Solaris::_os_thread_count); 1073 } 1074 1075 if (UseDetachedThreads) { 1076 thr_exit(NULL); 1077 ShouldNotReachHere(); 1078 } 1079 return NULL; 1080 } 1081 1082 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) { 1083 // Allocate the OSThread object 1084 OSThread* osthread = new OSThread(NULL, NULL); 1085 if (osthread == NULL) return NULL; 1086 1087 // Store info on the Solaris thread into the OSThread 1088 osthread->set_thread_id(thread_id); 1089 osthread->set_lwp_id(_lwp_self()); 1090 thread->_schedctl = (void *) schedctl_init () ; 1091 1092 if (UseNUMA) { 1093 int lgrp_id = os::numa_get_group_id(); 1094 if (lgrp_id != -1) { 1095 thread->set_lgrp_id(lgrp_id); 1096 } 1097 } 1098 1099 if ( ThreadPriorityVerbose ) { 1100 tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n", 1101 osthread->thread_id(), osthread->lwp_id() ); 1102 } 1103 1104 // Initial thread state is INITIALIZED, not SUSPENDED 1105 osthread->set_state(INITIALIZED); 1106 1107 
  return osthread;
}

// Establish the HotSpot signal mask on the current thread, after saving
// the caller's mask so it can be restored in os::free_thread().
void os::Solaris::hotspot_sigmask(Thread* thread) {

  //Save caller's signal mask
  sigset_t sigmask;
  thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
  OSThread *osthread = thread->osthread();
  osthread->set_caller_sigmask(sigmask);

  thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
      thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Wrap a natively-attached thread (JNI AttachCurrentThread) in an OSThread.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  OSThread* osthread = create_os_thread(thread, thr_self());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);
  thread->set_osthread(osthread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// Wrap the primordial thread in an OSThread (created at most once;
// _starting_thread is reused if already set).
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// _T2_libthread is true if we believe we are running with the newer
// SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
bool os::Solaris::_T2_libthread = false;

// Create a new native thread for 'thread' of kind 'thr_type'; returns the
// thread suspended (state INITIALIZED) -- started later via pd_start_thread.
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  if ( ThreadPriorityVerbose ) {
    char *thrtyp;
    switch ( thr_type ) {
      case vm_thread:
        thrtyp = (char *)"vm";
        break;
      case cgc_thread:
        thrtyp = (char *)"cgc";
        break;
      case pgc_thread:
        thrtyp = (char *)"pgc";
        break;
      case java_thread:
        thrtyp = (char *)"java";
        break;
      case compiler_thread:
        thrtyp = (char *)"compiler";
        break;
      case watcher_thread:
        thrtyp = (char *)"watcher";
        break;
      default:
        thrtyp = (char *)"unknown";
        break;
    }
    tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  }

  // Calculate stack size if it's not specified by caller.
  if (stack_size == 0) {
    // The default stack size 1M (2M for LP64).
    // BytesPerWord is 4 (ILP32) or 8 (LP64), so this yields 1M or 2M.
    stack_size = (BytesPerWord >> 2) * K * K;

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }
  stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We got lots of threads. Check if we still have some address space left.
    // Need to be at least 5Mb of unreserved address space. We do check by
    // trying to reserve some.
    // NOTE(review): the bang size below is actually 20Mb, not the 5Mb the
    // comment above mentions -- confirm which value is intended.
    const size_t VirtualMemoryBangSize = 20*K*K;
    char* mem = os::reserve_memory(VirtualMemoryBangSize);
    if (mem == NULL) {
      delete osthread;
      return false;
    } else {
      // Release the memory again
      os::release_memory(mem, VirtualMemoryBangSize);
    }
  }

  // Setup osthread because the child thread may need it.
  thread->set_osthread(osthread);

  // Create the Solaris thread
  // explicit THR_BOUND for T2_libthread case in case
  // that assumption is not accurate, but our alternate signal stack
  // handling is based on it which must have bound threads
  thread_t tid = 0;
  long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
               | ((UseBoundThreads || os::Solaris::T2_libthread() ||
                   (thr_type == vm_thread) ||
                   (thr_type == cgc_thread) ||
                   (thr_type == pgc_thread) ||
                   (thr_type == compiler_thread && BackgroundCompilation)) ?
                  THR_BOUND : 0);
  int status;

  // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
  //
  // On multiprocessors systems, libthread sometimes under-provisions our
  // process with LWPs. On a 30-way systems, for instance, we could have
  // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
  // to our process. This can result in under utilization of PEs.
  // I suspect the problem is related to libthread's LWP
  // pool management and to the kernel's SIGBLOCKING "last LWP parked"
  // upcall policy.
  //
  // The following code is palliative -- it attempts to ensure that our
  // process has sufficient LWPs to take advantage of multiple PEs.
  // Proper long-term cures include using user-level threads bound to LWPs
  // (THR_BOUND) or using LWP-based synchronization. Note that there is a
  // slight timing window with respect to sampling _os_thread_count, but
  // the race is benign. Also, we should periodically recompute
  // _processors_online as the min of SC_NPROCESSORS_ONLN and the
  // the number of PEs in our partition. You might be tempted to use
  // THR_NEW_LWP here, but I'd recommend against it as that could
  // result in undesirable growth of the libthread's LWP pool.
  // The fix below isn't sufficient; for instance, it doesn't take into count
  // LWPs parked on IO. It does, however, help certain CPU-bound benchmarks.
  //
  // Some pathologies this scheme doesn't handle:
  // * Threads can block, releasing the LWPs. The LWPs can age out.
  //   When a large number of threads become ready again there aren't
  //   enough LWPs available to service them. This can occur when the
  //   number of ready threads oscillates.
  // * LWPs/Threads park on IO, thus taking the LWP out of circulation.
  //
  // Finally, we should call thr_setconcurrency() periodically to refresh
  // the LWP pool and thwart the LWP age-out mechanism.
  // The "+3" term provides a little slop -- we want to slightly overprovision.

  if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
    if (!(flags & THR_BOUND)) {
      thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
    }
  }
  // Although this doesn't hurt, we should warn of undefined behavior
  // when using unbound T1 threads with schedctl(). This should never
  // happen, as the compiler and VM threads are always created bound
  DEBUG_ONLY(
    if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
        (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
        ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
         (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
      warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
    }
  );


  // Mark that we don't have an lwp or thread id yet.
  // In case we attempt to set the priority before the thread starts.
  osthread->set_lwp_id(-1);
  osthread->set_thread_id(-1);

  status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  if (status != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("os::create_thread");
    }
    thread->set_osthread(NULL);
    // Need to clean up stuff we've allocated so far
    delete osthread;
    return false;
  }

  Atomic::inc(&os::Solaris::_os_thread_count);

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(tid);

  // Remember that we created this thread so we can set priority on it
  osthread->set_vm_created();

  // Set the default thread priority. If using bound threads, setting
  // lwp priority will be delayed until thread start.
  set_native_priority(thread,
                      DefaultThreadPriority == -1 ?
                        java_to_os_priority[NormPriority] :
                        DefaultThreadPriority);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}

/* defined for >= Solaris 10. This allows builds on earlier versions
 * of Solaris to take advantage of the newly reserved Solaris JVM signals
 * With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2
 * and -XX:+UseAltSigs does nothing since these should have no conflict
 */
#if !defined(SIGJVM1)
#define SIGJVM1 39
#define SIGJVM2 40
#endif

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
int os::Solaris::_SIGasync = ASYNC_SIGNAL;

// True when the current disposition of 'sig' is SIG_IGN.
bool os::Solaris::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
                                 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
    return true;
  else
    return false;
}

// Note: SIGRTMIN is a macro that calls sysconf() so it will
// dynamically detect SIGRTMIN value for the system at runtime, not buildtime
static bool isJVM1available() {
  return SIGJVM1 < SIGRTMIN;
}

// Populate the static signal sets used by hotspot_sigmask(); must run
// while the VM is still single-threaded.
void os::Solaris::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  // by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
1406 // Programs embedding the VM that want to use the above signals for their 1407 // own purposes must, at this time, use the "-Xrs" option to prevent 1408 // interference with shutdown hooks and BREAK_SIGNAL thread dumping. 1409 // (See bug 4345157, and other related bugs). 1410 // In reality, though, unblocking these signals is really a nop, since 1411 // these signals are not blocked by default. 1412 sigemptyset(&unblocked_sigs); 1413 sigemptyset(&allowdebug_blocked_sigs); 1414 sigaddset(&unblocked_sigs, SIGILL); 1415 sigaddset(&unblocked_sigs, SIGSEGV); 1416 sigaddset(&unblocked_sigs, SIGBUS); 1417 sigaddset(&unblocked_sigs, SIGFPE); 1418 1419 if (isJVM1available) { 1420 os::Solaris::set_SIGinterrupt(SIGJVM1); 1421 os::Solaris::set_SIGasync(SIGJVM2); 1422 } else if (UseAltSigs) { 1423 os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL); 1424 os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL); 1425 } else { 1426 os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL); 1427 os::Solaris::set_SIGasync(ASYNC_SIGNAL); 1428 } 1429 1430 sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt()); 1431 sigaddset(&unblocked_sigs, os::Solaris::SIGasync()); 1432 1433 if (!ReduceSignalUsage) { 1434 if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) { 1435 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL); 1436 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL); 1437 } 1438 if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) { 1439 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL); 1440 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL); 1441 } 1442 if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) { 1443 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL); 1444 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL); 1445 } 1446 } 1447 // Fill in signals that are blocked by all but the VM thread. 
1448 sigemptyset(&vm_sigs); 1449 if (!ReduceSignalUsage) 1450 sigaddset(&vm_sigs, BREAK_SIGNAL); 1451 debug_only(signal_sets_initialized = true); 1452 1453 // For diagnostics only used in run_periodic_checks 1454 sigemptyset(&check_signal_done); 1455 } 1456 1457 // These are signals that are unblocked while a thread is running Java. 1458 // (For some reason, they get blocked by default.) 1459 sigset_t* os::Solaris::unblocked_signals() { 1460 assert(signal_sets_initialized, "Not initialized"); 1461 return &unblocked_sigs; 1462 } 1463 1464 // These are the signals that are blocked while a (non-VM) thread is 1465 // running Java. Only the VM thread handles these signals. 1466 sigset_t* os::Solaris::vm_signals() { 1467 assert(signal_sets_initialized, "Not initialized"); 1468 return &vm_sigs; 1469 } 1470 1471 // These are signals that are blocked during cond_wait to allow debugger in 1472 sigset_t* os::Solaris::allowdebug_blocked_signals() { 1473 assert(signal_sets_initialized, "Not initialized"); 1474 return &allowdebug_blocked_sigs; 1475 } 1476 1477 1478 void _handle_uncaught_cxx_exception() { 1479 VMError err("An uncaught C++ exception"); 1480 err.report_and_die(); 1481 } 1482 1483 1484 // First crack at OS-specific initialization, from inside the new thread. 1485 void os::initialize_thread(Thread* thr) { 1486 int r = thr_main() ; 1487 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 1488 if (r) { 1489 JavaThread* jt = (JavaThread *)thr; 1490 assert(jt != NULL,"Sanity check"); 1491 size_t stack_size; 1492 address base = jt->stack_base(); 1493 if (Arguments::created_by_java_launcher()) { 1494 // Use 2MB to allow for Solaris 7 64 bit mode. 1495 stack_size = JavaThread::stack_size_at_create() == 0 1496 ? 2048*K : JavaThread::stack_size_at_create(); 1497 1498 // There are rare cases when we may have already used more than 1499 // the basic stack size allotment before this method is invoked. 1500 // Attempt to allow for a normally sized java_stack. 
      // &stack_size is the current top-of-stack; the distance from base
      // tells how much has already been consumed before we got here.
      size_t current_stack_offset = (size_t)(base - (address)&stack_size);
      stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
    } else {
      // 6269555: If we were not created by a Java launcher, i.e. if we are
      // running embedded in a native application, treat the primordial thread
      // as much like a native attached thread as possible. This means using
      // the current stack size from thr_stksegment(), unless it is too large
      // to reliably setup guard pages. A reasonable max size is 8MB.
      size_t current_size = current_stack_size();
      // This should never happen, but just in case....
      if (current_size == 0) current_size = 2 * K * K;
      stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
    }
    address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());;
    stack_size = (size_t)(base - bottom);

    assert(stack_size > 0, "Stack size calculation problem");

    if (stack_size > jt->stack_size()) {
      NOT_PRODUCT(
        struct rlimit limits;
        getrlimit(RLIMIT_STACK, &limits);
        size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
        assert(size >= jt->stack_size(), "Stack size problem in main thread");
      )
      // NOTE(review): "%d" with size_t arguments -- relies on matching
      // widths; confirm against SIZE_FORMAT usage elsewhere in HotSpot.
      tty->print_cr(
        "Stack size of %d Kb exceeds current limit of %d Kb.\n"
        "(Stack sizes are rounded up to a multiple of the system page size.)\n"
        "See limit(1) to increase the stack size limit.",
        stack_size / K, jt->stack_size() / K);
      vm_exit(1);
    }
    assert(jt->stack_size() >= stack_size,
           "Attempt to map more stack than was allocated");
    jt->set_stack_size(stack_size);
  }

  // 5/22/01: Right now alternate signal stacks do not handle
  // throwing stack overflow exceptions, see bug 4463178
  // Until a fix is found for this, T2 will NOT imply alternate signal
  // stacks.
  // If using T2 libthread threads, install an alternate signal stack.
  // Because alternate stacks associate with LWPs on Solaris,
  // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
  // we prefer to explicitly stack bang.
  // If not using T2 libthread, but using UseBoundThreads any threads
  // (primordial thread, jni_attachCurrentThread) we do not create,
  // probably are not bound, therefore they can not have an alternate
  // signal stack. Since our stack banging code is generated and
  // is shared across threads, all threads must be bound to allow
  // using alternate signal stacks. The alternative is to interpose
  // on _lwp_create to associate an alt sig stack with each LWP,
  // and this could be a problem when the JVM is embedded.
  // We would prefer to use alternate signal stacks with T2
  // Since there is currently no accurate way to detect T2
  // we do not. Assuming T2 when running T1 causes sig 11s or assertions
  // on installing alternate signal stacks


  // 05/09/03: removed alternate signal stack support for Solaris
  // The alternate signal stack mechanism is no longer needed to
  // handle stack overflow. This is now handled by allocating
  // guard pages (red zone) and stackbanging.
  // Initially the alternate signal stack mechanism was removed because
  // it did not work with T1 llibthread. Alternate
  // signal stacks MUST have all threads bound to lwps. Applications
  // can create their own threads and attach them without their being
  // bound under T1. This is frequently the case for the primordial thread.
  // If we were ever to reenable this mechanism we would need to
  // use the dynamic check for T2 libthread.

  os::Solaris::init_thread_fpu_state();
  std::set_terminate(_handle_uncaught_cxx_exception);
}



// Free Solaris resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "os::free_thread but osthread not set");


  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  // The main thread must take the VMThread down synchronously
  // before the main thread exits and frees up CodeHeap
  guarantee((Thread::current()->osthread() == osthread
     || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
  }
  delete osthread;
}

// Resume a thread created suspended by os::create_thread().
void os::pd_start_thread(Thread* thread) {
  int status = thr_continue(thread->osthread()->thread_id());
  assert_status(status == 0, status, "thr_continue failed");
}


intx os::current_thread_id() {
  return (intx)thr_self();
}

// Cached pid (set during VM init elsewhere); 0 means "not cached yet".
static pid_t _initial_pid = 0;

int os::current_process_id() {
  return (int)(_initial_pid ? _initial_pid : getpid());
}

// Create a new TSD key and return it as the TLS index.
int os::allocate_thread_local_storage() {
  // %%% in Win32 this allocates a memory segment pointed to by a
  // register. Dan Stein can implement a similar feature in
  // Solaris. Alternatively, the VM can do the same thing
  // explicitly: malloc some storage and keep the pointer in a
  // register (which is part of the thread's context) (or keep it
  // in TLS).
  // %%% In current versions of Solaris, thr_self and TSD can
  // be accessed via short sequences of displaced indirections.
  // The value of thr_self is available as %g7(36).
  // The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
  // assuming that the current thread already has a value bound to k.
  // It may be worth experimenting with such access patterns,
  // and later having the parameters formally exported from a Solaris
  // interface. I think, however, that it will be faster to
  // maintain the invariant that %g2 always contains the
  // JavaThread in Java code, and have stubs simply
  // treat %g2 as a caller-save register, preserving it in a %lN.
  thread_key_t tk;
  if (thr_keycreate( &tk, NULL ) )
    fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
                  "(%s)", strerror(errno)));
  return int(tk);
}

void os::free_thread_local_storage(int index) {
  // %%% don't think we need anything here
  // if ( pthread_key_delete((pthread_key_t) tk) )
  //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
}

#define SMALLINT 32   // libthread allocate for tsd_common is a version specific
                      // small number - point is NO swap space available
// Bind 'value' to TLS slot 'index' for the current thread; exits the VM
// if libthread cannot allocate its per-thread storage.
void os::thread_local_storage_at_put(int index, void* value) {
  // %%% this is used only in threadLocalStorage.cpp
  if (thr_setspecific((thread_key_t)index, value)) {
    if (errno == ENOMEM) {
      vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space");
    } else {
      fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
                    "(%s)", strerror(errno)));
    }
  } else {
    ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
  }
}

// This function could be called before TLS is initialized, for example, when
// VM receives an async signal or when VM causes a fatal error during
// initialization. Return NULL if thr_getspecific() fails.
1663 void* os::thread_local_storage_at(int index) { 1664 // %%% this is used only in threadLocalStorage.cpp 1665 void* r = NULL; 1666 return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r; 1667 } 1668 1669 1670 // gethrtime can move backwards if read from one cpu and then a different cpu 1671 // getTimeNanos is guaranteed to not move backward on Solaris 1672 // local spinloop created as faster for a CAS on an int than 1673 // a CAS on a 64bit jlong. Also Atomic::cmpxchg for jlong is not 1674 // supported on sparc v8 or pre supports_cx8 intel boxes. 1675 // oldgetTimeNanos for systems which do not support CAS on 64bit jlong 1676 // i.e. sparc v8 and pre supports_cx8 (i486) intel boxes 1677 inline hrtime_t oldgetTimeNanos() { 1678 int gotlock = LOCK_INVALID; 1679 hrtime_t newtime = gethrtime(); 1680 1681 for (;;) { 1682 // grab lock for max_hrtime 1683 int curlock = max_hrtime_lock; 1684 if (curlock & LOCK_BUSY) continue; 1685 if (gotlock = Atomic::cmpxchg(LOCK_BUSY, &max_hrtime_lock, LOCK_FREE) != LOCK_FREE) continue; 1686 if (newtime > max_hrtime) { 1687 max_hrtime = newtime; 1688 } else { 1689 newtime = max_hrtime; 1690 } 1691 // release lock 1692 max_hrtime_lock = LOCK_FREE; 1693 return newtime; 1694 } 1695 } 1696 // gethrtime can move backwards if read from one cpu and then a different cpu 1697 // getTimeNanos is guaranteed to not move backward on Solaris 1698 inline hrtime_t getTimeNanos() { 1699 if (VM_Version::supports_cx8()) { 1700 const hrtime_t now = gethrtime(); 1701 // Use atomic long load since 32-bit x86 uses 2 registers to keep long. 1702 const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime); 1703 if (now <= prev) return prev; // same or retrograde time; 1704 const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev); 1705 assert(obsv >= prev, "invariant"); // Monotonicity 1706 // If the CAS succeeded then we're done and return "now". 
1707 // If the CAS failed and the observed value "obs" is >= now then 1708 // we should return "obs". If the CAS failed and now > obs > prv then 1709 // some other thread raced this thread and installed a new value, in which case 1710 // we could either (a) retry the entire operation, (b) retry trying to install now 1711 // or (c) just return obs. We use (c). No loop is required although in some cases 1712 // we might discard a higher "now" value in deference to a slightly lower but freshly 1713 // installed obs value. That's entirely benign -- it admits no new orderings compared 1714 // to (a) or (b) -- and greatly reduces coherence traffic. 1715 // We might also condition (c) on the magnitude of the delta between obs and now. 1716 // Avoiding excessive CAS operations to hot RW locations is critical. 1717 // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate 1718 return (prev == obsv) ? now : obsv ; 1719 } else { 1720 return oldgetTimeNanos(); 1721 } 1722 } 1723 1724 // Time since start-up in seconds to a fine granularity. 1725 // Used by VMSelfDestructTimer and the MemProfiler. 1726 double os::elapsedTime() { 1727 return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz; 1728 } 1729 1730 jlong os::elapsed_counter() { 1731 return (jlong)(getTimeNanos() - first_hrtime); 1732 } 1733 1734 jlong os::elapsed_frequency() { 1735 return hrtime_hz; 1736 } 1737 1738 // Return the real, user, and system times in seconds from an 1739 // arbitrary fixed point in the past. 
// Fill in real/user/system process times (seconds). Returns false if the
// underlying times(2) call fails, in which case no output is written.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    // For consistency return the real time from getTimeNanos()
    // converted to seconds.
    *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);

    return true;
  }
}

// Solaris supports per-thread virtual (CPU) time via gethrvtime().
bool os::supports_vtime() { return true; }

// Turn on micro-state accounting (PR_MSACCT) for this process by writing a
// PCSET control message to /proc/self/ctl. Returns false on any failure.
bool os::enable_vtime() {
  int fd = ::open("/proc/self/ctl", O_WRONLY);
  if (fd == -1)
    return false;

  long cmd[] = { PCSET, PR_MSACCT };
  int res = ::write(fd, cmd, sizeof(long) * 2);
  ::close(fd);
  if (res != sizeof(long) * 2)
    return false;

  return true;
}

// Report whether micro-state accounting is currently enabled, by reading the
// pstatus_t record from /proc/self/status and testing PR_MSACCT.
bool os::vtime_enabled() {
  int fd = ::open("/proc/self/status", O_RDONLY);
  if (fd == -1)
    return false;

  pstatus_t status;
  // NOTE(review): uses os::read (restartable wrapper) rather than ::read
  // here, presumably to tolerate EINTR — confirm against os::read's contract.
  int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
  ::close(fd);
  if (res != sizeof(pstatus_t))
    return false;

  return status.pr_flags & PR_MSACCT;
}

// Per-thread virtual (CPU) time in seconds, from gethrvtime().
double os::elapsedVTime() {
  return (double)gethrvtime() / (double)hrtime_hz;
}

// Used internally for comparisons only
// getTimeMillis guaranteed to not move backwards on Solaris
jlong getTimeMillis() {
  jlong nanotime = getTimeNanos();
  return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
}

// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
jlong os::javaTimeMillis() {
  timeval t;
  if (gettimeofday( &t, NULL) == -1)
    fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
  return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000;
}

// Monotonic nanosecond timer backing System.nanoTime().
jlong os::javaTimeNanos() {
  return (jlong)getTimeNanos();
}

// Describe the javaTimeNanos() clock for JVMTI: full 64-bit range,
// monotonic, elapsed (wall-clock) rather than CPU time.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
  info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
  info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
  info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
}

// Format the current local time as "YYYY-MM-DD HH:MM:SS" into buf;
// returns buf. Uses the reentrant localtime_r.
char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
1855 void os::abort(bool dump_core) { 1856 os::shutdown(); 1857 if (dump_core) { 1858 #ifndef PRODUCT 1859 fdStream out(defaultStream::output_fd()); 1860 out.print_raw("Current thread is "); 1861 char buf[16]; 1862 jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id()); 1863 out.print_raw_cr(buf); 1864 out.print_raw_cr("Dumping core ..."); 1865 #endif 1866 ::abort(); // dump core (for debugging) 1867 } 1868 1869 ::exit(1); 1870 } 1871 1872 // Die immediately, no exit hook, no abort hook, no cleanup. 1873 void os::die() { 1874 _exit(-1); 1875 } 1876 1877 // unused 1878 void os::set_error_file(const char *logfile) {} 1879 1880 // DLL functions 1881 1882 const char* os::dll_file_extension() { return ".so"; } 1883 1884 // This must be hard coded because it's the system's temporary 1885 // directory not the java application's temp directory, ala java.io.tmpdir. 1886 const char* os::get_temp_directory() { return "/tmp"; } 1887 1888 static bool file_exists(const char* filename) { 1889 struct stat statbuf; 1890 if (filename == NULL || strlen(filename) == 0) { 1891 return false; 1892 } 1893 return os::stat(filename, &statbuf) == 0; 1894 } 1895 1896 void os::dll_build_name(char* buffer, size_t buflen, 1897 const char* pname, const char* fname) { 1898 const size_t pnamelen = pname ? strlen(pname) : 0; 1899 1900 // Quietly truncate on buffer overflow. Should be an error. 
1901 if (pnamelen + strlen(fname) + 10 > (size_t) buflen) { 1902 *buffer = '\0'; 1903 return; 1904 } 1905 1906 if (pnamelen == 0) { 1907 snprintf(buffer, buflen, "lib%s.so", fname); 1908 } else if (strchr(pname, *os::path_separator()) != NULL) { 1909 int n; 1910 char** pelements = split_path(pname, &n); 1911 for (int i = 0 ; i < n ; i++) { 1912 // really shouldn't be NULL but what the heck, check can't hurt 1913 if (pelements[i] == NULL || strlen(pelements[i]) == 0) { 1914 continue; // skip the empty path values 1915 } 1916 snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname); 1917 if (file_exists(buffer)) { 1918 break; 1919 } 1920 } 1921 // release the storage 1922 for (int i = 0 ; i < n ; i++) { 1923 if (pelements[i] != NULL) { 1924 FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal); 1925 } 1926 } 1927 if (pelements != NULL) { 1928 FREE_C_HEAP_ARRAY(char*, pelements, mtInternal); 1929 } 1930 } else { 1931 snprintf(buffer, buflen, "%s/lib%s.so", pname, fname); 1932 } 1933 } 1934 1935 const char* os::get_current_directory(char *buf, int buflen) { 1936 return getcwd(buf, buflen); 1937 } 1938 1939 // check if addr is inside libjvm[_g].so 1940 bool os::address_is_in_vm(address addr) { 1941 static address libjvm_base_addr; 1942 Dl_info dlinfo; 1943 1944 if (libjvm_base_addr == NULL) { 1945 dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo); 1946 libjvm_base_addr = (address)dlinfo.dli_fbase; 1947 assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm"); 1948 } 1949 1950 if (dladdr((void *)addr, &dlinfo)) { 1951 if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true; 1952 } 1953 1954 return false; 1955 } 1956 1957 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int); 1958 static dladdr1_func_type dladdr1_func = NULL; 1959 1960 bool os::dll_address_to_function_name(address addr, char *buf, 1961 int buflen, int * offset) { 1962 Dl_info dlinfo; 1963 1964 // dladdr1_func was initialized in os::init() 1965 if 
(dladdr1_func){ 1966 // yes, we have dladdr1 1967 1968 // Support for dladdr1 is checked at runtime; it may be 1969 // available even if the vm is built on a machine that does 1970 // not have dladdr1 support. Make sure there is a value for 1971 // RTLD_DL_SYMENT. 1972 #ifndef RTLD_DL_SYMENT 1973 #define RTLD_DL_SYMENT 1 1974 #endif 1975 #ifdef _LP64 1976 Elf64_Sym * info; 1977 #else 1978 Elf32_Sym * info; 1979 #endif 1980 if (dladdr1_func((void *)addr, &dlinfo, (void **)&info, 1981 RTLD_DL_SYMENT)) { 1982 if ((char *)dlinfo.dli_saddr + info->st_size > (char *)addr) { 1983 if (buf != NULL) { 1984 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) 1985 jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname); 1986 } 1987 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; 1988 return true; 1989 } 1990 } 1991 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) { 1992 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase), 1993 buf, buflen, offset, dlinfo.dli_fname)) { 1994 return true; 1995 } 1996 } 1997 if (buf != NULL) buf[0] = '\0'; 1998 if (offset != NULL) *offset = -1; 1999 return false; 2000 } else { 2001 // no, only dladdr is available 2002 if (dladdr((void *)addr, &dlinfo)) { 2003 if (buf != NULL) { 2004 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) 2005 jio_snprintf(buf, buflen, dlinfo.dli_sname); 2006 } 2007 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; 2008 return true; 2009 } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) { 2010 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase), 2011 buf, buflen, offset, dlinfo.dli_fname)) { 2012 return true; 2013 } 2014 } 2015 if (buf != NULL) buf[0] = '\0'; 2016 if (offset != NULL) *offset = -1; 2017 return false; 2018 } 2019 } 2020 2021 bool os::dll_address_to_library_name(address addr, char* buf, 2022 int buflen, int* offset) { 2023 Dl_info dlinfo; 2024 2025 if (dladdr((void*)addr, &dlinfo)){ 2026 if (buf) jio_snprintf(buf, buflen, "%s", 
dlinfo.dli_fname); 2027 if (offset) *offset = addr - (address)dlinfo.dli_fbase; 2028 return true; 2029 } else { 2030 if (buf) buf[0] = '\0'; 2031 if (offset) *offset = -1; 2032 return false; 2033 } 2034 } 2035 2036 // Prints the names and full paths of all opened dynamic libraries 2037 // for current process 2038 void os::print_dll_info(outputStream * st) { 2039 Dl_info dli; 2040 void *handle; 2041 Link_map *map; 2042 Link_map *p; 2043 2044 st->print_cr("Dynamic libraries:"); st->flush(); 2045 2046 if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) { 2047 st->print_cr("Error: Cannot print dynamic libraries."); 2048 return; 2049 } 2050 handle = dlopen(dli.dli_fname, RTLD_LAZY); 2051 if (handle == NULL) { 2052 st->print_cr("Error: Cannot print dynamic libraries."); 2053 return; 2054 } 2055 dlinfo(handle, RTLD_DI_LINKMAP, &map); 2056 if (map == NULL) { 2057 st->print_cr("Error: Cannot print dynamic libraries."); 2058 return; 2059 } 2060 2061 while (map->l_prev != NULL) 2062 map = map->l_prev; 2063 2064 while (map != NULL) { 2065 st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name); 2066 map = map->l_next; 2067 } 2068 2069 dlclose(handle); 2070 } 2071 2072 // Loads .dll/.so and 2073 // in case of error it checks if .dll/.so was built for the 2074 // same architecture as Hotspot is running on 2075 2076 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) 2077 { 2078 void * result= ::dlopen(filename, RTLD_LAZY); 2079 if (result != NULL) { 2080 // Successful loading 2081 return result; 2082 } 2083 2084 Elf32_Ehdr elf_head; 2085 2086 // Read system error message into ebuf 2087 // It may or may not be overwritten below 2088 ::strncpy(ebuf, ::dlerror(), ebuflen-1); 2089 ebuf[ebuflen-1]='\0'; 2090 int diag_msg_max_length=ebuflen-strlen(ebuf); 2091 char* diag_msg_buf=ebuf+strlen(ebuf); 2092 2093 if (diag_msg_max_length==0) { 2094 // No more space in ebuf for additional diagnostics message 2095 return NULL; 2096 } 2097 2098 2099 int 
file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK); 2100 2101 if (file_descriptor < 0) { 2102 // Can't open library, report dlerror() message 2103 return NULL; 2104 } 2105 2106 bool failed_to_read_elf_head= 2107 (sizeof(elf_head)!= 2108 (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ; 2109 2110 ::close(file_descriptor); 2111 if (failed_to_read_elf_head) { 2112 // file i/o error - report dlerror() msg 2113 return NULL; 2114 } 2115 2116 typedef struct { 2117 Elf32_Half code; // Actual value as defined in elf.h 2118 Elf32_Half compat_class; // Compatibility of archs at VM's sense 2119 char elf_class; // 32 or 64 bit 2120 char endianess; // MSB or LSB 2121 char* name; // String representation 2122 } arch_t; 2123 2124 static const arch_t arch_array[]={ 2125 {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 2126 {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 2127 {EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"}, 2128 {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"}, 2129 {EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 2130 {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 2131 {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"}, 2132 {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"}, 2133 {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"}, 2134 {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"} 2135 }; 2136 2137 #if (defined IA32) 2138 static Elf32_Half running_arch_code=EM_386; 2139 #elif (defined AMD64) 2140 static Elf32_Half running_arch_code=EM_X86_64; 2141 #elif (defined IA64) 2142 static Elf32_Half running_arch_code=EM_IA_64; 2143 #elif (defined __sparc) && (defined _LP64) 2144 static Elf32_Half running_arch_code=EM_SPARCV9; 2145 #elif (defined __sparc) && (!defined _LP64) 2146 static Elf32_Half running_arch_code=EM_SPARC; 2147 #elif (defined __powerpc64__) 2148 static Elf32_Half 
running_arch_code=EM_PPC64; 2149 #elif (defined __powerpc__) 2150 static Elf32_Half running_arch_code=EM_PPC; 2151 #elif (defined ARM) 2152 static Elf32_Half running_arch_code=EM_ARM; 2153 #else 2154 #error Method os::dll_load requires that one of following is defined:\ 2155 IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM 2156 #endif 2157 2158 // Identify compatability class for VM's architecture and library's architecture 2159 // Obtain string descriptions for architectures 2160 2161 arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL}; 2162 int running_arch_index=-1; 2163 2164 for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) { 2165 if (running_arch_code == arch_array[i].code) { 2166 running_arch_index = i; 2167 } 2168 if (lib_arch.code == arch_array[i].code) { 2169 lib_arch.compat_class = arch_array[i].compat_class; 2170 lib_arch.name = arch_array[i].name; 2171 } 2172 } 2173 2174 assert(running_arch_index != -1, 2175 "Didn't find running architecture code (running_arch_code) in arch_array"); 2176 if (running_arch_index == -1) { 2177 // Even though running architecture detection failed 2178 // we may still continue with reporting dlerror() message 2179 return NULL; 2180 } 2181 2182 if (lib_arch.endianess != arch_array[running_arch_index].endianess) { 2183 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)"); 2184 return NULL; 2185 } 2186 2187 if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) { 2188 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)"); 2189 return NULL; 2190 } 2191 2192 if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) { 2193 if ( lib_arch.name!=NULL ) { 2194 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 2195 " (Possible cause: can't load %s-bit .so on a %s-bit platform)", 2196 lib_arch.name, arch_array[running_arch_index].name); 2197 } else { 2198 
::snprintf(diag_msg_buf, diag_msg_max_length-1, 2199 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)", 2200 lib_arch.code, 2201 arch_array[running_arch_index].name); 2202 } 2203 } 2204 2205 return NULL; 2206 } 2207 2208 void* os::dll_lookup(void* handle, const char* name) { 2209 return dlsym(handle, name); 2210 } 2211 2212 int os::stat(const char *path, struct stat *sbuf) { 2213 char pathbuf[MAX_PATH]; 2214 if (strlen(path) > MAX_PATH - 1) { 2215 errno = ENAMETOOLONG; 2216 return -1; 2217 } 2218 os::native_path(strcpy(pathbuf, path)); 2219 return ::stat(pathbuf, sbuf); 2220 } 2221 2222 static bool _print_ascii_file(const char* filename, outputStream* st) { 2223 int fd = ::open(filename, O_RDONLY); 2224 if (fd == -1) { 2225 return false; 2226 } 2227 2228 char buf[32]; 2229 int bytes; 2230 while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) { 2231 st->print_raw(buf, bytes); 2232 } 2233 2234 ::close(fd); 2235 2236 return true; 2237 } 2238 2239 void os::print_os_info_brief(outputStream* st) { 2240 os::Solaris::print_distro_info(st); 2241 2242 os::Posix::print_uname_info(st); 2243 2244 os::Solaris::print_libversion_info(st); 2245 } 2246 2247 void os::print_os_info(outputStream* st) { 2248 st->print("OS:"); 2249 2250 os::Solaris::print_distro_info(st); 2251 2252 os::Posix::print_uname_info(st); 2253 2254 os::Solaris::print_libversion_info(st); 2255 2256 os::Posix::print_rlimit_info(st); 2257 2258 os::Posix::print_load_average(st); 2259 } 2260 2261 void os::Solaris::print_distro_info(outputStream* st) { 2262 if (!_print_ascii_file("/etc/release", st)) { 2263 st->print("Solaris"); 2264 } 2265 st->cr(); 2266 } 2267 2268 void os::Solaris::print_libversion_info(outputStream* st) { 2269 if (os::Solaris::T2_libthread()) { 2270 st->print(" (T2 libthread)"); 2271 } 2272 else { 2273 st->print(" (T1 libthread)"); 2274 } 2275 st->cr(); 2276 } 2277 2278 static bool check_addr0(outputStream* st) { 2279 jboolean status = false; 2280 int fd = 
::open("/proc/self/map",O_RDONLY); 2281 if (fd >= 0) { 2282 prmap_t p; 2283 while(::read(fd, &p, sizeof(p)) > 0) { 2284 if (p.pr_vaddr == 0x0) { 2285 st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname); 2286 st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname); 2287 st->print("Access:"); 2288 st->print("%s",(p.pr_mflags & MA_READ) ? "r" : "-"); 2289 st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-"); 2290 st->print("%s",(p.pr_mflags & MA_EXEC) ? "x" : "-"); 2291 st->cr(); 2292 status = true; 2293 } 2294 ::close(fd); 2295 } 2296 } 2297 return status; 2298 } 2299 2300 void os::pd_print_cpu_info(outputStream* st) { 2301 // Nothing to do for now. 2302 } 2303 2304 void os::print_memory_info(outputStream* st) { 2305 st->print("Memory:"); 2306 st->print(" %dk page", os::vm_page_size()>>10); 2307 st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10); 2308 st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10); 2309 st->cr(); 2310 (void) check_addr0(st); 2311 } 2312 2313 // Taken from /usr/include/sys/machsig.h Supposed to be architecture specific 2314 // but they're the same for all the solaris architectures that we support. 
// Human-readable names for si_code values, indexed by code. Index 0 is a
// placeholder since valid codes start at 1.
const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR",
                          "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG",
                          "ILL_COPROC", "ILL_BADSTK" };

const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV",
                          "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES",
                          "FPE_FLTINV", "FPE_FLTSUB" };

const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" };

const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" };

// Print a decoded siginfo_t (signal number, errno, si_code with symbolic
// name, faulting address) to the stream; used by error reporting.
void os::print_siginfo(outputStream* st, void* siginfo) {
  st->print("siginfo:");

  const int buflen = 100;
  char buf[buflen];
  siginfo_t *si = (siginfo_t*)siginfo;
  st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen));
  char *err = strerror(si->si_errno);
  if (si->si_errno != 0 && err != NULL) {
    st->print("si_errno=%s", err);
  } else {
    st->print("si_errno=%d", si->si_errno);
  }
  const int c = si->si_code;
  assert(c > 0, "unexpected si_code");
  switch (si->si_signo) {
  case SIGILL:
    // Codes above the table size print an empty name rather than overrunning.
    st->print(", si_code=%d (%s)", c, c > 8 ? "" : ill_names[c]);
    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
    break;
  case SIGFPE:
    st->print(", si_code=%d (%s)", c, c > 9 ? "" : fpe_names[c]);
    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
    break;
  case SIGSEGV:
    st->print(", si_code=%d (%s)", c, c > 2 ? "" : segv_names[c]);
    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
    break;
  case SIGBUS:
    st->print(", si_code=%d (%s)", c, c > 3 ? "" : bus_names[c]);
    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
    break;
  default:
    st->print(", si_code=%d", si->si_code);
    // no si_addr
  }

  // A fault inside the CDS archive mapping usually means the mapped file
  // became inaccessible (disk/network) rather than a VM bug — say so.
  if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
      UseSharedSpaces) {
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space(si->si_addr)) {
      st->print("\n\nError accessing class data sharing archive." \
                " Mapped file inaccessible during execution, " \
                " possible disk/network problem.");
    }
  }
  st->cr();
}

// Moved from whole group, because we need them here for diagnostic
// prints.
#define OLDMAXSIGNUM 32
static int Maxsignum = 0;
static int *ourSigFlags = NULL;

extern "C" void sigINTRHandler(int, siginfo_t*, void*);

// Return the sa_flags the VM installed for "sig"; requires init_signal_mem().
int os::Solaris::get_our_sigflags(int sig) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  return ourSigFlags[sig];
}

// Record the sa_flags the VM installed for "sig" so later diagnostics can
// detect if a third party changed them.
void os::Solaris::set_our_sigflags(int sig, int flags) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  ourSigFlags[sig] = flags;
}


// Format a handler address as "library+0xoffset" (basename only), or as a
// raw pointer if the containing library cannot be determined.
static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}

// Print the currently-installed handler for "sig": its name/location,
// sa_mask, sa_flags, and a warning if our handler's flags were altered
// (suggesting a missing jsig interposition).
static void print_signal_handler(outputStream* st, int sig,
                                  char* buf, size_t buflen) {
  struct sigaction sa;

  // Query only (act == NULL): does not change the installed handler.
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // May be, handler was resetted by VMError?
  if(rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  st->print(", sa_flags=" PTR32_FORMAT, sa.sa_flags);

  // Check: is it our handler?
  if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
     handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
    // It is our signal handler
    // check for flags
    if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
      st->print(
        ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
        os::Solaris::get_our_sigflags(sig));
    }
  }
  st->cr();
}

// Dump the handlers for every signal the VM cares about.
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
  print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
  print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
}

// Cached result of os::jvm_path(); empty until first call.
static char saved_jvm_path[MAXPATHLEN] = { 0 };

// Find the full path to the current module, libjvm.so or libjvm_g.so
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Resolve our own load path by asking the dynamic linker where this
  // function lives, then canonicalize it.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  realpath((char *)dlinfo.dli_fname, buf);

  if (Arguments::created_by_gamma_launcher()) {
    // Support for the gamma launcher.  Typical value for buf is
    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
    // the right place in the string, then assume we are installed in a JDK and
    // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
    // up the path so it looks like libjvm.so is installed there (append a
    // fake suffix hotspot/libjvm.so).
    const char *p = buf + strlen(buf) - 1;
    // Walk back over the last five '/'-separated components.
    for (int count = 0; p > buf && count < 5; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char cpu_arch[12];
        char* jrelib_p;
        int len;
        sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
        // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
        if (strcmp(cpu_arch, "sparc") == 0) {
          strcat(cpu_arch, "v9");
        } else if (strcmp(cpu_arch, "i386") == 0) {
          strcpy(cpu_arch, "amd64");
        }
#endif
        // Check the current module name "libjvm.so" or "libjvm_g.so".
        p = strrchr(buf, '/');
        assert(strstr(p, "/libjvm") == p, "invalid library name");
        // p becomes "_g" for the debug build, "" otherwise.
        p = strstr(p, "_g") ? "_g" : "";

        realpath(java_home_var, buf);
        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm[_g].so" instead of
          // "libjvm"debug_only("_g")".so" since for fastdebug version
          // we should have "libjvm.so" but debug_only("_g") adds "_g"!
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
        } else {
          // Go back to path of .so
          realpath((char *)dlinfo.dli_fname, buf);
        }
      }
    }
  }

  strcpy(saved_jvm_path, buf);
}


void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

// Copy strerror(errno) into buf (NUL-terminated, truncated to len-1).
// Returns the number of characters copied, 0 if errno is 0.
size_t os::lasterror(char *buf, size_t len) {

  if (errno == 0)  return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}


// sun.misc.Signal

extern "C" {
  // Handler installed for signals delegated to the Java-level
  // sun.misc.Signal dispatcher: just record the signal and wake the
  // signal-dispatcher thread via os::signal_notify().
  static void UserHandler(int sig, void *siginfo, void *context) {
    // Ctrl-C is pressed during error reporting, likely because the error
    // handler fails to abort. Let VM die immediately.
    if (sig == SIGINT && is_error_reported()) {
      os::die();
    }

    os::signal_notify(sig);
    // We do not need to reinstate the signal handler each time...
  }
}

void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}

extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}

// Install "handler" for "signal_number" with all signals blocked during
// delivery and SA_RESTART semantics. Returns the previous handler, or
// (void*)-1 if registration failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct))
    // -1 means registration failed
    return (void *)-1;

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

/*
 * The following code is moved from os.cpp for making this
 * code platform specific, which it is by its very nature.
 */

// a counter for each possible signal value
static int Sigexit = 0;
static int Maxlibjsigsigs;
static jint *pending_signals = NULL;
static int *preinstalled_sigs = NULL;
static struct sigaction *chainedsigactions = NULL;
static sema_t sig_sem;
typedef int (*version_getting_t)();
version_getting_t os::Solaris::get_libjsig_version = NULL;
static int libjsigversion = NULL;

// Pseudo-signal number used to tell the signal-dispatcher thread to exit
// (one past the highest real signal).
int os::sigexitnum_pd() {
  assert(Sigexit > 0, "signal memory not yet initialized");
  return Sigexit;
}

// Allocate and zero the per-signal bookkeeping arrays. Sized from SIGRTMAX,
// so must run after the signal range is known; called once during init.
void os::Solaris::init_signal_mem() {
  // Initialize signal structures
  Maxsignum = SIGRTMAX;
  Sigexit = Maxsignum+1;
  assert(Maxsignum >0, "Unable to obtain max signal number");

  Maxlibjsigsigs = Maxsignum;

  // pending_signals has one int per signal
  // The additional signal is for SIGEXIT - exit signal to signal_thread
  pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
  memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));

  if (UseSignalChaining) {
    chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
      * (Maxsignum + 1), mtInternal);
    memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
    preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
    memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
  }
  ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ), mtInternal);
  memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
}

// Create the semaphore the signal-dispatcher thread blocks on.
void os::signal_init_pd() {
  int ret;

  ret = ::sema_init(&sig_sem, 0, NULL, NULL);
  assert(ret == 0, "sema_init() failed");
}

// Record one occurrence of "signal_number" and wake the dispatcher thread.
// Async-signal-safe: only an atomic increment and a sema_post.
void os::signal_notify(int signal_number) {
  int ret;

  Atomic::inc(&pending_signals[signal_number]);
  ret = ::sema_post(&sig_sem);
  assert(ret == 0, "sema_post() failed");
}

// Scan pending_signals for a raised signal and claim it (CAS-decrement).
// If none and wait_for_signal is true, block on sig_sem — cooperating with
// the thread-suspension protocol — and rescan; otherwise return -1.
static int check_pending_signals(bool wait_for_signal) {
  int ret;
  while (true) {
    for (int i = 0; i < Sigexit + 1; i++) {
      jint n = pending_signals[i];
      // Claim one occurrence of signal i; the CAS can fail if another
      // thread claimed it first, in which case we just move on.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      while((ret = ::sema_wait(&sig_sem)) == EINTR)
          ;
      assert(ret == 0, "sema_wait() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Re-post the wakeup we consumed so no notification is lost.
        ret = ::sema_post(&sig_sem);
        assert(ret == 0, "sema_post() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

// Non-blocking scan for a pending signal; -1 if none.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Blocking wait for the next pending signal.
int os::signal_wait() {
  return check_pending_signals(true);
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

static int page_size = -1;

// The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
// clear this var if support is not available.
static bool has_map_align = true;

int os::vm_page_size() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}

// Solaris allocates memory by pages.
int os::vm_allocation_granularity() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}

// Commit [addr, addr+bytes) read/write (plus exec if requested) by remapping
// it MAP_FIXED over the reservation. Returns true on success.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  size_t size = bytes;
  char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
  if (res != NULL) {
    if (UseNUMAInterleaving) {
      numa_make_global(addr, bytes);
    }
    return true;
  }
  return false;
}

// Commit with an alignment hint: after committing, optionally ask the kernel
// for large (MPSS) pages over the range. MPSS failures are ignored — the
// hint is advisory only.
bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
                          bool exec) {
  if (commit_memory(addr, bytes, exec)) {
    if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
      // If the large page size has been set and the VM
      // is using large pages, use the large page size
      // if it is smaller than the alignment hint. This is
      // a case where the VM wants to use a larger alignment size
      // for its own reasons but still want to use large pages
      // (which is what matters to setting the mpss range.
      size_t page_size = 0;
      if (large_page_size() < alignment_hint) {
        assert(UseLargePages, "Expected to be here for large page use only");
        page_size = large_page_size();
      } else {
        // If the alignment hint is less than the large page
        // size, the VM wants a particular alignment (thus the hint)
        // for internal reasons. Try to set the mpss range using
        // the alignment_hint.
        page_size = alignment_hint;
      }
      // Since this is a hint, ignore any failures.
      (void)Solaris::set_mpss_range(addr, bytes, page_size);
    }
    return true;
  }
  return false;
}

// Uncommit the pages in a specified region.
void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
  if (madvise(addr, bytes, MADV_FREE) < 0) {
    debug_only(warning("MADV_FREE failed."));
    return;
  }
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Change the page size in a given range.
2816 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { 2817 assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned."); 2818 assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned."); 2819 if (UseLargePages && UseMPSS) { 2820 Solaris::set_mpss_range(addr, bytes, alignment_hint); 2821 } 2822 } 2823 2824 // Tell the OS to make the range local to the first-touching LWP 2825 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { 2826 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned."); 2827 if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) { 2828 debug_only(warning("MADV_ACCESS_LWP failed.")); 2829 } 2830 } 2831 2832 // Tell the OS that this range would be accessed from different LWPs. 2833 void os::numa_make_global(char *addr, size_t bytes) { 2834 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned."); 2835 if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) { 2836 debug_only(warning("MADV_ACCESS_MANY failed.")); 2837 } 2838 } 2839 2840 // Get the number of the locality groups. 2841 size_t os::numa_get_groups_num() { 2842 size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie()); 2843 return n != -1 ? n : 1; 2844 } 2845 2846 // Get a list of leaf locality groups. A leaf lgroup is group that 2847 // doesn't have any children. Typical leaf group is a CPU or a CPU/memory 2848 // board. An LWP is assigned to one of these groups upon creation. 2849 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 2850 if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) { 2851 ids[0] = 0; 2852 return 1; 2853 } 2854 int result_size = 0, top = 1, bottom = 0, cur = 0; 2855 for (int k = 0; k < size; k++) { 2856 int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur], 2857 (Solaris::lgrp_id_t*)&ids[top], size - top); 2858 if (r == -1) { 2859 ids[0] = 0; 2860 return 1; 2861 } 2862 if (!r) { 2863 // That's a leaf node. 
2864 assert (bottom <= cur, "Sanity check"); 2865 // Check if the node has memory 2866 if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur], 2867 NULL, 0, LGRP_RSRC_MEM) > 0) { 2868 ids[bottom++] = ids[cur]; 2869 } 2870 } 2871 top += r; 2872 cur++; 2873 } 2874 if (bottom == 0) { 2875 // Handle a situation, when the OS reports no memory available. 2876 // Assume UMA architecture. 2877 ids[0] = 0; 2878 return 1; 2879 } 2880 return bottom; 2881 } 2882 2883 // Detect the topology change. Typically happens during CPU plugging-unplugging. 2884 bool os::numa_topology_changed() { 2885 int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie()); 2886 if (is_stale != -1 && is_stale) { 2887 Solaris::lgrp_fini(Solaris::lgrp_cookie()); 2888 Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER); 2889 assert(c != 0, "Failure to initialize LGRP API"); 2890 Solaris::set_lgrp_cookie(c); 2891 return true; 2892 } 2893 return false; 2894 } 2895 2896 // Get the group id of the current LWP. 2897 int os::numa_get_group_id() { 2898 int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID); 2899 if (lgrp_id == -1) { 2900 return 0; 2901 } 2902 const int size = os::numa_get_groups_num(); 2903 int *ids = (int*)alloca(size * sizeof(int)); 2904 2905 // Get the ids of all lgroups with memory; r is the count. 2906 int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id, 2907 (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM); 2908 if (r <= 0) { 2909 return 0; 2910 } 2911 return ids[os::random() % r]; 2912 } 2913 2914 // Request information about the page. 
bool os::get_page_info(char *start, page_info* info) {
  // Query lgroup and page size for the page containing 'start' via meminfo().
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  uint64_t addr = (uintptr_t)start;
  uint64_t outdata[2];
  uint_t validity = 0;

  if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
    return false;
  }

  // Defaults reported when a particular info item is not available.
  info->size = 0;
  info->lgrp_id = -1;

  // Bit 0 of validity: the address itself was valid; bits 1 and 2 flag the
  // two requested info_types (lgroup and page size respectively).
  if ((validity & 1) != 0) {
    if ((validity & 2) != 0) {
      info->lgrp_id = outdata[0];
    }
    if ((validity & 4) != 0) {
      info->size = outdata[1];
    }
    return true;
  }
  return false;
}

// Scan the pages from start to end until a page different than
// the one described in the info parameter is encountered.
// Batches up to MAX_MEMINFO_CNT addresses per meminfo() call.  Returns the
// address of the first mismatching page (describing it in *page_found),
// 'end' if the whole range matches, or NULL on an unmapped page or a
// meminfo() failure.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  const size_t types = sizeof(info_types) / sizeof(info_types[0]);
  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT];
  uint_t validity[MAX_MEMINFO_CNT];

  // Step by the expected page size (at least one VM page).
  size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
  uint64_t p = (uint64_t)start;
  while (p < (uint64_t)end) {
    // Build a batch of sample addresses starting at p.
    addrs[0] = p;
    size_t addrs_count = 1;
    while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] < (uint64_t)end) {
      addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
      addrs_count++;
    }

    if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
      return NULL;
    }

    size_t i = 0;
    for (; i < addrs_count; i++) {
      if ((validity[i] & 1) != 0) {
        // Page size mismatch (or size unknown while one was expected)?
        if ((validity[i] & 4) != 0) {
          if (outdata[types * i + 1] != page_expected->size) {
            break;
          }
        } else
          if (page_expected->size != 0) {
            break;
          }

        // Lgroup mismatch (only checked when the caller expects a specific one)?
        if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
          if (outdata[types * i] != page_expected->lgrp_id) {
            break;
          }
        }
      } else {
        // Unmapped page inside the range.
        return NULL;
      }
    }

    if (i != addrs_count) {
      // Mismatch found at addrs[i]: report what is actually there.
      if ((validity[i] & 2) != 0) {
        page_found->lgrp_id = outdata[types * i];
      } else {
        page_found->lgrp_id = -1;
      }
      if ((validity[i] & 4) != 0) {
        page_found->size = outdata[types * i + 1];
      } else {
        page_found->size = 0;
      }
      return (char*)addrs[i];
    }

    p = addrs[addrs_count - 1] + page_size;
  }
  return end;
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  size_t size = bytes;
  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page. Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
  return
    NULL != Solaris::mmap_chunk(addr, size,
                                MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
                                PROT_NONE);
}

// Thin wrapper over mmap() using the cached /dev/zero fd; returns NULL
// instead of MAP_FAILED on error.
char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
  char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);

  if (b == MAP_FAILED) {
    return NULL;
  }
  return b;
}

// Reserve (but do not commit) an anonymous mapping.  With 'fixed' the
// requested address is mandatory (MAP_FIXED); otherwise an alignment hint
// larger than a page is forwarded via MAP_ALIGN when available.
char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
  char* addr = requested_addr;
  int flags = MAP_PRIVATE | MAP_NORESERVE;

  assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");

  if (fixed) {
    flags |= MAP_FIXED;
  } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
    flags |= MAP_ALIGN;
    addr = (char*) alignment_hint;   // with MAP_ALIGN the addr argument carries the alignment
  }

  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page. Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
  return mmap_chunk(addr, bytes, flags, PROT_NONE);
}

char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
  // A non-NULL requested_addr makes the mapping fixed (MAP_FIXED).
  char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));

  guarantee(requested_addr == NULL || requested_addr == addr,
            "OS failed to return requested mmap address.");
  return addr;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  const int max_tries = 10;
  char* base[max_tries];
  size_t size[max_tries];

  // Solaris adds a gap between mmap'ed regions.  The size of the gap
  // is dependent on the requested size and the MMU.  Our initial gap
  // value here is just a guess and will be corrected later.
  bool had_top_overlap = false;
  bool have_adjusted_gap = false;
  size_t gap = 0x400000;

  // Assert only that the size is a multiple of the page size, since
  // that's all that mmap requires, and since that's all we really know
  // about at this low abstraction level.  If we need higher alignment,
  // we can either pass an alignment to this method or verify alignment
  // in one of the methods further up the call chain.  See bug 5044738.
  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");

  // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
  // Give it a try, if the kernel honors the hint we can return immediately.
  char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
  volatile int err = errno;   // capture errno before the calls below clobber it
  if (addr == requested_addr) {
    return addr;
  } else if (addr != NULL) {
    unmap_memory(addr, bytes);
  }

  if (PrintMiscellaneous && Verbose) {
    char buf[256];
    buf[0] = '\0';
    if (addr == NULL) {
      jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
    }
    warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
            PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
            "%s", bytes, requested_addr, addr, buf);
  }

  // Address hint method didn't work.  Fall back to the old method.
  // In theory, once SNV becomes our oldest supported platform, this
  // code will no longer be needed.
  //
  // Repeatedly allocate blocks until the block is allocated at the
  // right spot. Give up after max_tries.
  int i;
  for (i = 0; i < max_tries; ++i) {
    base[i] = reserve_memory(bytes);

    if (base[i] != NULL) {
      // Is this the block we wanted?
      if (base[i] == requested_addr) {
        size[i] = bytes;
        break;
      }

      // check that the gap value is right
      if (had_top_overlap && !have_adjusted_gap) {
        size_t actual_gap = base[i-1] - base[i] - bytes;
        if (gap != actual_gap) {
          // adjust the gap value and retry the last 2 allocations
          assert(i > 0, "gap adjustment code problem");
          have_adjusted_gap = true;  // adjust the gap only once, just in case
          gap = actual_gap;
          if (PrintMiscellaneous && Verbose) {
            warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
          }
          unmap_memory(base[i], bytes);
          unmap_memory(base[i-1], size[i-1]);
          i-=2;
          continue;
        }
      }

      // Does this overlap the block we wanted? Give back the overlapped
      // parts and try again.
      //
      // There is still a bug in this code: if top_overlap == bytes,
      // the overlap is offset from requested region by the value of gap.
      // In this case giving back the overlapped part will not work,
      // because we'll give back the entire block at base[i] and
      // therefore the subsequent allocation will not generate a new gap.
      // This could be fixed with a new algorithm that used larger
      // or variable size chunks to find the requested region -
      // but such a change would introduce additional complications.
      // It's rare enough that the planets align for this bug,
      // so we'll just wait for a fix for 6204603/5003415 which
      // will provide a mmap flag to allow us to avoid this business.

      size_t top_overlap = requested_addr + (bytes + gap) - base[i];
      // NOTE(review): top_overlap/bottom_overlap are unsigned, so the
      // '>= 0' halves of these tests are always true; the '< bytes' halves
      // do the real filtering (a non-overlapping block wraps around to a
      // huge unsigned value).  Preserved as-is.
      if (top_overlap >= 0 && top_overlap < bytes) {
        had_top_overlap = true;
        unmap_memory(base[i], top_overlap);
        base[i] += top_overlap;
        size[i] = bytes - top_overlap;
      } else {
        size_t bottom_overlap = base[i] + bytes - requested_addr;
        if (bottom_overlap >= 0 && bottom_overlap < bytes) {
          if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
            warning("attempt_reserve_memory_at: possible alignment bug");
          }
          unmap_memory(requested_addr, bottom_overlap);
          size[i] = bytes - bottom_overlap;
        } else {
          size[i] = bytes;
        }
      }
    }
  }

  // Give back the unused reserved pieces.

  for (int j = 0; j < i; ++j) {
    if (base[j] != NULL) {
      unmap_memory(base[j], size[j]);
    }
  }

  return (i < max_tries) ? requested_addr : NULL;
}

bool os::pd_release_memory(char* addr, size_t bytes) {
  size_t size = bytes;
  return munmap(addr, size) == 0;
}

// mprotect() wrapper asserting page alignment; returns true on success.
static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
  assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
         "addr must be page aligned");
  int retVal = mprotect(addr, bytes, prot);
  return retVal == 0;
}

// Protect memory (Used to pass readonly pages through
// JNI GetArray<type>Elements with empty arrays.)
// Also, used for serialization page and for compressed oops null pointer
// checking.
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return solaris_mprotect(addr, bytes, p);
}

// guard_memory and unguard_memory only happens within stack guard pages.
// Since ISM pertains only to the heap, guard and unguard memory should not
// happen with an ISM region.
bool os::guard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_NONE);
}

bool os::unguard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
}

// Large page support

// UseLargePages is the master flag to enable/disable large page memory.
// UseMPSS and UseISM are supported for compatibility reasons.  Their combined
// effects can be described in the following table:
//
// UseLargePages UseMPSS UseISM
//    false         *       *   => UseLargePages is the master switch, turning
//                                 it off will turn off both UseMPSS and
//                                 UseISM. VM will not use large page memory
//                                 regardless the settings of UseMPSS/UseISM.
//     true      false    false => Unless future Solaris provides other
//                                 mechanism to use large page memory, this
//                                 combination is equivalent to -UseLargePages,
//                                 VM will not use large page memory
//     true      true     false => JVM will use MPSS for large page memory.
//                                 This is the default behavior.
//     true      false    true  => JVM will use ISM for large page memory.
//     true      true     true  => JVM will use ISM if it is available.
//                                 Otherwise, JVM will fall back to MPSS.
//                                 Because ISM is now available on all
//                                 supported Solaris versions, this combination
//                                 is equivalent to +UseISM -UseMPSS.

static size_t _large_page_size = 0;

// Choose the page size to use with ISM large pages.  No OS probing is done;
// a per-architecture default (or LargePageSizeInBytes) is used.
bool os::Solaris::ism_sanity_check(bool warn, size_t * page_size) {
  // x86 uses either 2M or 4M page, depending on whether PAE (Physical Address
  // Extensions) mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. Sparc
  // can support multiple page sizes.

  // Don't bother to probe page size because getpagesizes() comes with MPSS.
  // ISM is only recommended on old Solaris where there is no MPSS support.
  // Simply choose a conservative value as default.
  *page_size = LargePageSizeInBytes ? LargePageSizeInBytes :
    SPARC_ONLY(4 * M) IA32_ONLY(4 * M) AMD64_ONLY(2 * M)
    ARM_ONLY(2 * M);

  // ISM is available on all supported Solaris versions
  return true;
}

// Insertion sort for small arrays (descending order).
static void insertion_sort_descending(size_t* array, int len) {
  for (int i = 0; i < len; i++) {
    size_t val = array[i];
    // Shift smaller predecessors right until val is in its place.
    for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
      size_t tmp = array[key];
      array[key] = array[key - 1];
      array[key - 1] = tmp;
    }
  }
}

// Probe the page sizes supported via MPSS and select the large page size to
// use.  Fills os::_page_sizes (descending, 0-terminated) with up to
// VM_Version::page_size_count() usable sizes and stores the chosen large
// page size in *page_size.  Returns false if MPSS is unusable.
bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) {
  const unsigned int usable_count = VM_Version::page_size_count();
  if (usable_count == 1) {
    return false;
  }

  // Find the right getpagesizes interface.  When solaris 11 is the minimum
  // build platform, getpagesizes() (without the '2') can be called directly.
  typedef int (*gps_t)(size_t[], int);
  gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
  if (gps_func == NULL) {
    gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
    if (gps_func == NULL) {
      if (warn) {
        warning("MPSS is not supported by the operating system.");
      }
      return false;
    }
  }

  // Fill the array of page sizes.
  int n = (*gps_func)(_page_sizes, page_sizes_max);
  assert(n > 0, "Solaris bug?");

  if (n == page_sizes_max) {
    // Add a sentinel value (necessary only if the array was completely filled
    // since it is static (zeroed at initialization)).
    _page_sizes[--n] = 0;
    DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
  }
  assert(_page_sizes[n] == 0, "missing sentinel");
  trace_page_sizes("available page sizes", _page_sizes, n);

  if (n == 1) return false;   // Only one page size available.

  // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
  // select up to usable_count elements.  First sort the array, find the first
  // acceptable value, then copy the usable sizes to the top of the array and
  // trim the rest.  Make sure to include the default page size :-).
  //
  // A better policy could get rid of the 4M limit by taking the sizes of the
  // important VM memory regions (java heap and possibly the code cache) into
  // account.
  insertion_sort_descending(_page_sizes, n);
  const size_t size_limit =
    FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
  int beg;
  // beg: index of the first entry not larger than the limit.
  for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
  const int end = MIN2((int)usable_count, n) - 1;
  for (int cur = 0; cur < end; ++cur, ++beg) {
    _page_sizes[cur] = _page_sizes[beg];
  }
  // Always keep the default (small) page size as the final usable entry.
  _page_sizes[end] = vm_page_size();
  _page_sizes[end + 1] = 0;

  if (_page_sizes[end] > _page_sizes[end - 1]) {
    // Default page size is not the smallest; sort again.
    insertion_sort_descending(_page_sizes, end + 1);
  }
  *page_size = _page_sizes[0];

  trace_page_sizes("usable page sizes", _page_sizes, end + 1);
  return true;
}

// Decide between ISM and MPSS large pages (see the table above) and compute
// the large page size.  Clears UseLargePages if neither mechanism is usable.
void os::large_page_init() {
  if (!UseLargePages) {
    UseISM = false;
    UseMPSS = false;
    return;
  }

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(UseISM)        ||
                         !FLAG_IS_DEFAULT(UseMPSS)       ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  UseISM = UseISM &&
           Solaris::ism_sanity_check(warn_on_failure, &_large_page_size);
  if (UseISM) {
    // ISM disables MPSS to be compatible with old JDK behavior
    UseMPSS = false;
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = vm_page_size();
  }

  UseMPSS = UseMPSS &&
            Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);

  UseLargePages = UseISM || UseMPSS;
}

// Advise the kernel (memcntl MC_HAT_ADVISE) to use pages of size 'align'
// for [start, start+bytes).  Best effort; returns false on failure.
bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
  // Signal to OS that we want large pages for addresses
  // from addr, addr + bytes
  struct memcntl_mha mpss_struct;
  mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
  mpss_struct.mha_pagesize = align;
  mpss_struct.mha_flags = 0;
  if (memcntl(start, bytes, MC_HAT_ADVISE,
              (caddr_t) &mpss_struct, 0, 0) < 0) {
    debug_only(warning("Attempt to use MPSS failed."));
    return false;
  }
  return true;
}

// Reserve ISM (shared memory) backed large page memory of 'size' bytes.
// Returns the attach address or NULL on failure.
char* os::reserve_memory_special(size_t size, char* addr, bool exec) {
  // "exec" is passed in but not used.  Creating the shared image for
  // the code cache doesn't have an SHM_X executable permission to check.
  assert(UseLargePages && UseISM, "only for ISM large pages");

  char* retAddr = NULL;
  int shmid;
  key_t ismKey;

  bool warn_on_failure = UseISM &&
                        (!FLAG_IS_DEFAULT(UseLargePages)         ||
                         !FLAG_IS_DEFAULT(UseISM)                ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes)
                        );
  char msg[128];

  ismKey = IPC_PRIVATE;

  // Create a large shared memory region to attach to based on size.
  // Currently, size is the total size of the heap
  shmid = shmget(ismKey, size, SHM_R | SHM_W | IPC_CREAT);
  if (shmid == -1){
    if (warn_on_failure) {
      jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
      // NOTE(review): 'msg' is used as the format string; safe while the
      // messages contain no '%', but warning("%s", msg) would be safer.
      warning(msg);
    }
    return NULL;
  }

  // Attach to the region
  retAddr = (char *) shmat(shmid, 0, SHM_SHARE_MMU | SHM_R | SHM_W);
  int err = errno;   // save errno before shmctl() below can clobber it

  // Remove shmid. If shmat() is successful, the actual shared memory segment
  // will be deleted when it's detached by shmdt() or when the process
  // terminates. If shmat() is not successful this will remove the shared
  // segment immediately.
  shmctl(shmid, IPC_RMID, NULL);

  if (retAddr == (char *) -1) {
    if (warn_on_failure) {
      jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
      warning(msg);
    }
    return NULL;
  }
  if ((retAddr != NULL) && UseNUMAInterleaving) {
    numa_make_global(retAddr, size);
  }
  return retAddr;
}

bool os::release_memory_special(char* base, size_t bytes) {
  // detaching the SHM segment will also delete it, see reserve_memory_special()
  int rslt = shmdt(base);
  return rslt == 0;
}

size_t os::large_page_size() {
  return _large_page_size;
}

// MPSS allows application to commit large page memory on demand; with ISM
// the entire memory region must be allocated as shared memory.
bool os::can_commit_large_page_memory() {
  return UseISM ? false : true;
}

bool os::can_execute_large_page_memory() {
  return UseISM ? false : true;
}

// Sleep for 'millis' ms via poll(), restarting after EINTR and re-checking
// elapsed wall time so clock adjustments cannot shorten the sleep.  Requests
// larger than INT_MAX ms are sliced into recursive INT_MAX chunks.
static int os_sleep(jlong millis, bool interruptible) {
  const jlong limit = INT_MAX;
  jlong prevtime;
  int res;

  while (millis > limit) {
    if ((res = os_sleep(limit, interruptible)) != OS_OK)
      return res;
    millis -= limit;
  }

  // Restart interrupted polls with new parameters until the proper delay
  // has been completed.

  prevtime = getTimeMillis();

  while (millis > 0) {
    jlong newtime;

    if (!interruptible) {
      // Following assert fails for os::yield_all:
      // assert(!thread->is_Java_thread(), "must not be java thread");
      res = poll(NULL, 0, millis);
    } else {
      JavaThread *jt = JavaThread::current();

      INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
        os::Solaris::clear_interrupted);
    }

    // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
    // thread.Interrupt.

    // See c/r 6751923.  Poll can return 0 before time
    // has elapsed if time is set via clock_settime (as NTP does).
    // res == 0 if poll timed out (see man poll RETURN VALUES)
    // using the logic below checks that we really did
    // sleep at least "millis" if not we'll sleep again.
    if( ( res == 0 ) || ((res == OS_ERR) && (errno == EINTR))) {
      newtime = getTimeMillis();
      assert(newtime >= prevtime, "time moving backwards");
      /* Doing prevtime and newtime in microseconds doesn't help precision,
         and trying to round up to avoid lost milliseconds can result in a
         too-short delay. */
      millis -= newtime - prevtime;
      if(millis <= 0)
        return OS_OK;
      prevtime = newtime;
    } else
      return res;
  }

  return OS_OK;
}

// Read calls from inside the vm need to perform state transitions
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}

// Like os::read(), but restarted without a VM thread-state transition.
size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}

// Sleep on behalf of 'thread'.  JavaThreads get the _thread_blocked /
// suspend-equivalent treatment; millis <= 0 degenerates to a yield.
int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(), "thread consistency check");

  // TODO-FIXME: this should be removed.
  // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock
  // situation with a JavaThread being starved out of a lwp. The kernel doesn't seem to generate
  // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving
  // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
  // is fooled into believing that the system is making progress.  In the code below we block the
  // the watcher thread while safepoint is in progress so that it would not appear as though the
  // system is making progress.
  if (!Solaris::T2_libthread() &&
      thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
    // We now try to acquire the threads lock. Since this lock is held by the VM thread during
    // the entire safepoint, the watcher thread will line up here during the safepoint.
    Threads_lock->lock_without_safepoint_check();
    Threads_lock->unlock();
  }

  if (thread->is_Java_thread()) {
    // This is a JavaThread so we honor the _thread_blocked protocol
    // even for sleeps of 0 milliseconds. This was originally done
    // as a workaround for bug 4338139. However, now we also do it
    // to honor the suspend-equivalent protocol.

    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    int ret_code;
    if (millis <= 0) {
      thr_yield();
      ret_code = 0;
    } else {
      // The original sleep() implementation did not create an
      // OSThreadWaitState helper for sleeps of 0 milliseconds.
      // I'm preserving that decision for now.
      OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

      ret_code = os_sleep(millis, interruptible);
    }

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();

    return ret_code;
  }

  // non-JavaThread from this point on:

  if (millis <= 0) {
    thr_yield();
    return 0;
  }

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);

  return os_sleep(millis, interruptible);
}

// Uninterruptible ~1 ms sleep; used by VM-internal spin loops.
int os::naked_sleep() {
  // %% make the sleep time an integer flag. for now use 1 millisec.
  return os_sleep(1, false);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    ::sleep(100);   // ... 100 seconds at a time
  }
}

// Used to convert frequent JVM_Yield() to nops.
// Returns true (i.e. skip the yield) if the previous yield happened less
// than DontYieldALotInterval milliseconds ago.
bool os::dont_yield() {
  if (DontYieldALot) {
    static hrtime_t last_time = 0;
    hrtime_t diff = getTimeNanos() - last_time;

    if (diff < DontYieldALotInterval * 1000000)
      return true;

    last_time += diff;

    return false;
  }
  else {
    return false;
  }
}

// Caveat: Solaris os::yield() causes a thread-state transition whereas
// the linux and win32 implementations do not. This should be checked.

void os::yield() {
  // Yields to all threads with same or greater priority
  os::sleep(Thread::current(), 0, false);
}

// Note that yield semantics are defined by the scheduling class to which
// the thread currently belongs.  Typically, yield will _not yield to
// other equal or higher priority threads that reside on the dispatch queues
// of other CPUs.

os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }


// On Solaris we found that yield_all doesn't always yield to all other threads.
// There have been cases where there is a thread ready to execute but it doesn't
// get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
3626 // The 1 millisecond wait doesn't seem long enough for the kernel to issue a 3627 // SIGWAITING signal which will cause a new lwp to be created. So we count the 3628 // number of times yield_all is called in the one loop and increase the sleep 3629 // time after 8 attempts. If this fails too we increase the concurrency level 3630 // so that the starving thread would get an lwp 3631 3632 void os::yield_all(int attempts) { 3633 // Yields to all threads, including threads with lower priorities 3634 if (attempts == 0) { 3635 os::sleep(Thread::current(), 1, false); 3636 } else { 3637 int iterations = attempts % 30; 3638 if (iterations == 0 && !os::Solaris::T2_libthread()) { 3639 // thr_setconcurrency and _getconcurrency make sense only under T1. 3640 int noofLWPS = thr_getconcurrency(); 3641 if (noofLWPS < (Threads::number_of_threads() + 2)) { 3642 thr_setconcurrency(thr_getconcurrency() + 1); 3643 } 3644 } else if (iterations < 25) { 3645 os::sleep(Thread::current(), 1, false); 3646 } else { 3647 os::sleep(Thread::current(), 10, false); 3648 } 3649 } 3650 } 3651 3652 // Called from the tight loops to possibly influence time-sharing heuristics 3653 void os::loop_breaker(int attempts) { 3654 os::yield_all(attempts); 3655 } 3656 3657 3658 // Interface for setting lwp priorities. If we are using T2 libthread, 3659 // which forces the use of BoundThreads or we manually set UseBoundThreads, 3660 // all of our threads will be assigned to real lwp's. Using the thr_setprio 3661 // function is meaningless in this mode so we must adjust the real lwp's priority 3662 // The routines below implement the getting and setting of lwp priorities. 3663 // 3664 // Note: There are three priority scales used on Solaris. Java priotities 3665 // which range from 1 to 10, libthread "thr_setprio" scale which range 3666 // from 0 to 127, and the current scheduling class of the process we 3667 // are running in. This is typically from -60 to +60. 
// The setting of the lwp priorities is done after a call to thr_setprio
// so Java priorities are mapped to libthread priorities and we map from
// the latter to lwp priorities.  We don't keep priorities stored in
// Java priorities since some of our worker threads want to set priorities
// higher than all Java threads.
//
// For related information:
// (1) man -s 2 priocntl
// (2) man -s 4 priocntl
// (3) man dispadmin
// =    librt.so
// =    libthread/common/rtsched.c - thrp_setlwpprio().
// =    ps -cL <pid> ... to validate priority.
// =    sched_get_priority_min and _max
//      pthread_create
//      sched_setparam
//      pthread_setschedparam
//
// Assumptions:
// +    We assume that all threads in the process belong to the same
//      scheduling class.  IE. an homogenous process.
// +    Must be root or in IA group to change the "interactive" attribute.
//      Priocntl() will fail silently.  The only indication of failure is when
//      we read-back the value and notice that it hasn't changed.
// +    Interactive threads enter the runq at the head, non-interactive at the tail.
// +    For RT, change timeslice as well.  Invariant:
//      constant "priority integral"
//      Konst == TimeSlice * (60-Priority)
//      Given a priority, compute appropriate timeslice.
// +    Higher numerical values have higher priority.

// sched class attributes
typedef struct {
  int   schedPolicy;              // classID
  int   maxPrio;
  int   minPrio;
} SchedInfo;


// Per-class priority limits, filled in by lwp_priocntl_init() below via
// PC_GETCID queries for the TS, IA, RT and FX scheduling classes.
static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;

#ifdef ASSERT
static int  ReadBackValidate = 1;
#endif
// Scheduling class and priority range of *this* process, also filled in by
// lwp_priocntl_init().
static int  myClass     = 0;
static int  myMin       = 0;
static int  myMax       = 0;
// NOTE(review): myCur is not referenced anywhere in the visible code —
// possibly vestigial; confirm before removing.
static int  myCur       = 0;
static bool priocntl_enable = false;

static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
static int java_MaxPriority_to_os_priority = 0; // Saved mapping

// Call the version of priocntl suitable for all supported versions
// of Solaris. We need to call through this wrapper so that we can
// build on Solaris 9 and run on Solaris 8, 9 and 10.
//
// This code should be removed if we ever stop supporting Solaris 8
// and earlier releases.

static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
typedef long (*priocntl_type)(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
// Lazy-binding trampoline: starts out pointing at priocntl_stub; the first
// call through it resolves the real entry point and overwrites the pointer,
// so subsequent calls go straight to libc.
static priocntl_type priocntl_ptr = priocntl_stub;

// Stub to set the value of the real pointer, and then call the real
// function.

static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg) {
  // Try Solaris 8- name only.
  priocntl_type tmp = (priocntl_type)dlsym(RTLD_DEFAULT, "__priocntl");
  guarantee(tmp != NULL, "priocntl function not found.");
  priocntl_ptr = tmp;
  // Note: forwards PC_VERSION rather than the caller's pcver argument.
  return (*priocntl_ptr)(PC_VERSION, idtype, id, cmd, arg);
}


// lwp_priocntl_init
//
// Try to determine the priority scale for our process.
//
// Return errno or 0 if OK.
//
// Queries the TS/IA/RT/FX class ids and priority limits via priocntl
// (PC_GETCID), determines this process's own scheduling class and clamped
// priority range (PC_GETPARMS/PC_GETCLINFO), and enables priority changes.
// Only meaningful when bound threads are in use; otherwise it just installs
// the policy-1 priority table and returns.
static
int     lwp_priocntl_init ()
{
  int rslt;
  pcinfo_t ClassInfo;
  pcparms_t ParmInfo;
  int i;

  if (!UseThreadPriorities) return 0;

  // We are using Bound threads, we need to determine our priority ranges
  if (os::Solaris::T2_libthread() || UseBoundThreads) {
    // If ThreadPriorityPolicy is 1, switch tables
    if (ThreadPriorityPolicy == 1) {
      for (i = 0 ; i < CriticalPriority+1; i++)
        os::java_to_os_priority[i] = prio_policy1[i];
    }
    if (UseCriticalJavaThreadPriority) {
      // MaxPriority always maps to the FX scheduling class and criticalPrio.
      // See set_native_priority() and set_lwp_class_and_priority().
      // Save original MaxPriority mapping in case attempt to
      // use critical priority fails.
      java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
      // Set negative to distinguish from other priorities
      os::java_to_os_priority[MaxPriority] = -criticalPrio;
    }
  }
  // Not using Bound Threads, set to ThreadPolicy 1
  else {
    for ( i = 0 ; i < CriticalPriority+1; i++ ) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
    return 0;
  }

  // Get IDs for a set of well-known scheduling classes.
  // TODO-FIXME: GETCLINFO returns the current # of classes in the
  // the system.  We should have a loop that iterates over the
  // classID values, which are known to be "small" integers.

  strcpy(ClassInfo.pc_clname, "TS");
  ClassInfo.pc_cid = -1;
  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
  tsLimits.schedPolicy = ClassInfo.pc_cid;
  tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
  tsLimits.minPrio = -tsLimits.maxPrio;      // TS user priorities are symmetric about 0

  strcpy(ClassInfo.pc_clname, "IA");
  ClassInfo.pc_cid = -1;
  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
  iaLimits.schedPolicy = ClassInfo.pc_cid;
  iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
  iaLimits.minPrio = -iaLimits.maxPrio;

  strcpy(ClassInfo.pc_clname, "RT");
  ClassInfo.pc_cid = -1;
  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
  rtLimits.schedPolicy = ClassInfo.pc_cid;
  rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
  rtLimits.minPrio = 0;

  strcpy(ClassInfo.pc_clname, "FX");
  ClassInfo.pc_cid = -1;
  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
  fxLimits.schedPolicy = ClassInfo.pc_cid;
  fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
  fxLimits.minPrio = 0;

  // Query our "current" scheduling class.
  // This will normally be IA, TS or, rarely, FX or RT.
  memset(&ParmInfo, 0, sizeof(ParmInfo));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;
  myClass = ParmInfo.pc_cid;

  // We now know our scheduling classId, get specific information
  // about the class.
  ClassInfo.pc_cid = myClass;
  ClassInfo.pc_clname[0] = 0;
  rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;

  if (ThreadPriorityVerbose) {
    tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
  }

  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr)(PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  // Derive [myMin..myMax] for our class, clamped by the per-process
  // user priority limit (uprilim) where the class defines one.
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    myMin = rtLimits.minPrio;
    myMax = rtLimits.maxPrio;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
    myMin = iaLimits.minPrio;
    myMax = iaLimits.maxPrio;
    myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
    myMin = tsLimits.minPrio;
    myMax = tsLimits.maxPrio;
    myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
    fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
    myMin = fxLimits.minPrio;
    myMax = fxLimits.maxPrio;
    myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
  } else {
    // No clue - punt
    if (ThreadPriorityVerbose)
      tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
    return EINVAL;      // no clue, punt
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
  }

  priocntl_enable = true;  // Enable changing priorities
  return 0;
}

// Accessors for the class-specific parameter area inside a pcparms_t.
#define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
#define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
#define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
#define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))


// scale_to_lwp_priority
//
// Convert from the libthread "thr_setprio" scale to our current
// lwp scheduling class scale.
//
// Linearly maps x in [0..127] onto [rMin..rMax]; x == 127 is special-cased
// so the top of the thr_setprio scale always reaches rMax despite the
// integer-division round-down.
static
int     scale_to_lwp_priority (int rMin, int rMax, int x)
{
  int v;

  if (x == 127) return rMax;            // avoid round-down
  v = (((x*(rMax-rMin)))/128)+rMin;
  return v;
}


// set_lwp_class_and_priority
//
// Set the class and priority of the lwp.  This call should only
// be made when using bound threads (T2 threads are bound by default).
//
// ThreadID  - thread identifier, used for logging only
// lwpid     - target lwp; <= 0 means the lwp hasn't started yet (deferred)
// newPrio   - priority on the thr_setprio (0..127) scale if 'scale' is true,
//             otherwise already on the target class's native scale
// new_class - scheduling class id to move the lwp to
// Returns 0 on success, errno/EINVAL on failure.
int set_lwp_class_and_priority(int ThreadID, int lwpid,
                               int newPrio, int new_class, bool scale) {
  int rslt;
  int Actual, Expected, prv;  // NOTE(review): prv is unused in the visible code
  pcparms_t ParmInfo;                   // for GET-SET
#ifdef ASSERT
  pcparms_t ReadBack;                   // for readback
#endif

  // Set priority via PC_GETPARMS, update, PC_SETPARMS
  // Query current values.
  // TODO: accelerate this by eliminating the PC_GETPARMS call.
  // Cache "pcparms_t" in global ParmCache.
  // TODO: elide set-to-same-value

  // If something went wrong on init, don't change priorities.
  if ( !priocntl_enable ) {
    if (ThreadPriorityVerbose)
      tty->print_cr("Trying to set priority but init failed, ignoring");
    return EINVAL;
  }

  // If lwp hasn't started yet, just return
  // the _start routine will call us again.
  if ( lwpid <= 0 ) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
                     INTPTR_FORMAT " to %d, lwpid not set",
                     ThreadID, newPrio);
    }
    return 0;
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr ("set_lwp_class_and_priority("
                   INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
                   ThreadID, lwpid, newPrio);
  }

  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  int cur_class = ParmInfo.pc_cid;
  ParmInfo.pc_cid = (id_t)new_class;

  // Fill in the class-specific parameter block. For each class the user
  // priority limit (uprilim) only constrains us when we are staying in the
  // same class; on a class change we request the class maximum.
  if (new_class == rtLimits.schedPolicy) {
    rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
    rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
                                                       rtLimits.maxPrio, newPrio)
                               : newPrio;
    rtInfo->rt_tqsecs  = RT_NOCHANGE;
    rtInfo->rt_tqnsecs = RT_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
    }
  } else if (new_class == iaLimits.schedPolicy) {
    iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(iaLimits.maxPrio,
                              cur_class == new_class
                                ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
    iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
                                                       maxClamped, newPrio)
                               : newPrio;
    iaInfo->ia_uprilim = cur_class == new_class
                           ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
    iaInfo->ia_mode    = IA_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("IA: [%d...%d] %d->%d\n",
                    iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
    }
  } else if (new_class == tsLimits.schedPolicy) {
    tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(tsLimits.maxPrio,
                              cur_class == new_class
                                ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
    tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
                                                       maxClamped, newPrio)
                               : newPrio;
    tsInfo->ts_uprilim = cur_class == new_class
                           ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
    if (ThreadPriorityVerbose) {
      tty->print_cr("TS: [%d...%d] %d->%d\n",
                    tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
    }
  } else if (new_class == fxLimits.schedPolicy) {
    fxparms_t* fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
    int maxClamped    = MIN2(fxLimits.maxPrio,
                             cur_class == new_class
                               ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
    fxInfo->fx_upri   = scale ? scale_to_lwp_priority(fxLimits.minPrio,
                                                      maxClamped, newPrio)
                              : newPrio;
    fxInfo->fx_uprilim = cur_class == new_class
                           ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
    fxInfo->fx_tqsecs  = FX_NOCHANGE;
    fxInfo->fx_tqnsecs = FX_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("FX: [%d...%d] %d->%d\n",
                    fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
    }
  } else {
    if (ThreadPriorityVerbose) {
      tty->print_cr("Unknown new scheduling class %d\n", new_class);
    }
    return EINVAL;    // no clue, punt
  }

  rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
  if (ThreadPriorityVerbose && rslt) {
    tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
  }
  if (rslt < 0) return errno;

#ifdef ASSERT
  // Sanity check: read back what we just attempted to set.
  // In theory it could have changed in the interim ...
  //
  // The priocntl system call is tricky.
  // Sometimes it'll validate the priority value argument and
  // return EINVAL if unhappy.  At other times it fails silently.
  // Readbacks are prudent.

  if (!ReadBackValidate) return 0;

  memset(&ReadBack, 0, sizeof(pcparms_t));
  ReadBack.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
  assert(rslt >= 0, "priocntl failed");
  Actual = Expected = 0xBAD;
  assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    Actual   = RTPRI(ReadBack)->rt_pri;
    Expected = RTPRI(ParmInfo)->rt_pri;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    Actual   = IAPRI(ReadBack)->ia_upri;
    Expected = IAPRI(ParmInfo)->ia_upri;
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    Actual   = TSPRI(ReadBack)->ts_upri;
    Expected = TSPRI(ParmInfo)->ts_upri;
  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
    Actual   = FXPRI(ReadBack)->fx_upri;
    Expected = FXPRI(ParmInfo)->fx_upri;
  } else {
    if (ThreadPriorityVerbose) {
      tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
                    ParmInfo.pc_cid);
    }
  }

  // A silent priocntl failure shows up here as a mismatch; report only,
  // do not fail — the mismatch is tolerated by design (see comment above).
  if (Actual != Expected) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
                     lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
    }
  }
#endif

  return 0;
}

// Solaris only gives access to 128 real priorities at a time,
// so we expand Java's ten to fill this range. This would be better
// if we dynamically adjusted relative priorities.
//
// The ThreadPriorityPolicy option allows us to select 2 different
// priority scales.
//
// ThreadPriorityPolicy=0
// Since the Solaris' default priority is MaximumPriority, we do not
// set a priority lower than Max unless a priority lower than
// NormPriority is requested.
//
// ThreadPriorityPolicy=1
// This mode causes the priority table to get filled with
// linear values.  NormPriority gets mapped to 50% of the
// Maximum priority and so on.  This will cause VM threads
// to get unfair treatment against other Solaris processes
// which do not explicitly alter their thread priorities.
//

// Java priority (1..10, plus the internal CriticalPriority slot 11) to
// thr_setprio-scale (0..127) mapping; index 0 is a poisoned sentinel.
// lwp_priocntl_init() may overwrite this table with prio_policy1, and the
// MaxPriority slot with -criticalPrio (negative = "use critical FX class").
int os::java_to_os_priority[CriticalPriority + 1] = {
  -99999,         // 0 Entry should never be used

  0,              // 1 MinPriority
  32,             // 2
  64,             // 3

  96,             // 4
  127,            // 5 NormPriority
  127,            // 6

  127,            // 7
  127,            // 8
  127,            // 9 NearMaxPriority

  127,            // 10 MaxPriority

  -criticalPrio   // 11 CriticalPriority
};

// Apply 'newpri' (thr_setprio scale, or the -criticalPrio sentinel) to the
// thread via thr_setprio and, for bound threads, to its lwp as well.
// Returns OS_OK if every applicable call succeeded.
OSReturn os::set_native_priority(Thread* thread, int newpri) {
  OSThread* osthread = thread->osthread();

  // Save requested priority in case the thread hasn't been started
  osthread->set_native_priority(newpri);

  // Check for critical priority request
  bool fxcritical = false;
  if (newpri == -criticalPrio) {
    fxcritical = true;
    newpri = criticalPrio;
  }

  assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
  if (!UseThreadPriorities) return OS_OK;

  int status = 0;

  if (!fxcritical) {
    // Use thr_setprio only if we have a priority that thr_setprio understands
    status = thr_setprio(thread->osthread()->thread_id(), newpri);
  }

  if (os::Solaris::T2_libthread() ||
      (UseBoundThreads && osthread->is_vm_created())) {
    int lwp_status =
      set_lwp_class_and_priority(osthread->thread_id(),
                                 osthread->lwp_id(),
                                 newpri,
                                 fxcritical ? fxLimits.schedPolicy : myClass,
                                 !fxcritical);  // critical prio is already on the FX scale
    if (lwp_status != 0 && fxcritical) {
      // Try again, this time without changing the scheduling class
      newpri = java_MaxPriority_to_os_priority;
      lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
                                              osthread->lwp_id(),
                                              newpri, myClass, false);
    }
    status |= lwp_status;
  }
  return (status == 0) ? OS_OK : OS_ERR;
}


// Read back the thread's libthread priority into *priority_ptr;
// reports NormalPriority when thread priorities are disabled.
OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  int p;
  if ( !UseThreadPriorities ) {
    *priority_ptr = NormalPriority;
    return OS_OK;
  }
  int status = thr_getprio(thread->osthread()->thread_id(), &p);
  if (status != 0) {
    return OS_ERR;
  }
  *priority_ptr = p;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {
  schedctl_start(schedctl_init());
}

// Interrupt 'thread': set its interrupted flag, wake any sleep/park it is
// blocked in, and deliver SIGinterrupt to unwedge interruptible I/O.
void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  int isInterrupted = osthread->interrupted();
  if (!isInterrupted) {
    osthread->set_interrupted(true);
    // Flag must be globally visible before the wakeups below.
    OrderAccess::fence();
    // os::sleep() is implemented with either poll (NULL,0,timeout) or
    // by parking on _SleepEvent.  If the former, thr_kill will unwedge
    // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper.
    ParkEvent * const slp = thread->_SleepEvent ;
    if (slp != NULL) slp->unpark() ;
  }

  // For JSR166:  unpark after setting status but before thr_kill -dl
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  // Handle interruptible wait() ...
  ParkEvent * const ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

  // When events are used everywhere for os::sleep, then this thr_kill
  // will only be needed if UseVMInterruptibleIO is true.

  if (!isInterrupted) {
    int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
    assert_status(status == 0, status, "thr_kill");

    // Bump thread interruption counter
    RuntimeService::record_thread_interrupt_signaled_count();
  }
}


// Query (and optionally clear) the thread's interrupted flag.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  bool res = osthread->interrupted();

  // NOTE that since there is no "lock" around these two operations,
  // there is the possibility that the interrupted flag will be
  // "false" but that the interrupt event will be set. This is
  // intentional. The effect of this is that Object.wait() will appear
  // to have a spurious wakeup, which is not harmful, and the
  // possibility is so rare that it is not worth the added complexity
  // to add yet another lock.  It has also been recommended not to put
  // the interrupted flag into the os::Solaris::Event structure,
  // because it hides the issue.
  if (res && clear_interrupted) {
    osthread->set_interrupted(false);
  }
  return res;
}


// No OS-level statistics are reported on Solaris.
void os::print_statistics() {
}

// Print 'title'/'message' to stderr and block until a byte is read from
// stdin; returns nonzero iff the reply starts with 'y' or 'Y'.
int os::message_box(const char* title, const char* message) {
  int i;
  fdStream err(defaultStream::error_fd());
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();
  err.print_raw_cr(title);
  for (i = 0; i < 78; i++) err.print_raw("-");
  err.cr();
  err.print_raw_cr(message);
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();

  char buf[16];
  // Prevent process from exiting upon "read error" without consuming all CPU
  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }

  return buf[0] == 'y' || buf[0] == 'Y';
}

// A lightweight implementation that does not suspend the target thread and
// thus returns only a hint. Used for profiling only!
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher and the Threads lock is owned.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  // For now, is only used to profile the VM Thread
  assert(thread->is_VM_thread(), "Can only be called for VMThread");
  ExtendedPC epc;

  GetThreadPC_Callback  cb(ProfileVM_lock);
  OSThread *osthread = thread->osthread();
  const int time_to_wait = 400; // 400ms wait for initial response
  int status = cb.interrupt(thread, time_to_wait);

  if (cb.is_done() ) {
    epc = cb.addr();
  } else {
    DEBUG_ONLY(tty->print_cr("Failed to get pc for thread: %d got %d status",
                             osthread->thread_id(), status););
    // epc is already NULL
  }
  return epc;
}


// This does not do anything on Solaris. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
// Invokes the Java call stub directly; Solaris needs no SEH-style wrapping.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  f(value, method, args, thread);
}

// This routine may be used by user applications as a "hook" to catch signals.
// The user-defined signal handler must pass unrecognized signals to this
// routine, and if it returns true (non-zero), then the signal handler must
// return immediately.  If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
//
// If this routine returns false, it is OK to call it again.  This allows
// the user-defined signal handler to perform checks either before or after
// the VM performs its own checks.  Naturally, the user code would be making
// a serious error if it tried to handle an exception (such as a null check
// or breakpoint) that the VM was generating for its own correct operation.
//
// This routine may recognize any of the following kinds of signals:
// SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
// os::Solaris::SIGasync
// It should be consulted by handlers for any of those signals.
// It explicitly does not recognize os::Solaris::SIGinterrupt
//
// The caller of this routine must pass in the three arguments supplied
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
// field of the structure passed to sigaction().  This routine assumes that
// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
//
// Note that the VM will print warnings if it detects conflicting signal
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
//
extern "C" JNIEXPORT int
JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
                          int abort_if_unrecognized);


// The VM's primary sa_sigaction entry point: forwards everything to
// JVM_handle_solaris_signal with abort_if_unrecognized = true.
void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
  JVM_handle_solaris_signal(sig, info, ucVoid, true);
}

/* Do not delete - if guarantee is ever removed,  a signal handler (even empty)
   is needed to provoke threads blocked on IO to return an EINTR
   Note: this explicitly does NOT call JVM_handle_solaris_signal and
   does NOT participate in signal chaining due to requirement for
   NOT setting SA_RESTART to make EINTR work. */
extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
   // Any pre-existing handler for the interrupt signal is a fatal conflict:
   // chaining would reintroduce SA_RESTART semantics.
   if (UseSignalChaining) {
      struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
      if (actp && actp->sa_handler) {
        vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
      }
   }
}

// This boolean allows users to forward their own non-matching signals
// to JVM_handle_solaris_signal, harmlessly.
bool os::Solaris::signal_handlers_are_installed = false;

// For signal-chaining
bool os::Solaris::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Solaris::get_signal_action = NULL;

// Look up the handler that was installed before ours for 'sig':
// first ask libjsig (if loaded), then fall back to the handlers we
// saved ourselves in save_preinstalled_handler().
struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
  struct sigaction *actp = NULL;

  if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
    // Retrieve the old signal handler from libjsig
    actp = (*get_signal_action)(sig);
  }
  if (actp == NULL) {
    // Retrieve the preinstalled signal handler from jvm
    actp = get_preinstalled_handler(sig);
  }

  return actp;
}

// Invoke the previously-installed handler described by 'actp', honoring
// its SA_SIGINFO/SA_NODEFER/SA_RESETHAND flags and signal mask.
// Returns false for SIG_DFL (let the VM treat the signal as unexpected),
// true otherwise (the signal has been taken care of).
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automaticlly block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand;
    sa_sigaction_t sa;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    thr_sigsetmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}

// Public entry: dispatch 'sig' to the application's chained handler, if any.
// Returns true iff a chained handler was found and invoked.
bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  bool chained = false;
  // signal-chaining
  if (UseSignalChaining) {
    struct sigaction *actp = get_chained_signal_action(sig);
    if (actp != NULL) {
      chained = call_chained_handler(actp, sig, siginfo, context);
    }
  }
  return chained;
}

// Return the sigaction the VM saved for 'sig' before overwriting it,
// or NULL if none was saved.
struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  if (preinstalled_sigs[sig] != 0) {
    return &chainedsigactions[sig];
  }
  return NULL;
}

// Remember the pre-existing sigaction for 'sig' so chained_handler()
// can forward to it later.
void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {

  assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  chainedsigactions[sig] = oldAct;
  preinstalled_sigs[sig] = 1;
}

// Install the VM's handler for 'sig'. If a user handler is already in
// place, either defer to it (AllowUserSignalHandlers), save it for
// chaining (oktochain), or abort startup.
void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);
  void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
                                      : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      if (oktochain) {
        // save the old handler in jvm
        save_preinstalled_handler(sig, oldAct);
      } else {
        vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
      }
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;

  sigAct.sa_sigaction = signalHandler;
  // Handle SIGSEGV on alternate signal stack if
  // not using stack banging
  if (!UseStackBanging && sig == SIGSEGV) {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
  // Interruptible i/o requires SA_RESTART cleared so EINTR
  // is returned instead of restarting system calls
  } else if (sig == os::Solaris::SIGinterrupt()) {
    sigemptyset(&sigAct.sa_mask);
    sigAct.sa_handler = NULL;
    sigAct.sa_flags = SA_SIGINFO;
    sigAct.sa_sigaction = sigINTRHandler;
  } else {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  }
  // Record the flags we chose so check_signal_handler() can detect tampering.
  os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);

  sigaction(sig, &sigAct, &oldAct);

  void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                       : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}


// Run check_signal_handler(sig) only once per signal.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Solaris::check_signal_handler(sig)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {
  // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  // thereby preventing a NULL checks.
  if(!check_addr0_done) check_addr0_done = check_addr0(tty);

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check for the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // See comments above for using JVM1/JVM2 and UseAltSigs
  DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  DO_SIGNAL_CHECK(os::Solaris::SIGasync());

}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

static os_sigaction_t os_sigaction = NULL;

// Verify that the handler currently installed for 'sig' is still the one
// the VM expects (and with the same sa_flags); warn once and stop checking
// a signal after its first mismatch.
void os::Solaris::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler) ;


  switch(sig) {
    case SIGSEGV:
    case SIGBUS:
    case SIGFPE:
    case SIGPIPE:
    case SIGXFSZ:
    case SIGILL:
      jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      break;

    case SHUTDOWN1_SIGNAL:
    case SHUTDOWN2_SIGNAL:
    case SHUTDOWN3_SIGNAL:
    case BREAK_SIGNAL:
      jvmHandler = (address)user_handler();
      break;

    default:
      int intrsig = os::Solaris::SIGinterrupt();
      int asynsig = os::Solaris::SIGasync();

      if (sig == intrsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
      } else if (sig == asynsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      } else {
        // Not a signal the VM owns; nothing to verify.
        return;
      }
      break;
  }


  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Print all the signal handler state
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }

}

void os::Solaris::install_signal_handlers() {
  bool libjsigdone = false;
  signal_handlers_are_installed = true;

  // signal-chaining
  typedef void (*signal_setting_t)();
4615 signal_setting_t begin_signal_setting = NULL; 4616 signal_setting_t end_signal_setting = NULL; 4617 begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t, 4618 dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting")); 4619 if (begin_signal_setting != NULL) { 4620 end_signal_setting = CAST_TO_FN_PTR(signal_setting_t, 4621 dlsym(RTLD_DEFAULT, "JVM_end_signal_setting")); 4622 get_signal_action = CAST_TO_FN_PTR(get_signal_t, 4623 dlsym(RTLD_DEFAULT, "JVM_get_signal_action")); 4624 get_libjsig_version = CAST_TO_FN_PTR(version_getting_t, 4625 dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version")); 4626 libjsig_is_loaded = true; 4627 if (os::Solaris::get_libjsig_version != NULL) { 4628 libjsigversion = (*os::Solaris::get_libjsig_version)(); 4629 } 4630 assert(UseSignalChaining, "should enable signal-chaining"); 4631 } 4632 if (libjsig_is_loaded) { 4633 // Tell libjsig jvm is setting signal handlers 4634 (*begin_signal_setting)(); 4635 } 4636 4637 set_signal_handler(SIGSEGV, true, true); 4638 set_signal_handler(SIGPIPE, true, true); 4639 set_signal_handler(SIGXFSZ, true, true); 4640 set_signal_handler(SIGBUS, true, true); 4641 set_signal_handler(SIGILL, true, true); 4642 set_signal_handler(SIGFPE, true, true); 4643 4644 4645 if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) { 4646 4647 // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so 4648 // can not register overridable signals which might be > 32 4649 if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) { 4650 // Tell libjsig jvm has finished setting signal handlers 4651 (*end_signal_setting)(); 4652 libjsigdone = true; 4653 } 4654 } 4655 4656 // Never ok to chain our SIGinterrupt 4657 set_signal_handler(os::Solaris::SIGinterrupt(), true, false); 4658 set_signal_handler(os::Solaris::SIGasync(), true, true); 4659 4660 if (libjsig_is_loaded && !libjsigdone) { 4661 // Tell libjsig jvm finishes setting signal handlers 4662 (*end_signal_setting)(); 4663 } 4664 4665 // We 
don't activate signal checker if libjsig is in place, we trust ourselves 4666 // and if UserSignalHandler is installed all bets are off. 4667 // Log that signal checking is off only if -verbose:jni is specified. 4668 if (CheckJNICalls) { 4669 if (libjsig_is_loaded) { 4670 if (PrintJNIResolving) { 4671 tty->print_cr("Info: libjsig is activated, all active signal checking is disabled"); 4672 } 4673 check_signals = false; 4674 } 4675 if (AllowUserSignalHandlers) { 4676 if (PrintJNIResolving) { 4677 tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled"); 4678 } 4679 check_signals = false; 4680 } 4681 } 4682 } 4683 4684 4685 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...); 4686 4687 const char * signames[] = { 4688 "SIG0", 4689 "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP", 4690 "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS", 4691 "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM", 4692 "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH", 4693 "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT", 4694 "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU", 4695 "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW", 4696 "SIGCANCEL", "SIGLOST" 4697 }; 4698 4699 const char* os::exception_name(int exception_code, char* buf, size_t size) { 4700 if (0 < exception_code && exception_code <= SIGRTMAX) { 4701 // signal 4702 if (exception_code < sizeof(signames)/sizeof(const char*)) { 4703 jio_snprintf(buf, size, "%s", signames[exception_code]); 4704 } else { 4705 jio_snprintf(buf, size, "SIG%d", exception_code); 4706 } 4707 return buf; 4708 } else { 4709 return NULL; 4710 } 4711 } 4712 4713 // (Static) wrappers for the new libthread API 4714 int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate; 4715 int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate; 4716 int_fnP_thread_t_i os::Solaris::_thr_setmutator; 4717 int_fnP_thread_t 
os::Solaris::_thr_suspend_mutator; 4718 int_fnP_thread_t os::Solaris::_thr_continue_mutator; 4719 4720 // (Static) wrapper for getisax(2) call. 4721 os::Solaris::getisax_func_t os::Solaris::_getisax = 0; 4722 4723 // (Static) wrappers for the liblgrp API 4724 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home; 4725 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init; 4726 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini; 4727 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root; 4728 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children; 4729 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources; 4730 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps; 4731 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale; 4732 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0; 4733 4734 // (Static) wrapper for meminfo() call. 4735 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0; 4736 4737 static address resolve_symbol_lazy(const char* name) { 4738 address addr = (address) dlsym(RTLD_DEFAULT, name); 4739 if(addr == NULL) { 4740 // RTLD_DEFAULT was not defined on some early versions of 2.5.1 4741 addr = (address) dlsym(RTLD_NEXT, name); 4742 } 4743 return addr; 4744 } 4745 4746 static address resolve_symbol(const char* name) { 4747 address addr = resolve_symbol_lazy(name); 4748 if(addr == NULL) { 4749 fatal(dlerror()); 4750 } 4751 return addr; 4752 } 4753 4754 4755 4756 // isT2_libthread() 4757 // 4758 // Routine to determine if we are currently using the new T2 libthread. 4759 // 4760 // We determine if we are using T2 by reading /proc/self/lstatus and 4761 // looking for a thread with the ASLWP bit set. If we find this status 4762 // bit set, we must assume that we are NOT using T2. The T2 team 4763 // has approved this algorithm. 4764 // 4765 // We need to determine if we are running with the new T2 libthread 4766 // since setting native thread priorities is handled differently 4767 // when using this library. 
All threads created using T2 are bound 4768 // threads. Calling thr_setprio is meaningless in this case. 4769 // 4770 bool isT2_libthread() { 4771 static prheader_t * lwpArray = NULL; 4772 static int lwpSize = 0; 4773 static int lwpFile = -1; 4774 lwpstatus_t * that; 4775 char lwpName [128]; 4776 bool isT2 = false; 4777 4778 #define ADR(x) ((uintptr_t)(x)) 4779 #define LWPINDEX(ary,ix) ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1)))) 4780 4781 lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0); 4782 if (lwpFile < 0) { 4783 if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n"); 4784 return false; 4785 } 4786 lwpSize = 16*1024; 4787 for (;;) { 4788 ::lseek64 (lwpFile, 0, SEEK_SET); 4789 lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal); 4790 if (::read(lwpFile, lwpArray, lwpSize) < 0) { 4791 if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n"); 4792 break; 4793 } 4794 if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) { 4795 // We got a good snapshot - now iterate over the list. 4796 int aslwpcount = 0; 4797 for (int i = 0; i < lwpArray->pr_nent; i++ ) { 4798 that = LWPINDEX(lwpArray,i); 4799 if (that->pr_flags & PR_ASLWP) { 4800 aslwpcount++; 4801 } 4802 } 4803 if (aslwpcount == 0) isT2 = true; 4804 break; 4805 } 4806 lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize; 4807 FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal); // retry. 
4808 } 4809 4810 FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal); 4811 ::close (lwpFile); 4812 if (ThreadPriorityVerbose) { 4813 if (isT2) tty->print_cr("We are running with a T2 libthread\n"); 4814 else tty->print_cr("We are not running with a T2 libthread\n"); 4815 } 4816 return isT2; 4817 } 4818 4819 4820 void os::Solaris::libthread_init() { 4821 address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators"); 4822 4823 // Determine if we are running with the new T2 libthread 4824 os::Solaris::set_T2_libthread(isT2_libthread()); 4825 4826 lwp_priocntl_init(); 4827 4828 // RTLD_DEFAULT was not defined on some early versions of 5.5.1 4829 if(func == NULL) { 4830 func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators"); 4831 // Guarantee that this VM is running on an new enough OS (5.6 or 4832 // later) that it will have a new enough libthread.so. 4833 guarantee(func != NULL, "libthread.so is too old."); 4834 } 4835 4836 // Initialize the new libthread getstate API wrappers 4837 func = resolve_symbol("thr_getstate"); 4838 os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func)); 4839 4840 func = resolve_symbol("thr_setstate"); 4841 os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func)); 4842 4843 func = resolve_symbol("thr_setmutator"); 4844 os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func)); 4845 4846 func = resolve_symbol("thr_suspend_mutator"); 4847 os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func)); 4848 4849 func = resolve_symbol("thr_continue_mutator"); 4850 os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func)); 4851 4852 int size; 4853 void (*handler_info_func)(address *, int *); 4854 handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo")); 4855 handler_info_func(&handler_start, &size); 4856 handler_end = handler_start + size; 4857 } 4858 4859 4860 int_fnP_mutex_tP 
os::Solaris::_mutex_lock; 4861 int_fnP_mutex_tP os::Solaris::_mutex_trylock; 4862 int_fnP_mutex_tP os::Solaris::_mutex_unlock; 4863 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init; 4864 int_fnP_mutex_tP os::Solaris::_mutex_destroy; 4865 int os::Solaris::_mutex_scope = USYNC_THREAD; 4866 4867 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait; 4868 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait; 4869 int_fnP_cond_tP os::Solaris::_cond_signal; 4870 int_fnP_cond_tP os::Solaris::_cond_broadcast; 4871 int_fnP_cond_tP_i_vP os::Solaris::_cond_init; 4872 int_fnP_cond_tP os::Solaris::_cond_destroy; 4873 int os::Solaris::_cond_scope = USYNC_THREAD; 4874 4875 void os::Solaris::synchronization_init() { 4876 if(UseLWPSynchronization) { 4877 os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock"))); 4878 os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock"))); 4879 os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock"))); 4880 os::Solaris::set_mutex_init(lwp_mutex_init); 4881 os::Solaris::set_mutex_destroy(lwp_mutex_destroy); 4882 os::Solaris::set_mutex_scope(USYNC_THREAD); 4883 4884 os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait"))); 4885 os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait"))); 4886 os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal"))); 4887 os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast"))); 4888 os::Solaris::set_cond_init(lwp_cond_init); 4889 os::Solaris::set_cond_destroy(lwp_cond_destroy); 4890 os::Solaris::set_cond_scope(USYNC_THREAD); 4891 } 4892 else { 4893 os::Solaris::set_mutex_scope(USYNC_THREAD); 4894 os::Solaris::set_cond_scope(USYNC_THREAD); 4895 4896 if(UsePthreads) { 4897 
os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock"))); 4898 os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock"))); 4899 os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock"))); 4900 os::Solaris::set_mutex_init(pthread_mutex_default_init); 4901 os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy"))); 4902 4903 os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait"))); 4904 os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait"))); 4905 os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal"))); 4906 os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast"))); 4907 os::Solaris::set_cond_init(pthread_cond_default_init); 4908 os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy"))); 4909 } 4910 else { 4911 os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock"))); 4912 os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock"))); 4913 os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock"))); 4914 os::Solaris::set_mutex_init(::mutex_init); 4915 os::Solaris::set_mutex_destroy(::mutex_destroy); 4916 4917 os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait"))); 4918 os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait"))); 4919 os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal"))); 4920 os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, 
resolve_symbol("cond_broadcast"))); 4921 os::Solaris::set_cond_init(::cond_init); 4922 os::Solaris::set_cond_destroy(::cond_destroy); 4923 } 4924 } 4925 } 4926 4927 bool os::Solaris::liblgrp_init() { 4928 void *handle = dlopen("liblgrp.so.1", RTLD_LAZY); 4929 if (handle != NULL) { 4930 os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home"))); 4931 os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init"))); 4932 os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini"))); 4933 os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root"))); 4934 os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children"))); 4935 os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources"))); 4936 os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps"))); 4937 os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t, 4938 dlsym(handle, "lgrp_cookie_stale"))); 4939 4940 lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER); 4941 set_lgrp_cookie(c); 4942 return true; 4943 } 4944 return false; 4945 } 4946 4947 void os::Solaris::misc_sym_init() { 4948 address func; 4949 4950 // getisax 4951 func = resolve_symbol_lazy("getisax"); 4952 if (func != NULL) { 4953 os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func); 4954 } 4955 4956 // meminfo 4957 func = resolve_symbol_lazy("meminfo"); 4958 if (func != NULL) { 4959 os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func)); 4960 } 4961 } 4962 4963 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) { 4964 assert(_getisax != NULL, "_getisax not set"); 4965 return _getisax(array, n); 4966 } 4967 4968 // Symbol doesn't exist in Solaris 8 pset.h 4969 #ifndef PS_MYID 4970 #define PS_MYID -3 4971 #endif 4972 4973 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem); 4974 typedef 
long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem); 4975 static pset_getloadavg_type pset_getloadavg_ptr = NULL; 4976 4977 void init_pset_getloadavg_ptr(void) { 4978 pset_getloadavg_ptr = 4979 (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg"); 4980 if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) { 4981 warning("pset_getloadavg function not found"); 4982 } 4983 } 4984 4985 int os::Solaris::_dev_zero_fd = -1; 4986 4987 // this is called _before_ the global arguments have been parsed 4988 void os::init(void) { 4989 _initial_pid = getpid(); 4990 4991 max_hrtime = first_hrtime = gethrtime(); 4992 4993 init_random(1234567); 4994 4995 page_size = sysconf(_SC_PAGESIZE); 4996 if (page_size == -1) 4997 fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)", 4998 strerror(errno))); 4999 init_page_sizes((size_t) page_size); 5000 5001 Solaris::initialize_system_info(); 5002 5003 // Initialize misc. symbols as soon as possible, so we can use them 5004 // if we need them. 5005 Solaris::misc_sym_init(); 5006 5007 int fd = ::open("/dev/zero", O_RDWR); 5008 if (fd < 0) { 5009 fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno))); 5010 } else { 5011 Solaris::set_dev_zero_fd(fd); 5012 5013 // Close on exec, child won't inherit. 5014 fcntl(fd, F_SETFD, FD_CLOEXEC); 5015 } 5016 5017 clock_tics_per_sec = CLK_TCK; 5018 5019 // check if dladdr1() exists; dladdr1 can provide more information than 5020 // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9 5021 // and is available on linker patches for 5.7 and 5.8. 5022 // libdl.so must have been loaded, this call is just an entry lookup 5023 void * hdl = dlopen("libdl.so", RTLD_NOW); 5024 if (hdl) 5025 dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1")); 5026 5027 // (Solaris only) this switches to calls that actually do locking. 
5028 ThreadCritical::initialize(); 5029 5030 main_thread = thr_self(); 5031 5032 // Constant minimum stack size allowed. It must be at least 5033 // the minimum of what the OS supports (thr_min_stack()), and 5034 // enough to allow the thread to get to user bytecode execution. 5035 Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed); 5036 // If the pagesize of the VM is greater than 8K determine the appropriate 5037 // number of initial guard pages. The user can change this with the 5038 // command line arguments, if needed. 5039 if (vm_page_size() > 8*K) { 5040 StackYellowPages = 1; 5041 StackRedPages = 1; 5042 StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size(); 5043 } 5044 } 5045 5046 // To install functions for atexit system call 5047 extern "C" { 5048 static void perfMemory_exit_helper() { 5049 perfMemory_exit(); 5050 } 5051 } 5052 5053 // this is called _after_ the global arguments have been parsed 5054 jint os::init_2(void) { 5055 // try to enable extended file IO ASAP, see 6431278 5056 os::Solaris::try_enable_extended_io(); 5057 5058 // Allocate a single page and mark it as readable for safepoint polling. Also 5059 // use this first mmap call to check support for MAP_ALIGN. 
5060 address polling_page = (address)Solaris::mmap_chunk((char*)page_size, 5061 page_size, 5062 MAP_PRIVATE | MAP_ALIGN, 5063 PROT_READ); 5064 if (polling_page == NULL) { 5065 has_map_align = false; 5066 polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE, 5067 PROT_READ); 5068 } 5069 5070 os::set_polling_page(polling_page); 5071 5072 #ifndef PRODUCT 5073 if( Verbose && PrintMiscellaneous ) 5074 tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page); 5075 #endif 5076 5077 if (!UseMembar) { 5078 address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE ); 5079 guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page"); 5080 os::set_memory_serialize_page( mem_serialize_page ); 5081 5082 #ifndef PRODUCT 5083 if(Verbose && PrintMiscellaneous) 5084 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page); 5085 #endif 5086 } 5087 5088 os::large_page_init(); 5089 5090 // Check minimum allowable stack size for thread creation and to initialize 5091 // the java system classes, including StackOverflowError - depends on page 5092 // size. Add a page for compiler2 recursion in main thread. 5093 // Add in 2*BytesPerWord times page size to account for VM stack during 5094 // class initialization depending on 32 or 64 bit VM. 
5095 os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed, 5096 (size_t)(StackYellowPages+StackRedPages+StackShadowPages+ 5097 2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size); 5098 5099 size_t threadStackSizeInBytes = ThreadStackSize * K; 5100 if (threadStackSizeInBytes != 0 && 5101 threadStackSizeInBytes < os::Solaris::min_stack_allowed) { 5102 tty->print_cr("\nThe stack size specified is too small, Specify at least %dk", 5103 os::Solaris::min_stack_allowed/K); 5104 return JNI_ERR; 5105 } 5106 5107 // For 64kbps there will be a 64kb page size, which makes 5108 // the usable default stack size quite a bit less. Increase the 5109 // stack for 64kb (or any > than 8kb) pages, this increases 5110 // virtual memory fragmentation (since we're not creating the 5111 // stack on a power of 2 boundary. The real fix for this 5112 // should be to fix the guard page mechanism. 5113 5114 if (vm_page_size() > 8*K) { 5115 threadStackSizeInBytes = (threadStackSizeInBytes != 0) 5116 ? threadStackSizeInBytes + 5117 ((StackYellowPages + StackRedPages) * vm_page_size()) 5118 : 0; 5119 ThreadStackSize = threadStackSizeInBytes/K; 5120 } 5121 5122 // Make the stack size a multiple of the page size so that 5123 // the yellow/red zones can be guarded. 5124 JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, 5125 vm_page_size())); 5126 5127 Solaris::libthread_init(); 5128 5129 if (UseNUMA) { 5130 if (!Solaris::liblgrp_init()) { 5131 UseNUMA = false; 5132 } else { 5133 size_t lgrp_limit = os::numa_get_groups_num(); 5134 int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal); 5135 size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit); 5136 FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal); 5137 if (lgrp_num < 2) { 5138 // There's only one locality group, disable NUMA. 5139 UseNUMA = false; 5140 } 5141 } 5142 // ISM is not compatible with the NUMA allocator - it always allocates 5143 // pages round-robin across the lgroups. 
5144 if (UseNUMA && UseLargePages && UseISM) { 5145 if (!FLAG_IS_DEFAULT(UseNUMA)) { 5146 if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseISM)) { 5147 UseLargePages = false; 5148 } else { 5149 warning("UseNUMA is not compatible with ISM large pages, disabling NUMA allocator"); 5150 UseNUMA = false; 5151 } 5152 } else { 5153 UseNUMA = false; 5154 } 5155 } 5156 if (!UseNUMA && ForceNUMA) { 5157 UseNUMA = true; 5158 } 5159 } 5160 5161 Solaris::signal_sets_init(); 5162 Solaris::init_signal_mem(); 5163 Solaris::install_signal_handlers(); 5164 5165 if (libjsigversion < JSIG_VERSION_1_4_1) { 5166 Maxlibjsigsigs = OLDMAXSIGNUM; 5167 } 5168 5169 // initialize synchronization primitives to use either thread or 5170 // lwp synchronization (controlled by UseLWPSynchronization) 5171 Solaris::synchronization_init(); 5172 5173 if (MaxFDLimit) { 5174 // set the number of file descriptors to max. print out error 5175 // if getrlimit/setrlimit fails but continue regardless. 5176 struct rlimit nbr_files; 5177 int status = getrlimit(RLIMIT_NOFILE, &nbr_files); 5178 if (status != 0) { 5179 if (PrintMiscellaneous && (Verbose || WizardMode)) 5180 perror("os::init_2 getrlimit failed"); 5181 } else { 5182 nbr_files.rlim_cur = nbr_files.rlim_max; 5183 status = setrlimit(RLIMIT_NOFILE, &nbr_files); 5184 if (status != 0) { 5185 if (PrintMiscellaneous && (Verbose || WizardMode)) 5186 perror("os::init_2 setrlimit failed"); 5187 } 5188 } 5189 } 5190 5191 // Calculate theoretical max. size of Threads to guard gainst 5192 // artifical out-of-memory situations, where all available address- 5193 // space has been reserved by thread stacks. Default stack size is 1Mb. 5194 size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ? 5195 JavaThread::stack_size_at_create() : (1*K*K); 5196 assert(pre_thread_stack_size != 0, "Must have a stack"); 5197 // Solaris has a maximum of 4Gb of user programs. Calculate the thread limit when 5198 // we should start doing Virtual Memory banging. 
Currently when the threads will 5199 // have used all but 200Mb of space. 5200 size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K); 5201 Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size; 5202 5203 // at-exit methods are called in the reverse order of their registration. 5204 // In Solaris 7 and earlier, atexit functions are called on return from 5205 // main or as a result of a call to exit(3C). There can be only 32 of 5206 // these functions registered and atexit() does not set errno. In Solaris 5207 // 8 and later, there is no limit to the number of functions registered 5208 // and atexit() sets errno. In addition, in Solaris 8 and later, atexit 5209 // functions are called upon dlclose(3DL) in addition to return from main 5210 // and exit(3C). 5211 5212 if (PerfAllowAtExitRegistration) { 5213 // only register atexit functions if PerfAllowAtExitRegistration is set. 5214 // atexit functions can be delayed until process exit time, which 5215 // can be problematic for embedded VM situations. Embedded VMs should 5216 // call DestroyJavaVM() to assure that VM resources are released. 5217 5218 // note: perfMemory_exit_helper atexit function may be removed in 5219 // the future if the appropriate cleanup code can be added to the 5220 // VM_Exit VMOperation's doit method. 
5221 if (atexit(perfMemory_exit_helper) != 0) { 5222 warning("os::init2 atexit(perfMemory_exit_helper) failed"); 5223 } 5224 } 5225 5226 // Init pset_loadavg function pointer 5227 init_pset_getloadavg_ptr(); 5228 5229 return JNI_OK; 5230 } 5231 5232 void os::init_3(void) { 5233 return; 5234 } 5235 5236 // Mark the polling page as unreadable 5237 void os::make_polling_page_unreadable(void) { 5238 if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 ) 5239 fatal("Could not disable polling page"); 5240 }; 5241 5242 // Mark the polling page as readable 5243 void os::make_polling_page_readable(void) { 5244 if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 ) 5245 fatal("Could not enable polling page"); 5246 }; 5247 5248 // OS interface. 5249 5250 bool os::check_heap(bool force) { return true; } 5251 5252 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr); 5253 static vsnprintf_t sol_vsnprintf = NULL; 5254 5255 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) { 5256 if (!sol_vsnprintf) { 5257 //search for the named symbol in the objects that were loaded after libjvm 5258 void* where = RTLD_NEXT; 5259 if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL) 5260 sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf")); 5261 if (!sol_vsnprintf){ 5262 //search for the named symbol in the objects that were loaded before libjvm 5263 where = RTLD_DEFAULT; 5264 if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL) 5265 sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf")); 5266 assert(sol_vsnprintf != NULL, "vsnprintf not found"); 5267 } 5268 } 5269 return (*sol_vsnprintf)(buf, count, fmt, argptr); 5270 } 5271 5272 5273 // Is a (classpath) directory empty? 
// Returns true if the directory at 'path' contains no entries other than
// "." and "..".  NOTE: also returns true if the directory cannot be
// opened at all.  Uses the two-argument (buffer-supplying) readdir form.
bool os::dir_is_empty(const char* path) {
  DIR *dir = NULL;
  struct dirent *ptr;

  dir = opendir(path);
  if (dir == NULL) return true;

  /* Scan the directory */
  bool result = true;
  char buf[sizeof(struct dirent) + MAX_PATH];
  struct dirent *dbuf = (struct dirent *) buf;
  while (result && (ptr = readdir(dir, dbuf)) != NULL) {
    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
      result = false;
    }
  }
  closedir(dir);
  return result;
}

// This code originates from JDK's sysOpen and open64_w
// from src/solaris/hpi/src/system_md.c

#ifndef O_DELETE
#define O_DELETE 0x10000
#endif

// Open a file. Unlink the file immediately after open returns
// if the specified oflag has the O_DELETE flag set.
// O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c

int os::open(const char *path, int oflag, int mode) {
  // Reject over-long paths up front so ::open64 never sees them.
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;
  // O_DELETE is a JVM-private flag; strip it before calling the OS.
  int o_delete = (oflag & O_DELETE);
  oflag = oflag & ~O_DELETE;

  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  //If the open succeeded, the file might still be a directory
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        // Opening a directory for reading is not an error for open(2),
        // but callers of os::open expect regular-file semantics.
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      ::close(fd);
      return -1;
    }
  }
  /*
   * 32-bit Solaris systems suffer from:
   *
   * - an historical default soft limit of 256 per-process file
   *   descriptors that is too low for many Java programs.
   *
   * - a design flaw where file descriptors created using stdio
   *   fopen must be less than 256, _even_ when the first limit above
   *   has been raised. This can cause calls to fopen (but not calls to
   *   open, for example) to fail mysteriously, perhaps in 3rd party
   *   native code (although the JDK itself uses fopen).  One can hardly
   *   criticize them for using this most standard of all functions.
   *
   * We attempt to make everything work anyways by:
   *
   * - raising the soft limit on per-process file descriptors beyond
   *   256
   *
   * - As of Solaris 10u4, we can request that Solaris raise the 256
   *   stdio fopen limit by calling function enable_extended_FILE_stdio.
   *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
   *
   * - If we are stuck on an old (pre 10u4) Solaris system, we can
   *   workaround the bug by remapping non-stdio file descriptors below
   *   256 to ones beyond 256, which is done below.
   *
   * See:
   * 1085341: 32-bit stdio routines should support file descriptors >255
   * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
   * 6431278: Netbeans crash on 32 bit Solaris: need to call
   *          enable_extended_FILE_stdio() in VM initialisation
   * Giri Mandalika's blog
   * http://technopark02.blogspot.com/2005_05_01_archive.html
   */
#ifndef _LP64
  if ((!enabled_extended_FILE_stdio) && fd < 256) {
    // Move the descriptor above 255 to leave the low range free for
    // 3rd-party fopen calls (see comment above).
    int newfd = ::fcntl(fd, F_DUPFD, 256);
    if (newfd != -1) {
      ::close(fd);
      fd = newfd;
    }
  }
#endif // 32-bit Solaris
  /*
   * All file descriptors that are opened in the JVM and not
   * specifically destined for a subprocess should have the
   * close-on-exec flag set.  If we don't set it, then careless 3rd
   * party native code might fork and exec without closing all
   * appropriate file descriptors (e.g. as we do in closeDescriptors in
   * UNIXProcess.c), and this in turn might:
   *
   * - cause end-of-file to fail to be detected on some file
   *   descriptors, resulting in mysterious hangs, or
   *
   * - might cause an fopen in the subprocess to fail on a system
   *   suffering from bug 1085341.
   *
   * (Yes, the default setting of the close-on-exec flag is a Unix
   * design flaw)
   *
   * See:
   * 1085341: 32-bit stdio routines should support file descriptors >255
   * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
   * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
   */
#ifdef FD_CLOEXEC
  {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1)
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif

  if (o_delete != 0) {
    // O_DELETE semantics: unlink immediately; the file lives until the
    // descriptor is closed.
    ::unlink(path);
  }
  return fd;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = O_WRONLY | O_CREAT;
  if (!rewrite_existing) {
    // Fail (EEXIST) rather than truncate an existing file.
    oflags |= O_EXCL;
  }
  return ::open64(path, oflags, S_IREAD | S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}

// Thin 64-bit-clean wrapper around lseek64.
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::lseek64(fd, offset, whence);
}

// No path translation is needed on Solaris; return the path unchanged.
char * os::native_path(char *path) {
  return path;
}

int os::ftruncate(int fd, jlong length) {
  return ::ftruncate64(fd, length);
}

// fsync, retried if interrupted by a signal (EINTR).
int os::fsync(int fd) {
  RESTARTABLE_RETURN_INT(::fsync(fd));
}

// Store in *bytes the number of bytes that can be read from fd without
// blocking.  Returns 1 on success, 0 on failure.  For character devices,
// FIFOs and sockets FIONREAD is used; for regular files the answer is
// computed by seeking to the end and back.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      /*
       * XXX: is the following call interruptible? If so, this might
       * need to go through the INTERRUPT_IO() wrapper as for other
       * blocking, interruptible calls in this file.
       */
      int n,ioctl_return;

      INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n),ioctl_return,os::Solaris::clear_interrupted);
      if (ioctl_return>= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    // Restore the original file position; if that fails, report failure.
    return 0;
  }
  *bytes = end - cur;
  return 1;
}

// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  int prot;
  int flags;

  if (read_only) {
    prot = PROT_READ;
    flags = MAP_SHARED;
  } else {
    // Writable mappings are private (copy-on-write) so the file is
    // never modified through this mapping.
    prot = PROT_READ | PROT_WRITE;
    flags = MAP_PRIVATE;
  }

  if (allow_exec) {
    prot |= PROT_EXEC;
  }

  if (addr != NULL) {
    // Caller requested a specific address.
    flags |= MAP_FIXED;
  }

  char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
                                     fd, file_offset);
  if (mapped_address == MAP_FAILED) {
    return NULL;
  }
  return mapped_address;
}


// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}


// Unmap a block of memory.
5524 bool os::pd_unmap_memory(char* addr, size_t bytes) { 5525 return munmap(addr, bytes) == 0; 5526 } 5527 5528 void os::pause() { 5529 char filename[MAX_PATH]; 5530 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 5531 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 5532 } else { 5533 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 5534 } 5535 5536 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 5537 if (fd != -1) { 5538 struct stat buf; 5539 ::close(fd); 5540 while (::stat(filename, &buf) == 0) { 5541 (void)::poll(NULL, 0, 100); 5542 } 5543 } else { 5544 jio_fprintf(stderr, 5545 "Could not open pause file '%s', continuing immediately.\n", filename); 5546 } 5547 } 5548 5549 #ifndef PRODUCT 5550 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS 5551 // Turn this on if you need to trace synch operations. 5552 // Set RECORD_SYNCH_LIMIT to a large-enough value, 5553 // and call record_synch_enable and record_synch_disable 5554 // around the computation of interest. 
void record_synch(char* name, bool returning);  // defined below

// RAII helper: records entry to a synch operation in its constructor and
// the matching return in its destructor.
class RecordSynch {
  char* _name;
 public:
  RecordSynch(char* name) :_name(name)
                 { record_synch(_name, false); }
  ~RecordSynch() { record_synch(_name, true); }
};

// Generates an extern "C" interposer for a libthread synchronization
// primitive: resolves the real function once via dlsym(RTLD_NEXT), counts
// calls, records entry/return via RecordSynch, runs the 'inner' sanity
// check, then forwards to the real implementation.
#define CHECK_SYNCH_OP(ret, name, params, args, inner) \
extern "C" ret name params { \
  typedef ret name##_t params; \
  static name##_t* implem = NULL; \
  static int callcount = 0; \
  if (implem == NULL) { \
    implem = (name##_t*) dlsym(RTLD_NEXT, #name); \
    if (implem == NULL) fatal(dlerror()); \
  } \
  ++callcount; \
  RecordSynch _rs(#name); \
  inner; \
  return implem args; \
}
// in dbx, examine callcounts this way:
// for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done

// Sanity checks: mutexes/condvars handed to libthread must live in the
// C heap, never in the (movable) Java heap.
#define CHECK_POINTER_OK(p) \
  (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
#define CHECK_MU \
  if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
#define CHECK_CV \
  if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
#define CHECK_P(p) \
  if (!CHECK_POINTER_OK(p)) fatal(false, "Pointer must be in C heap only.");

#define CHECK_MUTEX(mutex_op) \
CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);

CHECK_MUTEX( mutex_lock)
CHECK_MUTEX( _mutex_lock)
CHECK_MUTEX( mutex_unlock)
CHECK_MUTEX(_mutex_unlock)
CHECK_MUTEX( mutex_trylock)
CHECK_MUTEX(_mutex_trylock)

#define CHECK_COND(cond_op) \
CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);

CHECK_COND( cond_wait);
CHECK_COND(_cond_wait);
CHECK_COND(_cond_wait_cancel);

#define CHECK_COND2(cond_op) \
CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);

CHECK_COND2( cond_timedwait);
CHECK_COND2(_cond_timedwait);
CHECK_COND2(_cond_timedwait_cancel);

// do the _lwp_* versions too
#define mutex_t lwp_mutex_t
#define cond_t lwp_cond_t
CHECK_MUTEX( _lwp_mutex_lock)
CHECK_MUTEX( _lwp_mutex_unlock)
CHECK_MUTEX( _lwp_mutex_trylock)
CHECK_MUTEX( __lwp_mutex_lock)
CHECK_MUTEX( __lwp_mutex_unlock)
CHECK_MUTEX( __lwp_mutex_trylock)
CHECK_MUTEX(___lwp_mutex_lock)
CHECK_MUTEX(___lwp_mutex_unlock)

CHECK_COND( _lwp_cond_wait);
CHECK_COND( __lwp_cond_wait);
CHECK_COND(___lwp_cond_wait);

CHECK_COND2( _lwp_cond_timedwait);
CHECK_COND2( __lwp_cond_timedwait);
#undef mutex_t
#undef cond_t

CHECK_SYNCH_OP(int, _lwp_suspend2, (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_suspend2, (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_kill, (int lwp, int n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_kill, (int lwp, int n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_sema_wait, (lwp_sema_t* p), (p), CHECK_P(p));
CHECK_SYNCH_OP(int,__lwp_sema_wait, (lwp_sema_t* p), (p), CHECK_P(p));
CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv), (cv), CHECK_CV);
CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv), (cv), CHECK_CV);


// recording machinery:

// Fixed-size in-memory trace of synch operations; entries past the limit
// are silently dropped.
enum { RECORD_SYNCH_LIMIT = 200 };
char* record_synch_name[RECORD_SYNCH_LIMIT];
void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
bool record_synch_returning[RECORD_SYNCH_LIMIT];
thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
int record_synch_count = 0;
bool record_synch_enabled = false;

// in dbx, examine recorded data this way:
// for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done

// Append one trace entry (call or return of a synch op) when recording
// is enabled.  Not itself synchronized; intended for debugging only.
void record_synch(char* name, bool returning) {
  if (record_synch_enabled) {
    if (record_synch_count < RECORD_SYNCH_LIMIT) {
      record_synch_name[record_synch_count] = name;
      record_synch_returning[record_synch_count] = returning;
      record_synch_thread[record_synch_count] = thr_self();
      record_synch_arg0ptr[record_synch_count] = &name;
      record_synch_count++;
    }
    // put more checking code here:
    // ...
  }
}

void record_synch_enable() {
  // start collecting trace data, if not already doing so
  if (!record_synch_enabled) record_synch_count = 0;
  record_synch_enabled = true;
}

void record_synch_disable() {
  // stop collecting trace data
  record_synch_enabled = false;
}

#endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
#endif // PRODUCT

// Byte offset of the pr_utime field within prusage_t, and the size of the
// span from pr_utime up to pr_ttime.  Used below to pread just the CPU
// time fields from /proc/<pid>/lwp/<id>/lwpusage.
const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
                               (intptr_t)(&((prusage_t *)(NULL))->pr_utime);


// JVMTI & JVM monitoring and management support
// The thread_cpu_time() and current_thread_cpu_time() are only
// supported if is_thread_cpu_time_supported() returns true.
// They are not supported on Solaris T1.

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread *)
// returns the fast estimate available on the platform.

// hrtime_t gethrvtime() return value includes
// user time but does not include system time
jlong os::current_thread_cpu_time() {
  return (jlong) gethrvtime();
}

jlong os::thread_cpu_time(Thread *thread) {
  // return user level CPU time only to be consistent with
  // what current_thread_cpu_time returns.
  // thread_cpu_time_info() must be changed if this changes
  return os::thread_cpu_time(thread, false /* user time only */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  if (user_sys_cpu_time) {
    return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  } else {
    // Fast path: gethrvtime-based user-time estimate.
    return os::current_thread_cpu_time();
  }
}

// Read the target thread's LWP CPU usage from /proc.  Returns user+system
// or user-only CPU time in nanoseconds, or -1 on any failure.
jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  char proc_name[64];
  int count;
  prusage_t prusage;
  jlong lwp_time;
  int fd;

  sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
          getpid(),
          thread->osthread()->lwp_id());
  fd = ::open(proc_name, O_RDONLY);
  if ( fd == -1 ) return -1;

  do {
    // Read only the time fields (see thr_time_off/thr_time_size above),
    // retrying if interrupted by a signal.
    count = ::pread(fd,
                    (void *)&prusage.pr_utime,
                    thr_time_size,
                    thr_time_off);
  } while (count < 0 && errno == EINTR);
  ::close(fd);
  if ( count < 0 ) return -1;

  if (user_sys_cpu_time) {
    // user + system CPU time
    lwp_time = (((jlong)prusage.pr_stime.tv_sec +
                 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
                 (jlong)prusage.pr_stime.tv_nsec +
                 (jlong)prusage.pr_utime.tv_nsec;
  } else {
    // user level CPU time only
    lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
                (jlong)prusage.pr_utime.tv_nsec;
  }

  return(lwp_time);
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}

// Per-thread CPU time requires the T2 libthread or bound threads.
bool os::is_thread_cpu_time_supported() {
  if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
    return true;
  } else {
    return false;
  }
}

// System loadavg support. Returns -1 if load average cannot be obtained.
// Return the load average for our processor set if the primitive exists
// (Solaris 9 and later). Otherwise just return system wide loadavg.
int os::loadavg(double loadavg[], int nelem) {
  if (pset_getloadavg_ptr != NULL) {
    return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
  } else {
    return ::getloadavg(loadavg, nelem);
  }
}

//---------------------------------------------------------------------------------

// Clamp x to the VM page containing y: returns x unchanged when both are
// on the same page, otherwise the nearest boundary of y's page.
static address same_page(address x, address y) {
  intptr_t page_bits = -os::vm_page_size();
  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
    return x;
  else if (x > y)
    return (address)(intptr_t(y) | ~page_bits) + 1;
  else
    return (address)(intptr_t(y) & page_bits);
}

// Print symbolic information (symbol+offset, containing library and its
// base address) for 'addr' via dladdr.  Returns true if addr was resolved.
bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo)) {
#ifdef _LP64
    st->print("0x%016lx: ", addr);
#else
    st->print("0x%08x: ", addr);
#endif
    if (dlinfo.dli_sname != NULL)
      st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
    else if (dlinfo.dli_fname)
      st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
    else
      st->print("<absolute address>");
    if (dlinfo.dli_fname) st->print(" in %s", dlinfo.dli_fname);
#ifdef _LP64
    if (dlinfo.dli_fbase) st->print(" at 0x%016lx", dlinfo.dli_fbase);
#else
    if (dlinfo.dli_fbase) st->print(" at 0x%08x", dlinfo.dli_fbase);
#endif
    st->cr();
    if (Verbose) {
      // decode some bytes around the PC
      address begin = same_page(addr-40, addr);
      address end   = same_page(addr+40, addr);
      address lowest = (address) dlinfo.dli_sname;
      if (!lowest) lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest) begin = lowest;
      Dl_info dlinfo2;
      // Shrink the window if the next symbol starts inside it, so we do
      // not disassemble into a neighboring function.
      if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
        end = (address) dlinfo2.dli_saddr;
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}

// Following function has been added to support HotSparc's libjvm.so running
// under Solaris production JDK 1.2.2 / 1.3.0.  These came from
// src/solaris/hpi/native_threads in the EVM codebase.
//
// NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
// libraries and should thus be removed. We will leave it behind for a while
// until we no longer want to able to run on top of 1.3.0 Solaris production
// JDK. See 4341971.

#define STACK_SLACK 0x800

extern "C" {
  // Remaining usable stack for the current thread, minus a slack margin,
  // computed from thr_stksegment().
  intptr_t sysThreadAvailableStackWithSlack() {
    stack_t st;
    intptr_t retval, stack_top;
    retval = thr_stksegment(&st);
    assert(retval == 0, "incorrect return value from thr_stksegment");
    assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
    assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
    stack_top=(intptr_t)st.ss_sp-st.ss_size;
    return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  }
}

// Just to get the Kernel build to link on solaris for testing.

extern "C" {
  class ASGCT_CallTrace;
  void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext)
    KERNEL_RETURN;
}


// ObjectMonitor park-unpark infrastructure ...
//
// We implement Solaris and Linux PlatformEvents with the
// obvious condvar-mutex-flag triple.
// Another alternative that works quite well is pipes:
// Each PlatformEvent consists of a pipe-pair.
// The thread associated with the PlatformEvent
// calls park(), which reads from the input end of the pipe.
// Unpark() writes into the other end of the pipe.
// The write-side of the pipe must be set NDELAY.
// Unfortunately pipes consume a large # of handles.
// Native solaris lwp_park() and lwp_unpark() work nicely, too.
// Using pipes for the 1st few threads might be workable, however.
//
// park() is permitted to return spuriously.
// Callers of park() should wrap the call to park() in
// an appropriate loop.  A litmus test for the correct
// usage of park is the following: if park() were modified
// to immediately return 0 your code should still work,
// albeit degenerating to a spin loop.
//
// An interesting optimization for park() is to use a trylock()
// to attempt to acquire the mutex.  If the trylock() fails
// then we know that a concurrent unpark() operation is in-progress.
// in that case the park() code could simply set _count to 0
// and return immediately.  The subsequent park() operation *might*
// return immediately.  That's harmless as the caller of park() is
// expected to loop.  By using trylock() we will have avoided a
// context switch caused by contention on the per-thread mutex.
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
//     objectmonitor implementation.
// 2.  Collapse the JSR166 parker event, and the
//     objectmonitor ParkEvent into a single "Event" construct.
// 3.  In park() and unpark() add:
//     assert (Thread::current() == AssociatedWith).
// 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
//     1-out-of-N park() operations will return immediately.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block
//
// _Event serves as a restricted-range semaphore.
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//
// TODO-FIXME: add DTRACE probes for:
// 1.   Tx parks
// 2.   Ty unparks Tx
// 3.   Tx resumes from park


// value determined through experimentation
#define ROUNDINGFIX 11

// utility to compute the abstime argument to timedwait.
// TODO-FIXME: switch from compute_abstime() to unpackTime().

// Convert a relative timeout in milliseconds into the absolute timestruc_t
// expected by cond_timedwait, clamping to the platform's maximum wait.
static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  // millis is the relative timeout time
  // abstime will be the absolute timeout time
  if (millis < 0) millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  jlong max_wait_period;

  if (UseLWPSynchronization) {
    // forward port of fix for 4275818 (not sleeping long enough)
    // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
    // _lwp_cond_timedwait() used a round_down algorithm rather
    // than a round_up. For millis less than our roundfactor
    // it rounded down to 0 which doesn't meet the spec.
    // For millis > roundfactor we may return a bit sooner, but
    // since we can not accurately identify the patch level and
    // this has already been fixed in Solaris 9 and 8 we will
    // leave it alone rather than always rounding down.
    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
  } else {
    max_wait_period = 50000000;
  }
  millis %= 1000;
  if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
    seconds = max_wait_period;
  }
  abstime->tv_sec = now.tv_sec + seconds;
  long usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    // Carry overflowed microseconds into the seconds field.
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}

// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).

int os::PlatformEvent::TryPark() {
  for (;;) {
    const int v = _Event ;
    guarantee ((v == 0) || (v == 1), "invariant") ;
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v ;
  }
}

void os::PlatformEvent::park() {           // AKA: down()
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  int v ;
  // Atomically decrement _Event (1 -> 0 means a permit was available;
  // 0 -> -1 means we must block).
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v == 0) {
    // Do this the hard way by blocking ...
    // See http://monaco.sfbay/detail.jsf?cr=5094058.
    // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
    // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
    if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
    int status = os::Solaris::mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee (_nParked == 0, "invariant") ;
    ++ _nParked ;
    while (_Event < 0) {
      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
      // Treat this the same as if the wait was interrupted
      // With usr/lib/lwp going to kernel, always handle ETIME
      status = os::Solaris::cond_wait(_cond, _mutex);
      if (status == ETIME) status = EINTR ;
      assert_status(status == 0 || status == EINTR, status, "cond_wait");
    }
    -- _nParked ;
    _Event = 0 ;
    status = os::Solaris::mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }
}

// Timed variant of park(); returns OS_OK if unparked, OS_TIMEOUT on expiry.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant") ;
  int v ;
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v != 0) return OS_OK ;

  int ret = OS_TIMEOUT;
  timestruc_t abst;
  compute_abstime (&abst, millis);

  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // For Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant") ;
  ++ _nParked ;
  while (_Event < 0) {
    int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == EINTR ||
                  status == ETIME || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break ;                // previous semantics
    if (status == ETIME || status == ETIMEDOUT) break ;
    // We consume and ignore EINTR and spurious wakeups.
  }
  -- _nParked ;
  if (_Event >= 0) ret = OS_OK ;
  _Event = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  return ret;
}

void os::PlatformEvent::unpark() {
  int v, AnyWaiters;

  // Increment _Event.
  // Another acceptable implementation would be to simply swap 1
  // into _Event:
  //   if (Swap (&_Event, 1) < 0) {
  //      mutex_lock (_mutex) ; AnyWaiters = nParked; mutex_unlock (_mutex) ;
  //      if (AnyWaiters) cond_signal (_cond) ;
  //   }

  for (;;) {
    v = _Event ;
    if (v > 0) {
      // The LD of _Event could have reordered or be satisfied
      // by a read-aside from this processor's write buffer.
      // To avoid problems execute a barrier and then
      // ratify the value.  A degenerate CAS() would also work.
      // Viz., CAS (v+0, &_Event, v) == v).
      OrderAccess::fence() ;
      if (_Event == v) return ;
      continue ;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
  }

  // If the thread associated with the event was parked, wake it.
  if (v < 0) {
    int status ;
    // Wait for the thread assoc with the PlatformEvent to vacate.
    status = os::Solaris::mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    AnyWaiters = _nParked ;
    status = os::Solaris::mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    guarantee (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
    if (AnyWaiters != 0) {
      // We intentionally signal *after* dropping the lock
      // to avoid a common class of futile wakeups.
      status = os::Solaris::cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
  }
}

// JSR166
// -------------------------------------------------------

/*
 * The solaris and linux implementations of park/unpark are fairly
 * conservative for now, but can be improved. They currently use a
 * mutex/condvar pair, plus _counter.
 * Park decrements _counter if > 0, else does a condvar wait. Unpark
 * sets count to 1 and signals condvar. Only one thread ever waits
 * on the condvar. Contention seen when trying to park implies that someone
 * is unparking you, so don't wait. And spurious returns are fine, so there
 * is no need to track notifications.
 */

#define MAX_SECS 100000000
/*
 * This code is common to linux and solaris and will be moved to a
 * common place in dolphin.
 *
 * The passed in time value is either a relative time in nanoseconds
 * or an absolute time in milliseconds. Either way it has to be unpacked
 * into suitable seconds and nanoseconds components and stored in the
 * given timespec structure.
 * Given time is a 64-bit value and the time_t used in the timespec is only
 * a signed-32-bit value (except on 64-bit Linux) we have to watch for
 * overflow if times way in the future are given. Further on Solaris versions
 * prior to 10 there is a restriction (see cond_timedwait) that the specified
 * number of seconds, in abstime, is less than current_time + 100,000,000.
 * As it will be 28 years before "now + 100000000" will overflow we can
 * ignore overflow and just impose a hard-limit on seconds using the value
 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
 * years from "now".
 */
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}

// j.u.c.LockSupport.park support: block the current JavaThread until
// unpark() grants a permit, the (absolute or relative) timeout expires,
// the thread is interrupted, or a spurious wakeup occurs.
void Parker::park(bool isAbsolute, jlong time) {

  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
      _counter = 0 ;
      OrderAccess::fence();
      return ;
  }

  // Optional fast-exit: Check interrupt before trying to wait
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // First, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
    return;
  }
  if (time > 0) {
    // Warning: this code might be exposed to the old Solaris time
    // round-down bugs.  Grep "roundingFix" for details.
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: _mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex.  If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking.  Also. check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) ||
      os::Solaris::mutex_trylock(_mutex) != 0) {
    return;
  }

  int status ;

  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = os::Solaris::mutex_unlock(_mutex);
    assert (status == 0, "invariant") ;
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // Do this the hard way by blocking ...
  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif

  if (time == 0) {
    status = os::Solaris::cond_wait (_cond, _mutex) ;
  } else {
    status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  }
  // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of Solaris.
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
#endif
  _counter = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock") ;

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
  OrderAccess::fence();
}

// Grant the permit (set _counter to 1) and signal a parked thread if one
// was waiting.  Signaling is done after dropping the lock to avoid a
// common class of futile wakeups.
void Parker::unpark() {
  int s, status ;
  status = os::Solaris::mutex_lock (_mutex) ;
  assert (status == 0, "invariant") ;
  s = _counter;
  _counter = 1;
  status = os::Solaris::mutex_unlock (_mutex) ;
  assert (status == 0, "invariant") ;

  if (s < 1) {
    status = os::Solaris::cond_signal (_cond) ;
    assert (status == 0, "invariant") ;
  }
}

extern char** environ;

// Run the specified command
in a separate process. Return its exit value, 6309 // or -1 on failure (e.g. can't fork a new process). 6310 // Unlike system(), this function can be called from signal handler. It 6311 // doesn't block SIGINT et al. 6312 int os::fork_and_exec(char* cmd) { 6313 char * argv[4]; 6314 argv[0] = (char *)"sh"; 6315 argv[1] = (char *)"-c"; 6316 argv[2] = cmd; 6317 argv[3] = NULL; 6318 6319 // fork is async-safe, fork1 is not so can't use in signal handler 6320 pid_t pid; 6321 Thread* t = ThreadLocalStorage::get_thread_slow(); 6322 if (t != NULL && t->is_inside_signal_handler()) { 6323 pid = fork(); 6324 } else { 6325 pid = fork1(); 6326 } 6327 6328 if (pid < 0) { 6329 // fork failed 6330 warning("fork failed: %s", strerror(errno)); 6331 return -1; 6332 6333 } else if (pid == 0) { 6334 // child process 6335 6336 // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris 6337 execve("/usr/bin/sh", argv, environ); 6338 6339 // execve failed 6340 _exit(-1); 6341 6342 } else { 6343 // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't 6344 // care about the actual exit code, for now. 6345 6346 int status; 6347 6348 // Wait for the child process to exit. This returns immediately if 6349 // the child has already exited. */ 6350 while (waitpid(pid, &status, 0) < 0) { 6351 switch (errno) { 6352 case ECHILD: return 0; 6353 case EINTR: break; 6354 default: return -1; 6355 } 6356 } 6357 6358 if (WIFEXITED(status)) { 6359 // The child exited normally; get its exit code. 6360 return WEXITSTATUS(status); 6361 } else if (WIFSIGNALED(status)) { 6362 // The child exited because of a signal 6363 // The best value to return is 0x80 + signal number, 6364 // because that is what all Unix shells do, and because 6365 // it allows callers to distinguish between process exit and 6366 // process death by signal. 
6367 return 0x80 + WTERMSIG(status); 6368 } else { 6369 // Unknown exit code; pass it through 6370 return status; 6371 } 6372 } 6373 } 6374 6375 // is_headless_jre() 6376 // 6377 // Test for the existence of xawt/libmawt.so or libawt_xawt.so 6378 // in order to report if we are running in a headless jre 6379 // 6380 // Since JDK8 xawt/libmawt.so was moved into the same directory 6381 // as libawt.so, and renamed libawt_xawt.so 6382 // 6383 bool os::is_headless_jre() { 6384 struct stat statbuf; 6385 char buf[MAXPATHLEN]; 6386 char libmawtpath[MAXPATHLEN]; 6387 const char *xawtstr = "/xawt/libmawt.so"; 6388 const char *new_xawtstr = "/libawt_xawt.so"; 6389 char *p; 6390 6391 // Get path to libjvm.so 6392 os::jvm_path(buf, sizeof(buf)); 6393 6394 // Get rid of libjvm.so 6395 p = strrchr(buf, '/'); 6396 if (p == NULL) return false; 6397 else *p = '\0'; 6398 6399 // Get rid of client or server 6400 p = strrchr(buf, '/'); 6401 if (p == NULL) return false; 6402 else *p = '\0'; 6403 6404 // check xawt/libmawt.so 6405 strcpy(libmawtpath, buf); 6406 strcat(libmawtpath, xawtstr); 6407 if (::stat(libmawtpath, &statbuf) == 0) return false; 6408 6409 // check libawt_xawt.so 6410 strcpy(libmawtpath, buf); 6411 strcat(libmawtpath, new_xawtstr); 6412 if (::stat(libmawtpath, &statbuf) == 0) return false; 6413 6414 return true; 6415 } 6416 6417 size_t os::write(int fd, const void *buf, unsigned int nBytes) { 6418 INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted); 6419 } 6420 6421 int os::close(int fd) { 6422 RESTARTABLE_RETURN_INT(::close(fd)); 6423 } 6424 6425 int os::socket_close(int fd) { 6426 RESTARTABLE_RETURN_INT(::close(fd)); 6427 } 6428 6429 int os::recv(int fd, char* buf, size_t nBytes, uint flags) { 6430 INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted); 6431 } 6432 6433 int os::send(int fd, char* buf, size_t nBytes, uint flags) { 6434 INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), 
os::Solaris::clear_interrupted); 6435 } 6436 6437 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { 6438 RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags)); 6439 } 6440 6441 // As both poll and select can be interrupted by signals, we have to be 6442 // prepared to restart the system call after updating the timeout, unless 6443 // a poll() is done with timeout == -1, in which case we repeat with this 6444 // "wait forever" value. 6445 6446 int os::timeout(int fd, long timeout) { 6447 int res; 6448 struct timeval t; 6449 julong prevtime, newtime; 6450 static const char* aNull = 0; 6451 struct pollfd pfd; 6452 pfd.fd = fd; 6453 pfd.events = POLLIN; 6454 6455 gettimeofday(&t, &aNull); 6456 prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000; 6457 6458 for(;;) { 6459 INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted); 6460 if(res == OS_ERR && errno == EINTR) { 6461 if(timeout != -1) { 6462 gettimeofday(&t, &aNull); 6463 newtime = ((julong)t.tv_sec * 1000) + t.tv_usec /1000; 6464 timeout -= newtime - prevtime; 6465 if(timeout <= 0) 6466 return OS_OK; 6467 prevtime = newtime; 6468 } 6469 } else return res; 6470 } 6471 } 6472 6473 int os::connect(int fd, struct sockaddr *him, socklen_t len) { 6474 int _result; 6475 INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\ 6476 os::Solaris::clear_interrupted); 6477 6478 // Depending on when thread interruption is reset, _result could be 6479 // one of two values when errno == EINTR 6480 6481 if (((_result == OS_INTRPT) || (_result == OS_ERR)) 6482 && (errno == EINTR)) { 6483 /* restarting a connect() changes its errno semantics */ 6484 INTERRUPTIBLE(::connect(fd, him, len), _result,\ 6485 os::Solaris::clear_interrupted); 6486 /* undo these changes */ 6487 if (_result == OS_ERR) { 6488 if (errno == EALREADY) { 6489 errno = EINPROGRESS; /* fall through */ 6490 } else if (errno == EISCONN) { 6491 errno = 0; 6492 return OS_OK; 6493 } 6494 } 6495 } 6496 return 
_result; 6497 } 6498 6499 int os::accept(int fd, struct sockaddr* him, socklen_t* len) { 6500 if (fd < 0) { 6501 return OS_ERR; 6502 } 6503 INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\ 6504 os::Solaris::clear_interrupted); 6505 } 6506 6507 int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags, 6508 sockaddr* from, socklen_t* fromlen) { 6509 INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\ 6510 os::Solaris::clear_interrupted); 6511 } 6512 6513 int os::sendto(int fd, char* buf, size_t len, uint flags, 6514 struct sockaddr* to, socklen_t tolen) { 6515 INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\ 6516 os::Solaris::clear_interrupted); 6517 } 6518 6519 int os::socket_available(int fd, jint *pbytes) { 6520 if (fd < 0) { 6521 return OS_OK; 6522 } 6523 int ret; 6524 RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret); 6525 // note: ioctl can return 0 when successful, JVM_SocketAvailable 6526 // is expected to return 0 on failure and 1 on success to the jdk. 6527 return (ret == OS_ERR) ? 0 : 1; 6528 } 6529 6530 int os::bind(int fd, struct sockaddr* him, socklen_t len) { 6531 INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\ 6532 os::Solaris::clear_interrupted); 6533 } 6534 6535 // Get the default path to the core file 6536 // Returns the length of the string 6537 int os::get_core_path(char* buffer, size_t bufferSize) { 6538 const char* p = get_current_directory(buffer, bufferSize); 6539 6540 if (p == NULL) { 6541 assert(p != NULL, "failed to get current directory"); 6542 return 0; 6543 } 6544 6545 return strlen(buffer); 6546 }