1 /* 2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // no precompiled headers 26 #include "classfile/classLoader.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "classfile/vmSymbols.hpp" 29 #include "code/icBuffer.hpp" 30 #include "code/vtableStubs.hpp" 31 #include "compiler/compileBroker.hpp" 32 #include "interpreter/interpreter.hpp" 33 #include "jvm_solaris.h" 34 #include "memory/allocation.inline.hpp" 35 #include "memory/filemap.hpp" 36 #include "mutex_solaris.inline.hpp" 37 #include "oops/oop.inline.hpp" 38 #include "os_share_solaris.hpp" 39 #include "prims/jniFastGetField.hpp" 40 #include "prims/jvm.h" 41 #include "prims/jvm_misc.hpp" 42 #include "runtime/arguments.hpp" 43 #include "runtime/extendedPC.hpp" 44 #include "runtime/globals.hpp" 45 #include "runtime/hpi.hpp" 46 #include "runtime/interfaceSupport.hpp" 47 #include "runtime/java.hpp" 48 #include "runtime/javaCalls.hpp" 49 #include "runtime/mutexLocker.hpp" 50 #include "runtime/objectMonitor.hpp" 51 #include "runtime/objectMonitor.inline.hpp" 52 #include "runtime/osThread.hpp" 53 #include "runtime/perfMemory.hpp" 54 #include "runtime/sharedRuntime.hpp" 55 #include "runtime/statSampler.hpp" 56 #include "runtime/stubRoutines.hpp" 57 #include "runtime/threadCritical.hpp" 58 #include "runtime/timer.hpp" 59 #include "services/attachListener.hpp" 60 #include "services/runtimeService.hpp" 61 #include "thread_solaris.inline.hpp" 62 #include "utilities/defaultStream.hpp" 63 #include "utilities/events.hpp" 64 #include "utilities/growableArray.hpp" 65 #include "utilities/vmError.hpp" 66 #ifdef TARGET_ARCH_x86 67 # include "assembler_x86.inline.hpp" 68 # include "nativeInst_x86.hpp" 69 #endif 70 #ifdef TARGET_ARCH_sparc 71 # include "assembler_sparc.inline.hpp" 72 # include "nativeInst_sparc.hpp" 73 #endif 74 #ifdef COMPILER1 75 #include "c1/c1_Runtime1.hpp" 76 #endif 77 #ifdef COMPILER2 78 #include "opto/runtime.hpp" 79 #endif 80 81 // do not include precompiled header file 82 // put OS-includes here 83 # include <dlfcn.h> 84 # 
include <errno.h> 85 # include <link.h> 86 # include <poll.h> 87 # include <pthread.h> 88 # include <pwd.h> 89 # include <schedctl.h> 90 # include <setjmp.h> 91 # include <signal.h> 92 # include <stdio.h> 93 # include <alloca.h> 94 # include <sys/filio.h> 95 # include <sys/ipc.h> 96 # include <sys/lwp.h> 97 # include <sys/machelf.h> // for elf Sym structure used by dladdr1 98 # include <sys/mman.h> 99 # include <sys/processor.h> 100 # include <sys/procset.h> 101 # include <sys/pset.h> 102 # include <sys/resource.h> 103 # include <sys/shm.h> 104 # include <sys/socket.h> 105 # include <sys/stat.h> 106 # include <sys/systeminfo.h> 107 # include <sys/time.h> 108 # include <sys/times.h> 109 # include <sys/types.h> 110 # include <sys/wait.h> 111 # include <sys/utsname.h> 112 # include <thread.h> 113 # include <unistd.h> 114 # include <sys/priocntl.h> 115 # include <sys/rtpriocntl.h> 116 # include <sys/tspriocntl.h> 117 # include <sys/iapriocntl.h> 118 # include <sys/loadavg.h> 119 # include <string.h> 120 121 # define _STRUCTURED_PROC 1 // this gets us the new structured proc interfaces of 5.6 & later 122 # include <sys/procfs.h> // see comment in <sys/procfs.h> 123 124 #define MAX_PATH (2 * K) 125 126 // for timer info max values which include all bits 127 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 128 129 #ifdef _GNU_SOURCE 130 // See bug #6514594 131 extern "C" int madvise(caddr_t, size_t, int); 132 extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg, 133 int attr, int mask); 134 #endif //_GNU_SOURCE 135 136 /* 137 MPSS Changes Start. 138 The JVM binary needs to be built and run on pre-Solaris 9 139 systems, but the constants needed by MPSS are only in Solaris 9 140 header files. They are textually replicated here to allow 141 building on earlier systems. Once building on Solaris 8 is 142 no longer a requirement, these #defines can be replaced by ordinary 143 system .h inclusion. 
144 145 In earlier versions of the JDK and Solaris, we used ISM for large pages. 146 But ISM requires shared memory to achieve this and thus has many caveats. 147 MPSS is a fully transparent and is a cleaner way to get large pages. 148 Although we still require keeping ISM for backward compatiblitiy as well as 149 giving the opportunity to use large pages on older systems it is 150 recommended that MPSS be used for Solaris 9 and above. 151 152 */ 153 154 #ifndef MC_HAT_ADVISE 155 156 struct memcntl_mha { 157 uint_t mha_cmd; /* command(s) */ 158 uint_t mha_flags; 159 size_t mha_pagesize; 160 }; 161 #define MC_HAT_ADVISE 7 /* advise hat map size */ 162 #define MHA_MAPSIZE_VA 0x1 /* set preferred page size */ 163 #define MAP_ALIGN 0x200 /* addr specifies alignment */ 164 165 #endif 166 // MPSS Changes End. 167 168 169 // Here are some liblgrp types from sys/lgrp_user.h to be able to 170 // compile on older systems without this header file. 171 172 #ifndef MADV_ACCESS_LWP 173 # define MADV_ACCESS_LWP 7 /* next LWP to access heavily */ 174 #endif 175 #ifndef MADV_ACCESS_MANY 176 # define MADV_ACCESS_MANY 8 /* many processes to access heavily */ 177 #endif 178 179 #ifndef LGRP_RSRC_CPU 180 # define LGRP_RSRC_CPU 0 /* CPU resources */ 181 #endif 182 #ifndef LGRP_RSRC_MEM 183 # define LGRP_RSRC_MEM 1 /* memory resources */ 184 #endif 185 186 // Some more macros from sys/mman.h that are not present in Solaris 8. 
#ifndef MAX_MEMINFO_CNT
/*
 * info_req request type definitions for meminfo
 * request types starting with MEMINFO_V are used for Virtual addresses
 * and should not be mixed with MEMINFO_PLGRP which is targeted for Physical
 * addresses
 */
# define MEMINFO_SHIFT           16
# define MEMINFO_MASK            (0xFF << MEMINFO_SHIFT)
# define MEMINFO_VPHYSICAL       (0x01 << MEMINFO_SHIFT) /* get physical addr */
# define MEMINFO_VLGRP           (0x02 << MEMINFO_SHIFT) /* get lgroup */
# define MEMINFO_VPAGESIZE       (0x03 << MEMINFO_SHIFT) /* size of phys page */
# define MEMINFO_VREPLCNT        (0x04 << MEMINFO_SHIFT) /* no. of replica */
# define MEMINFO_VREPL           (0x05 << MEMINFO_SHIFT) /* physical replica */
# define MEMINFO_VREPL_LGRP      (0x06 << MEMINFO_SHIFT) /* lgrp of replica */
# define MEMINFO_PLGRP           (0x07 << MEMINFO_SHIFT) /* lgroup for paddr */

/* maximum number of addresses meminfo() can process at a time */
# define MAX_MEMINFO_CNT 256

/* maximum number of request types */
# define MAX_MEMINFO_REQ 31
#endif

// see thr_setprio(3T) for the basis of these numbers
#define MinimumPriority 0
#define NormalPriority 64
#define MaximumPriority 127

// Values for ThreadPriorityPolicy == 1
// Maps Java priorities (index) to Solaris thr_setprio values; the -99999
// entry corresponds to an unused slot 0.
int prio_policy1[MaxPriority+1] = { -99999, 0, 16, 32, 48, 64,
                                    80, 96, 112, 124, 127 };

// System parameters used internally
static clock_t clock_tics_per_sec = 100;

// For diagnostics to print a message once. see run_periodic_checks
static bool check_addr0_done = false;
static sigset_t check_signal_done;
static bool check_signals = true;

address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo

address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround


// "default" initializers for missing libc APIs.
// Used as fall-backs when the real entry points cannot be resolved;
// they simply zero the primitive (Solaris statically-initialized state)
// and report success.
extern "C" {
  static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }

  static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
  static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
}

// "default" initializers for pthread-based synchronization
extern "C" {
  static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
}

// Thread Local Storage
// This is common to all Solaris platforms so it is defined here,
// in this common file.
// The declarations are in the os_cpu threadLS*.hpp files.
//
// Static member initialization for TLS
Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};

#ifndef PRODUCT
#define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))

int ThreadLocalStorage::_tcacheHit = 0;
int ThreadLocalStorage::_tcacheMiss = 0;

// Debug-only: report thread-cache hit/miss counters and the hit rate.
void ThreadLocalStorage::print_statistics() {
  int total = _tcacheMiss+_tcacheHit;
  tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
                _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
}
#undef _PCT
#endif // PRODUCT

// Slow path for thread lookup: resolve the current Thread via
// get_thread_slow(), sanity-check that the stack pointer lies inside the
// thread's recorded stack, then refresh the cache slot for raw_id so later
// lookups take the fast path. Returns NULL if no thread is attached.
Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
                                                        int index) {
  Thread *thread = get_thread_slow();
  if (thread != NULL) {
    address sp = os::current_stack_pointer();
    guarantee(thread->_stack_base == NULL ||
              (sp <= thread->_stack_base &&
               sp >= thread->_stack_base - thread->_stack_size) ||
              is_error_reported(),
              "sp must be inside of selected thread stack");

    thread->_self_raw_id = raw_id;  // mark for quick retrieval
    _get_thread_cache[ index ] = thread;
  }
  return thread;
}

// All-zero storage large enough to be viewed as a Thread; used as the
// "no thread cached" sentinel so cache readers never see a stale pointer.
static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
#define NO_CACHED_THREAD ((Thread*)all_zero)

void ThreadLocalStorage::pd_set_thread(Thread* thread) {

  // Store the new value before updating the cache to prevent a race
  // between get_thread_via_cache_slowly() and this store operation.
  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);

  // Update thread cache with new thread if setting on thread create,
  // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
  uintptr_t raw = pd_raw_thread_id();
  int ix = pd_cache_index(raw);
  _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
}

// Reset every cache slot to the sentinel (no thread cached).
void ThreadLocalStorage::pd_init() {
  for (int i = 0; i < _pd_cache_size; i++) {
    _get_thread_cache[i] = NO_CACHED_THREAD;
  }
}

// Invalidate all the caches (happens to be the same as pd_init).
void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }

#undef NO_CACHED_THREAD

// END Thread Local Storage

// Clamp a reported stack size to a sane value for this address space.
static inline size_t adjust_stack_size(address base, size_t size) {
  if ((ssize_t)size < 0) {
    // 4759953: Compensate for ridiculous stack size.
    size = max_intx;
  }
  if (size > (size_t)base) {
    // 4812466: Make sure size doesn't allow the stack to wrap the address space.
    size = (size_t)base;
  }
  return size;
}

// Query the calling thread's stack segment via thr_stksegment(3C).
// NOTE(review): ss_size is adjusted *before* the assert on retval, so if
// thr_stksegment() fails st is read uninitialized even in debug builds
// before the assert fires -- confirm this ordering is intentional.
static inline stack_t get_stack_info() {
  stack_t st;
  int retval = thr_stksegment(&st);
  st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
  assert(retval == 0, "incorrect return value from thr_stksegment");
  assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
  assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
  return st;
}

// Return the stack base of the calling thread. For the primordial thread
// the first result is cached in _main_stack_base and reused afterwards.
address os::current_stack_base() {
  int r = thr_main() ;
  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  bool is_primordial_thread = r;

  // Workaround 4352906, avoid calls to thr_stksegment by
  // thr_main after the first one (it looks like we trash
  // some data, causing the value for ss_sp to be incorrect).
  if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
    stack_t st = get_stack_info();
    if (is_primordial_thread) {
      // cache initial value of stack base
      os::Solaris::_main_stack_base = (address)st.ss_sp;
    }
    return (address)st.ss_sp;
  } else {
    guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
    return os::Solaris::_main_stack_base;
  }
}

// Return the usable stack size of the calling thread, rounded so that the
// computed bottom is page aligned.
size_t os::current_stack_size() {
  size_t size;

  int r = thr_main() ;
  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  if(!r) {
    // Not the primordial thread: the threads library knows the size.
    size = get_stack_info().ss_size;
  } else {
    // Primordial thread: the size is governed by RLIMIT_STACK.
    struct rlimit limits;
    getrlimit(RLIMIT_STACK, &limits);
    size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
  }
  // base may not be page aligned
  address base = current_stack_base();
  address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());;
  return (size_t)(base - bottom);
}

// Thread-safe localtime: delegates to the reentrant localtime_r().
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

// interruptible infrastructure

// setup_interruptible saves the thread state before going into an
// interruptible system call.
// The saved state is used to restore the thread to
// its former state whether or not an interrupt is received.
390 // Used by classloader os::read 391 // hpi calls skip this layer and stay in _thread_in_native 392 393 void os::Solaris::setup_interruptible(JavaThread* thread) { 394 395 JavaThreadState thread_state = thread->thread_state(); 396 397 assert(thread_state != _thread_blocked, "Coming from the wrong thread"); 398 assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible"); 399 OSThread* osthread = thread->osthread(); 400 osthread->set_saved_interrupt_thread_state(thread_state); 401 thread->frame_anchor()->make_walkable(thread); 402 ThreadStateTransition::transition(thread, thread_state, _thread_blocked); 403 } 404 405 // Version of setup_interruptible() for threads that are already in 406 // _thread_blocked. Used by os_sleep(). 407 void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) { 408 thread->frame_anchor()->make_walkable(thread); 409 } 410 411 JavaThread* os::Solaris::setup_interruptible() { 412 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread(); 413 setup_interruptible(thread); 414 return thread; 415 } 416 417 void os::Solaris::try_enable_extended_io() { 418 typedef int (*enable_extended_FILE_stdio_t)(int, int); 419 420 if (!UseExtendedFileIO) { 421 return; 422 } 423 424 enable_extended_FILE_stdio_t enabler = 425 (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT, 426 "enable_extended_FILE_stdio"); 427 if (enabler) { 428 enabler(-1, -1); 429 } 430 } 431 432 433 #ifdef ASSERT 434 435 JavaThread* os::Solaris::setup_interruptible_native() { 436 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread(); 437 JavaThreadState thread_state = thread->thread_state(); 438 assert(thread_state == _thread_in_native, "Assumed thread_in_native"); 439 return thread; 440 } 441 442 void os::Solaris::cleanup_interruptible_native(JavaThread* thread) { 443 JavaThreadState thread_state = thread->thread_state(); 444 assert(thread_state == _thread_in_native, "Assumed thread_in_native"); 445 } 446 #endif 447 448 // 
cleanup_interruptible reverses the effects of setup_interruptible 449 // setup_interruptible_already_blocked() does not need any cleanup. 450 451 void os::Solaris::cleanup_interruptible(JavaThread* thread) { 452 OSThread* osthread = thread->osthread(); 453 454 ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state()); 455 } 456 457 // I/O interruption related counters called in _INTERRUPTIBLE 458 459 void os::Solaris::bump_interrupted_before_count() { 460 RuntimeService::record_interrupted_before_count(); 461 } 462 463 void os::Solaris::bump_interrupted_during_count() { 464 RuntimeService::record_interrupted_during_count(); 465 } 466 467 static int _processors_online = 0; 468 469 jint os::Solaris::_os_thread_limit = 0; 470 volatile jint os::Solaris::_os_thread_count = 0; 471 472 julong os::available_memory() { 473 return Solaris::available_memory(); 474 } 475 476 julong os::Solaris::available_memory() { 477 return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size(); 478 } 479 480 julong os::Solaris::_physical_memory = 0; 481 482 julong os::physical_memory() { 483 return Solaris::physical_memory(); 484 } 485 486 julong os::allocatable_physical_memory(julong size) { 487 #ifdef _LP64 488 return size; 489 #else 490 julong result = MIN2(size, (julong)3835*M); 491 if (!is_allocatable(result)) { 492 // Memory allocations will be aligned but the alignment 493 // is not known at this point. Alignments will 494 // be at most to LargePageSizeInBytes. Protect 495 // allocations from alignments up to illegal 496 // values. If at this point 2G is illegal. 
497 julong reasonable_size = (julong)2*G - 2 * LargePageSizeInBytes; 498 result = MIN2(size, reasonable_size); 499 } 500 return result; 501 #endif 502 } 503 504 static hrtime_t first_hrtime = 0; 505 static const hrtime_t hrtime_hz = 1000*1000*1000; 506 const int LOCK_BUSY = 1; 507 const int LOCK_FREE = 0; 508 const int LOCK_INVALID = -1; 509 static volatile hrtime_t max_hrtime = 0; 510 static volatile int max_hrtime_lock = LOCK_FREE; // Update counter with LSB as lock-in-progress 511 512 513 void os::Solaris::initialize_system_info() { 514 set_processor_count(sysconf(_SC_NPROCESSORS_CONF)); 515 _processors_online = sysconf (_SC_NPROCESSORS_ONLN); 516 _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE); 517 } 518 519 int os::active_processor_count() { 520 int online_cpus = sysconf(_SC_NPROCESSORS_ONLN); 521 pid_t pid = getpid(); 522 psetid_t pset = PS_NONE; 523 // Are we running in a processor set or is there any processor set around? 524 if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) { 525 uint_t pset_cpus; 526 // Query the number of cpus available to us. 527 if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) { 528 assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check"); 529 _processors_online = pset_cpus; 530 return pset_cpus; 531 } 532 } 533 // Otherwise return number of online cpus 534 return online_cpus; 535 } 536 537 static bool find_processors_in_pset(psetid_t pset, 538 processorid_t** id_array, 539 uint_t* id_length) { 540 bool result = false; 541 // Find the number of processors in the processor set. 542 if (pset_info(pset, NULL, id_length, NULL) == 0) { 543 // Make up an array to hold their ids. 544 *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length); 545 // Fill in the array with their processor ids. 
546 if (pset_info(pset, NULL, id_length, *id_array) == 0) { 547 result = true; 548 } 549 } 550 return result; 551 } 552 553 // Callers of find_processors_online() must tolerate imprecise results -- 554 // the system configuration can change asynchronously because of DR 555 // or explicit psradm operations. 556 // 557 // We also need to take care that the loop (below) terminates as the 558 // number of processors online can change between the _SC_NPROCESSORS_ONLN 559 // request and the loop that builds the list of processor ids. Unfortunately 560 // there's no reliable way to determine the maximum valid processor id, 561 // so we use a manifest constant, MAX_PROCESSOR_ID, instead. See p_online 562 // man pages, which claim the processor id set is "sparse, but 563 // not too sparse". MAX_PROCESSOR_ID is used to ensure that we eventually 564 // exit the loop. 565 // 566 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's 567 // not available on S8.0. 568 569 static bool find_processors_online(processorid_t** id_array, 570 uint* id_length) { 571 const processorid_t MAX_PROCESSOR_ID = 100000 ; 572 // Find the number of processors online. 573 *id_length = sysconf(_SC_NPROCESSORS_ONLN); 574 // Make up an array to hold their ids. 575 *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length); 576 // Processors need not be numbered consecutively. 577 long found = 0; 578 processorid_t next = 0; 579 while (found < *id_length && next < MAX_PROCESSOR_ID) { 580 processor_info_t info; 581 if (processor_info(next, &info) == 0) { 582 // NB, PI_NOINTR processors are effectively online ... 583 if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) { 584 (*id_array)[found] = next; 585 found += 1; 586 } 587 } 588 next += 1; 589 } 590 if (found < *id_length) { 591 // The loop above didn't identify the expected number of processors. 
592 // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN) 593 // and re-running the loop, above, but there's no guarantee of progress 594 // if the system configuration is in flux. Instead, we just return what 595 // we've got. Note that in the worst case find_processors_online() could 596 // return an empty set. (As a fall-back in the case of the empty set we 597 // could just return the ID of the current processor). 598 *id_length = found ; 599 } 600 601 return true; 602 } 603 604 static bool assign_distribution(processorid_t* id_array, 605 uint id_length, 606 uint* distribution, 607 uint distribution_length) { 608 // We assume we can assign processorid_t's to uint's. 609 assert(sizeof(processorid_t) == sizeof(uint), 610 "can't convert processorid_t to uint"); 611 // Quick check to see if we won't succeed. 612 if (id_length < distribution_length) { 613 return false; 614 } 615 // Assign processor ids to the distribution. 616 // Try to shuffle processors to distribute work across boards, 617 // assuming 4 processors per board. 618 const uint processors_per_board = ProcessDistributionStride; 619 // Find the maximum processor id. 620 processorid_t max_id = 0; 621 for (uint m = 0; m < id_length; m += 1) { 622 max_id = MAX2(max_id, id_array[m]); 623 } 624 // The next id, to limit loops. 625 const processorid_t limit_id = max_id + 1; 626 // Make up markers for available processors. 627 bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id); 628 for (uint c = 0; c < limit_id; c += 1) { 629 available_id[c] = false; 630 } 631 for (uint a = 0; a < id_length; a += 1) { 632 available_id[id_array[a]] = true; 633 } 634 // Step by "boards", then by "slot", copying to "assigned". 635 // NEEDS_CLEANUP: The assignment of processors should be stateful, 636 // remembering which processors have been assigned by 637 // previous calls, etc., so as to distribute several 638 // independent calls of this method. 
What we'd like is 639 // It would be nice to have an API that let us ask 640 // how many processes are bound to a processor, 641 // but we don't have that, either. 642 // In the short term, "board" is static so that 643 // subsequent distributions don't all start at board 0. 644 static uint board = 0; 645 uint assigned = 0; 646 // Until we've found enough processors .... 647 while (assigned < distribution_length) { 648 // ... find the next available processor in the board. 649 for (uint slot = 0; slot < processors_per_board; slot += 1) { 650 uint try_id = board * processors_per_board + slot; 651 if ((try_id < limit_id) && (available_id[try_id] == true)) { 652 distribution[assigned] = try_id; 653 available_id[try_id] = false; 654 assigned += 1; 655 break; 656 } 657 } 658 board += 1; 659 if (board * processors_per_board + 0 >= limit_id) { 660 board = 0; 661 } 662 } 663 if (available_id != NULL) { 664 FREE_C_HEAP_ARRAY(bool, available_id); 665 } 666 return true; 667 } 668 669 bool os::distribute_processes(uint length, uint* distribution) { 670 bool result = false; 671 // Find the processor id's of all the available CPUs. 672 processorid_t* id_array = NULL; 673 uint id_length = 0; 674 // There are some races between querying information and using it, 675 // since processor sets can change dynamically. 676 psetid_t pset = PS_NONE; 677 // Are we running in a processor set? 
678 if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) { 679 result = find_processors_in_pset(pset, &id_array, &id_length); 680 } else { 681 result = find_processors_online(&id_array, &id_length); 682 } 683 if (result == true) { 684 if (id_length >= length) { 685 result = assign_distribution(id_array, id_length, distribution, length); 686 } else { 687 result = false; 688 } 689 } 690 if (id_array != NULL) { 691 FREE_C_HEAP_ARRAY(processorid_t, id_array); 692 } 693 return result; 694 } 695 696 bool os::bind_to_processor(uint processor_id) { 697 // We assume that a processorid_t can be stored in a uint. 698 assert(sizeof(uint) == sizeof(processorid_t), 699 "can't convert uint to processorid_t"); 700 int bind_result = 701 processor_bind(P_LWPID, // bind LWP. 702 P_MYID, // bind current LWP. 703 (processorid_t) processor_id, // id. 704 NULL); // don't return old binding. 705 return (bind_result == 0); 706 } 707 708 bool os::getenv(const char* name, char* buffer, int len) { 709 char* val = ::getenv( name ); 710 if ( val == NULL 711 || strlen(val) + 1 > len ) { 712 if (len > 0) buffer[0] = 0; // return a null string 713 return false; 714 } 715 strcpy( buffer, val ); 716 return true; 717 } 718 719 720 // Return true if user is running as root. 721 722 bool os::have_special_privileges() { 723 static bool init = false; 724 static bool privileges = false; 725 if (!init) { 726 privileges = (getuid() != geteuid()) || (getgid() != getegid()); 727 init = true; 728 } 729 return privileges; 730 } 731 732 733 void os::init_system_properties_values() { 734 char arch[12]; 735 sysinfo(SI_ARCHITECTURE, arch, sizeof(arch)); 736 737 // The next steps are taken in the product version: 738 // 739 // Obtain the JAVA_HOME value from the location of libjvm[_g].so. 740 // This library should be located at: 741 // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm[_g].so. 
742 // 743 // If "/jre/lib/" appears at the right place in the path, then we 744 // assume libjvm[_g].so is installed in a JDK and we use this path. 745 // 746 // Otherwise exit with message: "Could not create the Java virtual machine." 747 // 748 // The following extra steps are taken in the debugging version: 749 // 750 // If "/jre/lib/" does NOT appear at the right place in the path 751 // instead of exit check for $JAVA_HOME environment variable. 752 // 753 // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>, 754 // then we append a fake suffix "hotspot/libjvm[_g].so" to this path so 755 // it looks like libjvm[_g].so is installed there 756 // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm[_g].so. 757 // 758 // Otherwise exit. 759 // 760 // Important note: if the location of libjvm.so changes this 761 // code needs to be changed accordingly. 762 763 // The next few definitions allow the code to be verbatim: 764 #define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n)) 765 #define free(p) FREE_C_HEAP_ARRAY(char, p) 766 #define getenv(n) ::getenv(n) 767 768 #define EXTENSIONS_DIR "/lib/ext" 769 #define ENDORSED_DIR "/lib/endorsed" 770 #define COMMON_DIR "/usr/jdk/packages" 771 772 { 773 /* sysclasspath, java_home, dll_dir */ 774 { 775 char *home_path; 776 char *dll_path; 777 char *pslash; 778 char buf[MAXPATHLEN]; 779 os::jvm_path(buf, sizeof(buf)); 780 781 // Found the full path to libjvm.so. 782 // Now cut the path to <java_home>/jre if we can. 
783 *(strrchr(buf, '/')) = '\0'; /* get rid of /libjvm.so */ 784 pslash = strrchr(buf, '/'); 785 if (pslash != NULL) 786 *pslash = '\0'; /* get rid of /{client|server|hotspot} */ 787 dll_path = malloc(strlen(buf) + 1); 788 if (dll_path == NULL) 789 return; 790 strcpy(dll_path, buf); 791 Arguments::set_dll_dir(dll_path); 792 793 if (pslash != NULL) { 794 pslash = strrchr(buf, '/'); 795 if (pslash != NULL) { 796 *pslash = '\0'; /* get rid of /<arch> */ 797 pslash = strrchr(buf, '/'); 798 if (pslash != NULL) 799 *pslash = '\0'; /* get rid of /lib */ 800 } 801 } 802 803 home_path = malloc(strlen(buf) + 1); 804 if (home_path == NULL) 805 return; 806 strcpy(home_path, buf); 807 Arguments::set_java_home(home_path); 808 809 if (!set_boot_path('/', ':')) 810 return; 811 } 812 813 /* 814 * Where to look for native libraries 815 */ 816 { 817 // Use dlinfo() to determine the correct java.library.path. 818 // 819 // If we're launched by the Java launcher, and the user 820 // does not set java.library.path explicitly on the commandline, 821 // the Java launcher sets LD_LIBRARY_PATH for us and unsets 822 // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case 823 // dlinfo returns LD_LIBRARY_PATH + crle settings (including 824 // /usr/lib), which is exactly what we want. 825 // 826 // If the user does set java.library.path, it completely 827 // overwrites this setting, and always has. 828 // 829 // If we're not launched by the Java launcher, we may 830 // get here with any/all of the LD_LIBRARY_PATH[_32|64] 831 // settings. Again, dlinfo does exactly what we want. 
832 833 Dl_serinfo _info, *info = &_info; 834 Dl_serpath *path; 835 char* library_path; 836 char *common_path; 837 int i; 838 839 // determine search path count and required buffer size 840 if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) { 841 vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror()); 842 } 843 844 // allocate new buffer and initialize 845 info = (Dl_serinfo*)malloc(_info.dls_size); 846 if (info == NULL) { 847 vm_exit_out_of_memory(_info.dls_size, 848 "init_system_properties_values info"); 849 } 850 info->dls_size = _info.dls_size; 851 info->dls_cnt = _info.dls_cnt; 852 853 // obtain search path information 854 if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) { 855 free(info); 856 vm_exit_during_initialization("dlinfo SERINFO request", dlerror()); 857 } 858 859 path = &info->dls_serpath[0]; 860 861 // Note: Due to a legacy implementation, most of the library path 862 // is set in the launcher. This was to accomodate linking restrictions 863 // on legacy Solaris implementations (which are no longer supported). 864 // Eventually, all the library path setting will be done here. 865 // 866 // However, to prevent the proliferation of improperly built native 867 // libraries, the new path component /usr/jdk/packages is added here. 868 869 // Determine the actual CPU architecture. 870 char cpu_arch[12]; 871 sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch)); 872 #ifdef _LP64 873 // If we are a 64-bit vm, perform the following translations: 874 // sparc -> sparcv9 875 // i386 -> amd64 876 if (strcmp(cpu_arch, "sparc") == 0) 877 strcat(cpu_arch, "v9"); 878 else if (strcmp(cpu_arch, "i386") == 0) 879 strcpy(cpu_arch, "amd64"); 880 #endif 881 882 // Construct the invariant part of ld_library_path. Note that the 883 // space for the colon and the trailing null are provided by the 884 // nulls included by the sizeof operator. 
885 size_t bufsize = sizeof(COMMON_DIR) + sizeof("/lib/") + strlen(cpu_arch); 886 common_path = malloc(bufsize); 887 if (common_path == NULL) { 888 free(info); 889 vm_exit_out_of_memory(bufsize, 890 "init_system_properties_values common_path"); 891 } 892 sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch); 893 894 // struct size is more than sufficient for the path components obtained 895 // through the dlinfo() call, so only add additional space for the path 896 // components explicitly added here. 897 bufsize = info->dls_size + strlen(common_path); 898 library_path = malloc(bufsize); 899 if (library_path == NULL) { 900 free(info); 901 free(common_path); 902 vm_exit_out_of_memory(bufsize, 903 "init_system_properties_values library_path"); 904 } 905 library_path[0] = '\0'; 906 907 // Construct the desired Java library path from the linker's library 908 // search path. 909 // 910 // For compatibility, it is optimal that we insert the additional path 911 // components specific to the Java VM after those components specified 912 // in LD_LIBRARY_PATH (if any) but before those added by the ld.so 913 // infrastructure. 
914 if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it 915 strcpy(library_path, common_path); 916 } else { 917 int inserted = 0; 918 for (i = 0; i < info->dls_cnt; i++, path++) { 919 uint_t flags = path->dls_flags & LA_SER_MASK; 920 if (((flags & LA_SER_LIBPATH) == 0) && !inserted) { 921 strcat(library_path, common_path); 922 strcat(library_path, os::path_separator()); 923 inserted = 1; 924 } 925 strcat(library_path, path->dls_name); 926 strcat(library_path, os::path_separator()); 927 } 928 // eliminate trailing path separator 929 library_path[strlen(library_path)-1] = '\0'; 930 } 931 932 // happens before argument parsing - can't use a trace flag 933 // tty->print_raw("init_system_properties_values: native lib path: "); 934 // tty->print_raw_cr(library_path); 935 936 // callee copies into its own buffer 937 Arguments::set_library_path(library_path); 938 939 free(common_path); 940 free(library_path); 941 free(info); 942 } 943 944 /* 945 * Extensions directories. 946 * 947 * Note that the space for the colon and the trailing null are provided 948 * by the nulls included by the sizeof operator (so actually one byte more 949 * than necessary is allocated). 950 */ 951 { 952 char *buf = (char *) malloc(strlen(Arguments::get_java_home()) + 953 sizeof(EXTENSIONS_DIR) + sizeof(COMMON_DIR) + 954 sizeof(EXTENSIONS_DIR)); 955 sprintf(buf, "%s" EXTENSIONS_DIR ":" COMMON_DIR EXTENSIONS_DIR, 956 Arguments::get_java_home()); 957 Arguments::set_ext_dirs(buf); 958 } 959 960 /* Endorsed standards default directory. 
 */
  {
    char * buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
    sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
    Arguments::set_endorsed_dirs(buf);
  }
  }

#undef malloc
#undef free
#undef getenv
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
#undef COMMON_DIR

}

// Drop into the platform breakpoint mechanism (BREAKPOINT macro).
void os::breakpoint() {
  BREAKPOINT;
}

// Returns true for launcher options that this VM no longer supports
// (-Xt, -Xtm, -Xverifyheap, -Xmaxjitcodesize); callers silently ignore them.
bool os::obsolete_option(const JavaVMOption *option)
{
  if (!strncmp(option->optionString, "-Xt", 3)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xtm", 4)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
    return true;
  }
  return false;
}

// Returns true iff sp lies inside [stack bottom, stack base) of the given
// thread (Solaris stacks grow downward from stack_base()).
bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
  address stackStart = (address)thread->stack_base();
  address stackEnd = (address)(stackStart - (address)thread->stack_size());
  if (sp < stackStart && sp >= stackEnd ) return true;
  return false;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

// Returns an estimate of the current stack pointer. Result must be guaranteed to
// point into the calling threads stack, and be no lower than the current stack
// pointer.
address os::current_stack_pointer() {
  volatile int dummy;
  address sp = (address)&dummy + 8; // %%%% need to confirm if this is right
  return sp;
}

// thread id of the primordial thread, recorded elsewhere at startup and
// consumed by os::create_main_thread().
static thread_t main_thread;

// Thread start routine for all new Java threads
extern "C" void* java_start(void* thread_addr) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  int prio;
  Thread* thread = (Thread*)thread_addr;
  OSThread* osthr = thread->osthread();

  osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
  thread->_schedctl = (void *) schedctl_init () ;

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // If the creator called set priority before we started,
  // we need to call set priority now that we have an lwp.
  // Get the priority from libthread and set the priority
  // for the new Solaris lwp.
  if ( osthr->thread_id() != -1 ) {
    if ( UseThreadPriorities ) {
      thr_getprio(osthr->thread_id(), &prio);
      if (ThreadPriorityVerbose) {
        tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT ", setting priority: %d\n",
                      osthr->thread_id(), osthr->lwp_id(), prio );
      }
      os::set_native_priority(thread, prio);
    }
  } else if (ThreadPriorityVerbose) {
    warning("Can't set priority in _start routine, thread id hasn't been set\n");
  }

  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  // initialize signal mask for this thread
  os::Solaris::hotspot_sigmask(thread);

  thread->run();

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::dec code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec(&os::Solaris::_os_thread_count);
  }

  if (UseDetachedThreads) {
    thr_exit(NULL);
    ShouldNotReachHere();
  }
  return NULL;
}

// Wraps an already-existing Solaris thread (identified by thread_id) in a
// new OSThread. Returns NULL on allocation failure.
static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(thread_id);
  osthread->set_lwp_id(_lwp_self());
  thread->_schedctl = (void *) schedctl_init () ;

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  if ( ThreadPriorityVerbose ) {
    tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
                  osthread->thread_id(), osthread->lwp_id() );
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}

// Establish the standard HotSpot signal mask on the current thread and
// remember the caller's mask so it can be restored in os::free_thread().
void os::Solaris::hotspot_sigmask(Thread* thread) {

  //Save caller's signal mask
  sigset_t sigmask;
  thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
  OSThread *osthread = thread->osthread();
  osthread->set_caller_sigmask(sigmask);

  thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
      thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Attach an externally-created (JNI AttachCurrentThread) native thread to the VM.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  OSThread* osthread = create_os_thread(thread, thr_self());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);
  thread->set_osthread(osthread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// Bind the primordial thread (recorded in main_thread) to the given JavaThread.
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// _T2_libthread is true if we believe we are running with the newer
// SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
bool os::Solaris::_T2_libthread = false;

// Create a new native thread for the VM. The thread is created suspended
// (THR_SUSPENDED) in state INITIALIZED and started higher up the call chain.
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  if ( ThreadPriorityVerbose ) {
    char *thrtyp;
    switch ( thr_type ) {
      case vm_thread:
        thrtyp = (char *)"vm";
        break;
      case cgc_thread:
        thrtyp = (char *)"cgc";
        break;
      case pgc_thread:
        thrtyp = (char *)"pgc";
        break;
      case java_thread:
        thrtyp = (char *)"java";
        break;
      case compiler_thread:
        thrtyp = (char *)"compiler";
        break;
      case watcher_thread:
        thrtyp = (char *)"watcher";
        break;
      default:
        thrtyp = (char *)"unknown";
        break;
    }
    tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  }

  // Calculate stack size if it's not specified by caller.
  if (stack_size == 0) {
    // The default stack size 1M (2M for LP64).
    stack_size = (BytesPerWord >> 2) * K * K;

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
      // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }
  stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We got lots of threads. Check if we still have some address space left.
    // Need to be at least 5Mb of unreserved address space. We do check by
    // trying to reserve some.
    const size_t VirtualMemoryBangSize = 20*K*K;
    char* mem = os::reserve_memory(VirtualMemoryBangSize);
    if (mem == NULL) {
      delete osthread;
      return false;
    } else {
      // Release the memory again
      os::release_memory(mem, VirtualMemoryBangSize);
    }
  }

  // Setup osthread because the child thread may need it.
  thread->set_osthread(osthread);

  // Create the Solaris thread
  // explicit THR_BOUND for T2_libthread case in case
  // that assumption is not accurate, but our alternate signal stack
  // handling is based on it which must have bound threads
  thread_t tid = 0;
  long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
               | ((UseBoundThreads || os::Solaris::T2_libthread() ||
                   (thr_type == vm_thread) ||
                   (thr_type == cgc_thread) ||
                   (thr_type == pgc_thread) ||
                   (thr_type == compiler_thread && BackgroundCompilation)) ?
                  THR_BOUND : 0);
  int status;

  // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
  //
  // On multiprocessors systems, libthread sometimes under-provisions our
  // process with LWPs. On a 30-way systems, for instance, we could have
  // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
  // to our process. This can result in under utilization of PEs.
  // I suspect the problem is related to libthread's LWP
  // pool management and to the kernel's SIGBLOCKING "last LWP parked"
  // upcall policy.
  //
  // The following code is palliative -- it attempts to ensure that our
  // process has sufficient LWPs to take advantage of multiple PEs.
  // Proper long-term cures include using user-level threads bound to LWPs
  // (THR_BOUND) or using LWP-based synchronization. Note that there is a
  // slight timing window with respect to sampling _os_thread_count, but
  // the race is benign. Also, we should periodically recompute
  // _processors_online as the min of SC_NPROCESSORS_ONLN and the
  // the number of PEs in our partition. You might be tempted to use
  // THR_NEW_LWP here, but I'd recommend against it as that could
  // result in undesirable growth of the libthread's LWP pool.
  // The fix below isn't sufficient; for instance, it doesn't take into count
  // LWPs parked on IO. It does, however, help certain CPU-bound benchmarks.
  //
  // Some pathologies this scheme doesn't handle:
  // * Threads can block, releasing the LWPs. The LWPs can age out.
  //   When a large number of threads become ready again there aren't
  //   enough LWPs available to service them. This can occur when the
  //   number of ready threads oscillates.
  // * LWPs/Threads park on IO, thus taking the LWP out of circulation.
  //
  // Finally, we should call thr_setconcurrency() periodically to refresh
  // the LWP pool and thwart the LWP age-out mechanism.
  // The "+3" term provides a little slop -- we want to slightly overprovision.

  if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
    if (!(flags & THR_BOUND)) {
      thr_setconcurrency (os::Solaris::_os_thread_count); // avoid starvation
    }
  }
  // Although this doesn't hurt, we should warn of undefined behavior
  // when using unbound T1 threads with schedctl(). This should never
  // happen, as the compiler and VM threads are always created bound
  DEBUG_ONLY(
      if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
          (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
          ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
           (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
        warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
      }
  );


  // Mark that we don't have an lwp or thread id yet.
  // In case we attempt to set the priority before the thread starts.
  osthread->set_lwp_id(-1);
  osthread->set_thread_id(-1);

  status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  if (status != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("os::create_thread");
    }
    thread->set_osthread(NULL);
    // Need to clean up stuff we've allocated so far
    delete osthread;
    return false;
  }

  Atomic::inc(&os::Solaris::_os_thread_count);

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(tid);

  // Remember that we created this thread so we can set priority on it
  osthread->set_vm_created();

  // Set the default thread priority otherwise use NormalPriority

  if ( UseThreadPriorities ) {
    thr_setprio(tid, (DefaultThreadPriority == -1) ?
                        java_to_os_priority[NormPriority] :
                        DefaultThreadPriority);
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}

/* defined for >= Solaris 10.
This allows builds on earlier versions 1364 * of Solaris to take advantage of the newly reserved Solaris JVM signals 1365 * With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2 1366 * and -XX:+UseAltSigs does nothing since these should have no conflict 1367 */ 1368 #if !defined(SIGJVM1) 1369 #define SIGJVM1 39 1370 #define SIGJVM2 40 1371 #endif 1372 1373 debug_only(static bool signal_sets_initialized = false); 1374 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs; 1375 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL; 1376 int os::Solaris::_SIGasync = ASYNC_SIGNAL; 1377 1378 bool os::Solaris::is_sig_ignored(int sig) { 1379 struct sigaction oact; 1380 sigaction(sig, (struct sigaction*)NULL, &oact); 1381 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction) 1382 : CAST_FROM_FN_PTR(void*, oact.sa_handler); 1383 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) 1384 return true; 1385 else 1386 return false; 1387 } 1388 1389 // Note: SIGRTMIN is a macro that calls sysconf() so it will 1390 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime 1391 static bool isJVM1available() { 1392 return SIGJVM1 < SIGRTMIN; 1393 } 1394 1395 void os::Solaris::signal_sets_init() { 1396 // Should also have an assertion stating we are still single-threaded. 1397 assert(!signal_sets_initialized, "Already initialized"); 1398 // Fill in signals that are necessarily unblocked for all threads in 1399 // the VM. Currently, we unblock the following signals: 1400 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden 1401 // by -Xrs (=ReduceSignalUsage)); 1402 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all 1403 // other threads. The "ReduceSignalUsage" boolean tells us not to alter 1404 // the dispositions or masks wrt these signals. 
1405 // Programs embedding the VM that want to use the above signals for their 1406 // own purposes must, at this time, use the "-Xrs" option to prevent 1407 // interference with shutdown hooks and BREAK_SIGNAL thread dumping. 1408 // (See bug 4345157, and other related bugs). 1409 // In reality, though, unblocking these signals is really a nop, since 1410 // these signals are not blocked by default. 1411 sigemptyset(&unblocked_sigs); 1412 sigemptyset(&allowdebug_blocked_sigs); 1413 sigaddset(&unblocked_sigs, SIGILL); 1414 sigaddset(&unblocked_sigs, SIGSEGV); 1415 sigaddset(&unblocked_sigs, SIGBUS); 1416 sigaddset(&unblocked_sigs, SIGFPE); 1417 1418 if (isJVM1available) { 1419 os::Solaris::set_SIGinterrupt(SIGJVM1); 1420 os::Solaris::set_SIGasync(SIGJVM2); 1421 } else if (UseAltSigs) { 1422 os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL); 1423 os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL); 1424 } else { 1425 os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL); 1426 os::Solaris::set_SIGasync(ASYNC_SIGNAL); 1427 } 1428 1429 sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt()); 1430 sigaddset(&unblocked_sigs, os::Solaris::SIGasync()); 1431 1432 if (!ReduceSignalUsage) { 1433 if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) { 1434 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL); 1435 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL); 1436 } 1437 if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) { 1438 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL); 1439 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL); 1440 } 1441 if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) { 1442 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL); 1443 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL); 1444 } 1445 } 1446 // Fill in signals that are blocked by all but the VM thread. 
1447 sigemptyset(&vm_sigs); 1448 if (!ReduceSignalUsage) 1449 sigaddset(&vm_sigs, BREAK_SIGNAL); 1450 debug_only(signal_sets_initialized = true); 1451 1452 // For diagnostics only used in run_periodic_checks 1453 sigemptyset(&check_signal_done); 1454 } 1455 1456 // These are signals that are unblocked while a thread is running Java. 1457 // (For some reason, they get blocked by default.) 1458 sigset_t* os::Solaris::unblocked_signals() { 1459 assert(signal_sets_initialized, "Not initialized"); 1460 return &unblocked_sigs; 1461 } 1462 1463 // These are the signals that are blocked while a (non-VM) thread is 1464 // running Java. Only the VM thread handles these signals. 1465 sigset_t* os::Solaris::vm_signals() { 1466 assert(signal_sets_initialized, "Not initialized"); 1467 return &vm_sigs; 1468 } 1469 1470 // These are signals that are blocked during cond_wait to allow debugger in 1471 sigset_t* os::Solaris::allowdebug_blocked_signals() { 1472 assert(signal_sets_initialized, "Not initialized"); 1473 return &allowdebug_blocked_sigs; 1474 } 1475 1476 // First crack at OS-specific initialization, from inside the new thread. 1477 void os::initialize_thread() { 1478 int r = thr_main() ; 1479 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 1480 if (r) { 1481 JavaThread* jt = (JavaThread *)Thread::current(); 1482 assert(jt != NULL,"Sanity check"); 1483 size_t stack_size; 1484 address base = jt->stack_base(); 1485 if (Arguments::created_by_java_launcher()) { 1486 // Use 2MB to allow for Solaris 7 64 bit mode. 1487 stack_size = JavaThread::stack_size_at_create() == 0 1488 ? 2048*K : JavaThread::stack_size_at_create(); 1489 1490 // There are rare cases when we may have already used more than 1491 // the basic stack size allotment before this method is invoked. 1492 // Attempt to allow for a normally sized java_stack. 
1493 size_t current_stack_offset = (size_t)(base - (address)&stack_size); 1494 stack_size += ReservedSpace::page_align_size_down(current_stack_offset); 1495 } else { 1496 // 6269555: If we were not created by a Java launcher, i.e. if we are 1497 // running embedded in a native application, treat the primordial thread 1498 // as much like a native attached thread as possible. This means using 1499 // the current stack size from thr_stksegment(), unless it is too large 1500 // to reliably setup guard pages. A reasonable max size is 8MB. 1501 size_t current_size = current_stack_size(); 1502 // This should never happen, but just in case.... 1503 if (current_size == 0) current_size = 2 * K * K; 1504 stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size; 1505 } 1506 address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());; 1507 stack_size = (size_t)(base - bottom); 1508 1509 assert(stack_size > 0, "Stack size calculation problem"); 1510 1511 if (stack_size > jt->stack_size()) { 1512 NOT_PRODUCT( 1513 struct rlimit limits; 1514 getrlimit(RLIMIT_STACK, &limits); 1515 size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur); 1516 assert(size >= jt->stack_size(), "Stack size problem in main thread"); 1517 ) 1518 tty->print_cr( 1519 "Stack size of %d Kb exceeds current limit of %d Kb.\n" 1520 "(Stack sizes are rounded up to a multiple of the system page size.)\n" 1521 "See limit(1) to increase the stack size limit.", 1522 stack_size / K, jt->stack_size() / K); 1523 vm_exit(1); 1524 } 1525 assert(jt->stack_size() >= stack_size, 1526 "Attempt to map more stack than was allocated"); 1527 jt->set_stack_size(stack_size); 1528 } 1529 1530 // 5/22/01: Right now alternate signal stacks do not handle 1531 // throwing stack overflow exceptions, see bug 4463178 1532 // Until a fix is found for this, T2 will NOT imply alternate signal 1533 // stacks. 1534 // If using T2 libthread threads, install an alternate signal stack. 
1535 // Because alternate stacks associate with LWPs on Solaris, 1536 // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads 1537 // we prefer to explicitly stack bang. 1538 // If not using T2 libthread, but using UseBoundThreads any threads 1539 // (primordial thread, jni_attachCurrentThread) we do not create, 1540 // probably are not bound, therefore they can not have an alternate 1541 // signal stack. Since our stack banging code is generated and 1542 // is shared across threads, all threads must be bound to allow 1543 // using alternate signal stacks. The alternative is to interpose 1544 // on _lwp_create to associate an alt sig stack with each LWP, 1545 // and this could be a problem when the JVM is embedded. 1546 // We would prefer to use alternate signal stacks with T2 1547 // Since there is currently no accurate way to detect T2 1548 // we do not. Assuming T2 when running T1 causes sig 11s or assertions 1549 // on installing alternate signal stacks 1550 1551 1552 // 05/09/03: removed alternate signal stack support for Solaris 1553 // The alternate signal stack mechanism is no longer needed to 1554 // handle stack overflow. This is now handled by allocating 1555 // guard pages (red zone) and stackbanging. 1556 // Initially the alternate signal stack mechanism was removed because 1557 // it did not work with T1 llibthread. Alternate 1558 // signal stacks MUST have all threads bound to lwps. Applications 1559 // can create their own threads and attach them without their being 1560 // bound under T1. This is frequently the case for the primordial thread. 1561 // If we were ever to reenable this mechanism we would need to 1562 // use the dynamic check for T2 libthread. 
1563 1564 os::Solaris::init_thread_fpu_state(); 1565 } 1566 1567 1568 1569 // Free Solaris resources related to the OSThread 1570 void os::free_thread(OSThread* osthread) { 1571 assert(osthread != NULL, "os::free_thread but osthread not set"); 1572 1573 1574 // We are told to free resources of the argument thread, 1575 // but we can only really operate on the current thread. 1576 // The main thread must take the VMThread down synchronously 1577 // before the main thread exits and frees up CodeHeap 1578 guarantee((Thread::current()->osthread() == osthread 1579 || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread"); 1580 if (Thread::current()->osthread() == osthread) { 1581 // Restore caller's signal mask 1582 sigset_t sigmask = osthread->caller_sigmask(); 1583 thr_sigsetmask(SIG_SETMASK, &sigmask, NULL); 1584 } 1585 delete osthread; 1586 } 1587 1588 void os::pd_start_thread(Thread* thread) { 1589 int status = thr_continue(thread->osthread()->thread_id()); 1590 assert_status(status == 0, status, "thr_continue failed"); 1591 } 1592 1593 1594 intx os::current_thread_id() { 1595 return (intx)thr_self(); 1596 } 1597 1598 static pid_t _initial_pid = 0; 1599 1600 int os::current_process_id() { 1601 return (int)(_initial_pid ? _initial_pid : getpid()); 1602 } 1603 1604 int os::allocate_thread_local_storage() { 1605 // %%% in Win32 this allocates a memory segment pointed to by a 1606 // register. Dan Stein can implement a similar feature in 1607 // Solaris. Alternatively, the VM can do the same thing 1608 // explicitly: malloc some storage and keep the pointer in a 1609 // register (which is part of the thread's context) (or keep it 1610 // in TLS). 1611 // %%% In current versions of Solaris, thr_self and TSD can 1612 // be accessed via short sequences of displaced indirections. 1613 // The value of thr_self is available as %g7(36). 
1614 // The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4), 1615 // assuming that the current thread already has a value bound to k. 1616 // It may be worth experimenting with such access patterns, 1617 // and later having the parameters formally exported from a Solaris 1618 // interface. I think, however, that it will be faster to 1619 // maintain the invariant that %g2 always contains the 1620 // JavaThread in Java code, and have stubs simply 1621 // treat %g2 as a caller-save register, preserving it in a %lN. 1622 thread_key_t tk; 1623 if (thr_keycreate( &tk, NULL ) ) 1624 fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed " 1625 "(%s)", strerror(errno))); 1626 return int(tk); 1627 } 1628 1629 void os::free_thread_local_storage(int index) { 1630 // %%% don't think we need anything here 1631 // if ( pthread_key_delete((pthread_key_t) tk) ) 1632 // fatal("os::free_thread_local_storage: pthread_key_delete failed"); 1633 } 1634 1635 #define SMALLINT 32 // libthread allocate for tsd_common is a version specific 1636 // small number - point is NO swap space available 1637 void os::thread_local_storage_at_put(int index, void* value) { 1638 // %%% this is used only in threadLocalStorage.cpp 1639 if (thr_setspecific((thread_key_t)index, value)) { 1640 if (errno == ENOMEM) { 1641 vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space"); 1642 } else { 1643 fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed " 1644 "(%s)", strerror(errno))); 1645 } 1646 } else { 1647 ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ; 1648 } 1649 } 1650 1651 // This function could be called before TLS is initialized, for example, when 1652 // VM receives an async signal or when VM causes a fatal error during 1653 // initialization. Return NULL if thr_getspecific() fails. 
1654 void* os::thread_local_storage_at(int index) { 1655 // %%% this is used only in threadLocalStorage.cpp 1656 void* r = NULL; 1657 return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r; 1658 } 1659 1660 1661 const int NANOSECS_PER_MILLISECS = 1000000; 1662 // gethrtime can move backwards if read from one cpu and then a different cpu 1663 // getTimeNanos is guaranteed to not move backward on Solaris 1664 // local spinloop created as faster for a CAS on an int than 1665 // a CAS on a 64bit jlong. Also Atomic::cmpxchg for jlong is not 1666 // supported on sparc v8 or pre supports_cx8 intel boxes. 1667 // oldgetTimeNanos for systems which do not support CAS on 64bit jlong 1668 // i.e. sparc v8 and pre supports_cx8 (i486) intel boxes 1669 inline hrtime_t oldgetTimeNanos() { 1670 int gotlock = LOCK_INVALID; 1671 hrtime_t newtime = gethrtime(); 1672 1673 for (;;) { 1674 // grab lock for max_hrtime 1675 int curlock = max_hrtime_lock; 1676 if (curlock & LOCK_BUSY) continue; 1677 if (gotlock = Atomic::cmpxchg(LOCK_BUSY, &max_hrtime_lock, LOCK_FREE) != LOCK_FREE) continue; 1678 if (newtime > max_hrtime) { 1679 max_hrtime = newtime; 1680 } else { 1681 newtime = max_hrtime; 1682 } 1683 // release lock 1684 max_hrtime_lock = LOCK_FREE; 1685 return newtime; 1686 } 1687 } 1688 // gethrtime can move backwards if read from one cpu and then a different cpu 1689 // getTimeNanos is guaranteed to not move backward on Solaris 1690 inline hrtime_t getTimeNanos() { 1691 if (VM_Version::supports_cx8()) { 1692 const hrtime_t now = gethrtime(); 1693 // Use atomic long load since 32-bit x86 uses 2 registers to keep long. 1694 const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime); 1695 if (now <= prev) return prev; // same or retrograde time; 1696 const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev); 1697 assert(obsv >= prev, "invariant"); // Monotonicity 1698 // If the CAS succeeded then we're done and return "now". 
1699 // If the CAS failed and the observed value "obs" is >= now then 1700 // we should return "obs". If the CAS failed and now > obs > prv then 1701 // some other thread raced this thread and installed a new value, in which case 1702 // we could either (a) retry the entire operation, (b) retry trying to install now 1703 // or (c) just return obs. We use (c). No loop is required although in some cases 1704 // we might discard a higher "now" value in deference to a slightly lower but freshly 1705 // installed obs value. That's entirely benign -- it admits no new orderings compared 1706 // to (a) or (b) -- and greatly reduces coherence traffic. 1707 // We might also condition (c) on the magnitude of the delta between obs and now. 1708 // Avoiding excessive CAS operations to hot RW locations is critical. 1709 // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate 1710 return (prev == obsv) ? now : obsv ; 1711 } else { 1712 return oldgetTimeNanos(); 1713 } 1714 } 1715 1716 // Time since start-up in seconds to a fine granularity. 1717 // Used by VMSelfDestructTimer and the MemProfiler. 1718 double os::elapsedTime() { 1719 return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz; 1720 } 1721 1722 jlong os::elapsed_counter() { 1723 return (jlong)(getTimeNanos() - first_hrtime); 1724 } 1725 1726 jlong os::elapsed_frequency() { 1727 return hrtime_hz; 1728 } 1729 1730 // Return the real, user, and system times in seconds from an 1731 // arbitrary fixed point in the past. 
// Fill in wall-clock, user-CPU, and system-CPU time for this process,
// in seconds from an arbitrary fixed point. Returns false if times() fails.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    // For consistency return the real time from getTimeNanos()
    // converted to seconds.
    *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);

    return true;
  }
}

bool os::supports_vtime() { return true; }

// Turn on micro-state accounting (PR_MSACCT) for this process by writing a
// PCSET command to the /proc control file. Returns true on success.
bool os::enable_vtime() {
  int fd = open("/proc/self/ctl", O_WRONLY);
  if (fd == -1)
    return false;

  // The /proc control protocol takes a command word followed by its argument.
  long cmd[] = { PCSET, PR_MSACCT };
  int res = write(fd, cmd, sizeof(long) * 2);
  close(fd);
  if (res != sizeof(long) * 2)
    return false;

  return true;
}

// Query /proc/self/status to see whether micro-state accounting is active.
bool os::vtime_enabled() {
  int fd = open("/proc/self/status", O_RDONLY);
  if (fd == -1)
    return false;

  pstatus_t status;
  int res = read(fd, (void*) &status, sizeof(pstatus_t));
  close(fd);
  if (res != sizeof(pstatus_t))
    return false;

  return status.pr_flags & PR_MSACCT;
}

// Per-LWP virtual (CPU) time in seconds; gethrvtime() requires PR_MSACCT
// (see enable_vtime above) for fine-grained results.
double os::elapsedVTime() {
  return (double)gethrvtime() / (double)hrtime_hz;
}

// Used internally for comparisons only
// getTimeMillis guaranteed to not move backwards on Solaris
jlong getTimeMillis() {
  jlong nanotime = getTimeNanos();
  return (jlong)(nanotime / NANOSECS_PER_MILLISECS);
}

// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
jlong os::javaTimeMillis() {
  timeval t;
  if (gettimeofday( &t, NULL) == -1)
    fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
  return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
}

// Monotonic nanosecond clock for System.nanoTime(); see getTimeNanos().
jlong os::javaTimeNanos() {
  return (jlong)getTimeNanos();
}

// Describe the characteristics of the nanosecond timer for JVMTI.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
  info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
  info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
  info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
}

// Format the current local time as "YYYY-MM-DD HH:MM:SS" into buf.
char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
1847 void os::abort(bool dump_core) { 1848 os::shutdown(); 1849 if (dump_core) { 1850 #ifndef PRODUCT 1851 fdStream out(defaultStream::output_fd()); 1852 out.print_raw("Current thread is "); 1853 char buf[16]; 1854 jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id()); 1855 out.print_raw_cr(buf); 1856 out.print_raw_cr("Dumping core ..."); 1857 #endif 1858 ::abort(); // dump core (for debugging) 1859 } 1860 1861 ::exit(1); 1862 } 1863 1864 // Die immediately, no exit hook, no abort hook, no cleanup. 1865 void os::die() { 1866 _exit(-1); 1867 } 1868 1869 // unused 1870 void os::set_error_file(const char *logfile) {} 1871 1872 // DLL functions 1873 1874 const char* os::dll_file_extension() { return ".so"; } 1875 1876 const char* os::get_temp_directory() { 1877 const char *prop = Arguments::get_property("java.io.tmpdir"); 1878 return prop == NULL ? "/tmp" : prop; 1879 } 1880 1881 static bool file_exists(const char* filename) { 1882 struct stat statbuf; 1883 if (filename == NULL || strlen(filename) == 0) { 1884 return false; 1885 } 1886 return os::stat(filename, &statbuf) == 0; 1887 } 1888 1889 void os::dll_build_name(char* buffer, size_t buflen, 1890 const char* pname, const char* fname) { 1891 // Copied from libhpi 1892 const size_t pnamelen = pname ? strlen(pname) : 0; 1893 1894 // Quietly truncate on buffer overflow. Should be an error. 
1895 if (pnamelen + strlen(fname) + 10 > (size_t) buflen) { 1896 *buffer = '\0'; 1897 return; 1898 } 1899 1900 if (pnamelen == 0) { 1901 snprintf(buffer, buflen, "lib%s.so", fname); 1902 } else if (strchr(pname, *os::path_separator()) != NULL) { 1903 int n; 1904 char** pelements = split_path(pname, &n); 1905 for (int i = 0 ; i < n ; i++) { 1906 // really shouldn't be NULL but what the heck, check can't hurt 1907 if (pelements[i] == NULL || strlen(pelements[i]) == 0) { 1908 continue; // skip the empty path values 1909 } 1910 snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname); 1911 if (file_exists(buffer)) { 1912 break; 1913 } 1914 } 1915 // release the storage 1916 for (int i = 0 ; i < n ; i++) { 1917 if (pelements[i] != NULL) { 1918 FREE_C_HEAP_ARRAY(char, pelements[i]); 1919 } 1920 } 1921 if (pelements != NULL) { 1922 FREE_C_HEAP_ARRAY(char*, pelements); 1923 } 1924 } else { 1925 snprintf(buffer, buflen, "%s/lib%s.so", pname, fname); 1926 } 1927 } 1928 1929 const char* os::get_current_directory(char *buf, int buflen) { 1930 return getcwd(buf, buflen); 1931 } 1932 1933 // check if addr is inside libjvm[_g].so 1934 bool os::address_is_in_vm(address addr) { 1935 static address libjvm_base_addr; 1936 Dl_info dlinfo; 1937 1938 if (libjvm_base_addr == NULL) { 1939 dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo); 1940 libjvm_base_addr = (address)dlinfo.dli_fbase; 1941 assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm"); 1942 } 1943 1944 if (dladdr((void *)addr, &dlinfo)) { 1945 if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true; 1946 } 1947 1948 return false; 1949 } 1950 1951 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int); 1952 static dladdr1_func_type dladdr1_func = NULL; 1953 1954 bool os::dll_address_to_function_name(address addr, char *buf, 1955 int buflen, int * offset) { 1956 Dl_info dlinfo; 1957 1958 // dladdr1_func was initialized in os::init() 1959 if (dladdr1_func){ 1960 // yes, we 
have dladdr1 1961 1962 // Support for dladdr1 is checked at runtime; it may be 1963 // available even if the vm is built on a machine that does 1964 // not have dladdr1 support. Make sure there is a value for 1965 // RTLD_DL_SYMENT. 1966 #ifndef RTLD_DL_SYMENT 1967 #define RTLD_DL_SYMENT 1 1968 #endif 1969 Sym * info; 1970 if (dladdr1_func((void *)addr, &dlinfo, (void **)&info, 1971 RTLD_DL_SYMENT)) { 1972 if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname); 1973 if (offset) *offset = addr - (address)dlinfo.dli_saddr; 1974 1975 // check if the returned symbol really covers addr 1976 return ((char *)dlinfo.dli_saddr + info->st_size > (char *)addr); 1977 } else { 1978 if (buf) buf[0] = '\0'; 1979 if (offset) *offset = -1; 1980 return false; 1981 } 1982 } else { 1983 // no, only dladdr is available 1984 if(dladdr((void *)addr, &dlinfo)) { 1985 if (buf) jio_snprintf(buf, buflen, dlinfo.dli_sname); 1986 if (offset) *offset = addr - (address)dlinfo.dli_saddr; 1987 return true; 1988 } else { 1989 if (buf) buf[0] = '\0'; 1990 if (offset) *offset = -1; 1991 return false; 1992 } 1993 } 1994 } 1995 1996 bool os::dll_address_to_library_name(address addr, char* buf, 1997 int buflen, int* offset) { 1998 Dl_info dlinfo; 1999 2000 if (dladdr((void*)addr, &dlinfo)){ 2001 if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname); 2002 if (offset) *offset = addr - (address)dlinfo.dli_fbase; 2003 return true; 2004 } else { 2005 if (buf) buf[0] = '\0'; 2006 if (offset) *offset = -1; 2007 return false; 2008 } 2009 } 2010 2011 // Prints the names and full paths of all opened dynamic libraries 2012 // for current process 2013 void os::print_dll_info(outputStream * st) { 2014 Dl_info dli; 2015 void *handle; 2016 Link_map *map; 2017 Link_map *p; 2018 2019 st->print_cr("Dynamic libraries:"); st->flush(); 2020 2021 if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) { 2022 st->print_cr("Error: Cannot print dynamic libraries."); 2023 return; 2024 } 2025 handle = 
dlopen(dli.dli_fname, RTLD_LAZY); 2026 if (handle == NULL) { 2027 st->print_cr("Error: Cannot print dynamic libraries."); 2028 return; 2029 } 2030 dlinfo(handle, RTLD_DI_LINKMAP, &map); 2031 if (map == NULL) { 2032 st->print_cr("Error: Cannot print dynamic libraries."); 2033 return; 2034 } 2035 2036 while (map->l_prev != NULL) 2037 map = map->l_prev; 2038 2039 while (map != NULL) { 2040 st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name); 2041 map = map->l_next; 2042 } 2043 2044 dlclose(handle); 2045 } 2046 2047 // Loads .dll/.so and 2048 // in case of error it checks if .dll/.so was built for the 2049 // same architecture as Hotspot is running on 2050 2051 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) 2052 { 2053 void * result= ::dlopen(filename, RTLD_LAZY); 2054 if (result != NULL) { 2055 // Successful loading 2056 return result; 2057 } 2058 2059 Elf32_Ehdr elf_head; 2060 2061 // Read system error message into ebuf 2062 // It may or may not be overwritten below 2063 ::strncpy(ebuf, ::dlerror(), ebuflen-1); 2064 ebuf[ebuflen-1]='\0'; 2065 int diag_msg_max_length=ebuflen-strlen(ebuf); 2066 char* diag_msg_buf=ebuf+strlen(ebuf); 2067 2068 if (diag_msg_max_length==0) { 2069 // No more space in ebuf for additional diagnostics message 2070 return NULL; 2071 } 2072 2073 2074 int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK); 2075 2076 if (file_descriptor < 0) { 2077 // Can't open library, report dlerror() message 2078 return NULL; 2079 } 2080 2081 bool failed_to_read_elf_head= 2082 (sizeof(elf_head)!= 2083 (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ; 2084 2085 ::close(file_descriptor); 2086 if (failed_to_read_elf_head) { 2087 // file i/o error - report dlerror() msg 2088 return NULL; 2089 } 2090 2091 typedef struct { 2092 Elf32_Half code; // Actual value as defined in elf.h 2093 Elf32_Half compat_class; // Compatibility of archs at VM's sense 2094 char elf_class; // 32 or 64 bit 2095 char endianess; // MSB or LSB 2096 
char* name; // String representation 2097 } arch_t; 2098 2099 static const arch_t arch_array[]={ 2100 {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 2101 {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 2102 {EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"}, 2103 {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"}, 2104 {EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 2105 {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 2106 {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"}, 2107 {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"}, 2108 {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"}, 2109 {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"} 2110 }; 2111 2112 #if (defined IA32) 2113 static Elf32_Half running_arch_code=EM_386; 2114 #elif (defined AMD64) 2115 static Elf32_Half running_arch_code=EM_X86_64; 2116 #elif (defined IA64) 2117 static Elf32_Half running_arch_code=EM_IA_64; 2118 #elif (defined __sparc) && (defined _LP64) 2119 static Elf32_Half running_arch_code=EM_SPARCV9; 2120 #elif (defined __sparc) && (!defined _LP64) 2121 static Elf32_Half running_arch_code=EM_SPARC; 2122 #elif (defined __powerpc64__) 2123 static Elf32_Half running_arch_code=EM_PPC64; 2124 #elif (defined __powerpc__) 2125 static Elf32_Half running_arch_code=EM_PPC; 2126 #elif (defined ARM) 2127 static Elf32_Half running_arch_code=EM_ARM; 2128 #else 2129 #error Method os::dll_load requires that one of following is defined:\ 2130 IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM 2131 #endif 2132 2133 // Identify compatability class for VM's architecture and library's architecture 2134 // Obtain string descriptions for architectures 2135 2136 arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL}; 2137 int running_arch_index=-1; 2138 2139 for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) { 2140 
if (running_arch_code == arch_array[i].code) { 2141 running_arch_index = i; 2142 } 2143 if (lib_arch.code == arch_array[i].code) { 2144 lib_arch.compat_class = arch_array[i].compat_class; 2145 lib_arch.name = arch_array[i].name; 2146 } 2147 } 2148 2149 assert(running_arch_index != -1, 2150 "Didn't find running architecture code (running_arch_code) in arch_array"); 2151 if (running_arch_index == -1) { 2152 // Even though running architecture detection failed 2153 // we may still continue with reporting dlerror() message 2154 return NULL; 2155 } 2156 2157 if (lib_arch.endianess != arch_array[running_arch_index].endianess) { 2158 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)"); 2159 return NULL; 2160 } 2161 2162 if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) { 2163 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)"); 2164 return NULL; 2165 } 2166 2167 if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) { 2168 if ( lib_arch.name!=NULL ) { 2169 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 2170 " (Possible cause: can't load %s-bit .so on a %s-bit platform)", 2171 lib_arch.name, arch_array[running_arch_index].name); 2172 } else { 2173 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 2174 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)", 2175 lib_arch.code, 2176 arch_array[running_arch_index].name); 2177 } 2178 } 2179 2180 return NULL; 2181 } 2182 2183 void* os::dll_lookup(void* handle, const char* name) { 2184 return dlsym(handle, name); 2185 } 2186 2187 2188 bool _print_ascii_file(const char* filename, outputStream* st) { 2189 int fd = open(filename, O_RDONLY); 2190 if (fd == -1) { 2191 return false; 2192 } 2193 2194 char buf[32]; 2195 int bytes; 2196 while ((bytes = read(fd, buf, sizeof(buf))) > 0) { 2197 st->print_raw(buf, bytes); 2198 } 2199 2200 close(fd); 2201 2202 return true; 2203 } 2204 2205 void 
os::print_os_info(outputStream* st) { 2206 st->print("OS:"); 2207 2208 if (!_print_ascii_file("/etc/release", st)) { 2209 st->print("Solaris"); 2210 } 2211 st->cr(); 2212 2213 // kernel 2214 st->print("uname:"); 2215 struct utsname name; 2216 uname(&name); 2217 st->print(name.sysname); st->print(" "); 2218 st->print(name.release); st->print(" "); 2219 st->print(name.version); st->print(" "); 2220 st->print(name.machine); 2221 2222 // libthread 2223 if (os::Solaris::T2_libthread()) st->print(" (T2 libthread)"); 2224 else st->print(" (T1 libthread)"); 2225 st->cr(); 2226 2227 // rlimit 2228 st->print("rlimit:"); 2229 struct rlimit rlim; 2230 2231 st->print(" STACK "); 2232 getrlimit(RLIMIT_STACK, &rlim); 2233 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 2234 else st->print("%uk", rlim.rlim_cur >> 10); 2235 2236 st->print(", CORE "); 2237 getrlimit(RLIMIT_CORE, &rlim); 2238 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 2239 else st->print("%uk", rlim.rlim_cur >> 10); 2240 2241 st->print(", NOFILE "); 2242 getrlimit(RLIMIT_NOFILE, &rlim); 2243 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 2244 else st->print("%d", rlim.rlim_cur); 2245 2246 st->print(", AS "); 2247 getrlimit(RLIMIT_AS, &rlim); 2248 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 2249 else st->print("%uk", rlim.rlim_cur >> 10); 2250 st->cr(); 2251 2252 // load average 2253 st->print("load average:"); 2254 double loadavg[3]; 2255 os::loadavg(loadavg, 3); 2256 st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]); 2257 st->cr(); 2258 } 2259 2260 2261 static bool check_addr0(outputStream* st) { 2262 jboolean status = false; 2263 int fd = open("/proc/self/map",O_RDONLY); 2264 if (fd >= 0) { 2265 prmap_t p; 2266 while(read(fd, &p, sizeof(p)) > 0) { 2267 if (p.pr_vaddr == 0x0) { 2268 st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname); 2269 st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? 
"None" : p.pr_mapname); 2270 st->print("Access:"); 2271 st->print("%s",(p.pr_mflags & MA_READ) ? "r" : "-"); 2272 st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-"); 2273 st->print("%s",(p.pr_mflags & MA_EXEC) ? "x" : "-"); 2274 st->cr(); 2275 status = true; 2276 } 2277 close(fd); 2278 } 2279 } 2280 return status; 2281 } 2282 2283 void os::print_memory_info(outputStream* st) { 2284 st->print("Memory:"); 2285 st->print(" %dk page", os::vm_page_size()>>10); 2286 st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10); 2287 st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10); 2288 st->cr(); 2289 (void) check_addr0(st); 2290 } 2291 2292 // Taken from /usr/include/sys/machsig.h Supposed to be architecture specific 2293 // but they're the same for all the solaris architectures that we support. 2294 const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR", 2295 "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG", 2296 "ILL_COPROC", "ILL_BADSTK" }; 2297 2298 const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV", 2299 "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES", 2300 "FPE_FLTINV", "FPE_FLTSUB" }; 2301 2302 const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" }; 2303 2304 const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" }; 2305 2306 void os::print_siginfo(outputStream* st, void* siginfo) { 2307 st->print("siginfo:"); 2308 2309 const int buflen = 100; 2310 char buf[buflen]; 2311 siginfo_t *si = (siginfo_t*)siginfo; 2312 st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen)); 2313 char *err = strerror(si->si_errno); 2314 if (si->si_errno != 0 && err != NULL) { 2315 st->print("si_errno=%s", err); 2316 } else { 2317 st->print("si_errno=%d", si->si_errno); 2318 } 2319 const int c = si->si_code; 2320 assert(c > 0, "unexpected si_code"); 2321 switch (si->si_signo) { 2322 case SIGILL: 2323 st->print(", si_code=%d (%s)", c, c > 8 ? 
"" : ill_names[c]); 2324 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2325 break; 2326 case SIGFPE: 2327 st->print(", si_code=%d (%s)", c, c > 9 ? "" : fpe_names[c]); 2328 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2329 break; 2330 case SIGSEGV: 2331 st->print(", si_code=%d (%s)", c, c > 2 ? "" : segv_names[c]); 2332 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2333 break; 2334 case SIGBUS: 2335 st->print(", si_code=%d (%s)", c, c > 3 ? "" : bus_names[c]); 2336 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2337 break; 2338 default: 2339 st->print(", si_code=%d", si->si_code); 2340 // no si_addr 2341 } 2342 2343 if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) && 2344 UseSharedSpaces) { 2345 FileMapInfo* mapinfo = FileMapInfo::current_info(); 2346 if (mapinfo->is_in_shared_space(si->si_addr)) { 2347 st->print("\n\nError accessing class data sharing archive." \ 2348 " Mapped file inaccessible during execution, " \ 2349 " possible disk/network problem."); 2350 } 2351 } 2352 st->cr(); 2353 } 2354 2355 // Moved from whole group, because we need them here for diagnostic 2356 // prints. 
2357 #define OLDMAXSIGNUM 32 2358 static int Maxsignum = 0; 2359 static int *ourSigFlags = NULL; 2360 2361 extern "C" void sigINTRHandler(int, siginfo_t*, void*); 2362 2363 int os::Solaris::get_our_sigflags(int sig) { 2364 assert(ourSigFlags!=NULL, "signal data structure not initialized"); 2365 assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range"); 2366 return ourSigFlags[sig]; 2367 } 2368 2369 void os::Solaris::set_our_sigflags(int sig, int flags) { 2370 assert(ourSigFlags!=NULL, "signal data structure not initialized"); 2371 assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range"); 2372 ourSigFlags[sig] = flags; 2373 } 2374 2375 2376 static const char* get_signal_handler_name(address handler, 2377 char* buf, int buflen) { 2378 int offset; 2379 bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset); 2380 if (found) { 2381 // skip directory names 2382 const char *p1, *p2; 2383 p1 = buf; 2384 size_t len = strlen(os::file_separator()); 2385 while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len; 2386 jio_snprintf(buf, buflen, "%s+0x%x", p1, offset); 2387 } else { 2388 jio_snprintf(buf, buflen, PTR_FORMAT, handler); 2389 } 2390 return buf; 2391 } 2392 2393 static void print_signal_handler(outputStream* st, int sig, 2394 char* buf, size_t buflen) { 2395 struct sigaction sa; 2396 2397 sigaction(sig, NULL, &sa); 2398 2399 st->print("%s: ", os::exception_name(sig, buf, buflen)); 2400 2401 address handler = (sa.sa_flags & SA_SIGINFO) 2402 ? 
CAST_FROM_FN_PTR(address, sa.sa_sigaction) 2403 : CAST_FROM_FN_PTR(address, sa.sa_handler); 2404 2405 if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) { 2406 st->print("SIG_DFL"); 2407 } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) { 2408 st->print("SIG_IGN"); 2409 } else { 2410 st->print("[%s]", get_signal_handler_name(handler, buf, buflen)); 2411 } 2412 2413 st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask); 2414 2415 address rh = VMError::get_resetted_sighandler(sig); 2416 // May be, handler was resetted by VMError? 2417 if(rh != NULL) { 2418 handler = rh; 2419 sa.sa_flags = VMError::get_resetted_sigflags(sig); 2420 } 2421 2422 st->print(", sa_flags=" PTR32_FORMAT, sa.sa_flags); 2423 2424 // Check: is it our handler? 2425 if(handler == CAST_FROM_FN_PTR(address, signalHandler) || 2426 handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) { 2427 // It is our signal handler 2428 // check for flags 2429 if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) { 2430 st->print( 2431 ", flags was changed from " PTR32_FORMAT ", consider using jsig library", 2432 os::Solaris::get_our_sigflags(sig)); 2433 } 2434 } 2435 st->cr(); 2436 } 2437 2438 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 2439 st->print_cr("Signal Handlers:"); 2440 print_signal_handler(st, SIGSEGV, buf, buflen); 2441 print_signal_handler(st, SIGBUS , buf, buflen); 2442 print_signal_handler(st, SIGFPE , buf, buflen); 2443 print_signal_handler(st, SIGPIPE, buf, buflen); 2444 print_signal_handler(st, SIGXFSZ, buf, buflen); 2445 print_signal_handler(st, SIGILL , buf, buflen); 2446 print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen); 2447 print_signal_handler(st, ASYNC_SIGNAL, buf, buflen); 2448 print_signal_handler(st, BREAK_SIGNAL, buf, buflen); 2449 print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen); 2450 print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen); 2451 print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen); 2452 
print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen); 2453 print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen); 2454 } 2455 2456 static char saved_jvm_path[MAXPATHLEN] = { 0 }; 2457 2458 // Find the full path to the current module, libjvm.so or libjvm_g.so 2459 void os::jvm_path(char *buf, jint buflen) { 2460 // Error checking. 2461 if (buflen < MAXPATHLEN) { 2462 assert(false, "must use a large-enough buffer"); 2463 buf[0] = '\0'; 2464 return; 2465 } 2466 // Lazy resolve the path to current module. 2467 if (saved_jvm_path[0] != 0) { 2468 strcpy(buf, saved_jvm_path); 2469 return; 2470 } 2471 2472 Dl_info dlinfo; 2473 int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo); 2474 assert(ret != 0, "cannot locate libjvm"); 2475 realpath((char *)dlinfo.dli_fname, buf); 2476 2477 if (strcmp(Arguments::sun_java_launcher(), "gamma") == 0) { 2478 // Support for the gamma launcher. Typical value for buf is 2479 // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". If "/jre/lib/" appears at 2480 // the right place in the string, then assume we are installed in a JDK and 2481 // we're done. Otherwise, check for a JAVA_HOME environment variable and fix 2482 // up the path so it looks like libjvm.so is installed there (append a 2483 // fake suffix hotspot/libjvm.so). 2484 const char *p = buf + strlen(buf) - 1; 2485 for (int count = 0; p > buf && count < 5; ++count) { 2486 for (--p; p > buf && *p != '/'; --p) 2487 /* empty */ ; 2488 } 2489 2490 if (strncmp(p, "/jre/lib/", 9) != 0) { 2491 // Look for JAVA_HOME in the environment. 2492 char* java_home_var = ::getenv("JAVA_HOME"); 2493 if (java_home_var != NULL && java_home_var[0] != 0) { 2494 char cpu_arch[12]; 2495 char* jrelib_p; 2496 int len; 2497 sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch)); 2498 #ifdef _LP64 2499 // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9. 
2500 if (strcmp(cpu_arch, "sparc") == 0) { 2501 strcat(cpu_arch, "v9"); 2502 } else if (strcmp(cpu_arch, "i386") == 0) { 2503 strcpy(cpu_arch, "amd64"); 2504 } 2505 #endif 2506 // Check the current module name "libjvm.so" or "libjvm_g.so". 2507 p = strrchr(buf, '/'); 2508 assert(strstr(p, "/libjvm") == p, "invalid library name"); 2509 p = strstr(p, "_g") ? "_g" : ""; 2510 2511 realpath(java_home_var, buf); 2512 // determine if this is a legacy image or modules image 2513 // modules image doesn't have "jre" subdirectory 2514 len = strlen(buf); 2515 jrelib_p = buf + len; 2516 snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch); 2517 if (0 != access(buf, F_OK)) { 2518 snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch); 2519 } 2520 2521 if (0 == access(buf, F_OK)) { 2522 // Use current module name "libjvm[_g].so" instead of 2523 // "libjvm"debug_only("_g")".so" since for fastdebug version 2524 // we should have "libjvm.so" but debug_only("_g") adds "_g"! 2525 // It is used when we are choosing the HPI library's name 2526 // "libhpi[_g].so" in hpi::initialize_get_interface(). 2527 len = strlen(buf); 2528 snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p); 2529 } else { 2530 // Go back to path of .so 2531 realpath((char *)dlinfo.dli_fname, buf); 2532 } 2533 } 2534 } 2535 } 2536 2537 strcpy(saved_jvm_path, buf); 2538 } 2539 2540 2541 void os::print_jni_name_prefix_on(outputStream* st, int args_size) { 2542 // no prefix required, not even "_" 2543 } 2544 2545 2546 void os::print_jni_name_suffix_on(outputStream* st, int args_size) { 2547 // no suffix required 2548 } 2549 2550 2551 // sun.misc.Signal 2552 2553 extern "C" { 2554 static void UserHandler(int sig, void *siginfo, void *context) { 2555 // Ctrl-C is pressed during error reporting, likely because the error 2556 // handler fails to abort. Let VM die immediately. 
2557 if (sig == SIGINT && is_error_reported()) { 2558 os::die(); 2559 } 2560 2561 os::signal_notify(sig); 2562 // We do not need to reinstate the signal handler each time... 2563 } 2564 } 2565 2566 void* os::user_handler() { 2567 return CAST_FROM_FN_PTR(void*, UserHandler); 2568 } 2569 2570 extern "C" { 2571 typedef void (*sa_handler_t)(int); 2572 typedef void (*sa_sigaction_t)(int, siginfo_t *, void *); 2573 } 2574 2575 void* os::signal(int signal_number, void* handler) { 2576 struct sigaction sigAct, oldSigAct; 2577 sigfillset(&(sigAct.sa_mask)); 2578 sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND; 2579 sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler); 2580 2581 if (sigaction(signal_number, &sigAct, &oldSigAct)) 2582 // -1 means registration failed 2583 return (void *)-1; 2584 2585 return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler); 2586 } 2587 2588 void os::signal_raise(int signal_number) { 2589 raise(signal_number); 2590 } 2591 2592 /* 2593 * The following code is moved from os.cpp for making this 2594 * code platform specific, which it is by its very nature. 
2595 */ 2596 2597 // a counter for each possible signal value 2598 static int Sigexit = 0; 2599 static int Maxlibjsigsigs; 2600 static jint *pending_signals = NULL; 2601 static int *preinstalled_sigs = NULL; 2602 static struct sigaction *chainedsigactions = NULL; 2603 static sema_t sig_sem; 2604 typedef int (*version_getting_t)(); 2605 version_getting_t os::Solaris::get_libjsig_version = NULL; 2606 static int libjsigversion = NULL; 2607 2608 int os::sigexitnum_pd() { 2609 assert(Sigexit > 0, "signal memory not yet initialized"); 2610 return Sigexit; 2611 } 2612 2613 void os::Solaris::init_signal_mem() { 2614 // Initialize signal structures 2615 Maxsignum = SIGRTMAX; 2616 Sigexit = Maxsignum+1; 2617 assert(Maxsignum >0, "Unable to obtain max signal number"); 2618 2619 Maxlibjsigsigs = Maxsignum; 2620 2621 // pending_signals has one int per signal 2622 // The additional signal is for SIGEXIT - exit signal to signal_thread 2623 pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1)); 2624 memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1))); 2625 2626 if (UseSignalChaining) { 2627 chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction) 2628 * (Maxsignum + 1)); 2629 memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1))); 2630 preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1)); 2631 memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1))); 2632 } 2633 ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 )); 2634 memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1)); 2635 } 2636 2637 void os::signal_init_pd() { 2638 int ret; 2639 2640 ret = ::sema_init(&sig_sem, 0, NULL, NULL); 2641 assert(ret == 0, "sema_init() failed"); 2642 } 2643 2644 void os::signal_notify(int signal_number) { 2645 int ret; 2646 2647 Atomic::inc(&pending_signals[signal_number]); 2648 ret = ::sema_post(&sig_sem); 2649 assert(ret == 0, "sema_post() failed"); 2650 } 2651 2652 static int check_pending_signals(bool 
wait_for_signal) { 2653 int ret; 2654 while (true) { 2655 for (int i = 0; i < Sigexit + 1; i++) { 2656 jint n = pending_signals[i]; 2657 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { 2658 return i; 2659 } 2660 } 2661 if (!wait_for_signal) { 2662 return -1; 2663 } 2664 JavaThread *thread = JavaThread::current(); 2665 ThreadBlockInVM tbivm(thread); 2666 2667 bool threadIsSuspended; 2668 do { 2669 thread->set_suspend_equivalent(); 2670 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 2671 while((ret = ::sema_wait(&sig_sem)) == EINTR) 2672 ; 2673 assert(ret == 0, "sema_wait() failed"); 2674 2675 // were we externally suspended while we were waiting? 2676 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2677 if (threadIsSuspended) { 2678 // 2679 // The semaphore has been incremented, but while we were waiting 2680 // another thread suspended us. We don't want to continue running 2681 // while suspended because that would surprise the thread that 2682 // suspended us. 2683 // 2684 ret = ::sema_post(&sig_sem); 2685 assert(ret == 0, "sema_post() failed"); 2686 2687 thread->java_suspend_self(); 2688 } 2689 } while (threadIsSuspended); 2690 } 2691 } 2692 2693 int os::signal_lookup() { 2694 return check_pending_signals(false); 2695 } 2696 2697 int os::signal_wait() { 2698 return check_pending_signals(true); 2699 } 2700 2701 //////////////////////////////////////////////////////////////////////////////// 2702 // Virtual Memory 2703 2704 static int page_size = -1; 2705 2706 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later. init_2() will 2707 // clear this var if support is not available. 2708 static bool has_map_align = true; 2709 2710 int os::vm_page_size() { 2711 assert(page_size != -1, "must call os::init"); 2712 return page_size; 2713 } 2714 2715 // Solaris allocates memory by pages. 
2716 int os::vm_allocation_granularity() { 2717 assert(page_size != -1, "must call os::init"); 2718 return page_size; 2719 } 2720 2721 bool os::commit_memory(char* addr, size_t bytes, bool exec) { 2722 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; 2723 size_t size = bytes; 2724 return 2725 NULL != Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot); 2726 } 2727 2728 bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint, 2729 bool exec) { 2730 if (commit_memory(addr, bytes, exec)) { 2731 if (UseMPSS && alignment_hint > (size_t)vm_page_size()) { 2732 // If the large page size has been set and the VM 2733 // is using large pages, use the large page size 2734 // if it is smaller than the alignment hint. This is 2735 // a case where the VM wants to use a larger alignment size 2736 // for its own reasons but still want to use large pages 2737 // (which is what matters to setting the mpss range. 2738 size_t page_size = 0; 2739 if (large_page_size() < alignment_hint) { 2740 assert(UseLargePages, "Expected to be here for large page use only"); 2741 page_size = large_page_size(); 2742 } else { 2743 // If the alignment hint is less than the large page 2744 // size, the VM wants a particular alignment (thus the hint) 2745 // for internal reasons. Try to set the mpss range using 2746 // the alignment_hint. 2747 page_size = alignment_hint; 2748 } 2749 // Since this is a hint, ignore any failures. 2750 (void)Solaris::set_mpss_range(addr, bytes, page_size); 2751 } 2752 return true; 2753 } 2754 return false; 2755 } 2756 2757 // Uncommit the pages in a specified region. 
2758 void os::free_memory(char* addr, size_t bytes) { 2759 if (madvise(addr, bytes, MADV_FREE) < 0) { 2760 debug_only(warning("MADV_FREE failed.")); 2761 return; 2762 } 2763 } 2764 2765 bool os::create_stack_guard_pages(char* addr, size_t size) { 2766 return os::commit_memory(addr, size); 2767 } 2768 2769 bool os::remove_stack_guard_pages(char* addr, size_t size) { 2770 return os::uncommit_memory(addr, size); 2771 } 2772 2773 // Change the page size in a given range. 2774 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { 2775 assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned."); 2776 assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned."); 2777 Solaris::set_mpss_range(addr, bytes, alignment_hint); 2778 } 2779 2780 // Tell the OS to make the range local to the first-touching LWP 2781 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { 2782 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned."); 2783 if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) { 2784 debug_only(warning("MADV_ACCESS_LWP failed.")); 2785 } 2786 } 2787 2788 // Tell the OS that this range would be accessed from different LWPs. 2789 void os::numa_make_global(char *addr, size_t bytes) { 2790 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned."); 2791 if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) { 2792 debug_only(warning("MADV_ACCESS_MANY failed.")); 2793 } 2794 } 2795 2796 // Get the number of the locality groups. 2797 size_t os::numa_get_groups_num() { 2798 size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie()); 2799 return n != -1 ? n : 1; 2800 } 2801 2802 // Get a list of leaf locality groups. A leaf lgroup is group that 2803 // doesn't have any children. Typical leaf group is a CPU or a CPU/memory 2804 // board. An LWP is assigned to one of these groups upon creation. 
2805 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 2806 if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) { 2807 ids[0] = 0; 2808 return 1; 2809 } 2810 int result_size = 0, top = 1, bottom = 0, cur = 0; 2811 for (int k = 0; k < size; k++) { 2812 int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur], 2813 (Solaris::lgrp_id_t*)&ids[top], size - top); 2814 if (r == -1) { 2815 ids[0] = 0; 2816 return 1; 2817 } 2818 if (!r) { 2819 // That's a leaf node. 2820 assert (bottom <= cur, "Sanity check"); 2821 // Check if the node has memory 2822 if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur], 2823 NULL, 0, LGRP_RSRC_MEM) > 0) { 2824 ids[bottom++] = ids[cur]; 2825 } 2826 } 2827 top += r; 2828 cur++; 2829 } 2830 if (bottom == 0) { 2831 // Handle a situation, when the OS reports no memory available. 2832 // Assume UMA architecture. 2833 ids[0] = 0; 2834 return 1; 2835 } 2836 return bottom; 2837 } 2838 2839 // Detect the topology change. Typically happens during CPU plugging-unplugging. 2840 bool os::numa_topology_changed() { 2841 int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie()); 2842 if (is_stale != -1 && is_stale) { 2843 Solaris::lgrp_fini(Solaris::lgrp_cookie()); 2844 Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER); 2845 assert(c != 0, "Failure to initialize LGRP API"); 2846 Solaris::set_lgrp_cookie(c); 2847 return true; 2848 } 2849 return false; 2850 } 2851 2852 // Get the group id of the current LWP. 2853 int os::numa_get_group_id() { 2854 int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID); 2855 if (lgrp_id == -1) { 2856 return 0; 2857 } 2858 const int size = os::numa_get_groups_num(); 2859 int *ids = (int*)alloca(size * sizeof(int)); 2860 2861 // Get the ids of all lgroups with memory; r is the count. 
2862 int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id, 2863 (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM); 2864 if (r <= 0) { 2865 return 0; 2866 } 2867 return ids[os::random() % r]; 2868 } 2869 2870 // Request information about the page. 2871 bool os::get_page_info(char *start, page_info* info) { 2872 const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE }; 2873 uint64_t addr = (uintptr_t)start; 2874 uint64_t outdata[2]; 2875 uint_t validity = 0; 2876 2877 if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) { 2878 return false; 2879 } 2880 2881 info->size = 0; 2882 info->lgrp_id = -1; 2883 2884 if ((validity & 1) != 0) { 2885 if ((validity & 2) != 0) { 2886 info->lgrp_id = outdata[0]; 2887 } 2888 if ((validity & 4) != 0) { 2889 info->size = outdata[1]; 2890 } 2891 return true; 2892 } 2893 return false; 2894 } 2895 2896 // Scan the pages from start to end until a page different than 2897 // the one described in the info parameter is encountered. 
2898 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) { 2899 const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE }; 2900 const size_t types = sizeof(info_types) / sizeof(info_types[0]); 2901 uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT]; 2902 uint_t validity[MAX_MEMINFO_CNT]; 2903 2904 size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size); 2905 uint64_t p = (uint64_t)start; 2906 while (p < (uint64_t)end) { 2907 addrs[0] = p; 2908 size_t addrs_count = 1; 2909 while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] < (uint64_t)end) { 2910 addrs[addrs_count] = addrs[addrs_count - 1] + page_size; 2911 addrs_count++; 2912 } 2913 2914 if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) { 2915 return NULL; 2916 } 2917 2918 size_t i = 0; 2919 for (; i < addrs_count; i++) { 2920 if ((validity[i] & 1) != 0) { 2921 if ((validity[i] & 4) != 0) { 2922 if (outdata[types * i + 1] != page_expected->size) { 2923 break; 2924 } 2925 } else 2926 if (page_expected->size != 0) { 2927 break; 2928 } 2929 2930 if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) { 2931 if (outdata[types * i] != page_expected->lgrp_id) { 2932 break; 2933 } 2934 } 2935 } else { 2936 return NULL; 2937 } 2938 } 2939 2940 if (i != addrs_count) { 2941 if ((validity[i] & 2) != 0) { 2942 page_found->lgrp_id = outdata[types * i]; 2943 } else { 2944 page_found->lgrp_id = -1; 2945 } 2946 if ((validity[i] & 4) != 0) { 2947 page_found->size = outdata[types * i + 1]; 2948 } else { 2949 page_found->size = 0; 2950 } 2951 return (char*)addrs[i]; 2952 } 2953 2954 p = addrs[addrs_count - 1] + page_size; 2955 } 2956 return end; 2957 } 2958 2959 bool os::uncommit_memory(char* addr, size_t bytes) { 2960 size_t size = bytes; 2961 // Map uncommitted pages PROT_NONE so we fail early if we touch an 2962 // uncommitted page. 
Otherwise, the read/write might succeed if we 2963 // have enough swap space to back the physical page. 2964 return 2965 NULL != Solaris::mmap_chunk(addr, size, 2966 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, 2967 PROT_NONE); 2968 } 2969 2970 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) { 2971 char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0); 2972 2973 if (b == MAP_FAILED) { 2974 return NULL; 2975 } 2976 return b; 2977 } 2978 2979 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) { 2980 char* addr = requested_addr; 2981 int flags = MAP_PRIVATE | MAP_NORESERVE; 2982 2983 assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap"); 2984 2985 if (fixed) { 2986 flags |= MAP_FIXED; 2987 } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) { 2988 flags |= MAP_ALIGN; 2989 addr = (char*) alignment_hint; 2990 } 2991 2992 // Map uncommitted pages PROT_NONE so we fail early if we touch an 2993 // uncommitted page. Otherwise, the read/write might succeed if we 2994 // have enough swap space to back the physical page. 2995 return mmap_chunk(addr, bytes, flags, PROT_NONE); 2996 } 2997 2998 char* os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) { 2999 char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL)); 3000 3001 guarantee(requested_addr == NULL || requested_addr == addr, 3002 "OS failed to return requested mmap address."); 3003 return addr; 3004 } 3005 3006 // Reserve memory at an arbitrary address, only if that area is 3007 // available (and not reserved for something else). 3008 3009 char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) { 3010 const int max_tries = 10; 3011 char* base[max_tries]; 3012 size_t size[max_tries]; 3013 3014 // Solaris adds a gap between mmap'ed regions. 
The size of the gap 3015 // is dependent on the requested size and the MMU. Our initial gap 3016 // value here is just a guess and will be corrected later. 3017 bool had_top_overlap = false; 3018 bool have_adjusted_gap = false; 3019 size_t gap = 0x400000; 3020 3021 // Assert only that the size is a multiple of the page size, since 3022 // that's all that mmap requires, and since that's all we really know 3023 // about at this low abstraction level. If we need higher alignment, 3024 // we can either pass an alignment to this method or verify alignment 3025 // in one of the methods further up the call chain. See bug 5044738. 3026 assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block"); 3027 3028 // Since snv_84, Solaris attempts to honor the address hint - see 5003415. 3029 // Give it a try, if the kernel honors the hint we can return immediately. 3030 char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false); 3031 volatile int err = errno; 3032 if (addr == requested_addr) { 3033 return addr; 3034 } else if (addr != NULL) { 3035 unmap_memory(addr, bytes); 3036 } 3037 3038 if (PrintMiscellaneous && Verbose) { 3039 char buf[256]; 3040 buf[0] = '\0'; 3041 if (addr == NULL) { 3042 jio_snprintf(buf, sizeof(buf), ": %s", strerror(err)); 3043 } 3044 warning("attempt_reserve_memory_at: couldn't reserve %d bytes at " 3045 PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT 3046 "%s", bytes, requested_addr, addr, buf); 3047 } 3048 3049 // Address hint method didn't work. Fall back to the old method. 3050 // In theory, once SNV becomes our oldest supported platform, this 3051 // code will no longer be needed. 3052 // 3053 // Repeatedly allocate blocks until the block is allocated at the 3054 // right spot. Give up after max_tries. 3055 int i; 3056 for (i = 0; i < max_tries; ++i) { 3057 base[i] = reserve_memory(bytes); 3058 3059 if (base[i] != NULL) { 3060 // Is this the block we wanted? 
3061 if (base[i] == requested_addr) { 3062 size[i] = bytes; 3063 break; 3064 } 3065 3066 // check that the gap value is right 3067 if (had_top_overlap && !have_adjusted_gap) { 3068 size_t actual_gap = base[i-1] - base[i] - bytes; 3069 if (gap != actual_gap) { 3070 // adjust the gap value and retry the last 2 allocations 3071 assert(i > 0, "gap adjustment code problem"); 3072 have_adjusted_gap = true; // adjust the gap only once, just in case 3073 gap = actual_gap; 3074 if (PrintMiscellaneous && Verbose) { 3075 warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap); 3076 } 3077 unmap_memory(base[i], bytes); 3078 unmap_memory(base[i-1], size[i-1]); 3079 i-=2; 3080 continue; 3081 } 3082 } 3083 3084 // Does this overlap the block we wanted? Give back the overlapped 3085 // parts and try again. 3086 // 3087 // There is still a bug in this code: if top_overlap == bytes, 3088 // the overlap is offset from requested region by the value of gap. 3089 // In this case giving back the overlapped part will not work, 3090 // because we'll give back the entire block at base[i] and 3091 // therefore the subsequent allocation will not generate a new gap. 3092 // This could be fixed with a new algorithm that used larger 3093 // or variable size chunks to find the requested region - 3094 // but such a change would introduce additional complications. 3095 // It's rare enough that the planets align for this bug, 3096 // so we'll just wait for a fix for 6204603/5003415 which 3097 // will provide a mmap flag to allow us to avoid this business. 
3098 3099 size_t top_overlap = requested_addr + (bytes + gap) - base[i]; 3100 if (top_overlap >= 0 && top_overlap < bytes) { 3101 had_top_overlap = true; 3102 unmap_memory(base[i], top_overlap); 3103 base[i] += top_overlap; 3104 size[i] = bytes - top_overlap; 3105 } else { 3106 size_t bottom_overlap = base[i] + bytes - requested_addr; 3107 if (bottom_overlap >= 0 && bottom_overlap < bytes) { 3108 if (PrintMiscellaneous && Verbose && bottom_overlap == 0) { 3109 warning("attempt_reserve_memory_at: possible alignment bug"); 3110 } 3111 unmap_memory(requested_addr, bottom_overlap); 3112 size[i] = bytes - bottom_overlap; 3113 } else { 3114 size[i] = bytes; 3115 } 3116 } 3117 } 3118 } 3119 3120 // Give back the unused reserved pieces. 3121 3122 for (int j = 0; j < i; ++j) { 3123 if (base[j] != NULL) { 3124 unmap_memory(base[j], size[j]); 3125 } 3126 } 3127 3128 return (i < max_tries) ? requested_addr : NULL; 3129 } 3130 3131 bool os::release_memory(char* addr, size_t bytes) { 3132 size_t size = bytes; 3133 return munmap(addr, size) == 0; 3134 } 3135 3136 static bool solaris_mprotect(char* addr, size_t bytes, int prot) { 3137 assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()), 3138 "addr must be page aligned"); 3139 int retVal = mprotect(addr, bytes, prot); 3140 return retVal == 0; 3141 } 3142 3143 // Protect memory (Used to pass readonly pages through 3144 // JNI GetArray<type>Elements with empty arrays.) 3145 // Also, used for serialization page and for compressed oops null pointer 3146 // checking. 3147 bool os::protect_memory(char* addr, size_t bytes, ProtType prot, 3148 bool is_committed) { 3149 unsigned int p = 0; 3150 switch (prot) { 3151 case MEM_PROT_NONE: p = PROT_NONE; break; 3152 case MEM_PROT_READ: p = PROT_READ; break; 3153 case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break; 3154 case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break; 3155 default: 3156 ShouldNotReachHere(); 3157 } 3158 // is_committed is unused. 
3159 return solaris_mprotect(addr, bytes, p); 3160 } 3161 3162 // guard_memory and unguard_memory only happens within stack guard pages. 3163 // Since ISM pertains only to the heap, guard and unguard memory should not 3164 /// happen with an ISM region. 3165 bool os::guard_memory(char* addr, size_t bytes) { 3166 return solaris_mprotect(addr, bytes, PROT_NONE); 3167 } 3168 3169 bool os::unguard_memory(char* addr, size_t bytes) { 3170 return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE); 3171 } 3172 3173 // Large page support 3174 3175 // UseLargePages is the master flag to enable/disable large page memory. 3176 // UseMPSS and UseISM are supported for compatibility reasons. Their combined 3177 // effects can be described in the following table: 3178 // 3179 // UseLargePages UseMPSS UseISM 3180 // false * * => UseLargePages is the master switch, turning 3181 // it off will turn off both UseMPSS and 3182 // UseISM. VM will not use large page memory 3183 // regardless the settings of UseMPSS/UseISM. 3184 // true false false => Unless future Solaris provides other 3185 // mechanism to use large page memory, this 3186 // combination is equivalent to -UseLargePages, 3187 // VM will not use large page memory 3188 // true true false => JVM will use MPSS for large page memory. 3189 // This is the default behavior. 3190 // true false true => JVM will use ISM for large page memory. 3191 // true true true => JVM will use ISM if it is available. 3192 // Otherwise, JVM will fall back to MPSS. 3193 // Becaues ISM is now available on all 3194 // supported Solaris versions, this combination 3195 // is equivalent to +UseISM -UseMPSS. 3196 3197 typedef int (*getpagesizes_func_type) (size_t[], int); 3198 static size_t _large_page_size = 0; 3199 3200 bool os::Solaris::ism_sanity_check(bool warn, size_t * page_size) { 3201 // x86 uses either 2M or 4M page, depending on whether PAE (Physical Address 3202 // Extensions) mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. 
Sparc 3203 // can support multiple page sizes. 3204 3205 // Don't bother to probe page size because getpagesizes() comes with MPSS. 3206 // ISM is only recommended on old Solaris where there is no MPSS support. 3207 // Simply choose a conservative value as default. 3208 *page_size = LargePageSizeInBytes ? LargePageSizeInBytes : 3209 SPARC_ONLY(4 * M) IA32_ONLY(4 * M) AMD64_ONLY(2 * M) 3210 ARM_ONLY(2 * M); 3211 3212 // ISM is available on all supported Solaris versions 3213 return true; 3214 } 3215 3216 // Insertion sort for small arrays (descending order). 3217 static void insertion_sort_descending(size_t* array, int len) { 3218 for (int i = 0; i < len; i++) { 3219 size_t val = array[i]; 3220 for (size_t key = i; key > 0 && array[key - 1] < val; --key) { 3221 size_t tmp = array[key]; 3222 array[key] = array[key - 1]; 3223 array[key - 1] = tmp; 3224 } 3225 } 3226 } 3227 3228 bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) { 3229 getpagesizes_func_type getpagesizes_func = 3230 CAST_TO_FN_PTR(getpagesizes_func_type, dlsym(RTLD_DEFAULT, "getpagesizes")); 3231 if (getpagesizes_func == NULL) { 3232 if (warn) { 3233 warning("MPSS is not supported by the operating system."); 3234 } 3235 return false; 3236 } 3237 3238 const unsigned int usable_count = VM_Version::page_size_count(); 3239 if (usable_count == 1) { 3240 return false; 3241 } 3242 3243 // Fill the array of page sizes. 3244 int n = getpagesizes_func(_page_sizes, page_sizes_max); 3245 assert(n > 0, "Solaris bug?"); 3246 if (n == page_sizes_max) { 3247 // Add a sentinel value (necessary only if the array was completely filled 3248 // since it is static (zeroed at initialization)). 3249 _page_sizes[--n] = 0; 3250 DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");) 3251 } 3252 assert(_page_sizes[n] == 0, "missing sentinel"); 3253 3254 if (n == 1) return false; // Only one page size available. 
3255 3256 // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and 3257 // select up to usable_count elements. First sort the array, find the first 3258 // acceptable value, then copy the usable sizes to the top of the array and 3259 // trim the rest. Make sure to include the default page size :-). 3260 // 3261 // A better policy could get rid of the 4M limit by taking the sizes of the 3262 // important VM memory regions (java heap and possibly the code cache) into 3263 // account. 3264 insertion_sort_descending(_page_sizes, n); 3265 const size_t size_limit = 3266 FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes; 3267 int beg; 3268 for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ; 3269 const int end = MIN2((int)usable_count, n) - 1; 3270 for (int cur = 0; cur < end; ++cur, ++beg) { 3271 _page_sizes[cur] = _page_sizes[beg]; 3272 } 3273 _page_sizes[end] = vm_page_size(); 3274 _page_sizes[end + 1] = 0; 3275 3276 if (_page_sizes[end] > _page_sizes[end - 1]) { 3277 // Default page size is not the smallest; sort again. 
3278 insertion_sort_descending(_page_sizes, end + 1); 3279 } 3280 *page_size = _page_sizes[0]; 3281 3282 return true; 3283 } 3284 3285 bool os::large_page_init() { 3286 if (!UseLargePages) { 3287 UseISM = false; 3288 UseMPSS = false; 3289 return false; 3290 } 3291 3292 // print a warning if any large page related flag is specified on command line 3293 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || 3294 !FLAG_IS_DEFAULT(UseISM) || 3295 !FLAG_IS_DEFAULT(UseMPSS) || 3296 !FLAG_IS_DEFAULT(LargePageSizeInBytes); 3297 UseISM = UseISM && 3298 Solaris::ism_sanity_check(warn_on_failure, &_large_page_size); 3299 if (UseISM) { 3300 // ISM disables MPSS to be compatible with old JDK behavior 3301 UseMPSS = false; 3302 _page_sizes[0] = _large_page_size; 3303 _page_sizes[1] = vm_page_size(); 3304 } 3305 3306 UseMPSS = UseMPSS && 3307 Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size); 3308 3309 UseLargePages = UseISM || UseMPSS; 3310 return UseLargePages; 3311 } 3312 3313 bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) { 3314 // Signal to OS that we want large pages for addresses 3315 // from addr, addr + bytes 3316 struct memcntl_mha mpss_struct; 3317 mpss_struct.mha_cmd = MHA_MAPSIZE_VA; 3318 mpss_struct.mha_pagesize = align; 3319 mpss_struct.mha_flags = 0; 3320 if (memcntl(start, bytes, MC_HAT_ADVISE, 3321 (caddr_t) &mpss_struct, 0, 0) < 0) { 3322 debug_only(warning("Attempt to use MPSS failed.")); 3323 return false; 3324 } 3325 return true; 3326 } 3327 3328 char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) { 3329 // "exec" is passed in but not used. Creating the shared image for 3330 // the code cache doesn't have an SHM_X executable permission to check. 
3331 assert(UseLargePages && UseISM, "only for ISM large pages"); 3332 3333 size_t size = bytes; 3334 char* retAddr = NULL; 3335 int shmid; 3336 key_t ismKey; 3337 3338 bool warn_on_failure = UseISM && 3339 (!FLAG_IS_DEFAULT(UseLargePages) || 3340 !FLAG_IS_DEFAULT(UseISM) || 3341 !FLAG_IS_DEFAULT(LargePageSizeInBytes) 3342 ); 3343 char msg[128]; 3344 3345 ismKey = IPC_PRIVATE; 3346 3347 // Create a large shared memory region to attach to based on size. 3348 // Currently, size is the total size of the heap 3349 shmid = shmget(ismKey, size, SHM_R | SHM_W | IPC_CREAT); 3350 if (shmid == -1){ 3351 if (warn_on_failure) { 3352 jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno); 3353 warning(msg); 3354 } 3355 return NULL; 3356 } 3357 3358 // Attach to the region 3359 retAddr = (char *) shmat(shmid, 0, SHM_SHARE_MMU | SHM_R | SHM_W); 3360 int err = errno; 3361 3362 // Remove shmid. If shmat() is successful, the actual shared memory segment 3363 // will be deleted when it's detached by shmdt() or when the process 3364 // terminates. If shmat() is not successful this will remove the shared 3365 // segment immediately. 3366 shmctl(shmid, IPC_RMID, NULL); 3367 3368 if (retAddr == (char *) -1) { 3369 if (warn_on_failure) { 3370 jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err); 3371 warning(msg); 3372 } 3373 return NULL; 3374 } 3375 3376 return retAddr; 3377 } 3378 3379 bool os::release_memory_special(char* base, size_t bytes) { 3380 // detaching the SHM segment will also delete it, see reserve_memory_special() 3381 int rslt = shmdt(base); 3382 return rslt == 0; 3383 } 3384 3385 size_t os::large_page_size() { 3386 return _large_page_size; 3387 } 3388 3389 // MPSS allows application to commit large page memory on demand; with ISM 3390 // the entire memory region must be allocated as shared memory. 3391 bool os::can_commit_large_page_memory() { 3392 return UseISM ? 
false : true; 3393 } 3394 3395 bool os::can_execute_large_page_memory() { 3396 return UseISM ? false : true; 3397 } 3398 3399 static int os_sleep(jlong millis, bool interruptible) { 3400 const jlong limit = INT_MAX; 3401 jlong prevtime; 3402 int res; 3403 3404 while (millis > limit) { 3405 if ((res = os_sleep(limit, interruptible)) != OS_OK) 3406 return res; 3407 millis -= limit; 3408 } 3409 3410 // Restart interrupted polls with new parameters until the proper delay 3411 // has been completed. 3412 3413 prevtime = getTimeMillis(); 3414 3415 while (millis > 0) { 3416 jlong newtime; 3417 3418 if (!interruptible) { 3419 // Following assert fails for os::yield_all: 3420 // assert(!thread->is_Java_thread(), "must not be java thread"); 3421 res = poll(NULL, 0, millis); 3422 } else { 3423 JavaThread *jt = JavaThread::current(); 3424 3425 INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt, 3426 os::Solaris::clear_interrupted); 3427 } 3428 3429 // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for 3430 // thread.Interrupt. 3431 3432 if((res == OS_ERR) && (errno == EINTR)) { 3433 newtime = getTimeMillis(); 3434 assert(newtime >= prevtime, "time moving backwards"); 3435 /* Doing prevtime and newtime in microseconds doesn't help precision, 3436 and trying to round up to avoid lost milliseconds can result in a 3437 too-short delay. */ 3438 millis -= newtime - prevtime; 3439 if(millis <= 0) 3440 return OS_OK; 3441 prevtime = newtime; 3442 } else 3443 return res; 3444 } 3445 3446 return OS_OK; 3447 } 3448 3449 // Read calls from inside the vm need to perform state transitions 3450 size_t os::read(int fd, void *buf, unsigned int nBytes) { 3451 INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted); 3452 } 3453 3454 int os::sleep(Thread* thread, jlong millis, bool interruptible) { 3455 assert(thread == Thread::current(), "thread consistency check"); 3456 3457 // TODO-FIXME: this should be removed. 
3458 // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock 3459 // situation with a JavaThread being starved out of a lwp. The kernel doesn't seem to generate 3460 // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving 3461 // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel 3462 // is fooled into believing that the system is making progress. In the code below we block the 3463 // the watcher thread while safepoint is in progress so that it would not appear as though the 3464 // system is making progress. 3465 if (!Solaris::T2_libthread() && 3466 thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) { 3467 // We now try to acquire the threads lock. Since this lock is held by the VM thread during 3468 // the entire safepoint, the watcher thread will line up here during the safepoint. 3469 Threads_lock->lock_without_safepoint_check(); 3470 Threads_lock->unlock(); 3471 } 3472 3473 if (thread->is_Java_thread()) { 3474 // This is a JavaThread so we honor the _thread_blocked protocol 3475 // even for sleeps of 0 milliseconds. This was originally done 3476 // as a workaround for bug 4338139. However, now we also do it 3477 // to honor the suspend-equivalent protocol. 3478 3479 JavaThread *jt = (JavaThread *) thread; 3480 ThreadBlockInVM tbivm(jt); 3481 3482 jt->set_suspend_equivalent(); 3483 // cleared by handle_special_suspend_equivalent_condition() or 3484 // java_suspend_self() via check_and_wait_while_suspended() 3485 3486 int ret_code; 3487 if (millis <= 0) { 3488 thr_yield(); 3489 ret_code = 0; 3490 } else { 3491 // The original sleep() implementation did not create an 3492 // OSThreadWaitState helper for sleeps of 0 milliseconds. 3493 // I'm preserving that decision for now. 
3494 OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */); 3495 3496 ret_code = os_sleep(millis, interruptible); 3497 } 3498 3499 // were we externally suspended while we were waiting? 3500 jt->check_and_wait_while_suspended(); 3501 3502 return ret_code; 3503 } 3504 3505 // non-JavaThread from this point on: 3506 3507 if (millis <= 0) { 3508 thr_yield(); 3509 return 0; 3510 } 3511 3512 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); 3513 3514 return os_sleep(millis, interruptible); 3515 } 3516 3517 int os::naked_sleep() { 3518 // %% make the sleep time an integer flag. for now use 1 millisec. 3519 return os_sleep(1, false); 3520 } 3521 3522 // Sleep forever; naked call to OS-specific sleep; use with CAUTION 3523 void os::infinite_sleep() { 3524 while (true) { // sleep forever ... 3525 ::sleep(100); // ... 100 seconds at a time 3526 } 3527 } 3528 3529 // Used to convert frequent JVM_Yield() to nops 3530 bool os::dont_yield() { 3531 if (DontYieldALot) { 3532 static hrtime_t last_time = 0; 3533 hrtime_t diff = getTimeNanos() - last_time; 3534 3535 if (diff < DontYieldALotInterval * 1000000) 3536 return true; 3537 3538 last_time += diff; 3539 3540 return false; 3541 } 3542 else { 3543 return false; 3544 } 3545 } 3546 3547 // Caveat: Solaris os::yield() causes a thread-state transition whereas 3548 // the linux and win32 implementations do not. This should be checked. 3549 3550 void os::yield() { 3551 // Yields to all threads with same or greater priority 3552 os::sleep(Thread::current(), 0, false); 3553 } 3554 3555 // Note that yield semantics are defined by the scheduling class to which 3556 // the thread currently belongs. Typically, yield will _not yield to 3557 // other equal or higher priority threads that reside on the dispatch queues 3558 // of other CPUs. 
3559 3560 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; } 3561 3562 3563 // On Solaris we found that yield_all doesn't always yield to all other threads. 3564 // There have been cases where there is a thread ready to execute but it doesn't 3565 // get an lwp as the VM thread continues to spin with sleeps of 1 millisecond. 3566 // The 1 millisecond wait doesn't seem long enough for the kernel to issue a 3567 // SIGWAITING signal which will cause a new lwp to be created. So we count the 3568 // number of times yield_all is called in the one loop and increase the sleep 3569 // time after 8 attempts. If this fails too we increase the concurrency level 3570 // so that the starving thread would get an lwp 3571 3572 void os::yield_all(int attempts) { 3573 // Yields to all threads, including threads with lower priorities 3574 if (attempts == 0) { 3575 os::sleep(Thread::current(), 1, false); 3576 } else { 3577 int iterations = attempts % 30; 3578 if (iterations == 0 && !os::Solaris::T2_libthread()) { 3579 // thr_setconcurrency and _getconcurrency make sense only under T1. 3580 int noofLWPS = thr_getconcurrency(); 3581 if (noofLWPS < (Threads::number_of_threads() + 2)) { 3582 thr_setconcurrency(thr_getconcurrency() + 1); 3583 } 3584 } else if (iterations < 25) { 3585 os::sleep(Thread::current(), 1, false); 3586 } else { 3587 os::sleep(Thread::current(), 10, false); 3588 } 3589 } 3590 } 3591 3592 // Called from the tight loops to possibly influence time-sharing heuristics 3593 void os::loop_breaker(int attempts) { 3594 os::yield_all(attempts); 3595 } 3596 3597 3598 // Interface for setting lwp priorities. If we are using T2 libthread, 3599 // which forces the use of BoundThreads or we manually set UseBoundThreads, 3600 // all of our threads will be assigned to real lwp's. 
Using the thr_setprio 3601 // function is meaningless in this mode so we must adjust the real lwp's priority 3602 // The routines below implement the getting and setting of lwp priorities. 3603 // 3604 // Note: There are three priority scales used on Solaris. Java priotities 3605 // which range from 1 to 10, libthread "thr_setprio" scale which range 3606 // from 0 to 127, and the current scheduling class of the process we 3607 // are running in. This is typically from -60 to +60. 3608 // The setting of the lwp priorities in done after a call to thr_setprio 3609 // so Java priorities are mapped to libthread priorities and we map from 3610 // the latter to lwp priorities. We don't keep priorities stored in 3611 // Java priorities since some of our worker threads want to set priorities 3612 // higher than all Java threads. 3613 // 3614 // For related information: 3615 // (1) man -s 2 priocntl 3616 // (2) man -s 4 priocntl 3617 // (3) man dispadmin 3618 // = librt.so 3619 // = libthread/common/rtsched.c - thrp_setlwpprio(). 3620 // = ps -cL <pid> ... to validate priority. 3621 // = sched_get_priority_min and _max 3622 // pthread_create 3623 // sched_setparam 3624 // pthread_setschedparam 3625 // 3626 // Assumptions: 3627 // + We assume that all threads in the process belong to the same 3628 // scheduling class. IE. an homogenous process. 3629 // + Must be root or in IA group to change change "interactive" attribute. 3630 // Priocntl() will fail silently. The only indication of failure is when 3631 // we read-back the value and notice that it hasn't changed. 3632 // + Interactive threads enter the runq at the head, non-interactive at the tail. 3633 // + For RT, change timeslice as well. Invariant: 3634 // constant "priority integral" 3635 // Konst == TimeSlice * (60-Priority) 3636 // Given a priority, compute appropriate timeslice. 3637 // + Higher numerical values have higher priority. 
3638 3639 // sched class attributes 3640 typedef struct { 3641 int schedPolicy; // classID 3642 int maxPrio; 3643 int minPrio; 3644 } SchedInfo; 3645 3646 3647 static SchedInfo tsLimits, iaLimits, rtLimits; 3648 3649 #ifdef ASSERT 3650 static int ReadBackValidate = 1; 3651 #endif 3652 static int myClass = 0; 3653 static int myMin = 0; 3654 static int myMax = 0; 3655 static int myCur = 0; 3656 static bool priocntl_enable = false; 3657 3658 3659 // Call the version of priocntl suitable for all supported versions 3660 // of Solaris. We need to call through this wrapper so that we can 3661 // build on Solaris 9 and run on Solaris 8, 9 and 10. 3662 // 3663 // This code should be removed if we ever stop supporting Solaris 8 3664 // and earlier releases. 3665 3666 static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg); 3667 typedef long (*priocntl_type)(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg); 3668 static priocntl_type priocntl_ptr = priocntl_stub; 3669 3670 // Stub to set the value of the real pointer, and then call the real 3671 // function. 3672 3673 static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg) { 3674 // Try Solaris 8- name only. 3675 priocntl_type tmp = (priocntl_type)dlsym(RTLD_DEFAULT, "__priocntl"); 3676 guarantee(tmp != NULL, "priocntl function not found."); 3677 priocntl_ptr = tmp; 3678 return (*priocntl_ptr)(PC_VERSION, idtype, id, cmd, arg); 3679 } 3680 3681 3682 // lwp_priocntl_init 3683 // 3684 // Try to determine the priority scale for our process. 3685 // 3686 // Return errno or 0 if OK. 
3687 // 3688 static 3689 int lwp_priocntl_init () 3690 { 3691 int rslt; 3692 pcinfo_t ClassInfo; 3693 pcparms_t ParmInfo; 3694 int i; 3695 3696 if (!UseThreadPriorities) return 0; 3697 3698 // We are using Bound threads, we need to determine our priority ranges 3699 if (os::Solaris::T2_libthread() || UseBoundThreads) { 3700 // If ThreadPriorityPolicy is 1, switch tables 3701 if (ThreadPriorityPolicy == 1) { 3702 for (i = 0 ; i < MaxPriority+1; i++) 3703 os::java_to_os_priority[i] = prio_policy1[i]; 3704 } 3705 } 3706 // Not using Bound Threads, set to ThreadPolicy 1 3707 else { 3708 for ( i = 0 ; i < MaxPriority+1; i++ ) { 3709 os::java_to_os_priority[i] = prio_policy1[i]; 3710 } 3711 return 0; 3712 } 3713 3714 3715 // Get IDs for a set of well-known scheduling classes. 3716 // TODO-FIXME: GETCLINFO returns the current # of classes in the 3717 // the system. We should have a loop that iterates over the 3718 // classID values, which are known to be "small" integers. 3719 3720 strcpy(ClassInfo.pc_clname, "TS"); 3721 ClassInfo.pc_cid = -1; 3722 rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3723 if (rslt < 0) return errno; 3724 assert(ClassInfo.pc_cid != -1, "cid for TS class is -1"); 3725 tsLimits.schedPolicy = ClassInfo.pc_cid; 3726 tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri; 3727 tsLimits.minPrio = -tsLimits.maxPrio; 3728 3729 strcpy(ClassInfo.pc_clname, "IA"); 3730 ClassInfo.pc_cid = -1; 3731 rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3732 if (rslt < 0) return errno; 3733 assert(ClassInfo.pc_cid != -1, "cid for IA class is -1"); 3734 iaLimits.schedPolicy = ClassInfo.pc_cid; 3735 iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri; 3736 iaLimits.minPrio = -iaLimits.maxPrio; 3737 3738 strcpy(ClassInfo.pc_clname, "RT"); 3739 ClassInfo.pc_cid = -1; 3740 rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3741 if (rslt < 0) return errno; 3742 
assert(ClassInfo.pc_cid != -1, "cid for RT class is -1"); 3743 rtLimits.schedPolicy = ClassInfo.pc_cid; 3744 rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri; 3745 rtLimits.minPrio = 0; 3746 3747 3748 // Query our "current" scheduling class. 3749 // This will normally be IA,TS or, rarely, RT. 3750 memset (&ParmInfo, 0, sizeof(ParmInfo)); 3751 ParmInfo.pc_cid = PC_CLNULL; 3752 rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo ); 3753 if ( rslt < 0 ) return errno; 3754 myClass = ParmInfo.pc_cid; 3755 3756 // We now know our scheduling classId, get specific information 3757 // the class. 3758 ClassInfo.pc_cid = myClass; 3759 ClassInfo.pc_clname[0] = 0; 3760 rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo ); 3761 if ( rslt < 0 ) return errno; 3762 3763 if (ThreadPriorityVerbose) 3764 tty->print_cr ("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname); 3765 3766 memset(&ParmInfo, 0, sizeof(pcparms_t)); 3767 ParmInfo.pc_cid = PC_CLNULL; 3768 rslt = (*priocntl_ptr)(PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo); 3769 if (rslt < 0) return errno; 3770 3771 if (ParmInfo.pc_cid == rtLimits.schedPolicy) { 3772 myMin = rtLimits.minPrio; 3773 myMax = rtLimits.maxPrio; 3774 } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) { 3775 iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms; 3776 myMin = iaLimits.minPrio; 3777 myMax = iaLimits.maxPrio; 3778 myMax = MIN2(myMax, (int)iaInfo->ia_uprilim); // clamp - restrict 3779 } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) { 3780 tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms; 3781 myMin = tsLimits.minPrio; 3782 myMax = tsLimits.maxPrio; 3783 myMax = MIN2(myMax, (int)tsInfo->ts_uprilim); // clamp - restrict 3784 } else { 3785 // No clue - punt 3786 if (ThreadPriorityVerbose) 3787 tty->print_cr ("Unknown scheduling class: %s ... 
\n", ClassInfo.pc_clname); 3788 return EINVAL; // no clue, punt 3789 } 3790 3791 if (ThreadPriorityVerbose) 3792 tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax); 3793 3794 priocntl_enable = true; // Enable changing priorities 3795 return 0; 3796 } 3797 3798 #define IAPRI(x) ((iaparms_t *)((x).pc_clparms)) 3799 #define RTPRI(x) ((rtparms_t *)((x).pc_clparms)) 3800 #define TSPRI(x) ((tsparms_t *)((x).pc_clparms)) 3801 3802 3803 // scale_to_lwp_priority 3804 // 3805 // Convert from the libthread "thr_setprio" scale to our current 3806 // lwp scheduling class scale. 3807 // 3808 static 3809 int scale_to_lwp_priority (int rMin, int rMax, int x) 3810 { 3811 int v; 3812 3813 if (x == 127) return rMax; // avoid round-down 3814 v = (((x*(rMax-rMin)))/128)+rMin; 3815 return v; 3816 } 3817 3818 3819 // set_lwp_priority 3820 // 3821 // Set the priority of the lwp. This call should only be made 3822 // when using bound threads (T2 threads are bound by default). 3823 // 3824 int set_lwp_priority (int ThreadID, int lwpid, int newPrio ) 3825 { 3826 int rslt; 3827 int Actual, Expected, prv; 3828 pcparms_t ParmInfo; // for GET-SET 3829 #ifdef ASSERT 3830 pcparms_t ReadBack; // for readback 3831 #endif 3832 3833 // Set priority via PC_GETPARMS, update, PC_SETPARMS 3834 // Query current values. 3835 // TODO: accelerate this by eliminating the PC_GETPARMS call. 3836 // Cache "pcparms_t" in global ParmCache. 3837 // TODO: elide set-to-same-value 3838 3839 // If something went wrong on init, don't change priorities. 3840 if ( !priocntl_enable ) { 3841 if (ThreadPriorityVerbose) 3842 tty->print_cr("Trying to set priority but init failed, ignoring"); 3843 return EINVAL; 3844 } 3845 3846 3847 // If lwp hasn't started yet, just return 3848 // the _start routine will call us again. 
3849 if ( lwpid <= 0 ) { 3850 if (ThreadPriorityVerbose) { 3851 tty->print_cr ("deferring the set_lwp_priority of thread " INTPTR_FORMAT " to %d, lwpid not set", 3852 ThreadID, newPrio); 3853 } 3854 return 0; 3855 } 3856 3857 if (ThreadPriorityVerbose) { 3858 tty->print_cr ("set_lwp_priority(" INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ", 3859 ThreadID, lwpid, newPrio); 3860 } 3861 3862 memset(&ParmInfo, 0, sizeof(pcparms_t)); 3863 ParmInfo.pc_cid = PC_CLNULL; 3864 rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo); 3865 if (rslt < 0) return errno; 3866 3867 if (ParmInfo.pc_cid == rtLimits.schedPolicy) { 3868 rtparms_t *rtInfo = (rtparms_t*)ParmInfo.pc_clparms; 3869 rtInfo->rt_pri = scale_to_lwp_priority (rtLimits.minPrio, rtLimits.maxPrio, newPrio); 3870 rtInfo->rt_tqsecs = RT_NOCHANGE; 3871 rtInfo->rt_tqnsecs = RT_NOCHANGE; 3872 if (ThreadPriorityVerbose) { 3873 tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri); 3874 } 3875 } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) { 3876 iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms; 3877 int maxClamped = MIN2(iaLimits.maxPrio, (int)iaInfo->ia_uprilim); 3878 iaInfo->ia_upri = scale_to_lwp_priority(iaLimits.minPrio, maxClamped, newPrio); 3879 iaInfo->ia_uprilim = IA_NOCHANGE; 3880 iaInfo->ia_mode = IA_NOCHANGE; 3881 if (ThreadPriorityVerbose) { 3882 tty->print_cr ("IA: [%d...%d] %d->%d\n", 3883 iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri); 3884 } 3885 } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) { 3886 tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms; 3887 int maxClamped = MIN2(tsLimits.maxPrio, (int)tsInfo->ts_uprilim); 3888 prv = tsInfo->ts_upri; 3889 tsInfo->ts_upri = scale_to_lwp_priority(tsLimits.minPrio, maxClamped, newPrio); 3890 tsInfo->ts_uprilim = IA_NOCHANGE; 3891 if (ThreadPriorityVerbose) { 3892 tty->print_cr ("TS: %d [%d...%d] %d->%d\n", 3893 prv, tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri); 3894 } 3895 if (prv == tsInfo->ts_upri) 
return 0; 3896 } else { 3897 if ( ThreadPriorityVerbose ) { 3898 tty->print_cr ("Unknown scheduling class\n"); 3899 } 3900 return EINVAL; // no clue, punt 3901 } 3902 3903 rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo); 3904 if (ThreadPriorityVerbose && rslt) { 3905 tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno); 3906 } 3907 if (rslt < 0) return errno; 3908 3909 #ifdef ASSERT 3910 // Sanity check: read back what we just attempted to set. 3911 // In theory it could have changed in the interim ... 3912 // 3913 // The priocntl system call is tricky. 3914 // Sometimes it'll validate the priority value argument and 3915 // return EINVAL if unhappy. At other times it fails silently. 3916 // Readbacks are prudent. 3917 3918 if (!ReadBackValidate) return 0; 3919 3920 memset(&ReadBack, 0, sizeof(pcparms_t)); 3921 ReadBack.pc_cid = PC_CLNULL; 3922 rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack); 3923 assert(rslt >= 0, "priocntl failed"); 3924 Actual = Expected = 0xBAD; 3925 assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match"); 3926 if (ParmInfo.pc_cid == rtLimits.schedPolicy) { 3927 Actual = RTPRI(ReadBack)->rt_pri; 3928 Expected = RTPRI(ParmInfo)->rt_pri; 3929 } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) { 3930 Actual = IAPRI(ReadBack)->ia_upri; 3931 Expected = IAPRI(ParmInfo)->ia_upri; 3932 } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) { 3933 Actual = TSPRI(ReadBack)->ts_upri; 3934 Expected = TSPRI(ParmInfo)->ts_upri; 3935 } else { 3936 if ( ThreadPriorityVerbose ) { 3937 tty->print_cr("set_lwp_priority: unexpected class in readback: %d\n", ParmInfo.pc_cid); 3938 } 3939 } 3940 3941 if (Actual != Expected) { 3942 if ( ThreadPriorityVerbose ) { 3943 tty->print_cr ("set_lwp_priority(%d %d) Class=%d: actual=%d vs expected=%d\n", 3944 lwpid, newPrio, ReadBack.pc_cid, Actual, Expected); 3945 } 3946 } 3947 #endif 3948 3949 return 0; 3950 } 3951 3952 3953 3954 // Solaris only 
// Solaris only gives access to 128 real priorities at a time,
// so we expand Java's ten to fill this range.  This would be better
// if we dynamically adjusted relative priorities.
//
// The ThreadPriorityPolicy option allows us to select 2 different
// priority scales.
//
// ThreadPriorityPolicy=0
//   Since the Solaris' default priority is MaximumPriority, we do not
//   set a priority lower than Max unless a priority lower than
//   NormPriority is requested.
//
// ThreadPriorityPolicy=1
//   This mode causes the priority table to get filled with
//   linear values.  NormPriority get's mapped to 50% of the
//   Maximum priority an so on.  This will cause VM threads
//   to get unfair treatment against other Solaris processes
//   which do not explicitly alter their thread priorities.
//

// Mapping from Java priority (index 1..10) to the libthread thr_setprio
// scale (0..127).  Index 0 is unused.
int os::java_to_os_priority[MaxPriority + 1] = {
  -99999, // 0 Entry should never be used

  0, // 1 MinPriority
  32, // 2
  64, // 3

  96, // 4
  127, // 5 NormPriority
  127, // 6

  127, // 7
  127, // 8
  127, // 9 NearMaxPriority

  127 // 10 MaxPriority
};

// Set the native priority of 'thread' to 'newpri' (already mapped through
// java_to_os_priority).  Always sets the libthread priority; additionally
// adjusts the underlying lwp's priority when threads are bound (T2 libthread,
// or UseBoundThreads for VM-created threads).
OSReturn os::set_native_priority(Thread* thread, int newpri) {
  assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
  if ( !UseThreadPriorities ) return OS_OK;
  int status = thr_setprio(thread->osthread()->thread_id(), newpri);
  // OR the two status codes together: any non-zero result reports failure.
  if ( os::Solaris::T2_libthread() || (UseBoundThreads && thread->osthread()->is_vm_created()) )
    status |= (set_lwp_priority (thread->osthread()->thread_id(),
                    thread->osthread()->lwp_id(), newpri ));
  return (status == 0) ? OS_OK : OS_ERR;
}


// Retrieve the current libthread priority of 'thread' into *priority_ptr.
// When thread priorities are disabled, reports NormalPriority.
OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  int p;
  if ( !UseThreadPriorities ) {
    *priority_ptr = NormalPriority;
    return OS_OK;
  }
  int status = thr_getprio(thread->osthread()->thread_id(), &p);
  if (status != 0) {
    return OS_ERR;
  }
  *priority_ptr = p;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {
  schedctl_start(schedctl_init());
}

// Interrupt 'thread': set its interrupted flag, then wake it from any of the
// parking/sleeping mechanisms it might be blocked in.
void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  int isInterrupted = osthread->interrupted();
  if (!isInterrupted) {
    osthread->set_interrupted(true);
    // Full fence so the flag is visible before any wakeup below.
    OrderAccess::fence();
    // os::sleep() is implemented with either poll (NULL,0,timeout) or
    // by parking on _SleepEvent.  If the former, thr_kill will unwedge
    // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper.
    ParkEvent * const slp = thread->_SleepEvent ;
    if (slp != NULL) slp->unpark() ;
  }

  // For JSR166:  unpark after setting status but before thr_kill -dl
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  // Handle interruptible wait() ...
  ParkEvent * const ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

  // When events are used everywhere for os::sleep, then this thr_kill
  // will only be needed if UseVMInterruptibleIO is true.

  if (!isInterrupted) {
    int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
    assert_status(status == 0, status, "thr_kill");

    // Bump thread interruption counter
    RuntimeService::record_thread_interrupt_signaled_count();
  }
}


// Return whether 'thread' is interrupted, optionally clearing the flag.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  bool res = osthread->interrupted();

  // NOTE that since there is no "lock" around these two operations,
  // there is the possibility that the interrupted flag will be
  // "false" but that the interrupt event will be set.  This is
  // intentional.  The effect of this is that Object.wait() will appear
  // to have a spurious wakeup, which is not harmful, and the
  // possibility is so rare that it is not worth the added complexity
  // to add yet another lock.  It has also been recommended not to put
  // the interrupted flag into the os::Solaris::Event structure,
  // because it hides the issue.
  if (res && clear_interrupted) {
    osthread->set_interrupted(false);
  }
  return res;
}


void os::print_statistics() {
}

// Print 'title' and 'message' to stderr framed by separator lines, then wait
// for a y/Y (yes) or any other (no) response on stdin.
int os::message_box(const char* title, const char* message) {
  int i;
  fdStream err(defaultStream::error_fd());
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();
  err.print_raw_cr(title);
  for (i = 0; i < 78; i++) err.print_raw("-");
  err.cr();
  err.print_raw_cr(message);
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();

  char buf[16];
  // Prevent process from exiting upon "read error" without consuming all CPU
  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }

  return buf[0] == 'y' || buf[0] == 'Y';
}

// A lightweight implementation that does not suspend the target thread and
// thus returns only a hint.  Used for profiling only!
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher and the Threads lock is owned.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  // For now, is only used to profile the VM Thread
  assert(thread->is_VM_thread(), "Can only be called for VMThread");
  ExtendedPC epc;

  GetThreadPC_Callback  cb(ProfileVM_lock);
  OSThread *osthread = thread->osthread();
  const int time_to_wait = 400; // 400ms wait for initial response
  int status = cb.interrupt(thread, time_to_wait);

  if (cb.is_done() ) {
    epc = cb.addr();
  } else {
    DEBUG_ONLY(tty->print_cr("Failed to get pc for thread: %d got %d status",
                             osthread->thread_id(), status););
    // epc is already NULL
  }
  return epc;
}


// This does not do anything on Solaris. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
4136 void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) { 4137 f(value, method, args, thread); 4138 } 4139 4140 // This routine may be used by user applications as a "hook" to catch signals. 4141 // The user-defined signal handler must pass unrecognized signals to this 4142 // routine, and if it returns true (non-zero), then the signal handler must 4143 // return immediately. If the flag "abort_if_unrecognized" is true, then this 4144 // routine will never retun false (zero), but instead will execute a VM panic 4145 // routine kill the process. 4146 // 4147 // If this routine returns false, it is OK to call it again. This allows 4148 // the user-defined signal handler to perform checks either before or after 4149 // the VM performs its own checks. Naturally, the user code would be making 4150 // a serious error if it tried to handle an exception (such as a null check 4151 // or breakpoint) that the VM was generating for its own correct operation. 4152 // 4153 // This routine may recognize any of the following kinds of signals: 4154 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ, 4155 // os::Solaris::SIGasync 4156 // It should be consulted by handlers for any of those signals. 4157 // It explicitly does not recognize os::Solaris::SIGinterrupt 4158 // 4159 // The caller of this routine must pass in the three arguments supplied 4160 // to the function referred to in the "sa_sigaction" (not the "sa_handler") 4161 // field of the structure passed to sigaction(). This routine assumes that 4162 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART. 4163 // 4164 // Note that the VM will print warnings if it detects conflicting signal 4165 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers". 
4166 // 4167 extern "C" int JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized); 4168 4169 4170 void signalHandler(int sig, siginfo_t* info, void* ucVoid) { 4171 JVM_handle_solaris_signal(sig, info, ucVoid, true); 4172 } 4173 4174 /* Do not delete - if guarantee is ever removed, a signal handler (even empty) 4175 is needed to provoke threads blocked on IO to return an EINTR 4176 Note: this explicitly does NOT call JVM_handle_solaris_signal and 4177 does NOT participate in signal chaining due to requirement for 4178 NOT setting SA_RESTART to make EINTR work. */ 4179 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) { 4180 if (UseSignalChaining) { 4181 struct sigaction *actp = os::Solaris::get_chained_signal_action(sig); 4182 if (actp && actp->sa_handler) { 4183 vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs"); 4184 } 4185 } 4186 } 4187 4188 // This boolean allows users to forward their own non-matching signals 4189 // to JVM_handle_solaris_signal, harmlessly. 
bool os::Solaris::signal_handlers_are_installed = false;

// For signal-chaining
bool os::Solaris::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Solaris::get_signal_action = NULL;

// Return the handler a chained signal should be forwarded to: first ask
// libjsig (if loaded and the signal is in its supported range), then fall
// back to any handler the VM saved before installing its own.
struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
  struct sigaction *actp = NULL;

  if ((libjsig_is_loaded) && (sig <= Maxlibjsigsigs)) {
    // Retrieve the old signal handler from libjsig
    actp = (*get_signal_action)(sig);
  }
  if (actp == NULL) {
    // Retrieve the preinstalled signal handler from jvm
    actp = get_preinstalled_handler(sig);
  }

  return actp;
}

// Invoke the application's original (pre-VM) handler described by actp.
// Returns false for SIG_DFL (let the VM treat the signal itself), true
// otherwise (including SIG_IGN, where "handling" is doing nothing).
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand;
    sa_sigaction_t sa;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    // Honor SA_RESETHAND: one-shot handlers revert to SIG_DFL once called.
    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    thr_sigsetmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}

// Forward sig to the chained (application) handler, if chaining is enabled
// and a chained handler exists.  Returns whether the signal was handled.
bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  bool chained = false;
  // signal-chaining
  if (UseSignalChaining) {
    struct sigaction *actp = get_chained_signal_action(sig);
    if (actp != NULL) {
      chained = call_chained_handler(actp, sig, siginfo, context);
    }
  }
  return chained;
}

// Return the handler the VM saved for sig before overwriting it, or NULL
// if none was saved.
struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  if (preinstalled_sigs[sig] != 0) {
    return &chainedsigactions[sig];
  }
  return NULL;
}

// Record the handler that was installed for sig before the VM replaced it,
// so chained_handler() can forward to it later.
void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {

  assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  chainedsigactions[sig] = oldAct;
  preinstalled_sigs[sig] = 1;
}

// Install the VM's handler for sig.  set_installed selects whether the VM
// handler is really being installed; oktochain says whether a pre-existing
// user handler may be saved for chaining (false for the VM interrupt signal).
void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);
  void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                      : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      if (oktochain) {
        // save the old handler in jvm
        save_preinstalled_handler(sig, oldAct);
      } else {
        vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
      }
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;

  sigAct.sa_sigaction = signalHandler;
  // Handle SIGSEGV on alternate signal stack if
  // not using stack banging
  if (!UseStackBanging && sig == SIGSEGV) {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
  // Interruptible i/o requires SA_RESTART cleared so EINTR
  // is returned instead of restarting system calls
  } else if (sig == os::Solaris::SIGinterrupt()) {
    sigemptyset(&sigAct.sa_mask);
    sigAct.sa_handler = NULL;
    sigAct.sa_flags = SA_SIGINFO;
    sigAct.sa_sigaction = sigINTRHandler;
  } else {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  }
  // Remember our flags so check_signal_handler() can detect later changes.
  os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);

  sigaction(sig, &sigAct, &oldAct);

  void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                       : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}


// Check a signal's handler only once: skip signals already recorded in
// check_signal_done.
#define DO_SIGNAL_CHECK(sig) \
      if (!sigismember(&check_signal_done, sig)) \
        os::Solaris::check_signal_handler(sig)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {
  // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  // thereby preventing a NULL checks.
  if(!check_addr0_done) check_addr0_done = check_addr0(tty);

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check for the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // See comments above for using JVM1/JVM2 and UseAltSigs
  DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  DO_SIGNAL_CHECK(os::Solaris::SIGasync());

}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

static os_sigaction_t os_sigaction = NULL;

// Warn (once per signal) if the currently installed handler or its flags
// differ from what the VM installed — evidence of a misbehaving application.
void os::Solaris::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler) ;


  switch(sig) {
    case SIGSEGV:
    case SIGBUS:
    case SIGFPE:
    case SIGPIPE:
    case SIGXFSZ:
    case SIGILL:
      jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      break;

    case SHUTDOWN1_SIGNAL:
    case SHUTDOWN2_SIGNAL:
    case SHUTDOWN3_SIGNAL:
    case BREAK_SIGNAL:
      jvmHandler = (address)user_handler();
      break;

    default:
      int intrsig = os::Solaris::SIGinterrupt();
      int asynsig = os::Solaris::SIGasync();

      if (sig == intrsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
      } else if (sig == asynsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      } else {
        // Not a signal the VM owns - nothing to check.
        return;
      }
      break;
  }


  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Print all the signal handler state
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }

}

// Install the VM's handlers for all signals it owns, coordinating with
// libjsig (if present) so signal chaining keeps working.
void os::Solaris::install_signal_handlers() {
  bool libjsigdone = false;
  signal_handlers_are_installed = true;

  // signal-chaining
  typedef void (*signal_setting_t)();
  signal_setting_t begin_signal_setting = NULL;
  signal_setting_t end_signal_setting = NULL;
  begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  if (begin_signal_setting != NULL) {
    // libjsig is loaded: pick up its bracketing/query entry points.
    end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
    get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                       dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
    get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
    libjsig_is_loaded = true;
    if (os::Solaris::get_libjsig_version != NULL) {
      libjsigversion = (*os::Solaris::get_libjsig_version)();
    }
    assert(UseSignalChaining, "should enable signal-chaining");
  }
  if (libjsig_is_loaded) {
    // Tell libjsig jvm is setting signal handlers
    (*begin_signal_setting)();
  }

  set_signal_handler(SIGSEGV, true, true);
  set_signal_handler(SIGPIPE, true, true);
  set_signal_handler(SIGXFSZ, true, true);
  set_signal_handler(SIGBUS, true, true);
  set_signal_handler(SIGILL, true, true);
  set_signal_handler(SIGFPE, true, true);


  if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {

    // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
    // can not register overridable signals which might be > 32
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
      // Tell libjsig jvm has finished setting signal handlers
      (*end_signal_setting)();
      libjsigdone = true;
    }
  }

  // Never ok to chain our SIGinterrupt
  set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
  set_signal_handler(os::Solaris::SIGasync(), true, true);

  if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig jvm finishes setting signal handlers
    (*end_signal_setting)();
  }

  // We don't activate signal checker if libjsig is in place, we trust ourselves
  // and if UserSignalHandler is installed all bets are off
  if (CheckJNICalls) {
    if (libjsig_is_loaded) {
      tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
      check_signals = false;
    }
    if (AllowUserSignalHandlers) {
      tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
      check_signals = false;
    }
  }
}


void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);

// Names for the low-numbered Solaris signals, indexed by signal number.
const char * signames[] = {
  "SIG0",
  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  "SIGCANCEL", "SIGLOST"
};

// Format a human-readable name for signal 'exception_code' into buf: either
// the symbolic name from signames[] or "SIG<n>" for higher signal numbers.
// Returns buf, or NULL if the code is not a valid signal number.
const char* os::exception_name(int exception_code, char* buf, size_t size) {
  if (0 < exception_code && exception_code <= SIGRTMAX) {
    // signal
    if (exception_code < sizeof(signames)/sizeof(const char*)) {
      jio_snprintf(buf, size, "%s", signames[exception_code]);
    } else {
      jio_snprintf(buf, size, "SIG%d", exception_code);
    }
    return buf;
  } else {
    return NULL;
  }
}

// (Static) wrappers for the new libthread API
int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
int_fnP_thread_t_i os::Solaris::_thr_setmutator;
int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
int_fnP_thread_t os::Solaris::_thr_continue_mutator;

// (Static) wrapper for getisax(2) call.
os::Solaris::getisax_func_t os::Solaris::_getisax = 0;

// (Static) wrappers for the liblgrp API — populated by liblgrp_init() when
// liblgrp.so.1 can be dlopen'd; otherwise they stay default-initialized.
os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;

// (Static) wrapper for meminfo() call.
os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;

// Look up a symbol in the already-loaded objects; returns NULL if absent.
static address resolve_symbol_lazy(const char* name) {
  address addr = (address) dlsym(RTLD_DEFAULT, name);
  if(addr == NULL) {
    // RTLD_DEFAULT was not defined on some early versions of 2.5.1
    addr = (address) dlsym(RTLD_NEXT, name);
  }
  return addr;
}

// Like resolve_symbol_lazy(), but the symbol is mandatory: aborts the VM
// with the dlerror() text if it cannot be resolved.
static address resolve_symbol(const char* name) {
  address addr = resolve_symbol_lazy(name);
  if(addr == NULL) {
    fatal(dlerror());
  }
  return addr;
}



// isT2_libthread()
//
// Routine to determine if we are currently using the new T2 libthread.
//
// We determine if we are using T2 by reading /proc/self/lstatus and
// looking for a thread with the ASLWP bit set.  If we find this status
// bit set, we must assume that we are NOT using T2.  The T2 team
// has approved this algorithm.
//
// We need to determine if we are running with the new T2 libthread
// since setting native thread priorities is handled differently
// when using this library.  All threads created using T2 are bound
// threads.  Calling thr_setprio is meaningless in this case.
//
// Returns true iff /proc/self/lstatus shows no LWP with the PR_ASLWP flag
// (the aslwp only exists under the old T1 libthread).
bool isT2_libthread() {
  static prheader_t * lwpArray = NULL;
  static int lwpSize = 0;
  static int lwpFile = -1;
  lwpstatus_t * that;
  char lwpName [128];
  bool isT2 = false;

// ADR: raw address of x; LWPINDEX: i-th lwpstatus_t record following the
// prheader_t, using the kernel-reported per-entry size.
#define ADR(x)  ((uintptr_t)(x))
#define LWPINDEX(ary,ix) ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))

  lwpFile = open("/proc/self/lstatus", O_RDONLY, 0);
  if (lwpFile < 0) {
    if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
    return false;
  }
  // Start with a 16K buffer; grow and retry if the snapshot did not fit
  // (pr_nent * pr_entsize tells us the size actually required).
  lwpSize = 16*1024;
  for (;;) {
    lseek (lwpFile, 0, SEEK_SET);
    lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize);
    if (read(lwpFile, lwpArray, lwpSize) < 0) {
      if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
      break;
    }
    if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
      // We got a good snapshot - now iterate over the list.
      int aslwpcount = 0;
      for (int i = 0; i < lwpArray->pr_nent; i++ ) {
        that = LWPINDEX(lwpArray,i);
        if (that->pr_flags & PR_ASLWP) {
          aslwpcount++;
        }
      }
      // No aslwp seen => new T2 libthread.
      if (aslwpcount == 0) isT2 = true;
      break;
    }
    lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
    FREE_C_HEAP_ARRAY(char, lwpArray);  // retry.
  }

  FREE_C_HEAP_ARRAY(char, lwpArray);
  close (lwpFile);
  if (ThreadPriorityVerbose) {
    if (isT2) tty->print_cr("We are running with a T2 libthread\n");
    else tty->print_cr("We are not running with a T2 libthread\n");
  }
  return isT2;
}


// Resolve the libthread entry points the VM uses and record whether we are
// on the T2 libthread.  Aborts (guarantee/fatal) if libthread is too old.
void os::Solaris::libthread_init() {
  address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");

  // Determine if we are running with the new T2 libthread
  os::Solaris::set_T2_libthread(isT2_libthread());

  lwp_priocntl_init();

  // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  if(func == NULL) {
    func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on an new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
    guarantee(func != NULL, "libthread.so is too old.");
  }

  // Initialize the new libthread getstate API wrappers
  func = resolve_symbol("thr_getstate");
  os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));

  func = resolve_symbol("thr_setstate");
  os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));

  func = resolve_symbol("thr_setmutator");
  os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));

  func = resolve_symbol("thr_suspend_mutator");
  os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  func = resolve_symbol("thr_continue_mutator");
  os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  // Record the address range of libthread's signal handler trampoline so the
  // VM can recognize frames inside it.
  int size;
  void (*handler_info_func)(address *, int *);
  handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  handler_info_func(&handler_start, &size);
  handler_end = handler_start + size;
}


// (declaration continues on the next chunk line)
int_fnP_mutex_tP
os::Solaris::_mutex_lock;
int_fnP_mutex_tP os::Solaris::_mutex_trylock;
int_fnP_mutex_tP os::Solaris::_mutex_unlock;
int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
int_fnP_mutex_tP os::Solaris::_mutex_destroy;
int os::Solaris::_mutex_scope = USYNC_THREAD;

int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
int_fnP_cond_tP os::Solaris::_cond_signal;
int_fnP_cond_tP os::Solaris::_cond_broadcast;
int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
int_fnP_cond_tP os::Solaris::_cond_destroy;
int os::Solaris::_cond_scope = USYNC_THREAD;

// Select the mutex/condvar primitive family the VM will use: raw _lwp_*
// calls, pthreads, or default Solaris threads — controlled by the
// UseLWPSynchronization / UsePthreads flags.
void os::Solaris::synchronization_init() {
  if(UseLWPSynchronization) {
    os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
    os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
    os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
    os::Solaris::set_mutex_init(lwp_mutex_init);
    os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
    os::Solaris::set_mutex_scope(USYNC_THREAD);

    os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
    os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
    os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
    os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
    os::Solaris::set_cond_init(lwp_cond_init);
    os::Solaris::set_cond_destroy(lwp_cond_destroy);
    os::Solaris::set_cond_scope(USYNC_THREAD);
  }
  else {
    os::Solaris::set_mutex_scope(USYNC_THREAD);
    os::Solaris::set_cond_scope(USYNC_THREAD);

    if(UsePthreads) {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
      os::Solaris::set_mutex_init(pthread_mutex_default_init);
      os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
      os::Solaris::set_cond_init(pthread_cond_default_init);
      os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
    }
    else {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
      os::Solaris::set_mutex_init(::mutex_init);
      os::Solaris::set_mutex_destroy(::mutex_destroy);

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
      os::Solaris::set_cond_init(::cond_init);
      os::Solaris::set_cond_destroy(::cond_destroy);
    }
  }
}

// Load liblgrp.so.1 and resolve the locality-group API; returns false (and
// leaves NUMA support off) if the library is unavailable.
bool os::Solaris::liblgrp_init() {
  void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
  if (handle != NULL) {
    os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
    os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
    os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
    os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
    os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
    os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
    os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
    os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
                                                      dlsym(handle, "lgrp_cookie_stale")));

    lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
    set_lgrp_cookie(c);
    return true;
  }
  return false;
}

// Resolve optional libc entry points (absent on older Solaris releases);
// the wrappers stay NULL when a symbol is missing.
void os::Solaris::misc_sym_init() {
  address func;

  // getisax
  func = resolve_symbol_lazy("getisax");
  if (func != NULL) {
    os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
  }

  // meminfo
  func = resolve_symbol_lazy("meminfo");
  if (func != NULL) {
    os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
  }
}

// Forwarder to getisax(2); callers must check has_getisax()/misc_sym_init
// resolved it first — asserts otherwise.
uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  assert(_getisax != NULL, "_getisax not set");
  return _getisax(array, n);
}

// Symbol doesn't exist in Solaris 8 pset.h
#ifndef PS_MYID
#define PS_MYID -3
#endif

// int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
typedef
long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
static pset_getloadavg_type pset_getloadavg_ptr = NULL;

// Resolve pset_getloadavg(3C) lazily; absent on older Solaris releases, so a
// NULL pointer afterwards is tolerated (callers must check).
void init_pset_getloadavg_ptr(void) {
  pset_getloadavg_ptr =
    (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
  if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
    warning("pset_getloadavg function not found");
  }
}

// Cached read-write fd for /dev/zero, opened in os::init().
int os::Solaris::_dev_zero_fd = -1;

// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = getpid();

  max_hrtime = first_hrtime = gethrtime();

  init_random(1234567);

  page_size = sysconf(_SC_PAGESIZE);
  if (page_size == -1)
    fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
                  strerror(errno)));
  init_page_sizes((size_t) page_size);

  Solaris::initialize_system_info();

  // Initialize misc. symbols as soon as possible, so we can use them
  // if we need them.
  Solaris::misc_sym_init();

  int fd = open("/dev/zero", O_RDWR);
  if (fd < 0) {
    fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
  } else {
    Solaris::set_dev_zero_fd(fd);

    // Close on exec, child won't inherit.
    fcntl(fd, F_SETFD, FD_CLOEXEC);
  }

  clock_tics_per_sec = CLK_TCK;

  // check if dladdr1() exists; dladdr1 can provide more information than
  // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  // and is available on linker patches for 5.7 and 5.8.
  // libdl.so must have been loaded, this call is just an entry lookup
  void * hdl = dlopen("libdl.so", RTLD_NOW);
  if (hdl)
    dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));

  // (Solaris only) this switches to calls that actually do locking.
  ThreadCritical::initialize();

  main_thread = thr_self();

  // Constant minimum stack size allowed. It must be at least
  // the minimum of what the OS supports (thr_min_stack()), and
  // enough to allow the thread to get to user bytecode execution.
  Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages.  The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > 8*K) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
  }
}

// To install functions for atexit system call
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // try to enable extended file IO ASAP, see 6431278
  os::Solaris::try_enable_extended_io();

  // Allocate a single page and mark it as readable for safepoint polling.  Also
  // use this first mmap call to check support for MAP_ALIGN.
  address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
                                                      page_size,
                                                      MAP_PRIVATE | MAP_ALIGN,
                                                      PROT_READ);
  if (polling_page == NULL) {
    // MAP_ALIGN unsupported on this release — remember that and retry plain.
    has_map_align = false;
    polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
                                                PROT_READ);
  }

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add a page for compiler2 recursion in main thread.
  // Add in BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  guarantee((Solaris::min_stack_allowed >=
    (StackYellowPages+StackRedPages+StackShadowPages+BytesPerWord
     COMPILER2_PRESENT(+1)) * page_size),
    "need to increase Solaris::min_stack_allowed on this platform");

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
      threadStackSizeInBytes < Solaris::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
                  Solaris::min_stack_allowed/K);
    return JNI_ERR;
  }

  // For 64kbps there will be a 64kb page size, which makes
  // the usable default stack size quite a bit less.  Increase the
  // stack for 64kb (or any > than 8kb) pages, this increases
  // virtual memory fragmentation (since we're not creating the
  // stack on a power of 2 boundary.  The real fix for this
  // should be to fix the guard page mechanism.

  if (vm_page_size() > 8*K) {
    threadStackSizeInBytes = (threadStackSizeInBytes != 0)
       ? threadStackSizeInBytes +
         ((StackYellowPages + StackRedPages) * vm_page_size())
       : 0;
    ThreadStackSize = threadStackSizeInBytes/K;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
                                                vm_page_size()));

  Solaris::libthread_init();

  if (UseNUMA) {
    if (!Solaris::liblgrp_init()) {
      UseNUMA = false;
    } else {
      size_t lgrp_limit = os::numa_get_groups_num();
      int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
      size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
      FREE_C_HEAP_ARRAY(int, lgrp_ids);
      if (lgrp_num < 2) {
        // There's only one locality group, disable NUMA.
        UseNUMA = false;
      }
    }
    if (!UseNUMA && ForceNUMA) {
      UseNUMA = true;
    }
  }

  Solaris::signal_sets_init();
  Solaris::init_signal_mem();
  Solaris::install_signal_handlers();

  // Old libjsig can only chain the low signal numbers.
  if (libjsigversion < JSIG_VERSION_1_4_1) {
    Maxlibjsigsigs = OLDMAXSIGNUM;
  }

  // initialize synchronization primitives to use either thread or
  // lwp synchronization (controlled by UseLWPSynchronization)
  Solaris::synchronization_init();

  if (MaxFDLimit) {
    // set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode))
        perror("os::init_2 getrlimit failed");
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode))
          perror("os::init_2 setrlimit failed");
      }
    }
  }

  // Initialize HPI.
  jint hpi_result = hpi::initialize();
  if (hpi_result != JNI_OK) {
    tty->print_cr("There was an error trying to initialize the HPI library.");
    return hpi_result;
  }

  // Calculate theoretical max. size of Threads to guard against
  // artificial out-of-memory situations, where all available address-
  // space has been reserved by thread stacks.  Default stack size is 1Mb.
  size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
    JavaThread::stack_size_at_create() : (1*K*K);
  assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris has a maximum of 4Gb of user programs.  Calculate the thread limit when
  // we should start doing Virtual Memory banging.  Currently when the threads will
  // have used all but 200Mb of space.
  size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
  Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;

  // at-exit methods are called in the reverse order of their registration.
  // In Solaris 7 and earlier, atexit functions are called on return from
  // main or as a result of a call to exit(3C).  There can be only 32 of
  // these functions registered and atexit() does not set errno.  In Solaris
  // 8 and later, there is no limit to the number of functions registered
  // and atexit() sets errno.  In addition, in Solaris 8 and later, atexit
  // functions are called upon dlclose(3DL) in addition to return from main
  // and exit(3C).

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // Init pset_loadavg function pointer
  init_pset_getloadavg_ptr();

  return JNI_OK;
}

// Third-phase init: nothing to do on Solaris.
void os::init_3(void) {
  return;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
    fatal("Could not disable polling page");
};

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
    fatal("Could not enable polling page");
};

// OS interface.

// stat(2) wrapper that normalizes the path via HPI first; fails with
// ENAMETOOLONG rather than overflowing the local buffer.
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  hpi::native_path(strcpy(pathbuf, path));
  return ::stat(pathbuf, sbuf);
}


// No C-heap verification on Solaris; always reports success.
bool os::check_heap(bool force) { return true; }

typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
static vsnprintf_t sol_vsnprintf = NULL;

// vsnprintf via dlsym: Solaris libc exports __vsnprintf/vsnprintf under
// different names across releases, so resolve whichever exists, once.
int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
  if (!sol_vsnprintf) {
    //search  for the named symbol in the objects that were loaded after libjvm
    void* where = RTLD_NEXT;
    if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
        sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
    if (!sol_vsnprintf){
      //search  for the named symbol in the objects that were loaded before libjvm
      where = RTLD_DEFAULT;
      if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
        sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
      assert(sol_vsnprintf != NULL, "vsnprintf not found");
    }
  }
  return (*sol_vsnprintf)(buf, count, fmt, argptr);
}


// Is a (classpath) directory empty?
// Returns true also when the directory cannot be opened at all.
bool os::dir_is_empty(const char* path) {
  DIR *dir = NULL;
  struct dirent *ptr;

  dir = opendir(path);
  if (dir == NULL) return true;

  /* Scan the directory */
  bool result = true;
  char buf[sizeof(struct dirent) + MAX_PATH];
  struct dirent *dbuf = (struct dirent *) buf;
  // Solaris two-argument readdir variant: caller supplies the entry buffer.
  while (result && (ptr = readdir(dir, dbuf)) != NULL) {
    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
      result = false;
    }
  }
  closedir(dir);
  return result;
}

// create binary file, rewriting existing file if required
// (O_EXCL when not rewriting, so an existing file makes open fail)
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = O_WRONLY | O_CREAT;
  if (!rewrite_existing) {
    oflags |= O_EXCL;
  }
  return ::open64(path, oflags, S_IREAD | S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}

// Map a block of memory.
5161 char* os::map_memory(int fd, const char* file_name, size_t file_offset, 5162 char *addr, size_t bytes, bool read_only, 5163 bool allow_exec) { 5164 int prot; 5165 int flags; 5166 5167 if (read_only) { 5168 prot = PROT_READ; 5169 flags = MAP_SHARED; 5170 } else { 5171 prot = PROT_READ | PROT_WRITE; 5172 flags = MAP_PRIVATE; 5173 } 5174 5175 if (allow_exec) { 5176 prot |= PROT_EXEC; 5177 } 5178 5179 if (addr != NULL) { 5180 flags |= MAP_FIXED; 5181 } 5182 5183 char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags, 5184 fd, file_offset); 5185 if (mapped_address == MAP_FAILED) { 5186 return NULL; 5187 } 5188 return mapped_address; 5189 } 5190 5191 5192 // Remap a block of memory. 5193 char* os::remap_memory(int fd, const char* file_name, size_t file_offset, 5194 char *addr, size_t bytes, bool read_only, 5195 bool allow_exec) { 5196 // same as map_memory() on this OS 5197 return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only, 5198 allow_exec); 5199 } 5200 5201 5202 // Unmap a block of memory. 5203 bool os::unmap_memory(char* addr, size_t bytes) { 5204 return munmap(addr, bytes) == 0; 5205 } 5206 5207 void os::pause() { 5208 char filename[MAX_PATH]; 5209 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 5210 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 5211 } else { 5212 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 5213 } 5214 5215 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 5216 if (fd != -1) { 5217 struct stat buf; 5218 close(fd); 5219 while (::stat(filename, &buf) == 0) { 5220 (void)::poll(NULL, 0, 100); 5221 } 5222 } else { 5223 jio_fprintf(stderr, 5224 "Could not open pause file '%s', continuing immediately.\n", filename); 5225 } 5226 } 5227 5228 #ifndef PRODUCT 5229 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS 5230 // Turn this on if you need to trace synch operations. 
// Set RECORD_SYNCH_LIMIT to a large-enough value,
// and call record_synch_enable and record_synch_disable
// around the computation of interest.

void record_synch(char* name, bool returning);  // defined below

// RAII tracer: records entry on construction and exit on destruction of each
// interposed synchronization call.
class RecordSynch {
  char* _name;
 public:
  RecordSynch(char* name) :_name(name)
  { record_synch(_name, false); }
  ~RecordSynch() { record_synch(_name, true); }
};

// Generates an extern "C" interposer for a libthread/libc synch entry point:
// it lazily resolves the real implementation via RTLD_NEXT, counts calls,
// traces entry/exit via RecordSynch, runs the 'inner' sanity check, then
// forwards to the real function.
#define CHECK_SYNCH_OP(ret, name, params, args, inner) \
extern "C" ret name params { \
  typedef ret name##_t params; \
  static name##_t* implem = NULL; \
  static int callcount = 0; \
  if (implem == NULL) { \
    implem = (name##_t*) dlsym(RTLD_NEXT, #name); \
    if (implem == NULL) fatal(dlerror()); \
  } \
  ++callcount; \
  RecordSynch _rs(#name); \
  inner; \
  return implem args; \
}
// in dbx, examine callcounts this way:
// for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done

// Sanity checks: synchronization objects passed to libthread must live in the
// C heap, never inside the (movable) Java heap.
#define CHECK_POINTER_OK(p) \
  (Universe::perm_gen() == NULL || !Universe::is_reserved_heap((oop)(p)))
#define CHECK_MU \
  if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
#define CHECK_CV \
  if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
#define CHECK_P(p) \
  if (!CHECK_POINTER_OK(p)) fatal(false, "Pointer must be in C heap only.");

#define CHECK_MUTEX(mutex_op) \
CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);

CHECK_MUTEX(  mutex_lock)
CHECK_MUTEX( _mutex_lock)
CHECK_MUTEX(  mutex_unlock)
CHECK_MUTEX(_mutex_unlock)
CHECK_MUTEX(  mutex_trylock)
CHECK_MUTEX(_mutex_trylock)

#define CHECK_COND(cond_op) \
CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);

CHECK_COND( cond_wait);
CHECK_COND(_cond_wait);
CHECK_COND(_cond_wait_cancel);

#define CHECK_COND2(cond_op) \
CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);

CHECK_COND2( cond_timedwait);
CHECK_COND2(_cond_timedwait);
CHECK_COND2(_cond_timedwait_cancel);

// do the _lwp_* versions too
#define mutex_t lwp_mutex_t
#define cond_t lwp_cond_t
CHECK_MUTEX(  _lwp_mutex_lock)
CHECK_MUTEX(  _lwp_mutex_unlock)
CHECK_MUTEX(  _lwp_mutex_trylock)
CHECK_MUTEX( __lwp_mutex_lock)
CHECK_MUTEX( __lwp_mutex_unlock)
CHECK_MUTEX( __lwp_mutex_trylock)
CHECK_MUTEX(___lwp_mutex_lock)
CHECK_MUTEX(___lwp_mutex_unlock)

CHECK_COND(  _lwp_cond_wait);
CHECK_COND( __lwp_cond_wait);
CHECK_COND(___lwp_cond_wait);

CHECK_COND2( _lwp_cond_timedwait);
CHECK_COND2(__lwp_cond_timedwait);
#undef mutex_t
#undef cond_t

CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);


// recording machinery:
// Fixed-size ring-less trace buffers; recording simply stops at the limit.

enum { RECORD_SYNCH_LIMIT = 200 };
char* record_synch_name[RECORD_SYNCH_LIMIT];
void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
bool record_synch_returning[RECORD_SYNCH_LIMIT];
thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
int record_synch_count = 0;
bool record_synch_enabled = false;

// in dbx, examine recorded data this way:
// for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done

// Append one record to the trace buffer.  No-op unless recording is enabled;
// once the buffer is full further calls are silently dropped.
void record_synch(char* name, bool returning) {
  if (record_synch_enabled) {
    if (record_synch_count < RECORD_SYNCH_LIMIT) {
      record_synch_name[record_synch_count] = name;
      record_synch_returning[record_synch_count] = returning;
      record_synch_thread[record_synch_count] = thr_self();
      // Records the stack address of the 'name' parameter itself, which
      // serves as a rough marker of the caller's stack depth for dbx.
      record_synch_arg0ptr[record_synch_count] = &name;
      record_synch_count++;
    }
    // put more checking code here:
    // ...
  }
}

// Start collecting trace data; resets the buffer unless already recording.
void record_synch_enable() {
  // start collecting trace data, if not already doing so
  if (!record_synch_enabled)  record_synch_count = 0;
  record_synch_enabled = true;
}

// Stop collecting trace data (buffer contents are left intact for inspection).
void record_synch_disable() {
  // stop collecting trace data
  record_synch_enabled = false;
}

#endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
#endif // PRODUCT

// Byte offset of pr_utime within prusage_t, and the byte span from pr_utime
// up to (but not including) pr_ttime.  Used below to pread() just the
// CPU-time portion of /proc/<pid>/lwp/<lwpid>/lwpusage.
const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
                               (intptr_t)(&((prusage_t *)(NULL))->pr_utime);


// JVMTI & JVM monitoring and management support
// The thread_cpu_time() and current_thread_cpu_time() are only
// supported if is_thread_cpu_time_supported() returns true.
// They are not supported on Solaris T1.

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread *)
// returns the fast estimate available on the platform.

// Fast path for the current thread.
// hrtime_t gethrvtime() return value includes
// user time but does not include system time
jlong os::current_thread_cpu_time() {
  return (jlong) gethrvtime();
}

// User-level CPU time, in nanoseconds, for an arbitrary thread.
jlong os::thread_cpu_time(Thread *thread) {
  // return user level CPU time only to be consistent with
  // what current_thread_cpu_time returns.
  // thread_cpu_time_info() must be changed if this changes
  return os::thread_cpu_time(thread, false /* user time only */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  if (user_sys_cpu_time) {
    // No fast path covers user+sys; fall through to the /proc-based query.
    return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  } else {
    return os::current_thread_cpu_time();
  }
}

// Reads the per-lwp usage record from /proc and returns CPU time in
// nanoseconds, or -1 if the lwpusage file cannot be opened or read.
jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  char proc_name[64];
  int count;
  prusage_t prusage;
  jlong lwp_time;
  int fd;

  sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
          getpid(),
          thread->osthread()->lwp_id());
  fd = open(proc_name, O_RDONLY);
  if ( fd == -1 ) return -1;

  // Read only the pr_utime..pr_stime span (see thr_time_off/thr_time_size
  // above), retrying if interrupted by a signal.
  do {
    count = pread(fd,
                  (void *)&prusage.pr_utime,
                  thr_time_size,
                  thr_time_off);
  } while (count < 0 && errno == EINTR);
  close(fd);
  if ( count < 0 ) return -1;

  if (user_sys_cpu_time) {
    // user + system CPU time
    lwp_time = (((jlong)prusage.pr_stime.tv_sec +
                 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
                 (jlong)prusage.pr_stime.tv_nsec +
                 (jlong)prusage.pr_utime.tv_nsec;
  } else {
    // user level CPU time only
    lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
                (jlong)prusage.pr_utime.tv_nsec;
  }

  return(lwp_time);
}

// Describe the timer backing current_thread_cpu_time() for JVMTI clients.
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}

// Describe the timer backing thread_cpu_time() for JVMTI clients.
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}

// Thread CPU time requires the T2 libthread or bound threads; otherwise
// there is no stable thread<->lwp mapping to read lwpusage through.
bool os::is_thread_cpu_time_supported() {
  if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
    return true;
  } else {
    return false;
  }
}

// System loadavg support.  Returns -1 if load average cannot be obtained.
// Return the load average for our processor set if the primitive exists
// (Solaris 9 and later).  Otherwise just return system wide loadavg.
int os::loadavg(double loadavg[], int nelem) {
  if (pset_getloadavg_ptr != NULL) {
    return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
  } else {
    return ::getloadavg(loadavg, nelem);
  }
}

//---------------------------------------------------------------------------------

// Clamp y to the page containing x: returns x if both addresses lie on the
// same page, otherwise the boundary of x's page that is nearest to y.
// Used below to keep disassembly from walking off the mapped page.
static address same_page(address x, address y) {
  intptr_t page_bits = -os::vm_page_size();
  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
    return x;
  else if (x > y)
    return (address)(intptr_t(y) | ~page_bits) + 1;
  else
    return (address)(intptr_t(y) & page_bits);
}

// Print symbolic information for 'addr' (symbol+offset, containing library
// and its base address) and, with -verbose, disassemble a few bytes around
// it.  Returns true iff dladdr() could resolve the address at all.
bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo)) {
#ifdef _LP64
    st->print("0x%016lx: ", addr);
#else
    st->print("0x%08x: ", addr);
#endif
    if (dlinfo.dli_sname != NULL)
      st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
    else if (dlinfo.dli_fname)
      st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
    else
      st->print("<absolute address>");
    if (dlinfo.dli_fname) st->print(" in %s", dlinfo.dli_fname);
#ifdef _LP64
    if (dlinfo.dli_fbase) st->print(" at 0x%016lx", dlinfo.dli_fbase);
#else
    if (dlinfo.dli_fbase) st->print(" at 0x%08x", dlinfo.dli_fbase);
#endif
    st->cr();

    if (Verbose) {
      // decode some bytes around the PC
      address begin = same_page(addr-40, addr);
      address end   = same_page(addr+40, addr);
      // Never decode below the symbol (or library) start.
      address       lowest = (address) dlinfo.dli_sname;
      if (!lowest)  lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest)  begin = lowest;
      // If 'end' falls into the next symbol, stop at that symbol's start.
      Dl_info dlinfo2;
      if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
        end = (address) dlinfo2.dli_saddr;
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}

// Following function has been added to support HotSparc's libjvm.so running
// under Solaris production JDK 1.2.2 / 1.3.0.  These came from
// src/solaris/hpi/native_threads in the EVM codebase.
//
// NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
// libraries and should thus be removed. We will leave it behind for a while
// until we no longer want to able to run on top of 1.3.0 Solaris production
// JDK. See 4341971.

// Safety margin, in bytes, subtracted from the measured remaining stack.
#define STACK_SLACK 0x800

extern "C" {
  // Returns the number of usable bytes left on the current thread's stack,
  // minus STACK_SLACK, computed from thr_stksegment().
  intptr_t sysThreadAvailableStackWithSlack() {
    stack_t st;
    intptr_t retval, stack_top;
    retval = thr_stksegment(&st);
    assert(retval == 0, "incorrect return value from thr_stksegment");
    // ss_sp is the stack *base* (high address); the stack grows down, so
    // this frame's locals must lie between ss_sp - ss_size and ss_sp.
    assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
    assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
    stack_top=(intptr_t)st.ss_sp-st.ss_size;
    return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  }
}

// Just to get the Kernel build to link on solaris for testing.

extern "C" {
  class ASGCT_CallTrace;
  void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext)
    KERNEL_RETURN;
}


// ObjectMonitor park-unpark infrastructure ...
//
// We implement Solaris and Linux PlatformEvents with the
// obvious condvar-mutex-flag triple.
// Another alternative that works quite well is pipes:
// Each PlatformEvent consists of a pipe-pair.
// The thread associated with the PlatformEvent
// calls park(), which reads from the input end of the pipe.
// Unpark() writes into the other end of the pipe.
// The write-side of the pipe must be set NDELAY.
// Unfortunately pipes consume a large # of handles.
// Native solaris lwp_park() and lwp_unpark() work nicely, too.
// Using pipes for the 1st few threads might be workable, however.
//
// park() is permitted to return spuriously.
// Callers of park() should wrap the call to park() in
// an appropriate loop.  A litmus test for the correct
// usage of park is the following: if park() were modified
// to immediately return 0 your code should still work,
// albeit degenerating to a spin loop.
//
// An interesting optimization for park() is to use a trylock()
// to attempt to acquire the mutex.  If the trylock() fails
// then we know that a concurrent unpark() operation is in-progress.
// in that case the park() code could simply set _count to 0
// and return immediately.  The subsequent park() operation *might*
// return immediately.  That's harmless as the caller of park() is
// expected to loop.  By using trylock() we will have
// avoided a context switch caused by contention on the per-thread mutex.
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
//     objectmonitor implementation.
// 2.  Collapse the JSR166 parker event, and the
//     objectmonitor ParkEvent into a single "Event" construct.
// 3.  In park() and unpark() add:
//     assert (Thread::current() == AssociatedWith).
// 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
//     1-out-of-N park() operations will return immediately.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block
//
// _Event serves as a restricted-range semaphore.
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//
// TODO-FIXME: add DTRACE probes for:
// 1.  Tx parks
// 2.  Ty unparks Tx
// 3.  Tx resumes from park


// value determined through experimentation
#define ROUNDINGFIX 11

// utility to compute the abstime argument to timedwait.
// TODO-FIXME: switch from compute_abstime() to unpackTime().

// Converts a relative timeout in milliseconds into the absolute timestruc_t
// expected by cond_timedwait()/_lwp_cond_timedwait().  Clamps very small
// timeouts (old Solaris round-down bug, see below) and very large ones
// (cond_timedwait's documented limit).  Returns 'abstime' for convenience.
static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  // millis is the relative timeout time
  // abstime will be the absolute timeout time
  if (millis < 0)  millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  jlong max_wait_period;

  if (UseLWPSynchronization) {
    // forward port of fix for 4275818 (not sleeping long enough)
    // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
    // _lwp_cond_timedwait() used a round_down algorithm rather
    // than a round_up. For millis less than our roundfactor
    // it rounded down to 0 which doesn't meet the spec.
    // For millis > roundfactor we may return a bit sooner, but
    // since we can not accurately identify the patch level and
    // this has already been fixed in Solaris 9 and 8 we will
    // leave it alone rather than always rounding down.

    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
  } else {
    max_wait_period = 50000000;
  }
  millis %= 1000;
  if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
    seconds = max_wait_period;
  }
  abstime->tv_sec = now.tv_sec  + seconds;
  long       usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    // carry microseconds into the seconds field
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}

// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).
int os::PlatformEvent::TryPark() {
  for (;;) {
    const int v = _Event ;
    guarantee ((v == 0) || (v == 1), "invariant") ;
    // CAS to 0; retry if another thread changed _Event concurrently.
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
  }
}

// Block until unpark() posts a permit (or consume one that is pending).
// May return spuriously; callers must loop (see block comment above).
void os::PlatformEvent::park() {           // AKA: down()
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  int v ;
  // Atomically decrement _Event: 1 -> 0 consumes the permit, 0 -> -1 blocks.
  for (;;) {
      v = _Event ;
      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v == 0) {
     // Do this the hard way by blocking ...
     // See http://monaco.sfbay/detail.jsf?cr=5094058.
     // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
     // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
     if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
     int status = os::Solaris::mutex_lock(_mutex);
     assert_status(status == 0, status, "mutex_lock");
     guarantee (_nParked == 0, "invariant") ;
     ++ _nParked ;
     while (_Event < 0) {
        // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
        // Treat this the same as if the wait was interrupted
        // With usr/lib/lwp going to kernel, always handle ETIME
        status = os::Solaris::cond_wait(_cond, _mutex);
        if (status == ETIME) status = EINTR ;
        assert_status(status == 0 || status == EINTR, status, "cond_wait");
     }
     -- _nParked ;
     _Event = 0 ;
     status = os::Solaris::mutex_unlock(_mutex);
     assert_status(status == 0, status, "mutex_unlock");
  }
}

// Timed park: like park(), but with a relative timeout in milliseconds.
// Returns OS_OK if a permit was consumed (before or during the wait),
// OS_TIMEOUT if the wait timed out.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant") ;
  int v ;
  for (;;) {
      v = _Event ;
      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v != 0) return OS_OK ;     // consumed a pending permit -- no wait needed

  int ret = OS_TIMEOUT;
  timestruc_t abst;
  compute_abstime (&abst, millis);

  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // For Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant") ;
  ++ _nParked ;
  while (_Event < 0) {
     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
     assert_status(status == 0 || status == EINTR ||
                   status == ETIME || status == ETIMEDOUT,
                   status, "cond_timedwait");
     if (!FilterSpuriousWakeups) break ;                // previous semantics
     if (status == ETIME || status == ETIMEDOUT) break ;
     // We consume and ignore EINTR and spurious wakeups.
  }
  -- _nParked ;
  if (_Event >= 0) ret = OS_OK ;   // unparked while (or just before) waiting
  _Event = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  return ret;
}

// Post a permit: bump _Event (saturating at 1) and, if the associated
// thread is blocked in park(), wake it via the condvar.
void os::PlatformEvent::unpark() {
  int v, AnyWaiters;

  // Increment _Event.
  // Another acceptable implementation would be to simply swap 1
  // into _Event:
  //   if (Swap (&_Event, 1) < 0) {
  //      mutex_lock (_mutex) ; AnyWaiters = nParked; mutex_unlock (_mutex) ;
  //      if (AnyWaiters) cond_signal (_cond) ;
  //   }

  for (;;) {
    v = _Event ;
    if (v > 0) {
       // The LD of _Event could have reordered or be satisfied
       // by a read-aside from this processor's write buffer.
       // To avoid problems execute a barrier and then
       // ratify the value.  A degenerate CAS() would also work.
       // Viz., CAS (v+0, &_Event, v) == v).
       OrderAccess::fence() ;
       if (_Event == v) return ;
       continue ;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
  }

  // If the thread associated with the event was parked, wake it.
  if (v < 0) {
     int status ;
     // Wait for the thread assoc with the PlatformEvent to vacate.
     status = os::Solaris::mutex_lock(_mutex);
     assert_status(status == 0, status, "mutex_lock");
     AnyWaiters = _nParked ;
     status = os::Solaris::mutex_unlock(_mutex);
     assert_status(status == 0, status, "mutex_unlock");
     guarantee (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
     if (AnyWaiters != 0) {
       // We intentionally signal *after* dropping the lock
       // to avoid a common class of futile wakeups.
       status = os::Solaris::cond_signal(_cond);
       assert_status(status == 0, status, "cond_signal");
     }
  }
}

// JSR166
// -------------------------------------------------------

/*
 * The solaris and linux implementations of park/unpark are fairly
 * conservative for now, but can be improved. They currently use a
 * mutex/condvar pair, plus _counter.
 * Park decrements _counter if > 0, else does a condvar wait. Unpark
 * sets count to 1 and signals condvar. Only one thread ever waits
 * on the condvar. Contention seen when trying to park implies that someone
 * is unparking you, so don't wait. And spurious returns are fine, so there
 * is no need to track notifications.
 */

#define NANOSECS_PER_SEC 1000000000
#define NANOSECS_PER_MILLISEC 1000000
// Upper bound (seconds) on any timed wait; see the overflow discussion below.
#define MAX_SECS 100000000

/*
 * This code is common to linux and solaris and will be moved to a
 * common place in dolphin.
 *
 * The passed in time value is either a relative time in nanoseconds
 * or an absolute time in milliseconds. Either way it has to be unpacked
 * into suitable seconds and nanoseconds components and stored in the
 * given timespec structure.
 * Given time is a 64-bit value and the time_t used in the timespec is only
 * a signed-32-bit value (except on 64-bit Linux) we have to watch for
 * overflow if times way in the future are given. Further on Solaris versions
 * prior to 10 there is a restriction (see cond_timedwait) that the specified
 * number of seconds, in abstime, is less than current_time  + 100,000,000.
 * As it will be 28 years before "now + 100000000" will overflow we can
 * ignore overflow and just impose a hard-limit on seconds using the value
 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
 * years from "now".
 */
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  // Never allow a deadline past now + MAX_SECS (see header comment).
  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    // 'time' is an absolute deadline in milliseconds since the epoch.
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    // 'time' is a relative timeout in nanoseconds.
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        // carry nanoseconds into the seconds field
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}

// JSR166 LockSupport.park: block the current (Java) thread until a permit is
// available, an unpark() arrives, the thread is interrupted, or -- when
// 'time' is nonzero -- the timeout expires.  'time' is a relative timeout in
// nanoseconds when isAbsolute is false, else an absolute deadline in millis.
void Parker::park(bool isAbsolute, jlong time) {

  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
      _counter = 0 ;
      OrderAccess::fence();
      return ;
  }

  // Optional fast-exit: Check interrupt before trying to wait
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // First, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0) { // don't wait at all
    return;
  }
  if (time > 0) {
    // Warning: this code might be exposed to the old Solaris time
    // round-down bugs.  Grep "roundingFix" for details.
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: _mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex.  If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking.  Also. check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) ||
      os::Solaris::mutex_trylock(_mutex) != 0) {
    return;
  }

  int status ;

  if (_counter > 0)  { // no wait needed
    _counter = 0;
    status = os::Solaris::mutex_unlock(_mutex);
    assert (status == 0, "invariant") ;
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // Do this the hard way by blocking ...
  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif

  if (time == 0) {
    status = os::Solaris::cond_wait (_cond, _mutex) ;
  } else {
    status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  }
  // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of the Solaris.
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  // Restore the signal mask blocked above.
  thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
#endif
  _counter = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock") ;

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
  OrderAccess::fence();
}

// JSR166 LockSupport.unpark: make a permit available and, if the associated
// thread is parked, signal it.  At most one thread ever waits on the condvar.
void Parker::unpark() {
  int s, status ;
  status = os::Solaris::mutex_lock (_mutex) ;
  assert (status == 0, "invariant") ;
  s = _counter;
  _counter = 1;
  status = os::Solaris::mutex_unlock (_mutex) ;
  assert (status == 0, "invariant") ;

  // Only signal if the permit was not already available.
  if (s < 1) {
    status = os::Solaris::cond_signal (_cond) ;
    assert (status == 0, "invariant") ;
  }
}

extern char** environ;

// Run the specified command
in a separate process. Return its exit value, 5991 // or -1 on failure (e.g. can't fork a new process). 5992 // Unlike system(), this function can be called from signal handler. It 5993 // doesn't block SIGINT et al. 5994 int os::fork_and_exec(char* cmd) { 5995 char * argv[4]; 5996 argv[0] = (char *)"sh"; 5997 argv[1] = (char *)"-c"; 5998 argv[2] = cmd; 5999 argv[3] = NULL; 6000 6001 // fork is async-safe, fork1 is not so can't use in signal handler 6002 pid_t pid; 6003 Thread* t = ThreadLocalStorage::get_thread_slow(); 6004 if (t != NULL && t->is_inside_signal_handler()) { 6005 pid = fork(); 6006 } else { 6007 pid = fork1(); 6008 } 6009 6010 if (pid < 0) { 6011 // fork failed 6012 warning("fork failed: %s", strerror(errno)); 6013 return -1; 6014 6015 } else if (pid == 0) { 6016 // child process 6017 6018 // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris 6019 execve("/usr/bin/sh", argv, environ); 6020 6021 // execve failed 6022 _exit(-1); 6023 6024 } else { 6025 // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't 6026 // care about the actual exit code, for now. 6027 6028 int status; 6029 6030 // Wait for the child process to exit. This returns immediately if 6031 // the child has already exited. */ 6032 while (waitpid(pid, &status, 0) < 0) { 6033 switch (errno) { 6034 case ECHILD: return 0; 6035 case EINTR: break; 6036 default: return -1; 6037 } 6038 } 6039 6040 if (WIFEXITED(status)) { 6041 // The child exited normally; get its exit code. 6042 return WEXITSTATUS(status); 6043 } else if (WIFSIGNALED(status)) { 6044 // The child exited because of a signal 6045 // The best value to return is 0x80 + signal number, 6046 // because that is what all Unix shells do, and because 6047 // it allows callers to distinguish between process exit and 6048 // process death by signal. 
6049 return 0x80 + WTERMSIG(status); 6050 } else { 6051 // Unknown exit code; pass it through 6052 return status; 6053 } 6054 } 6055 } 6056 6057 // is_headless_jre() 6058 // 6059 // Test for the existence of libmawt in motif21 or xawt directories 6060 // in order to report if we are running in a headless jre 6061 // 6062 bool os::is_headless_jre() { 6063 struct stat statbuf; 6064 char buf[MAXPATHLEN]; 6065 char libmawtpath[MAXPATHLEN]; 6066 const char *xawtstr = "/xawt/libmawt.so"; 6067 const char *motifstr = "/motif21/libmawt.so"; 6068 char *p; 6069 6070 // Get path to libjvm.so 6071 os::jvm_path(buf, sizeof(buf)); 6072 6073 // Get rid of libjvm.so 6074 p = strrchr(buf, '/'); 6075 if (p == NULL) return false; 6076 else *p = '\0'; 6077 6078 // Get rid of client or server 6079 p = strrchr(buf, '/'); 6080 if (p == NULL) return false; 6081 else *p = '\0'; 6082 6083 // check xawt/libmawt.so 6084 strcpy(libmawtpath, buf); 6085 strcat(libmawtpath, xawtstr); 6086 if (::stat(libmawtpath, &statbuf) == 0) return false; 6087 6088 // check motif21/libmawt.so 6089 strcpy(libmawtpath, buf); 6090 strcat(libmawtpath, motifstr); 6091 if (::stat(libmawtpath, &statbuf) == 0) return false; 6092 6093 return true; 6094 } 6095 6096