1 /* 2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // no precompiled headers 26 #include "classfile/classLoader.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "classfile/vmSymbols.hpp" 29 #include "code/icBuffer.hpp" 30 #include "code/vtableStubs.hpp" 31 #include "compiler/compileBroker.hpp" 32 #include "interpreter/interpreter.hpp" 33 #include "jvm_solaris.h" 34 #include "memory/allocation.inline.hpp" 35 #include "memory/filemap.hpp" 36 #include "mutex_solaris.inline.hpp" 37 #include "oops/oop.inline.hpp" 38 #include "os_share_solaris.hpp" 39 #include "prims/jniFastGetField.hpp" 40 #include "prims/jvm.h" 41 #include "prims/jvm_misc.hpp" 42 #include "runtime/arguments.hpp" 43 #include "runtime/extendedPC.hpp" 44 #include "runtime/globals.hpp" 45 #include "runtime/hpi.hpp" 46 #include "runtime/interfaceSupport.hpp" 47 #include "runtime/java.hpp" 48 #include "runtime/javaCalls.hpp" 49 #include "runtime/mutexLocker.hpp" 50 #include "runtime/objectMonitor.hpp" 51 #include "runtime/osThread.hpp" 52 #include "runtime/perfMemory.hpp" 53 #include "runtime/sharedRuntime.hpp" 54 #include "runtime/statSampler.hpp" 55 #include "runtime/stubRoutines.hpp" 56 #include "runtime/threadCritical.hpp" 57 #include "runtime/timer.hpp" 58 #include "services/attachListener.hpp" 59 #include "services/runtimeService.hpp" 60 #include "thread_solaris.inline.hpp" 61 #include "utilities/defaultStream.hpp" 62 #include "utilities/events.hpp" 63 #include "utilities/growableArray.hpp" 64 #include "utilities/vmError.hpp" 65 #ifdef TARGET_ARCH_x86 66 # include "assembler_x86.inline.hpp" 67 # include "nativeInst_x86.hpp" 68 #endif 69 #ifdef TARGET_ARCH_sparc 70 # include "assembler_sparc.inline.hpp" 71 # include "nativeInst_sparc.hpp" 72 #endif 73 #ifdef COMPILER1 74 #include "c1/c1_Runtime1.hpp" 75 #endif 76 #ifdef COMPILER2 77 #include "opto/runtime.hpp" 78 #endif 79 80 // put OS-includes here 81 # include <dlfcn.h> 82 # include <errno.h> 83 # include <link.h> 84 # include <poll.h> 85 # include <pthread.h> 86 # 
include <pwd.h> 87 # include <schedctl.h> 88 # include <setjmp.h> 89 # include <signal.h> 90 # include <stdio.h> 91 # include <alloca.h> 92 # include <sys/filio.h> 93 # include <sys/ipc.h> 94 # include <sys/lwp.h> 95 # include <sys/machelf.h> // for elf Sym structure used by dladdr1 96 # include <sys/mman.h> 97 # include <sys/processor.h> 98 # include <sys/procset.h> 99 # include <sys/pset.h> 100 # include <sys/resource.h> 101 # include <sys/shm.h> 102 # include <sys/socket.h> 103 # include <sys/stat.h> 104 # include <sys/systeminfo.h> 105 # include <sys/time.h> 106 # include <sys/times.h> 107 # include <sys/types.h> 108 # include <sys/wait.h> 109 # include <sys/utsname.h> 110 # include <thread.h> 111 # include <unistd.h> 112 # include <sys/priocntl.h> 113 # include <sys/rtpriocntl.h> 114 # include <sys/tspriocntl.h> 115 # include <sys/iapriocntl.h> 116 # include <sys/loadavg.h> 117 # include <string.h> 118 119 # define _STRUCTURED_PROC 1 // this gets us the new structured proc interfaces of 5.6 & later 120 # include <sys/procfs.h> // see comment in <sys/procfs.h> 121 122 #define MAX_PATH (2 * K) 123 124 // for timer info max values which include all bits 125 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 126 127 #ifdef _GNU_SOURCE 128 // See bug #6514594 129 extern "C" int madvise(caddr_t, size_t, int); 130 extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg, 131 int attr, int mask); 132 #endif //_GNU_SOURCE 133 134 /* 135 MPSS Changes Start. 136 The JVM binary needs to be built and run on pre-Solaris 9 137 systems, but the constants needed by MPSS are only in Solaris 9 138 header files. They are textually replicated here to allow 139 building on earlier systems. Once building on Solaris 8 is 140 no longer a requirement, these #defines can be replaced by ordinary 141 system .h inclusion. 142 143 In earlier versions of the JDK and Solaris, we used ISM for large pages. 144 But ISM requires shared memory to achieve this and thus has many caveats. 
145 MPSS is a fully transparent and is a cleaner way to get large pages. 146 Although we still require keeping ISM for backward compatiblitiy as well as 147 giving the opportunity to use large pages on older systems it is 148 recommended that MPSS be used for Solaris 9 and above. 149 150 */ 151 152 #ifndef MC_HAT_ADVISE 153 154 struct memcntl_mha { 155 uint_t mha_cmd; /* command(s) */ 156 uint_t mha_flags; 157 size_t mha_pagesize; 158 }; 159 #define MC_HAT_ADVISE 7 /* advise hat map size */ 160 #define MHA_MAPSIZE_VA 0x1 /* set preferred page size */ 161 #define MAP_ALIGN 0x200 /* addr specifies alignment */ 162 163 #endif 164 // MPSS Changes End. 165 166 167 // Here are some liblgrp types from sys/lgrp_user.h to be able to 168 // compile on older systems without this header file. 169 170 #ifndef MADV_ACCESS_LWP 171 # define MADV_ACCESS_LWP 7 /* next LWP to access heavily */ 172 #endif 173 #ifndef MADV_ACCESS_MANY 174 # define MADV_ACCESS_MANY 8 /* many processes to access heavily */ 175 #endif 176 177 #ifndef LGRP_RSRC_CPU 178 # define LGRP_RSRC_CPU 0 /* CPU resources */ 179 #endif 180 #ifndef LGRP_RSRC_MEM 181 # define LGRP_RSRC_MEM 1 /* memory resources */ 182 #endif 183 184 // Some more macros from sys/mman.h that are not present in Solaris 8. 185 186 #ifndef MAX_MEMINFO_CNT 187 /* 188 * info_req request type definitions for meminfo 189 * request types starting with MEMINFO_V are used for Virtual addresses 190 * and should not be mixed with MEMINFO_PLGRP which is targeted for Physical 191 * addresses 192 */ 193 # define MEMINFO_SHIFT 16 194 # define MEMINFO_MASK (0xFF << MEMINFO_SHIFT) 195 # define MEMINFO_VPHYSICAL (0x01 << MEMINFO_SHIFT) /* get physical addr */ 196 # define MEMINFO_VLGRP (0x02 << MEMINFO_SHIFT) /* get lgroup */ 197 # define MEMINFO_VPAGESIZE (0x03 << MEMINFO_SHIFT) /* size of phys page */ 198 # define MEMINFO_VREPLCNT (0x04 << MEMINFO_SHIFT) /* no. 
of replica */
# define MEMINFO_VREPL (0x05 << MEMINFO_SHIFT) /* physical replica */
# define MEMINFO_VREPL_LGRP (0x06 << MEMINFO_SHIFT) /* lgrp of replica */
# define MEMINFO_PLGRP (0x07 << MEMINFO_SHIFT) /* lgroup for paddr */

/* maximum number of addresses meminfo() can process at a time */
# define MAX_MEMINFO_CNT 256

/* maximum number of request types */
# define MAX_MEMINFO_REQ 31
#endif

// Thread priority limits for thr_setprio().
// see thr_setprio(3T) for the basis of these numbers
#define MinimumPriority 0
#define NormalPriority 64
#define MaximumPriority 127

// Values for ThreadPriorityPolicy == 1
// Maps Java thread priorities onto the thr_setprio() range above.
// Index 0 is unused (Java priorities start at 1), hence the -99999 sentinel.
int prio_policy1[MaxPriority+1] = { -99999, 0, 16, 32, 48, 64,
                                    80, 96, 112, 124, 127 };

// System parameters used internally
static clock_t clock_tics_per_sec = 100;

// For diagnostics to print a message once. see run_periodic_checks
static bool check_addr0_done = false;
static sigset_t check_signal_done;
static bool check_signals = true;

address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo

address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround


// "default" initializers for missing libc APIs.
// Each stub simply zeroes the primitive (a valid initial state) and
// reports success; presumably these are installed when the real lwp_*
// entry points cannot be resolved -- confirm against the (not visible
// here) os::Solaris initialization code.
extern "C" {
  static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }

  static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
  static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
}

// "default" initializers for pthread-based synchronization.
// Same zero-initialization strategy as the lwp_* stubs above.
extern "C" {
  static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
}
// Thread Local Storage
// This is common to all Solaris platforms so it is defined here,
// in this common file.
// The declarations are in the os_cpu threadLS*.hpp files.
//
// Static member initialization for TLS
Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};

#ifndef PRODUCT
#define _PCT(n,d) ((100.0*(double)(n))/(double)(d))

// Hit/miss counters for the TLS thread cache (debug builds only).
int ThreadLocalStorage::_tcacheHit = 0;
int ThreadLocalStorage::_tcacheMiss = 0;

// Print cache hit/miss statistics for the TLS thread cache.
void ThreadLocalStorage::print_statistics() {
  int total = _tcacheMiss+_tcacheHit;
  tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
                _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
}
#undef _PCT
#endif // PRODUCT

// Slow path for thread lookup: resolves the current Thread* via
// get_thread_slow(), sanity-checks that the current stack pointer lies
// within that thread's recorded stack, then refreshes the cache slot
// `index` so subsequent lookups with the same raw_id hit the fast path.
Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
                                                        int index) {
  Thread *thread = get_thread_slow();
  if (thread != NULL) {
    address sp = os::current_stack_pointer();
    // _stack_base may still be NULL early in thread setup; also tolerate
    // a mismatch while an error is being reported.
    guarantee(thread->_stack_base == NULL ||
              (sp <= thread->_stack_base &&
                 sp >= thread->_stack_base - thread->_stack_size) ||
               is_error_reported(),
              "sp must be inside of selected thread stack");

    thread->_self_raw_id = raw_id;  // mark for quick retrieval
    _get_thread_cache[ index ] = thread;
  }
  return thread;
}


// A block of zeroes at least as large as a Thread, used as the "empty"
// cache entry so cache probes never dereference a stale Thread*.
static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
#define NO_CACHED_THREAD ((Thread*)all_zero)

// Publish `thread` as the current thread in OS-level TLS and refresh the
// per-LWP cache slot (or reset it to NO_CACHED_THREAD on thread exit).
void ThreadLocalStorage::pd_set_thread(Thread* thread) {

  // Store the new value before updating the cache to prevent a race
  // between get_thread_via_cache_slowly() and this store operation.
  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);

  // Update thread cache with new thread if setting on thread create,
  // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
  uintptr_t raw = pd_raw_thread_id();
  int ix = pd_cache_index(raw);
  _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
}

// Reset every cache slot to the zeroed sentinel.
void ThreadLocalStorage::pd_init() {
  for (int i = 0; i < _pd_cache_size; i++) {
    _get_thread_cache[i] = NO_CACHED_THREAD;
  }
}

// Invalidate all the caches (happens to be the same as pd_init).
void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }

#undef NO_CACHED_THREAD

// END Thread Local Storage

// Clamp a reported stack size to sane bounds:
// negative-as-signed sizes (4759953) and sizes that would wrap below
// address 0 (4812466) are both capped.
static inline size_t adjust_stack_size(address base, size_t size) {
  if ((ssize_t)size < 0) {
    // 4759953: Compensate for ridiculous stack size.
    size = max_intx;
  }
  if (size > (size_t)base) {
    // 4812466: Make sure size doesn't allow the stack to wrap the address space.
    size = (size_t)base;
  }
  return size;
}

// Query this LWP's stack segment via thr_stksegment() and sanitize the
// reported size.
// NOTE(review): st is used (adjust_stack_size) before retval is checked,
// and the checks are assert-only, so a thr_stksegment() failure in a
// product build would propagate an uninitialized stack_t -- confirm
// thr_stksegment() cannot fail for an attached thread.
static inline stack_t get_stack_info() {
  stack_t st;
  int retval = thr_stksegment(&st);
  st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
  assert(retval == 0, "incorrect return value from thr_stksegment");
  assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
  assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
  return st;
}

// Return the base (highest address) of the current thread's stack.
// For the primordial thread the value is computed once and cached in
// os::Solaris::_main_stack_base (4352906 workaround).
address os::current_stack_base() {
  int r = thr_main() ;
  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  bool is_primordial_thread = r;

  // Workaround 4352906, avoid calls to thr_stksegment by
  // thr_main after the first one (it looks like we trash
  // some data, causing the value for ss_sp to be incorrect).
  if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
    stack_t st = get_stack_info();
    if (is_primordial_thread) {
      // cache initial value of stack base
      os::Solaris::_main_stack_base = (address)st.ss_sp;
    }
    return (address)st.ss_sp;
  } else {
    guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
    return os::Solaris::_main_stack_base;
  }
}

// Return the usable size of the current thread's stack, page-aligned at
// the bottom.  The primordial thread's size comes from RLIMIT_STACK
// rather than thr_stksegment (see 4352906 above).
size_t os::current_stack_size() {
  size_t size;

  int r = thr_main() ;
  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  if(!r) {
    size = get_stack_info().ss_size;
  } else {
    struct rlimit limits;
    getrlimit(RLIMIT_STACK, &limits);
    size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
  }
  // base may not be page aligned
  address base = current_stack_base();
  // NOTE(review): stray second ';' below is a harmless empty statement.
  address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());;
  return (size_t)(base - bottom);
}

// Thread-safe localtime: delegates to the reentrant localtime_r().
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

// interruptible infrastructure

// setup_interruptible saves the thread state before going into an
// interruptible system call.
// The saved state is used to restore the thread to
// its former state whether or not an interrupt is received.
388 // Used by classloader os::read 389 // hpi calls skip this layer and stay in _thread_in_native 390 391 void os::Solaris::setup_interruptible(JavaThread* thread) { 392 393 JavaThreadState thread_state = thread->thread_state(); 394 395 assert(thread_state != _thread_blocked, "Coming from the wrong thread"); 396 assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible"); 397 OSThread* osthread = thread->osthread(); 398 osthread->set_saved_interrupt_thread_state(thread_state); 399 thread->frame_anchor()->make_walkable(thread); 400 ThreadStateTransition::transition(thread, thread_state, _thread_blocked); 401 } 402 403 // Version of setup_interruptible() for threads that are already in 404 // _thread_blocked. Used by os_sleep(). 405 void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) { 406 thread->frame_anchor()->make_walkable(thread); 407 } 408 409 JavaThread* os::Solaris::setup_interruptible() { 410 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread(); 411 setup_interruptible(thread); 412 return thread; 413 } 414 415 void os::Solaris::try_enable_extended_io() { 416 typedef int (*enable_extended_FILE_stdio_t)(int, int); 417 418 if (!UseExtendedFileIO) { 419 return; 420 } 421 422 enable_extended_FILE_stdio_t enabler = 423 (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT, 424 "enable_extended_FILE_stdio"); 425 if (enabler) { 426 enabler(-1, -1); 427 } 428 } 429 430 431 #ifdef ASSERT 432 433 JavaThread* os::Solaris::setup_interruptible_native() { 434 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread(); 435 JavaThreadState thread_state = thread->thread_state(); 436 assert(thread_state == _thread_in_native, "Assumed thread_in_native"); 437 return thread; 438 } 439 440 void os::Solaris::cleanup_interruptible_native(JavaThread* thread) { 441 JavaThreadState thread_state = thread->thread_state(); 442 assert(thread_state == _thread_in_native, "Assumed thread_in_native"); 443 } 444 #endif 445 446 // 
// cleanup_interruptible reverses the effects of setup_interruptible
// setup_interruptible_already_blocked() does not need any cleanup.

// Restore the thread state that setup_interruptible() stashed in the
// OSThread before the interruptible call began.
void os::Solaris::cleanup_interruptible(JavaThread* thread) {
  OSThread* osthread = thread->osthread();

  ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
}

// I/O interruption related counters called in _INTERRUPTIBLE

void os::Solaris::bump_interrupted_before_count() {
  RuntimeService::record_interrupted_before_count();
}

void os::Solaris::bump_interrupted_during_count() {
  RuntimeService::record_interrupted_during_count();
}

// Cached count of online processors; refreshed by active_processor_count().
static int _processors_online = 0;

jint os::Solaris::_os_thread_limit = 0;
volatile jint os::Solaris::_os_thread_count = 0;

julong os::available_memory() {
  return Solaris::available_memory();
}

// Available physical memory = free pages * page size.
julong os::Solaris::available_memory() {
  return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
}

julong os::Solaris::_physical_memory = 0;

julong os::physical_memory() {
  return Solaris::physical_memory();
}

// Cap an allocation request to what a 32-bit process can actually map;
// 64-bit processes are unconstrained.
julong os::allocatable_physical_memory(julong size) {
#ifdef _LP64
  return size;
#else
  julong result = MIN2(size, (julong)3835*M);
  if (!is_allocatable(result)) {
    // Memory allocations will be aligned but the alignment
    // is not known at this point.  Alignments will
    // be at most to LargePageSizeInBytes.  Protect
    // allocations from alignments up to illegal
    // values. If at this point 2G is illegal.
    julong reasonable_size = (julong)2*G - 2 * LargePageSizeInBytes;
    result = MIN2(size, reasonable_size);
  }
  return result;
#endif
}

// High-resolution timer bookkeeping (used by the time code further on in
// this file, not visible in this chunk).
static hrtime_t first_hrtime = 0;
static const hrtime_t hrtime_hz = 1000*1000*1000;
const int LOCK_BUSY = 1;
const int LOCK_FREE = 0;
const int LOCK_INVALID = -1;
static volatile hrtime_t max_hrtime = 0;
static volatile int max_hrtime_lock = LOCK_FREE; // Update counter with LSB as lock-in-progress


// One-time snapshot of processor count and physical memory size.
void os::Solaris::initialize_system_info() {
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
  _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
}

// Number of processors this process may actually run on: the size of its
// processor set if it is bound to one, otherwise all online processors.
int os::active_processor_count() {
  int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
  pid_t pid = getpid();
  psetid_t pset = PS_NONE;
  // Are we running in a processor set or is there any processor set around?
  if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
    uint_t pset_cpus;
    // Query the number of cpus available to us.
    if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
      assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
      _processors_online = pset_cpus;
      return pset_cpus;
    }
  }
  // Otherwise return number of online cpus
  return online_cpus;
}

// Enumerate the processor ids belonging to `pset` into a freshly allocated
// C-heap array (*id_array, *id_length). Caller frees the array.
static bool find_processors_in_pset(psetid_t pset,
                                    processorid_t** id_array,
                                    uint_t* id_length) {
  bool result = false;
  // Find the number of processors in the processor set.
  if (pset_info(pset, NULL, id_length, NULL) == 0) {
    // Make up an array to hold their ids.
    *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length);
    // Fill in the array with their processor ids.
    if (pset_info(pset, NULL, id_length, *id_array) == 0) {
      result = true;
    }
  }
  return result;
}

// Callers of find_processors_online() must tolerate imprecise results --
// the system configuration can change asynchronously because of DR
// or explicit psradm operations.
//
// We also need to take care that the loop (below) terminates as the
// number of processors online can change between the _SC_NPROCESSORS_ONLN
// request and the loop that builds the list of processor ids.   Unfortunately
// there's no reliable way to determine the maximum valid processor id,
// so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
// man pages, which claim the processor id set is "sparse, but
// not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
// exit the loop.
//
// In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
// not available on S8.0.

// Enumerate the ids of all online (or no-intr) processors into a freshly
// allocated C-heap array. Always returns true; *id_length may come back
// smaller than requested (possibly zero) if the configuration changed.
static bool find_processors_online(processorid_t** id_array,
                                   uint* id_length) {
  const processorid_t MAX_PROCESSOR_ID = 100000 ;
  // Find the number of processors online.
  *id_length = sysconf(_SC_NPROCESSORS_ONLN);
  // Make up an array to hold their ids.
  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length);
  // Processors need not be numbered consecutively.
  long found = 0;
  processorid_t next = 0;
  while (found < *id_length && next < MAX_PROCESSOR_ID) {
    processor_info_t info;
    if (processor_info(next, &info) == 0) {
      // NB, PI_NOINTR processors are effectively online ...
      if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
        (*id_array)[found] = next;
        found += 1;
      }
    }
    next += 1;
  }
  if (found < *id_length) {
    // The loop above didn't identify the expected number of processors.
    // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
    // and re-running the loop, above, but there's no guarantee of progress
    // if the system configuration is in flux.  Instead, we just return what
    // we've got.  Note that in the worst case find_processors_online() could
    // return an empty set.  (As a fall-back in the case of the empty set we
    // could just return the ID of the current processor).
    *id_length = found ;
  }

  return true;
}

// Pick `distribution_length` processor ids out of id_array, striding by
// ProcessDistributionStride so successive picks land on different "boards".
// Requires id_length >= distribution_length (checked up front).
// NOTE(review): the while loop below terminates only because the caller
// guarantees at least distribution_length available ids -- confirm no
// caller can pass duplicates in id_array.
static bool assign_distribution(processorid_t* id_array,
                                uint id_length,
                                uint* distribution,
                                uint distribution_length) {
  // We assume we can assign processorid_t's to uint's.
  assert(sizeof(processorid_t) == sizeof(uint),
         "can't convert processorid_t to uint");
  // Quick check to see if we won't succeed.
  if (id_length < distribution_length) {
    return false;
  }
  // Assign processor ids to the distribution.
  // Try to shuffle processors to distribute work across boards,
  // assuming 4 processors per board.
  const uint processors_per_board = ProcessDistributionStride;
  // Find the maximum processor id.
  processorid_t max_id = 0;
  for (uint m = 0; m < id_length; m += 1) {
    max_id = MAX2(max_id, id_array[m]);
  }
  // The next id, to limit loops.
  const processorid_t limit_id = max_id + 1;
  // Make up markers for available processors.
  bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id);
  for (uint c = 0; c < limit_id; c += 1) {
    available_id[c] = false;
  }
  for (uint a = 0; a < id_length; a += 1) {
    available_id[id_array[a]] = true;
  }
  // Step by "boards", then by "slot", copying to "assigned".
  // NEEDS_CLEANUP: The assignment of processors should be stateful,
  //                remembering which processors have been assigned by
  //                previous calls, etc., so as to distribute several
  //                independent calls of this method.  What we'd like is
  // It would be nice to have an API that let us ask
  // how many processes are bound to a processor,
  // but we don't have that, either.
  // In the short term, "board" is static so that
  // subsequent distributions don't all start at board 0.
  static uint board = 0;
  uint assigned = 0;
  // Until we've found enough processors ....
  while (assigned < distribution_length) {
    // ... find the next available processor in the board.
    for (uint slot = 0; slot < processors_per_board; slot += 1) {
      uint try_id = board * processors_per_board + slot;
      if ((try_id < limit_id) && (available_id[try_id] == true)) {
        distribution[assigned] = try_id;
        available_id[try_id] = false;
        assigned += 1;
        break;
      }
    }
    board += 1;
    if (board * processors_per_board + 0 >= limit_id) {
      board = 0;
    }
  }
  if (available_id != NULL) {
    FREE_C_HEAP_ARRAY(bool, available_id);
  }
  return true;
}

// Fill `distribution` with `length` processor ids chosen from the CPUs
// this process may use (its processor set, or all online processors).
bool os::distribute_processes(uint length, uint* distribution) {
  bool result = false;
  // Find the processor id's of all the available CPUs.
  processorid_t* id_array = NULL;
  uint id_length = 0;
  // There are some races between querying information and using it,
  // since processor sets can change dynamically.
  psetid_t pset = PS_NONE;
  // Are we running in a processor set?
  if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
    result = find_processors_in_pset(pset, &id_array, &id_length);
  } else {
    result = find_processors_online(&id_array, &id_length);
  }
  if (result == true) {
    if (id_length >= length) {
      result = assign_distribution(id_array, id_length, distribution, length);
    } else {
      result = false;
    }
  }
  if (id_array != NULL) {
    FREE_C_HEAP_ARRAY(processorid_t, id_array);
  }
  return result;
}

// Bind the current LWP to the given processor id.
bool os::bind_to_processor(uint processor_id) {
  // We assume that a processorid_t can be stored in a uint.
  assert(sizeof(uint) == sizeof(processorid_t),
         "can't convert uint to processorid_t");
  int bind_result =
    processor_bind(P_LWPID,                       // bind LWP.
                   P_MYID,                        // bind current LWP.
                   (processorid_t) processor_id,  // id.
                   NULL);                         // don't return old binding.
  return (bind_result == 0);
}

// Copy the value of environment variable `name` into `buffer` (capacity
// `len`, including the terminating NUL). Returns false (with an empty
// buffer) if the variable is unset or too long.
// NOTE(review): `len` is int -- a negative len converts to a huge size_t
// in the comparison below, bypassing the bounds check; confirm all
// callers pass non-negative lengths.
bool os::getenv(const char* name, char* buffer, int len) {
  char* val = ::getenv( name );
  if ( val == NULL
  ||   strlen(val) + 1  >  len ) {
    if (len > 0)  buffer[0] = 0; // return a null string
    return false;
  }
  strcpy( buffer, val );
  return true;
}


// Return true if user is running as root.
// NOTE(review): what is actually detected is a uid/euid or gid/egid
// mismatch (i.e. a set-uid/set-gid launch), not uid == 0 -- the summary
// line above overstates it.

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}


// Compute and publish the system properties derived from the location of
// libjvm.so: java.home, sun.boot.library.path, java.library.path,
// java.ext.dirs and java.endorsed.dirs (via the Arguments:: setters).
void os::init_system_properties_values() {
  char arch[12];
  sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));

  // The next steps are taken in the product version:
  //
  // Obtain the JAVA_HOME value from the location of libjvm[_g].so.
  // This library should be located at:
  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm[_g].so.
  //
  // If "/jre/lib/" appears at the right place in the path, then we
  // assume libjvm[_g].so is installed in a JDK and we use this path.
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path
  // instead of exit check for $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
  // then we append a fake suffix "hotspot/libjvm[_g].so" to this path so
  // it looks like libjvm[_g].so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm[_g].so.
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

  // The next few definitions allow the code to be verbatim:
#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n))
#define free(p) FREE_C_HEAP_ARRAY(char, p)
#define getenv(n) ::getenv(n)

#define EXTENSIONS_DIR "/lib/ext"
#define ENDORSED_DIR "/lib/endorsed"
#define COMMON_DIR "/usr/jdk/packages"

  {
    /* sysclasspath, java_home, dll_dir */
    {
      char *home_path;
      char *dll_path;
      char *pslash;
      char buf[MAXPATHLEN];
      os::jvm_path(buf, sizeof(buf));

      // Found the full path to libjvm.so.
      // Now cut the path to <java_home>/jre if we can.
      *(strrchr(buf, '/')) = '\0';  /* get rid of /libjvm.so */
      pslash = strrchr(buf, '/');
      if (pslash != NULL)
        *pslash = '\0';             /* get rid of /{client|server|hotspot} */
      dll_path = malloc(strlen(buf) + 1);
      if (dll_path == NULL)
        return;
      strcpy(dll_path, buf);
      Arguments::set_dll_dir(dll_path);

      if (pslash != NULL) {
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';           /* get rid of /<arch> */
          pslash = strrchr(buf, '/');
          if (pslash != NULL)
            *pslash = '\0';         /* get rid of /lib */
        }
      }

      home_path = malloc(strlen(buf) + 1);
      if (home_path == NULL)
        return;
      strcpy(home_path, buf);
      Arguments::set_java_home(home_path);

      if (!set_boot_path('/', ':'))
        return;
    }

    /*
     * Where to look for native libraries
     */
    {
      // Use dlinfo() to determine the correct java.library.path.
      //
      // If we're launched by the Java launcher, and the user
      // does not set java.library.path explicitly on the commandline,
      // the Java launcher sets LD_LIBRARY_PATH for us and unsets
      // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case
      // dlinfo returns LD_LIBRARY_PATH + crle settings (including
      // /usr/lib), which is exactly what we want.
      //
      // If the user does set java.library.path, it completely
      // overwrites this setting, and always has.
      //
      // If we're not launched by the Java launcher, we may
      // get here with any/all of the LD_LIBRARY_PATH[_32|64]
      // settings. Again, dlinfo does exactly what we want.

      // Two-phase dlinfo() protocol: first ask for the required buffer
      // size (RTLD_DI_SERINFOSIZE), then fetch the search paths proper
      // (RTLD_DI_SERINFO) into a buffer of that size.
      Dl_serinfo _info, *info = &_info;
      Dl_serpath *path;
      char* library_path;
      char *common_path;
      int i;

      // determine search path count and required buffer size
      if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
        vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
      }

      // allocate new buffer and initialize
      info = (Dl_serinfo*)malloc(_info.dls_size);
      if (info == NULL) {
        vm_exit_out_of_memory(_info.dls_size,
                              "init_system_properties_values info");
      }
      info->dls_size = _info.dls_size;
      info->dls_cnt = _info.dls_cnt;

      // obtain search path information
      if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
        free(info);
        vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
      }

      path = &info->dls_serpath[0];

      // Note: Due to a legacy implementation, most of the library path
      // is set in the launcher. This was to accomodate linking restrictions
      // on legacy Solaris implementations (which are no longer supported).
      // Eventually, all the library path setting will be done here.
      //
      // However, to prevent the proliferation of improperly built native
      // libraries, the new path component /usr/jdk/packages is added here.

      // Determine the actual CPU architecture.
      char cpu_arch[12];
      sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
      // If we are a 64-bit vm, perform the following translations:
      //   sparc -> sparcv9
      //   i386  -> amd64
      if (strcmp(cpu_arch, "sparc") == 0)
        strcat(cpu_arch, "v9");
      else if (strcmp(cpu_arch, "i386") == 0)
        strcpy(cpu_arch, "amd64");
#endif

      // Construct the invariant part of ld_library_path. Note that the
      // space for the colon and the trailing null are provided by the
      // nulls included by the sizeof operator.
      size_t bufsize = sizeof(COMMON_DIR) + sizeof("/lib/") + strlen(cpu_arch);
      common_path = malloc(bufsize);
      if (common_path == NULL) {
        free(info);
        vm_exit_out_of_memory(bufsize,
                              "init_system_properties_values common_path");
      }
      sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch);

      // struct size is more than sufficient for the path components obtained
      // through the dlinfo() call, so only add additional space for the path
      // components explicitly added here.
      bufsize = info->dls_size + strlen(common_path);
      library_path = malloc(bufsize);
      if (library_path == NULL) {
        free(info);
        free(common_path);
        vm_exit_out_of_memory(bufsize,
                              "init_system_properties_values library_path");
      }
      library_path[0] = '\0';

      // Construct the desired Java library path from the linker's library
      // search path.
      //
      // For compatibility, it is optimal that we insert the additional path
      // components specific to the Java VM after those components specified
      // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
      // infrastructure.
      if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it
        strcpy(library_path, common_path);
      } else {
        int inserted = 0;
        for (i = 0; i < info->dls_cnt; i++, path++) {
          uint_t flags = path->dls_flags & LA_SER_MASK;
          // Insert common_path just before the first non-LD_LIBRARY_PATH
          // component (i.e. before the ld.so defaults).
          if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
            strcat(library_path, common_path);
            strcat(library_path, os::path_separator());
            inserted = 1;
          }
          strcat(library_path, path->dls_name);
          strcat(library_path, os::path_separator());
        }
        // eliminate trailing path separator
        library_path[strlen(library_path)-1] = '\0';
      }

      // happens before argument parsing - can't use a trace flag
      // tty->print_raw("init_system_properties_values: native lib path: ");
      // tty->print_raw_cr(library_path);

      // callee copies into its own buffer
      Arguments::set_library_path(library_path);

      free(common_path);
      free(library_path);
      free(info);
    }

    /*
     * Extensions directories.
     *
     * Note that the space for the colon and the trailing null are provided
     * by the nulls included by the sizeof operator (so actually one byte more
     * than necessary is allocated).
     */
    {
      char *buf = (char *) malloc(strlen(Arguments::get_java_home()) +
          sizeof(EXTENSIONS_DIR) + sizeof(COMMON_DIR) +
          sizeof(EXTENSIONS_DIR));
      sprintf(buf, "%s" EXTENSIONS_DIR ":" COMMON_DIR EXTENSIONS_DIR,
          Arguments::get_java_home());
      Arguments::set_ext_dirs(buf);
    }

    /* Endorsed standards default directory. */
    {
      char * buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
      sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
      Arguments::set_endorsed_dirs(buf);
    }
  }

#undef malloc
#undef free
#undef getenv
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
#undef COMMON_DIR

}

// Trap into the debugger (the BREAKPOINT macro is platform-defined).
void os::breakpoint() {
  BREAKPOINT;
}

// Return true for command-line options that are recognized but no longer
// supported, so the launcher can warn instead of failing.
// NOTE(review): the first test matches any option beginning with "-Xt",
// which includes "-Xtm...", so the second branch is unreachable; the net
// behavior (return true) is the same either way.
bool os::obsolete_option(const JavaVMOption *option)
{
  if (!strncmp(option->optionString, "-Xt", 3)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xtm", 4)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
    return true;
  }
  return false;
}

// Return true if sp lies within [stack_base - stack_size, stack_base).
// NOTE(review): the (address) cast on stack_size() before the subtraction
// is odd but the pointer arithmetic comes out the same -- stackEnd is
// stackStart minus the stack size.
bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
  address stackStart = (address)thread->stack_base();
  address stackEnd = (address)(stackStart - (address)thread->stack_size());
  if (sp < stackStart && sp >= stackEnd ) return true;
  return false;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

// Returns an estimate of the current stack pointer. Result must be guaranteed to
// point into the calling threads stack, and be no lower than the current stack
// pointer.
address os::current_stack_pointer() {
  volatile int dummy;
  address sp = (address)&dummy + 8; // %%%% need to confirm if this is right
  return sp;
}

// Thread id of the primordial thread (set during startup, not visible in
// this chunk).
static thread_t main_thread;

// Thread start routine for all new Java threads
extern "C" void* java_start(void* thread_addr) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances.
*/ 959 { 960 char * buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR)); 961 sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home()); 962 Arguments::set_endorsed_dirs(buf); 963 } 964 } 965 966 #undef malloc 967 #undef free 968 #undef getenv 969 #undef EXTENSIONS_DIR 970 #undef ENDORSED_DIR 971 #undef COMMON_DIR 972 973 } 974 975 void os::breakpoint() { 976 BREAKPOINT; 977 } 978 979 bool os::obsolete_option(const JavaVMOption *option) 980 { 981 if (!strncmp(option->optionString, "-Xt", 3)) { 982 return true; 983 } else if (!strncmp(option->optionString, "-Xtm", 4)) { 984 return true; 985 } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) { 986 return true; 987 } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) { 988 return true; 989 } 990 return false; 991 } 992 993 bool os::Solaris::valid_stack_address(Thread* thread, address sp) { 994 address stackStart = (address)thread->stack_base(); 995 address stackEnd = (address)(stackStart - (address)thread->stack_size()); 996 if (sp < stackStart && sp >= stackEnd ) return true; 997 return false; 998 } 999 1000 extern "C" void breakpoint() { 1001 // use debugger to set breakpoint here 1002 } 1003 1004 // Returns an estimate of the current stack pointer. Result must be guaranteed to 1005 // point into the calling threads stack, and be no lower than the current stack 1006 // pointer. 1007 address os::current_stack_pointer() { 1008 volatile int dummy; 1009 address sp = (address)&dummy + 8; // %%%% need to confirm if this is right 1010 return sp; 1011 } 1012 1013 static thread_t main_thread; 1014 1015 // Thread start routine for all new Java threads 1016 extern "C" void* java_start(void* thread_addr) { 1017 // Try to randomize the cache line index of hot stack frames. 1018 // This helps when threads of the same stack traces evict each other's 1019 // cache lines. The threads can be either from the same JVM instance, or 1020 // from different JVM instances. 
// from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  // Shift this frame down by 0..7 slots of 128 bytes, keyed off the pid and
  // a per-process counter, so hot frames land on different cache lines.
  alloca(((pid ^ counter++) & 7) * 128);

  int prio;
  Thread* thread = (Thread*)thread_addr;
  OSThread* osthr = thread->osthread();

  osthr->set_lwp_id( _lwp_self() );        // Store lwp in case we are bound
  thread->_schedctl = (void *) schedctl_init () ;

  // Record the NUMA locality group this thread starts on, when available.
  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // If the creator called set priority before we started,
  // we need to call set priority now that we have an lwp.
  // Get the priority from libthread and set the priority
  // for the new Solaris lwp.
  if ( osthr->thread_id() != -1 ) {
    if ( UseThreadPriorities ) {
      thr_getprio(osthr->thread_id(), &prio);
      if (ThreadPriorityVerbose) {
        tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT ", setting priority: %d\n",
                      osthr->thread_id(), osthr->lwp_id(), prio );
      }
      os::set_native_priority(thread, prio);
    }
  } else if (ThreadPriorityVerbose) {
    warning("Can't set priority in _start routine, thread id hasn't been set\n");
  }

  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  // initialize signal mask for this thread
  os::Solaris::hotspot_sigmask(thread);

  // Run the thread's payload; returns when the thread has finished.
  thread->run();

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::dec code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec(&os::Solaris::_os_thread_count);
  }

  if (UseDetachedThreads) {
    thr_exit(NULL);
    ShouldNotReachHere();
  }
  return NULL;
}

// Allocates an OSThread and records the Solaris identity of an already
// existing thread into it (used for attached and primordial threads).
static OSThread* create_os_thread(Thread* thread,
thread_t thread_id) { 1079 // Allocate the OSThread object 1080 OSThread* osthread = new OSThread(NULL, NULL); 1081 if (osthread == NULL) return NULL; 1082 1083 // Store info on the Solaris thread into the OSThread 1084 osthread->set_thread_id(thread_id); 1085 osthread->set_lwp_id(_lwp_self()); 1086 thread->_schedctl = (void *) schedctl_init () ; 1087 1088 if (UseNUMA) { 1089 int lgrp_id = os::numa_get_group_id(); 1090 if (lgrp_id != -1) { 1091 thread->set_lgrp_id(lgrp_id); 1092 } 1093 } 1094 1095 if ( ThreadPriorityVerbose ) { 1096 tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n", 1097 osthread->thread_id(), osthread->lwp_id() ); 1098 } 1099 1100 // Initial thread state is INITIALIZED, not SUSPENDED 1101 osthread->set_state(INITIALIZED); 1102 1103 return osthread; 1104 } 1105 1106 void os::Solaris::hotspot_sigmask(Thread* thread) { 1107 1108 //Save caller's signal mask 1109 sigset_t sigmask; 1110 thr_sigsetmask(SIG_SETMASK, NULL, &sigmask); 1111 OSThread *osthread = thread->osthread(); 1112 osthread->set_caller_sigmask(sigmask); 1113 1114 thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL); 1115 if (!ReduceSignalUsage) { 1116 if (thread->is_VM_thread()) { 1117 // Only the VM thread handles BREAK_SIGNAL ... 1118 thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL); 1119 } else { 1120 // ... 
// all other threads block BREAK_SIGNAL
      assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
      thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Attach an externally created thread (e.g. JNI AttachCurrentThread) to
// the VM. Returns false if the OSThread cannot be allocated.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  OSThread* osthread = create_os_thread(thread, thr_self());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE: an attached thread is already running.
  osthread->set_state(RUNNABLE);
  thread->set_osthread(osthread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// Set up the OSThread for the primordial (main) thread.
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// _T2_libthread is true if we believe we are running with the newer
// SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
bool os::Solaris::_T2_libthread = false;

// Create a new native thread for 'thread'. A 'stack_size' of 0 selects a
// default based on the thread type. The new thread is created SUSPENDED
// and started later via os::pd_start_thread().
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  if ( ThreadPriorityVerbose ) {
    char *thrtyp;
    switch ( thr_type ) {
      case vm_thread:
        thrtyp = (char *)"vm";
        break;
      case cgc_thread:
        thrtyp = (char *)"cgc";
        break;
      case pgc_thread:
        thrtyp = (char *)"pgc";
        break;
      case java_thread:
        thrtyp = (char *)"java";
        break;
      case compiler_thread:
        thrtyp = (char *)"compiler";
        break;
      case watcher_thread:
        thrtyp = (char *)"watcher";
        break;
      default:
        thrtyp = (char *)"unknown";
        break;
    }
    tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  }

  // Calculate stack size if it's not specified by caller.
  if (stack_size == 0) {
    // The default stack size 1M (2M for LP64).
    stack_size = (BytesPerWord >> 2) * K * K;

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }
  stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We got lots of threads. Check if we still have some address space left.
    // Need to be at least 5Mb of unreserved address space. We do check by
    // trying to reserve some.
    const size_t VirtualMemoryBangSize = 20*K*K;
    char* mem = os::reserve_memory(VirtualMemoryBangSize);
    if (mem == NULL) {
      delete osthread;
      return false;
    } else {
      // Release the memory again
      os::release_memory(mem, VirtualMemoryBangSize);
    }
  }

  // Setup osthread because the child thread may need it.
  thread->set_osthread(osthread);

  // Create the Solaris thread.
  // explicit THR_BOUND for T2_libthread case in case
  // that assumption is not accurate, but our alternate signal stack
  // handling is based on it which must have bound threads
  thread_t tid = 0;
  long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
               | ((UseBoundThreads || os::Solaris::T2_libthread() ||
                   (thr_type == vm_thread) ||
                   (thr_type == cgc_thread) ||
                   (thr_type == pgc_thread) ||
                   (thr_type == compiler_thread && BackgroundCompilation)) ?
                  THR_BOUND : 0);
  int status;

  // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
  //
  // On multiprocessors systems, libthread sometimes under-provisions our
  // process with LWPs. On a 30-way systems, for instance, we could have
  // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
  // to our process. This can result in under utilization of PEs.
  // I suspect the problem is related to libthread's LWP
  // pool management and to the kernel's SIGBLOCKING "last LWP parked"
  // upcall policy.
  //
  // The following code is palliative -- it attempts to ensure that our
  // process has sufficient LWPs to take advantage of multiple PEs.
  // Proper long-term cures include using user-level threads bound to LWPs
  // (THR_BOUND) or using LWP-based synchronization. Note that there is a
  // slight timing window with respect to sampling _os_thread_count, but
  // the race is benign. Also, we should periodically recompute
  // _processors_online as the min of SC_NPROCESSORS_ONLN and the
  // the number of PEs in our partition. You might be tempted to use
  // THR_NEW_LWP here, but I'd recommend against it as that could
  // result in undesirable growth of the libthread's LWP pool.
  // The fix below isn't sufficient; for instance, it doesn't take into count
  // LWPs parked on IO. It does, however, help certain CPU-bound benchmarks.
  //
  // Some pathologies this scheme doesn't handle:
  // * Threads can block, releasing the LWPs. The LWPs can age out.
  //   When a large number of threads become ready again there aren't
  //   enough LWPs available to service them. This can occur when the
  //   number of ready threads oscillates.
  // * LWPs/Threads park on IO, thus taking the LWP out of circulation.
  //
  // Finally, we should call thr_setconcurrency() periodically to refresh
  // the LWP pool and thwart the LWP age-out mechanism.
  // The "+3" term provides a little slop -- we want to slightly overprovision.

  if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
    if (!(flags & THR_BOUND)) {
      thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
    }
  }
  // Although this doesn't hurt, we should warn of undefined behavior
  // when using unbound T1 threads with schedctl(). This should never
  // happen, as the compiler and VM threads are always created bound
  DEBUG_ONLY(
      if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
          (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
          ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
           (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
        warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
      }
  );

  // Mark that we don't have an lwp or thread id yet.
  // In case we attempt to set the priority before the thread starts.
  osthread->set_lwp_id(-1);
  osthread->set_thread_id(-1);

  status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  if (status != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("os::create_thread");
    }
    thread->set_osthread(NULL);
    // Need to clean up stuff we've allocated so far
    delete osthread;
    return false;
  }

  Atomic::inc(&os::Solaris::_os_thread_count);

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(tid);

  // Remember that we created this thread so we can set priority on it
  osthread->set_vm_created();

  // Set the default thread priority otherwise use NormalPriority
  if ( UseThreadPriorities ) {
    thr_setprio(tid, (DefaultThreadPriority == -1) ?
                        java_to_os_priority[NormPriority] :
                        DefaultThreadPriority);
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}

/* defined for >= Solaris 10.
This allows builds on earlier versions 1362 * of Solaris to take advantage of the newly reserved Solaris JVM signals 1363 * With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2 1364 * and -XX:+UseAltSigs does nothing since these should have no conflict 1365 */ 1366 #if !defined(SIGJVM1) 1367 #define SIGJVM1 39 1368 #define SIGJVM2 40 1369 #endif 1370 1371 debug_only(static bool signal_sets_initialized = false); 1372 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs; 1373 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL; 1374 int os::Solaris::_SIGasync = ASYNC_SIGNAL; 1375 1376 bool os::Solaris::is_sig_ignored(int sig) { 1377 struct sigaction oact; 1378 sigaction(sig, (struct sigaction*)NULL, &oact); 1379 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction) 1380 : CAST_FROM_FN_PTR(void*, oact.sa_handler); 1381 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) 1382 return true; 1383 else 1384 return false; 1385 } 1386 1387 // Note: SIGRTMIN is a macro that calls sysconf() so it will 1388 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime 1389 static bool isJVM1available() { 1390 return SIGJVM1 < SIGRTMIN; 1391 } 1392 1393 void os::Solaris::signal_sets_init() { 1394 // Should also have an assertion stating we are still single-threaded. 1395 assert(!signal_sets_initialized, "Already initialized"); 1396 // Fill in signals that are necessarily unblocked for all threads in 1397 // the VM. Currently, we unblock the following signals: 1398 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden 1399 // by -Xrs (=ReduceSignalUsage)); 1400 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all 1401 // other threads. The "ReduceSignalUsage" boolean tells us not to alter 1402 // the dispositions or masks wrt these signals. 
1403 // Programs embedding the VM that want to use the above signals for their 1404 // own purposes must, at this time, use the "-Xrs" option to prevent 1405 // interference with shutdown hooks and BREAK_SIGNAL thread dumping. 1406 // (See bug 4345157, and other related bugs). 1407 // In reality, though, unblocking these signals is really a nop, since 1408 // these signals are not blocked by default. 1409 sigemptyset(&unblocked_sigs); 1410 sigemptyset(&allowdebug_blocked_sigs); 1411 sigaddset(&unblocked_sigs, SIGILL); 1412 sigaddset(&unblocked_sigs, SIGSEGV); 1413 sigaddset(&unblocked_sigs, SIGBUS); 1414 sigaddset(&unblocked_sigs, SIGFPE); 1415 1416 if (isJVM1available) { 1417 os::Solaris::set_SIGinterrupt(SIGJVM1); 1418 os::Solaris::set_SIGasync(SIGJVM2); 1419 } else if (UseAltSigs) { 1420 os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL); 1421 os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL); 1422 } else { 1423 os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL); 1424 os::Solaris::set_SIGasync(ASYNC_SIGNAL); 1425 } 1426 1427 sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt()); 1428 sigaddset(&unblocked_sigs, os::Solaris::SIGasync()); 1429 1430 if (!ReduceSignalUsage) { 1431 if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) { 1432 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL); 1433 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL); 1434 } 1435 if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) { 1436 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL); 1437 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL); 1438 } 1439 if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) { 1440 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL); 1441 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL); 1442 } 1443 } 1444 // Fill in signals that are blocked by all but the VM thread. 
1445 sigemptyset(&vm_sigs); 1446 if (!ReduceSignalUsage) 1447 sigaddset(&vm_sigs, BREAK_SIGNAL); 1448 debug_only(signal_sets_initialized = true); 1449 1450 // For diagnostics only used in run_periodic_checks 1451 sigemptyset(&check_signal_done); 1452 } 1453 1454 // These are signals that are unblocked while a thread is running Java. 1455 // (For some reason, they get blocked by default.) 1456 sigset_t* os::Solaris::unblocked_signals() { 1457 assert(signal_sets_initialized, "Not initialized"); 1458 return &unblocked_sigs; 1459 } 1460 1461 // These are the signals that are blocked while a (non-VM) thread is 1462 // running Java. Only the VM thread handles these signals. 1463 sigset_t* os::Solaris::vm_signals() { 1464 assert(signal_sets_initialized, "Not initialized"); 1465 return &vm_sigs; 1466 } 1467 1468 // These are signals that are blocked during cond_wait to allow debugger in 1469 sigset_t* os::Solaris::allowdebug_blocked_signals() { 1470 assert(signal_sets_initialized, "Not initialized"); 1471 return &allowdebug_blocked_sigs; 1472 } 1473 1474 // First crack at OS-specific initialization, from inside the new thread. 1475 void os::initialize_thread() { 1476 int r = thr_main() ; 1477 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 1478 if (r) { 1479 JavaThread* jt = (JavaThread *)Thread::current(); 1480 assert(jt != NULL,"Sanity check"); 1481 size_t stack_size; 1482 address base = jt->stack_base(); 1483 if (Arguments::created_by_java_launcher()) { 1484 // Use 2MB to allow for Solaris 7 64 bit mode. 1485 stack_size = JavaThread::stack_size_at_create() == 0 1486 ? 2048*K : JavaThread::stack_size_at_create(); 1487 1488 // There are rare cases when we may have already used more than 1489 // the basic stack size allotment before this method is invoked. 1490 // Attempt to allow for a normally sized java_stack. 
1491 size_t current_stack_offset = (size_t)(base - (address)&stack_size); 1492 stack_size += ReservedSpace::page_align_size_down(current_stack_offset); 1493 } else { 1494 // 6269555: If we were not created by a Java launcher, i.e. if we are 1495 // running embedded in a native application, treat the primordial thread 1496 // as much like a native attached thread as possible. This means using 1497 // the current stack size from thr_stksegment(), unless it is too large 1498 // to reliably setup guard pages. A reasonable max size is 8MB. 1499 size_t current_size = current_stack_size(); 1500 // This should never happen, but just in case.... 1501 if (current_size == 0) current_size = 2 * K * K; 1502 stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size; 1503 } 1504 address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());; 1505 stack_size = (size_t)(base - bottom); 1506 1507 assert(stack_size > 0, "Stack size calculation problem"); 1508 1509 if (stack_size > jt->stack_size()) { 1510 NOT_PRODUCT( 1511 struct rlimit limits; 1512 getrlimit(RLIMIT_STACK, &limits); 1513 size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur); 1514 assert(size >= jt->stack_size(), "Stack size problem in main thread"); 1515 ) 1516 tty->print_cr( 1517 "Stack size of %d Kb exceeds current limit of %d Kb.\n" 1518 "(Stack sizes are rounded up to a multiple of the system page size.)\n" 1519 "See limit(1) to increase the stack size limit.", 1520 stack_size / K, jt->stack_size() / K); 1521 vm_exit(1); 1522 } 1523 assert(jt->stack_size() >= stack_size, 1524 "Attempt to map more stack than was allocated"); 1525 jt->set_stack_size(stack_size); 1526 } 1527 1528 // 5/22/01: Right now alternate signal stacks do not handle 1529 // throwing stack overflow exceptions, see bug 4463178 1530 // Until a fix is found for this, T2 will NOT imply alternate signal 1531 // stacks. 1532 // If using T2 libthread threads, install an alternate signal stack. 
1533 // Because alternate stacks associate with LWPs on Solaris, 1534 // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads 1535 // we prefer to explicitly stack bang. 1536 // If not using T2 libthread, but using UseBoundThreads any threads 1537 // (primordial thread, jni_attachCurrentThread) we do not create, 1538 // probably are not bound, therefore they can not have an alternate 1539 // signal stack. Since our stack banging code is generated and 1540 // is shared across threads, all threads must be bound to allow 1541 // using alternate signal stacks. The alternative is to interpose 1542 // on _lwp_create to associate an alt sig stack with each LWP, 1543 // and this could be a problem when the JVM is embedded. 1544 // We would prefer to use alternate signal stacks with T2 1545 // Since there is currently no accurate way to detect T2 1546 // we do not. Assuming T2 when running T1 causes sig 11s or assertions 1547 // on installing alternate signal stacks 1548 1549 1550 // 05/09/03: removed alternate signal stack support for Solaris 1551 // The alternate signal stack mechanism is no longer needed to 1552 // handle stack overflow. This is now handled by allocating 1553 // guard pages (red zone) and stackbanging. 1554 // Initially the alternate signal stack mechanism was removed because 1555 // it did not work with T1 llibthread. Alternate 1556 // signal stacks MUST have all threads bound to lwps. Applications 1557 // can create their own threads and attach them without their being 1558 // bound under T1. This is frequently the case for the primordial thread. 1559 // If we were ever to reenable this mechanism we would need to 1560 // use the dynamic check for T2 libthread. 
1561 1562 os::Solaris::init_thread_fpu_state(); 1563 } 1564 1565 1566 1567 // Free Solaris resources related to the OSThread 1568 void os::free_thread(OSThread* osthread) { 1569 assert(osthread != NULL, "os::free_thread but osthread not set"); 1570 1571 1572 // We are told to free resources of the argument thread, 1573 // but we can only really operate on the current thread. 1574 // The main thread must take the VMThread down synchronously 1575 // before the main thread exits and frees up CodeHeap 1576 guarantee((Thread::current()->osthread() == osthread 1577 || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread"); 1578 if (Thread::current()->osthread() == osthread) { 1579 // Restore caller's signal mask 1580 sigset_t sigmask = osthread->caller_sigmask(); 1581 thr_sigsetmask(SIG_SETMASK, &sigmask, NULL); 1582 } 1583 delete osthread; 1584 } 1585 1586 void os::pd_start_thread(Thread* thread) { 1587 int status = thr_continue(thread->osthread()->thread_id()); 1588 assert_status(status == 0, status, "thr_continue failed"); 1589 } 1590 1591 1592 intx os::current_thread_id() { 1593 return (intx)thr_self(); 1594 } 1595 1596 static pid_t _initial_pid = 0; 1597 1598 int os::current_process_id() { 1599 return (int)(_initial_pid ? _initial_pid : getpid()); 1600 } 1601 1602 int os::allocate_thread_local_storage() { 1603 // %%% in Win32 this allocates a memory segment pointed to by a 1604 // register. Dan Stein can implement a similar feature in 1605 // Solaris. Alternatively, the VM can do the same thing 1606 // explicitly: malloc some storage and keep the pointer in a 1607 // register (which is part of the thread's context) (or keep it 1608 // in TLS). 1609 // %%% In current versions of Solaris, thr_self and TSD can 1610 // be accessed via short sequences of displaced indirections. 1611 // The value of thr_self is available as %g7(36). 
  // The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
  // assuming that the current thread already has a value bound to k.
  // It may be worth experimenting with such access patterns,
  // and later having the parameters formally exported from a Solaris
  // interface. I think, however, that it will be faster to
  // maintain the invariant that %g2 always contains the
  // JavaThread in Java code, and have stubs simply
  // treat %g2 as a caller-save register, preserving it in a %lN.
  thread_key_t tk;
  if (thr_keycreate( &tk, NULL ) )
    fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
                  "(%s)", strerror(errno)));
  return int(tk);
}

// Release a TLS slot previously returned by allocate_thread_local_storage().
void os::free_thread_local_storage(int index) {
  // %%% don't think we need anything here
  // if ( pthread_key_delete((pthread_key_t) tk) )
  //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
}

#define SMALLINT 32   // libthread allocate for tsd_common is a version specific
                      // small number - point is NO swap space available

// Bind 'value' into TLS slot 'index' for the calling thread and mirror it
// into the ThreadLocalStorage fast-access slot. thr_setspecific() can fail
// with ENOMEM when the system is out of swap space, which is treated as an
// out-of-memory exit rather than an internal error.
void os::thread_local_storage_at_put(int index, void* value) {
  // %%% this is used only in threadLocalStorage.cpp
  if (thr_setspecific((thread_key_t)index, value)) {
    if (errno == ENOMEM) {
      vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space");
    } else {
      fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
                    "(%s)", strerror(errno)));
    }
  } else {
    ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
  }
}

// This function could be called before TLS is initialized, for example, when
// VM receives an async signal or when VM causes a fatal error during
// initialization. Return NULL if thr_getspecific() fails.
1652 void* os::thread_local_storage_at(int index) { 1653 // %%% this is used only in threadLocalStorage.cpp 1654 void* r = NULL; 1655 return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r; 1656 } 1657 1658 1659 const int NANOSECS_PER_MILLISECS = 1000000; 1660 // gethrtime can move backwards if read from one cpu and then a different cpu 1661 // getTimeNanos is guaranteed to not move backward on Solaris 1662 // local spinloop created as faster for a CAS on an int than 1663 // a CAS on a 64bit jlong. Also Atomic::cmpxchg for jlong is not 1664 // supported on sparc v8 or pre supports_cx8 intel boxes. 1665 // oldgetTimeNanos for systems which do not support CAS on 64bit jlong 1666 // i.e. sparc v8 and pre supports_cx8 (i486) intel boxes 1667 inline hrtime_t oldgetTimeNanos() { 1668 int gotlock = LOCK_INVALID; 1669 hrtime_t newtime = gethrtime(); 1670 1671 for (;;) { 1672 // grab lock for max_hrtime 1673 int curlock = max_hrtime_lock; 1674 if (curlock & LOCK_BUSY) continue; 1675 if (gotlock = Atomic::cmpxchg(LOCK_BUSY, &max_hrtime_lock, LOCK_FREE) != LOCK_FREE) continue; 1676 if (newtime > max_hrtime) { 1677 max_hrtime = newtime; 1678 } else { 1679 newtime = max_hrtime; 1680 } 1681 // release lock 1682 max_hrtime_lock = LOCK_FREE; 1683 return newtime; 1684 } 1685 } 1686 // gethrtime can move backwards if read from one cpu and then a different cpu 1687 // getTimeNanos is guaranteed to not move backward on Solaris 1688 inline hrtime_t getTimeNanos() { 1689 if (VM_Version::supports_cx8()) { 1690 const hrtime_t now = gethrtime(); 1691 // Use atomic long load since 32-bit x86 uses 2 registers to keep long. 1692 const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime); 1693 if (now <= prev) return prev; // same or retrograde time; 1694 const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev); 1695 assert(obsv >= prev, "invariant"); // Monotonicity 1696 // If the CAS succeeded then we're done and return "now". 
    // If the CAS failed and the observed value "obsv" is >= now then
    // we should return "obsv". If the CAS failed and now > obsv > prv then
    // some other thread raced this thread and installed a new value, in which case
    // we could either (a) retry the entire operation, (b) retry trying to install now
    // or (c) just return obsv. We use (c). No loop is required although in some cases
    // we might discard a higher "now" value in deference to a slightly lower but freshly
    // installed obsv value. That's entirely benign -- it admits no new orderings compared
    // to (a) or (b) -- and greatly reduces coherence traffic.
    // We might also condition (c) on the magnitude of the delta between obsv and now.
    // Avoiding excessive CAS operations to hot RW locations is critical.
    // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate
    return (prev == obsv) ? now : obsv ;
  } else {
    // No 64-bit CAS on this hardware: fall back to the int-lock variant.
    return oldgetTimeNanos();
  }
}

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
}

// Raw elapsed tick count since start-up (nanoseconds on Solaris).
jlong os::elapsed_counter() {
  return (jlong)(getTimeNanos() - first_hrtime);
}

// Frequency of the elapsed counter, in ticks per second.
jlong os::elapsed_frequency() {
  return hrtime_hz;
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
// Fill in real/user/system CPU times (in seconds) for this process.
// Returns false if times(2) fails.  Real time is taken from the
// monotonic getTimeNanos() rather than times(2) for consistency with
// the rest of the VM's timekeeping.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    // For consistency return the real time from getTimeNanos()
    // converted to seconds.
    *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);

    return true;
  }
}

bool os::supports_vtime() { return true; }

// Turn on per-LWP microstate accounting (PR_MSACCT) by writing a PCSET
// command to this process's /proc control file.  Returns false if the
// control file cannot be opened or the write is short.
bool os::enable_vtime() {
  int fd = open("/proc/self/ctl", O_WRONLY);
  if (fd == -1)
    return false;

  long cmd[] = { PCSET, PR_MSACCT };
  int res = write(fd, cmd, sizeof(long) * 2);
  close(fd);
  if (res != sizeof(long) * 2)
    return false;

  return true;
}

// Query /proc/self/status to see whether microstate accounting is on.
bool os::vtime_enabled() {
  int fd = open("/proc/self/status", O_RDONLY);
  if (fd == -1)
    return false;

  pstatus_t status;
  int res = read(fd, (void*) &status, sizeof(pstatus_t));
  close(fd);
  if (res != sizeof(pstatus_t))
    return false;

  return status.pr_flags & PR_MSACCT;
}

// Per-LWP virtual (CPU) time in seconds, from gethrvtime().
double os::elapsedVTime() {
  return (double)gethrvtime() / (double)hrtime_hz;
}

// Used internally for comparisons only
// getTimeMillis guaranteed to not move backwards on Solaris
jlong getTimeMillis() {
  jlong nanotime = getTimeNanos();
  return (jlong)(nanotime / NANOSECS_PER_MILLISECS);
}

// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
jlong os::javaTimeMillis() {
  timeval t;
  if (gettimeofday( &t, NULL) == -1)
    fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
  return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
}

// Monotonic nanosecond clock for System.nanoTime().
jlong os::javaTimeNanos() {
  return (jlong)getTimeNanos();
}

// Describe the nanosecond timer for JVMTI: full 64-bit range, monotonic.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
  info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
  info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
  info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
}

// Format the current local time as "YYYY-MM-DD HH:MM:SS" into buf.
char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
1845 void os::abort(bool dump_core) { 1846 os::shutdown(); 1847 if (dump_core) { 1848 #ifndef PRODUCT 1849 fdStream out(defaultStream::output_fd()); 1850 out.print_raw("Current thread is "); 1851 char buf[16]; 1852 jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id()); 1853 out.print_raw_cr(buf); 1854 out.print_raw_cr("Dumping core ..."); 1855 #endif 1856 ::abort(); // dump core (for debugging) 1857 } 1858 1859 ::exit(1); 1860 } 1861 1862 // Die immediately, no exit hook, no abort hook, no cleanup. 1863 void os::die() { 1864 _exit(-1); 1865 } 1866 1867 // unused 1868 void os::set_error_file(const char *logfile) {} 1869 1870 // DLL functions 1871 1872 const char* os::dll_file_extension() { return ".so"; } 1873 1874 const char* os::get_temp_directory() { 1875 const char *prop = Arguments::get_property("java.io.tmpdir"); 1876 return prop == NULL ? "/tmp" : prop; 1877 } 1878 1879 static bool file_exists(const char* filename) { 1880 struct stat statbuf; 1881 if (filename == NULL || strlen(filename) == 0) { 1882 return false; 1883 } 1884 return os::stat(filename, &statbuf) == 0; 1885 } 1886 1887 void os::dll_build_name(char* buffer, size_t buflen, 1888 const char* pname, const char* fname) { 1889 // Copied from libhpi 1890 const size_t pnamelen = pname ? strlen(pname) : 0; 1891 1892 // Quietly truncate on buffer overflow. Should be an error. 
1893 if (pnamelen + strlen(fname) + 10 > (size_t) buflen) { 1894 *buffer = '\0'; 1895 return; 1896 } 1897 1898 if (pnamelen == 0) { 1899 snprintf(buffer, buflen, "lib%s.so", fname); 1900 } else if (strchr(pname, *os::path_separator()) != NULL) { 1901 int n; 1902 char** pelements = split_path(pname, &n); 1903 for (int i = 0 ; i < n ; i++) { 1904 // really shouldn't be NULL but what the heck, check can't hurt 1905 if (pelements[i] == NULL || strlen(pelements[i]) == 0) { 1906 continue; // skip the empty path values 1907 } 1908 snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname); 1909 if (file_exists(buffer)) { 1910 break; 1911 } 1912 } 1913 // release the storage 1914 for (int i = 0 ; i < n ; i++) { 1915 if (pelements[i] != NULL) { 1916 FREE_C_HEAP_ARRAY(char, pelements[i]); 1917 } 1918 } 1919 if (pelements != NULL) { 1920 FREE_C_HEAP_ARRAY(char*, pelements); 1921 } 1922 } else { 1923 snprintf(buffer, buflen, "%s/lib%s.so", pname, fname); 1924 } 1925 } 1926 1927 const char* os::get_current_directory(char *buf, int buflen) { 1928 return getcwd(buf, buflen); 1929 } 1930 1931 // check if addr is inside libjvm[_g].so 1932 bool os::address_is_in_vm(address addr) { 1933 static address libjvm_base_addr; 1934 Dl_info dlinfo; 1935 1936 if (libjvm_base_addr == NULL) { 1937 dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo); 1938 libjvm_base_addr = (address)dlinfo.dli_fbase; 1939 assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm"); 1940 } 1941 1942 if (dladdr((void *)addr, &dlinfo)) { 1943 if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true; 1944 } 1945 1946 return false; 1947 } 1948 1949 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int); 1950 static dladdr1_func_type dladdr1_func = NULL; 1951 1952 bool os::dll_address_to_function_name(address addr, char *buf, 1953 int buflen, int * offset) { 1954 Dl_info dlinfo; 1955 1956 // dladdr1_func was initialized in os::init() 1957 if (dladdr1_func){ 1958 // yes, we 
have dladdr1 1959 1960 // Support for dladdr1 is checked at runtime; it may be 1961 // available even if the vm is built on a machine that does 1962 // not have dladdr1 support. Make sure there is a value for 1963 // RTLD_DL_SYMENT. 1964 #ifndef RTLD_DL_SYMENT 1965 #define RTLD_DL_SYMENT 1 1966 #endif 1967 Sym * info; 1968 if (dladdr1_func((void *)addr, &dlinfo, (void **)&info, 1969 RTLD_DL_SYMENT)) { 1970 if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname); 1971 if (offset) *offset = addr - (address)dlinfo.dli_saddr; 1972 1973 // check if the returned symbol really covers addr 1974 return ((char *)dlinfo.dli_saddr + info->st_size > (char *)addr); 1975 } else { 1976 if (buf) buf[0] = '\0'; 1977 if (offset) *offset = -1; 1978 return false; 1979 } 1980 } else { 1981 // no, only dladdr is available 1982 if(dladdr((void *)addr, &dlinfo)) { 1983 if (buf) jio_snprintf(buf, buflen, dlinfo.dli_sname); 1984 if (offset) *offset = addr - (address)dlinfo.dli_saddr; 1985 return true; 1986 } else { 1987 if (buf) buf[0] = '\0'; 1988 if (offset) *offset = -1; 1989 return false; 1990 } 1991 } 1992 } 1993 1994 bool os::dll_address_to_library_name(address addr, char* buf, 1995 int buflen, int* offset) { 1996 Dl_info dlinfo; 1997 1998 if (dladdr((void*)addr, &dlinfo)){ 1999 if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname); 2000 if (offset) *offset = addr - (address)dlinfo.dli_fbase; 2001 return true; 2002 } else { 2003 if (buf) buf[0] = '\0'; 2004 if (offset) *offset = -1; 2005 return false; 2006 } 2007 } 2008 2009 // Prints the names and full paths of all opened dynamic libraries 2010 // for current process 2011 void os::print_dll_info(outputStream * st) { 2012 Dl_info dli; 2013 void *handle; 2014 Link_map *map; 2015 Link_map *p; 2016 2017 st->print_cr("Dynamic libraries:"); st->flush(); 2018 2019 if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) { 2020 st->print_cr("Error: Cannot print dynamic libraries."); 2021 return; 2022 } 2023 handle = 
dlopen(dli.dli_fname, RTLD_LAZY); 2024 if (handle == NULL) { 2025 st->print_cr("Error: Cannot print dynamic libraries."); 2026 return; 2027 } 2028 dlinfo(handle, RTLD_DI_LINKMAP, &map); 2029 if (map == NULL) { 2030 st->print_cr("Error: Cannot print dynamic libraries."); 2031 return; 2032 } 2033 2034 while (map->l_prev != NULL) 2035 map = map->l_prev; 2036 2037 while (map != NULL) { 2038 st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name); 2039 map = map->l_next; 2040 } 2041 2042 dlclose(handle); 2043 } 2044 2045 // Loads .dll/.so and 2046 // in case of error it checks if .dll/.so was built for the 2047 // same architecture as Hotspot is running on 2048 2049 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) 2050 { 2051 void * result= ::dlopen(filename, RTLD_LAZY); 2052 if (result != NULL) { 2053 // Successful loading 2054 return result; 2055 } 2056 2057 Elf32_Ehdr elf_head; 2058 2059 // Read system error message into ebuf 2060 // It may or may not be overwritten below 2061 ::strncpy(ebuf, ::dlerror(), ebuflen-1); 2062 ebuf[ebuflen-1]='\0'; 2063 int diag_msg_max_length=ebuflen-strlen(ebuf); 2064 char* diag_msg_buf=ebuf+strlen(ebuf); 2065 2066 if (diag_msg_max_length==0) { 2067 // No more space in ebuf for additional diagnostics message 2068 return NULL; 2069 } 2070 2071 2072 int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK); 2073 2074 if (file_descriptor < 0) { 2075 // Can't open library, report dlerror() message 2076 return NULL; 2077 } 2078 2079 bool failed_to_read_elf_head= 2080 (sizeof(elf_head)!= 2081 (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ; 2082 2083 ::close(file_descriptor); 2084 if (failed_to_read_elf_head) { 2085 // file i/o error - report dlerror() msg 2086 return NULL; 2087 } 2088 2089 typedef struct { 2090 Elf32_Half code; // Actual value as defined in elf.h 2091 Elf32_Half compat_class; // Compatibility of archs at VM's sense 2092 char elf_class; // 32 or 64 bit 2093 char endianess; // MSB or LSB 2094 
char* name; // String representation 2095 } arch_t; 2096 2097 static const arch_t arch_array[]={ 2098 {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 2099 {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 2100 {EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"}, 2101 {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"}, 2102 {EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 2103 {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 2104 {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"}, 2105 {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"}, 2106 {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"}, 2107 {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"} 2108 }; 2109 2110 #if (defined IA32) 2111 static Elf32_Half running_arch_code=EM_386; 2112 #elif (defined AMD64) 2113 static Elf32_Half running_arch_code=EM_X86_64; 2114 #elif (defined IA64) 2115 static Elf32_Half running_arch_code=EM_IA_64; 2116 #elif (defined __sparc) && (defined _LP64) 2117 static Elf32_Half running_arch_code=EM_SPARCV9; 2118 #elif (defined __sparc) && (!defined _LP64) 2119 static Elf32_Half running_arch_code=EM_SPARC; 2120 #elif (defined __powerpc64__) 2121 static Elf32_Half running_arch_code=EM_PPC64; 2122 #elif (defined __powerpc__) 2123 static Elf32_Half running_arch_code=EM_PPC; 2124 #elif (defined ARM) 2125 static Elf32_Half running_arch_code=EM_ARM; 2126 #else 2127 #error Method os::dll_load requires that one of following is defined:\ 2128 IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM 2129 #endif 2130 2131 // Identify compatability class for VM's architecture and library's architecture 2132 // Obtain string descriptions for architectures 2133 2134 arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL}; 2135 int running_arch_index=-1; 2136 2137 for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) { 2138 
if (running_arch_code == arch_array[i].code) { 2139 running_arch_index = i; 2140 } 2141 if (lib_arch.code == arch_array[i].code) { 2142 lib_arch.compat_class = arch_array[i].compat_class; 2143 lib_arch.name = arch_array[i].name; 2144 } 2145 } 2146 2147 assert(running_arch_index != -1, 2148 "Didn't find running architecture code (running_arch_code) in arch_array"); 2149 if (running_arch_index == -1) { 2150 // Even though running architecture detection failed 2151 // we may still continue with reporting dlerror() message 2152 return NULL; 2153 } 2154 2155 if (lib_arch.endianess != arch_array[running_arch_index].endianess) { 2156 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)"); 2157 return NULL; 2158 } 2159 2160 if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) { 2161 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)"); 2162 return NULL; 2163 } 2164 2165 if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) { 2166 if ( lib_arch.name!=NULL ) { 2167 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 2168 " (Possible cause: can't load %s-bit .so on a %s-bit platform)", 2169 lib_arch.name, arch_array[running_arch_index].name); 2170 } else { 2171 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 2172 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)", 2173 lib_arch.code, 2174 arch_array[running_arch_index].name); 2175 } 2176 } 2177 2178 return NULL; 2179 } 2180 2181 void* os::dll_lookup(void* handle, const char* name) { 2182 return dlsym(handle, name); 2183 } 2184 2185 2186 bool _print_ascii_file(const char* filename, outputStream* st) { 2187 int fd = open(filename, O_RDONLY); 2188 if (fd == -1) { 2189 return false; 2190 } 2191 2192 char buf[32]; 2193 int bytes; 2194 while ((bytes = read(fd, buf, sizeof(buf))) > 0) { 2195 st->print_raw(buf, bytes); 2196 } 2197 2198 close(fd); 2199 2200 return true; 2201 } 2202 2203 void 
os::print_os_info(outputStream* st) { 2204 st->print("OS:"); 2205 2206 if (!_print_ascii_file("/etc/release", st)) { 2207 st->print("Solaris"); 2208 } 2209 st->cr(); 2210 2211 // kernel 2212 st->print("uname:"); 2213 struct utsname name; 2214 uname(&name); 2215 st->print(name.sysname); st->print(" "); 2216 st->print(name.release); st->print(" "); 2217 st->print(name.version); st->print(" "); 2218 st->print(name.machine); 2219 2220 // libthread 2221 if (os::Solaris::T2_libthread()) st->print(" (T2 libthread)"); 2222 else st->print(" (T1 libthread)"); 2223 st->cr(); 2224 2225 // rlimit 2226 st->print("rlimit:"); 2227 struct rlimit rlim; 2228 2229 st->print(" STACK "); 2230 getrlimit(RLIMIT_STACK, &rlim); 2231 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 2232 else st->print("%uk", rlim.rlim_cur >> 10); 2233 2234 st->print(", CORE "); 2235 getrlimit(RLIMIT_CORE, &rlim); 2236 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 2237 else st->print("%uk", rlim.rlim_cur >> 10); 2238 2239 st->print(", NOFILE "); 2240 getrlimit(RLIMIT_NOFILE, &rlim); 2241 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 2242 else st->print("%d", rlim.rlim_cur); 2243 2244 st->print(", AS "); 2245 getrlimit(RLIMIT_AS, &rlim); 2246 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 2247 else st->print("%uk", rlim.rlim_cur >> 10); 2248 st->cr(); 2249 2250 // load average 2251 st->print("load average:"); 2252 double loadavg[3]; 2253 os::loadavg(loadavg, 3); 2254 st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]); 2255 st->cr(); 2256 } 2257 2258 2259 static bool check_addr0(outputStream* st) { 2260 jboolean status = false; 2261 int fd = open("/proc/self/map",O_RDONLY); 2262 if (fd >= 0) { 2263 prmap_t p; 2264 while(read(fd, &p, sizeof(p)) > 0) { 2265 if (p.pr_vaddr == 0x0) { 2266 st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname); 2267 st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? 
"None" : p.pr_mapname); 2268 st->print("Access:"); 2269 st->print("%s",(p.pr_mflags & MA_READ) ? "r" : "-"); 2270 st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-"); 2271 st->print("%s",(p.pr_mflags & MA_EXEC) ? "x" : "-"); 2272 st->cr(); 2273 status = true; 2274 } 2275 close(fd); 2276 } 2277 } 2278 return status; 2279 } 2280 2281 void os::print_memory_info(outputStream* st) { 2282 st->print("Memory:"); 2283 st->print(" %dk page", os::vm_page_size()>>10); 2284 st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10); 2285 st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10); 2286 st->cr(); 2287 (void) check_addr0(st); 2288 } 2289 2290 // Taken from /usr/include/sys/machsig.h Supposed to be architecture specific 2291 // but they're the same for all the solaris architectures that we support. 2292 const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR", 2293 "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG", 2294 "ILL_COPROC", "ILL_BADSTK" }; 2295 2296 const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV", 2297 "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES", 2298 "FPE_FLTINV", "FPE_FLTSUB" }; 2299 2300 const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" }; 2301 2302 const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" }; 2303 2304 void os::print_siginfo(outputStream* st, void* siginfo) { 2305 st->print("siginfo:"); 2306 2307 const int buflen = 100; 2308 char buf[buflen]; 2309 siginfo_t *si = (siginfo_t*)siginfo; 2310 st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen)); 2311 char *err = strerror(si->si_errno); 2312 if (si->si_errno != 0 && err != NULL) { 2313 st->print("si_errno=%s", err); 2314 } else { 2315 st->print("si_errno=%d", si->si_errno); 2316 } 2317 const int c = si->si_code; 2318 assert(c > 0, "unexpected si_code"); 2319 switch (si->si_signo) { 2320 case SIGILL: 2321 st->print(", si_code=%d (%s)", c, c > 8 ? 
"" : ill_names[c]); 2322 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2323 break; 2324 case SIGFPE: 2325 st->print(", si_code=%d (%s)", c, c > 9 ? "" : fpe_names[c]); 2326 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2327 break; 2328 case SIGSEGV: 2329 st->print(", si_code=%d (%s)", c, c > 2 ? "" : segv_names[c]); 2330 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2331 break; 2332 case SIGBUS: 2333 st->print(", si_code=%d (%s)", c, c > 3 ? "" : bus_names[c]); 2334 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2335 break; 2336 default: 2337 st->print(", si_code=%d", si->si_code); 2338 // no si_addr 2339 } 2340 2341 if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) && 2342 UseSharedSpaces) { 2343 FileMapInfo* mapinfo = FileMapInfo::current_info(); 2344 if (mapinfo->is_in_shared_space(si->si_addr)) { 2345 st->print("\n\nError accessing class data sharing archive." \ 2346 " Mapped file inaccessible during execution, " \ 2347 " possible disk/network problem."); 2348 } 2349 } 2350 st->cr(); 2351 } 2352 2353 // Moved from whole group, because we need them here for diagnostic 2354 // prints. 
#define OLDMAXSIGNUM 32
// Highest signal number in use (set from SIGRTMAX in init_signal_mem).
static int Maxsignum = 0;
// Per-signal sa_flags the VM installed; indexed by signal number.
static int *ourSigFlags = NULL;

extern "C" void sigINTRHandler(int, siginfo_t*, void*);

// Return the sa_flags the VM recorded when it installed its handler
// for sig.  init_signal_mem() must have run first.
int os::Solaris::get_our_sigflags(int sig) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  return ourSigFlags[sig];
}

// Record the sa_flags the VM is installing for sig.
void os::Solaris::set_our_sigflags(int sig, int flags) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  ourSigFlags[sig] = flags;
}


// Format "library+0xoffset" (basename only) for the given handler
// address, or the raw address if no library covers it.
static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}

// Print the currently-installed disposition for sig: handler identity,
// mask, flags, and a warning if someone replaced the VM's flags.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;

  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  // SA_SIGINFO selects which union member of sigaction is active.
  address handler = (sa.sa_flags & SA_SIGINFO)
                  ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
                  : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // May be, handler was resetted by VMError?
  if(rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  st->print(", sa_flags=" PTR32_FORMAT, sa.sa_flags);

  // Check: is it our handler?
  if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
     handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
    // It is our signal handler
    // check for flags
    if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
      st->print(
        ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
        os::Solaris::get_our_sigflags(sig));
    }
  }
  st->cr();
}

// Print dispositions for all signals the VM cares about.
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
  print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
  print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
}

// Cached result of os::jvm_path(); empty until first call.
static char saved_jvm_path[MAXPATHLEN] = { 0 };

// Find the full path to the current module, libjvm.so or libjvm_g.so
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  realpath((char *)dlinfo.dli_fname, buf);

  if (strcmp(Arguments::sun_java_launcher(), "gamma") == 0) {
    // Support for the gamma launcher.  Typical value for buf is
    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
    // the right place in the string, then assume we are installed in a JDK and
    // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
    // up the path so it looks like libjvm.so is installed there (append a
    // fake suffix hotspot/libjvm.so).
    // Walk back over the last five '/' components of the path.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 5; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char cpu_arch[12];
        char* jrelib_p;
        int len;
        sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
        // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
        if (strcmp(cpu_arch, "sparc") == 0) {
          strcat(cpu_arch, "v9");
        } else if (strcmp(cpu_arch, "i386") == 0) {
          strcpy(cpu_arch, "amd64");
        }
#endif
        // Check the current module name "libjvm.so" or "libjvm_g.so".
        p = strrchr(buf, '/');
        assert(strstr(p, "/libjvm") == p, "invalid library name");
        p = strstr(p, "_g") ? "_g" : "";

        realpath(java_home_var, buf);
        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm[_g].so" instead of
          // "libjvm"debug_only("_g")".so" since for fastdebug version
          // we should have "libjvm.so" but debug_only("_g") adds "_g"!
          // It is used when we are choosing the HPI library's name
          // "libhpi[_g].so" in hpi::initialize_get_interface().
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
        } else {
          // Go back to path of .so
          realpath((char *)dlinfo.dli_fname, buf);
        }
      }
    }
  }

  strcpy(saved_jvm_path, buf);
}


void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}


// sun.misc.Signal

extern "C" {
  // Handler registered on behalf of sun.misc.Signal: queue the signal
  // for the Java signal dispatcher thread.
  static void UserHandler(int sig, void *siginfo, void *context) {
    // Ctrl-C is pressed during error reporting, likely because the error
    // handler fails to abort. Let VM die immediately.
    if (sig == SIGINT && is_error_reported()) {
       os::die();
    }

    os::signal_notify(sig);
    // We do not need to reinstate the signal handler each time...
  }
}

void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}

extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}

// Install handler for signal_number via sigaction; returns the previous
// handler, or (void*)-1 if registration failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;
  sigfillset(&(sigAct.sa_mask));
  // NOTE(review): "SA_RESTART & ~SA_RESETHAND" is a bitwise AND, which
  // just yields SA_RESTART if the two flags don't overlap -- presumably
  // "|" was not intended either (SA_RESETHAND is deliberately NOT set);
  // confirm before changing.
  sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct))
    // -1 means registration failed
    return (void *)-1;

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

/*
 * The following code is moved from os.cpp for making this
 * code platform specific, which it is by its very nature.
2593 */ 2594 2595 // a counter for each possible signal value 2596 static int Sigexit = 0; 2597 static int Maxlibjsigsigs; 2598 static jint *pending_signals = NULL; 2599 static int *preinstalled_sigs = NULL; 2600 static struct sigaction *chainedsigactions = NULL; 2601 static sema_t sig_sem; 2602 typedef int (*version_getting_t)(); 2603 version_getting_t os::Solaris::get_libjsig_version = NULL; 2604 static int libjsigversion = NULL; 2605 2606 int os::sigexitnum_pd() { 2607 assert(Sigexit > 0, "signal memory not yet initialized"); 2608 return Sigexit; 2609 } 2610 2611 void os::Solaris::init_signal_mem() { 2612 // Initialize signal structures 2613 Maxsignum = SIGRTMAX; 2614 Sigexit = Maxsignum+1; 2615 assert(Maxsignum >0, "Unable to obtain max signal number"); 2616 2617 Maxlibjsigsigs = Maxsignum; 2618 2619 // pending_signals has one int per signal 2620 // The additional signal is for SIGEXIT - exit signal to signal_thread 2621 pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1)); 2622 memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1))); 2623 2624 if (UseSignalChaining) { 2625 chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction) 2626 * (Maxsignum + 1)); 2627 memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1))); 2628 preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1)); 2629 memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1))); 2630 } 2631 ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 )); 2632 memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1)); 2633 } 2634 2635 void os::signal_init_pd() { 2636 int ret; 2637 2638 ret = ::sema_init(&sig_sem, 0, NULL, NULL); 2639 assert(ret == 0, "sema_init() failed"); 2640 } 2641 2642 void os::signal_notify(int signal_number) { 2643 int ret; 2644 2645 Atomic::inc(&pending_signals[signal_number]); 2646 ret = ::sema_post(&sig_sem); 2647 assert(ret == 0, "sema_post() failed"); 2648 } 2649 2650 static int check_pending_signals(bool 
wait_for_signal) { 2651 int ret; 2652 while (true) { 2653 for (int i = 0; i < Sigexit + 1; i++) { 2654 jint n = pending_signals[i]; 2655 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { 2656 return i; 2657 } 2658 } 2659 if (!wait_for_signal) { 2660 return -1; 2661 } 2662 JavaThread *thread = JavaThread::current(); 2663 ThreadBlockInVM tbivm(thread); 2664 2665 bool threadIsSuspended; 2666 do { 2667 thread->set_suspend_equivalent(); 2668 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 2669 while((ret = ::sema_wait(&sig_sem)) == EINTR) 2670 ; 2671 assert(ret == 0, "sema_wait() failed"); 2672 2673 // were we externally suspended while we were waiting? 2674 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2675 if (threadIsSuspended) { 2676 // 2677 // The semaphore has been incremented, but while we were waiting 2678 // another thread suspended us. We don't want to continue running 2679 // while suspended because that would surprise the thread that 2680 // suspended us. 2681 // 2682 ret = ::sema_post(&sig_sem); 2683 assert(ret == 0, "sema_post() failed"); 2684 2685 thread->java_suspend_self(); 2686 } 2687 } while (threadIsSuspended); 2688 } 2689 } 2690 2691 int os::signal_lookup() { 2692 return check_pending_signals(false); 2693 } 2694 2695 int os::signal_wait() { 2696 return check_pending_signals(true); 2697 } 2698 2699 //////////////////////////////////////////////////////////////////////////////// 2700 // Virtual Memory 2701 2702 static int page_size = -1; 2703 2704 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later. init_2() will 2705 // clear this var if support is not available. 2706 static bool has_map_align = true; 2707 2708 int os::vm_page_size() { 2709 assert(page_size != -1, "must call os::init"); 2710 return page_size; 2711 } 2712 2713 // Solaris allocates memory by pages. 
2714 int os::vm_allocation_granularity() { 2715 assert(page_size != -1, "must call os::init"); 2716 return page_size; 2717 } 2718 2719 bool os::commit_memory(char* addr, size_t bytes, bool exec) { 2720 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; 2721 size_t size = bytes; 2722 return 2723 NULL != Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot); 2724 } 2725 2726 bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint, 2727 bool exec) { 2728 if (commit_memory(addr, bytes, exec)) { 2729 if (UseMPSS && alignment_hint > (size_t)vm_page_size()) { 2730 // If the large page size has been set and the VM 2731 // is using large pages, use the large page size 2732 // if it is smaller than the alignment hint. This is 2733 // a case where the VM wants to use a larger alignment size 2734 // for its own reasons but still want to use large pages 2735 // (which is what matters to setting the mpss range. 2736 size_t page_size = 0; 2737 if (large_page_size() < alignment_hint) { 2738 assert(UseLargePages, "Expected to be here for large page use only"); 2739 page_size = large_page_size(); 2740 } else { 2741 // If the alignment hint is less than the large page 2742 // size, the VM wants a particular alignment (thus the hint) 2743 // for internal reasons. Try to set the mpss range using 2744 // the alignment_hint. 2745 page_size = alignment_hint; 2746 } 2747 // Since this is a hint, ignore any failures. 2748 (void)Solaris::set_mpss_range(addr, bytes, page_size); 2749 } 2750 return true; 2751 } 2752 return false; 2753 } 2754 2755 // Uncommit the pages in a specified region. 
// "Free" a committed region by telling the kernel its contents are no
// longer needed.  MADV_FREE lets Solaris reclaim the backing store
// lazily; the mapping itself stays intact.
void os::free_memory(char* addr, size_t bytes) {
  if (madvise(addr, bytes, MADV_FREE) < 0) {
    // Best-effort advice: only warn in debug builds.
    debug_only(warning("MADV_FREE failed."));
    return;
  }
}

// Stack guard pages are ordinary committed memory; the protection is
// applied separately via os::guard_memory().
// NOTE(review): commit_memory is called without the 'exec' argument --
// presumably a default is declared elsewhere; confirm.
bool os::create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Change the page size in a given range.
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
  assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
  // Advise the kernel to use alignment_hint-sized pages for the range.
  Solaris::set_mpss_range(addr, bytes, alignment_hint);
}

// Tell the OS to make the range local to the first-touching LWP
// (lgrp_hint is accepted for interface compatibility but unused here).
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
  assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
  if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
    debug_only(warning("MADV_ACCESS_LWP failed."));
  }
}

// Tell the OS that this range would be accessed from different LWPs.
void os::numa_make_global(char *addr, size_t bytes) {
  assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
  if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
    debug_only(warning("MADV_ACCESS_MANY failed."));
  }
}

// Get the number of the locality groups.  Falls back to 1 (UMA) when the
// lgrp API reports an error; note that -1 assigned to a size_t becomes
// SIZE_MAX, and the comparison against -1 still matches it.
size_t os::numa_get_groups_num() {
  size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
  return n != -1 ? n : 1;
}

// Get a list of leaf locality groups. A leaf lgroup is a group that
// doesn't have any children. Typical leaf group is a CPU or a CPU/memory
// board. An LWP is assigned to one of these groups upon creation.
2803 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 2804 if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) { 2805 ids[0] = 0; 2806 return 1; 2807 } 2808 int result_size = 0, top = 1, bottom = 0, cur = 0; 2809 for (int k = 0; k < size; k++) { 2810 int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur], 2811 (Solaris::lgrp_id_t*)&ids[top], size - top); 2812 if (r == -1) { 2813 ids[0] = 0; 2814 return 1; 2815 } 2816 if (!r) { 2817 // That's a leaf node. 2818 assert (bottom <= cur, "Sanity check"); 2819 // Check if the node has memory 2820 if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur], 2821 NULL, 0, LGRP_RSRC_MEM) > 0) { 2822 ids[bottom++] = ids[cur]; 2823 } 2824 } 2825 top += r; 2826 cur++; 2827 } 2828 if (bottom == 0) { 2829 // Handle a situation, when the OS reports no memory available. 2830 // Assume UMA architecture. 2831 ids[0] = 0; 2832 return 1; 2833 } 2834 return bottom; 2835 } 2836 2837 // Detect the topology change. Typically happens during CPU plugging-unplugging. 2838 bool os::numa_topology_changed() { 2839 int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie()); 2840 if (is_stale != -1 && is_stale) { 2841 Solaris::lgrp_fini(Solaris::lgrp_cookie()); 2842 Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER); 2843 assert(c != 0, "Failure to initialize LGRP API"); 2844 Solaris::set_lgrp_cookie(c); 2845 return true; 2846 } 2847 return false; 2848 } 2849 2850 // Get the group id of the current LWP. 2851 int os::numa_get_group_id() { 2852 int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID); 2853 if (lgrp_id == -1) { 2854 return 0; 2855 } 2856 const int size = os::numa_get_groups_num(); 2857 int *ids = (int*)alloca(size * sizeof(int)); 2858 2859 // Get the ids of all lgroups with memory; r is the count. 
2860 int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id, 2861 (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM); 2862 if (r <= 0) { 2863 return 0; 2864 } 2865 return ids[os::random() % r]; 2866 } 2867 2868 // Request information about the page. 2869 bool os::get_page_info(char *start, page_info* info) { 2870 const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE }; 2871 uint64_t addr = (uintptr_t)start; 2872 uint64_t outdata[2]; 2873 uint_t validity = 0; 2874 2875 if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) { 2876 return false; 2877 } 2878 2879 info->size = 0; 2880 info->lgrp_id = -1; 2881 2882 if ((validity & 1) != 0) { 2883 if ((validity & 2) != 0) { 2884 info->lgrp_id = outdata[0]; 2885 } 2886 if ((validity & 4) != 0) { 2887 info->size = outdata[1]; 2888 } 2889 return true; 2890 } 2891 return false; 2892 } 2893 2894 // Scan the pages from start to end until a page different than 2895 // the one described in the info parameter is encountered. 
// Batch-query page attributes via meminfo(2), MAX_MEMINFO_CNT pages at a
// time, and return the address of the first page whose size/lgroup
// differs from *page_expected (its attributes are stored in
// *page_found).  Returns 'end' if the whole range matches, NULL on a
// meminfo failure or an invalid address.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  const size_t types = sizeof(info_types) / sizeof(info_types[0]);
  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT];
  uint_t validity[MAX_MEMINFO_CNT];

  // Step by the larger of the VM page size and the expected page size.
  size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
  uint64_t p = (uint64_t)start;
  while (p < (uint64_t)end) {
    addrs[0] = p;
    size_t addrs_count = 1;
    while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] < (uint64_t)end) {
      addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
      addrs_count++;
    }

    if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
      return NULL;
    }

    size_t i = 0;
    for (; i < addrs_count; i++) {
      // validity bit 0: address valid; bit 1: lgroup known; bit 2: page
      // size known.  Any mismatch with the expected attributes breaks out.
      if ((validity[i] & 1) != 0) {
        if ((validity[i] & 4) != 0) {
          if (outdata[types * i + 1] != page_expected->size) {
            break;
          }
        } else
          if (page_expected->size != 0) {
            break;
          }

        if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
          if (outdata[types * i] != page_expected->lgrp_id) {
            break;
          }
        }
      } else {
        return NULL;
      }
    }

    if (i != addrs_count) {
      // Mismatch found: report its attributes and address.
      if ((validity[i] & 2) != 0) {
        page_found->lgrp_id = outdata[types * i];
      } else {
        page_found->lgrp_id = -1;
      }
      if ((validity[i] & 4) != 0) {
        page_found->size = outdata[types * i + 1];
      } else {
        page_found->size = 0;
      }
      return (char*)addrs[i];
    }

    p = addrs[addrs_count - 1] + page_size;
  }
  return end;
}

bool os::uncommit_memory(char* addr, size_t bytes) {
  size_t size = bytes;
  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page. Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
  return
    NULL != Solaris::mmap_chunk(addr, size,
                                MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
                                PROT_NONE);
}

// Thin wrapper over mmap of /dev/zero; returns NULL (not MAP_FAILED)
// on failure so callers can use simple NULL checks.
char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
  char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);

  if (b == MAP_FAILED) {
    return NULL;
  }
  return b;
}

// Anonymous (zero-backed) reservation.  'fixed' forces MAP_FIXED at
// requested_addr; otherwise a large-enough alignment hint is passed to
// the kernel via MAP_ALIGN (when available).
char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
  char* addr = requested_addr;
  int flags = MAP_PRIVATE | MAP_NORESERVE;

  assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");

  if (fixed) {
    flags |= MAP_FIXED;
  } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
    // With MAP_ALIGN the 'addr' argument carries the alignment, not an address.
    flags |= MAP_ALIGN;
    addr = (char*) alignment_hint;
  }

  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page. Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
  return mmap_chunk(addr, bytes, flags, PROT_NONE);
}

// Reserve (but do not commit) 'bytes' of address space, optionally at a
// specific address.
char* os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
  char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));

  guarantee(requested_addr == NULL || requested_addr == addr,
            "OS failed to return requested mmap address.");
  return addr;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  const int max_tries = 10;
  char* base[max_tries];
  size_t size[max_tries];

  // Solaris adds a gap between mmap'ed regions.  The size of the gap
  // is dependent on the requested size and the MMU.  Our initial gap
  // value here is just a guess and will be corrected later.
  bool had_top_overlap = false;
  bool have_adjusted_gap = false;
  size_t gap = 0x400000;

  // Assert only that the size is a multiple of the page size, since
  // that's all that mmap requires, and since that's all we really know
  // about at this low abstraction level.  If we need higher alignment,
  // we can either pass an alignment to this method or verify alignment
  // in one of the methods further up the call chain.  See bug 5044738.
  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");

  // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
  // Give it a try, if the kernel honors the hint we can return immediately.
  char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
  // 'volatile' keeps errno's value from being clobbered before we read it.
  volatile int err = errno;
  if (addr == requested_addr) {
    return addr;
  } else if (addr != NULL) {
    // The kernel gave us memory somewhere else; give it back.
    unmap_memory(addr, bytes);
  }

  if (PrintMiscellaneous && Verbose) {
    char buf[256];
    buf[0] = '\0';
    if (addr == NULL) {
      jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
    }
    warning("attempt_reserve_memory_at: couldn't reserve %d bytes at "
            PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
            "%s", bytes, requested_addr, addr, buf);
  }

  // Address hint method didn't work.  Fall back to the old method.
  // In theory, once SNV becomes our oldest supported platform, this
  // code will no longer be needed.
  //
  // Repeatedly allocate blocks until the block is allocated at the
  // right spot. Give up after max_tries.
  int i;
  for (i = 0; i < max_tries; ++i) {
    base[i] = reserve_memory(bytes);

    if (base[i] != NULL) {
      // Is this the block we wanted?
      if (base[i] == requested_addr) {
        size[i] = bytes;
        break;
      }

      // check that the gap value is right
      if (had_top_overlap && !have_adjusted_gap) {
        size_t actual_gap = base[i-1] - base[i] - bytes;
        if (gap != actual_gap) {
          // adjust the gap value and retry the last 2 allocations
          assert(i > 0, "gap adjustment code problem");
          have_adjusted_gap = true;  // adjust the gap only once, just in case
          gap = actual_gap;
          if (PrintMiscellaneous && Verbose) {
            warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
          }
          unmap_memory(base[i], bytes);
          unmap_memory(base[i-1], size[i-1]);
          i-=2;
          continue;
        }
      }

      // Does this overlap the block we wanted? Give back the overlapped
      // parts and try again.
      //
      // There is still a bug in this code: if top_overlap == bytes,
      // the overlap is offset from requested region by the value of gap.
      // In this case giving back the overlapped part will not work,
      // because we'll give back the entire block at base[i] and
      // therefore the subsequent allocation will not generate a new gap.
      // This could be fixed with a new algorithm that used larger
      // or variable size chunks to find the requested region -
      // but such a change would introduce additional complications.
      // It's rare enough that the planets align for this bug,
      // so we'll just wait for a fix for 6204603/5003415 which
      // will provide a mmap flag to allow us to avoid this business.

      // NOTE(review): top_overlap/bottom_overlap are size_t, so the
      // ">= 0" tests below are always true; a "negative" overlap wraps
      // to a huge unsigned value and is rejected by the "< bytes" test,
      // so the logic still works, but the comparisons should be cleaned
      // up (they draw signed/unsigned compiler warnings).
      size_t top_overlap = requested_addr + (bytes + gap) - base[i];
      if (top_overlap >= 0 && top_overlap < bytes) {
        had_top_overlap = true;
        unmap_memory(base[i], top_overlap);
        base[i] += top_overlap;
        size[i] = bytes - top_overlap;
      } else {
        size_t bottom_overlap = base[i] + bytes - requested_addr;
        if (bottom_overlap >= 0 && bottom_overlap < bytes) {
          if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
            warning("attempt_reserve_memory_at: possible alignment bug");
          }
          unmap_memory(requested_addr, bottom_overlap);
          size[i] = bytes - bottom_overlap;
        } else {
          size[i] = bytes;
        }
      }
    }
  }

  // Give back the unused reserved pieces.

  for (int j = 0; j < i; ++j) {
    if (base[j] != NULL) {
      unmap_memory(base[j], size[j]);
    }
  }

  return (i < max_tries) ? requested_addr : NULL;
}

bool os::release_memory(char* addr, size_t bytes) {
  size_t size = bytes;
  return munmap(addr, size) == 0;
}

// Page-aligned mprotect wrapper; returns true on success.
static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
  assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
         "addr must be page aligned");
  int retVal = mprotect(addr, bytes, prot);
  return retVal == 0;
}

// Protect memory (Used to pass readonly pages through
// JNI GetArray<type>Elements with empty arrays.)
// Also, used for serialization page and for compressed oops null pointer
// checking.
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return solaris_mprotect(addr, bytes, p);
}

// guard_memory and unguard_memory only happens within stack guard pages.
// Since ISM pertains only to the heap, guard and unguard memory should not
// happen with an ISM region.
bool os::guard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_NONE);
}

bool os::unguard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
}

// Large page support

// UseLargePages is the master flag to enable/disable large page memory.
// UseMPSS and UseISM are supported for compatibility reasons. Their combined
// effects can be described in the following table:
//
// UseLargePages UseMPSS UseISM
//    false         *       *   => UseLargePages is the master switch, turning
//                                 it off will turn off both UseMPSS and
//                                 UseISM. VM will not use large page memory
//                                 regardless the settings of UseMPSS/UseISM.
//     true      false    false => Unless future Solaris provides other
//                                 mechanism to use large page memory, this
//                                 combination is equivalent to -UseLargePages,
//                                 VM will not use large page memory
//     true      true     false => JVM will use MPSS for large page memory.
//                                 This is the default behavior.
//     true      false    true  => JVM will use ISM for large page memory.
//     true      true     true  => JVM will use ISM if it is available.
//                                 Otherwise, JVM will fall back to MPSS.
//                                 Because ISM is now available on all
//                                 supported Solaris versions, this combination
//                                 is equivalent to +UseISM -UseMPSS.

typedef int (*getpagesizes_func_type) (size_t[], int);
static size_t _large_page_size = 0;

// Pick the ISM large page size; never probes the OS, just picks a
// conservative per-architecture default (or LargePageSizeInBytes).
bool os::Solaris::ism_sanity_check(bool warn, size_t * page_size) {
  // x86 uses either 2M or 4M page, depending on whether PAE (Physical Address
  // Extensions) mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. Sparc
  // can support multiple page sizes.

  // Don't bother to probe page size because getpagesizes() comes with MPSS.
  // ISM is only recommended on old Solaris where there is no MPSS support.
  // Simply choose a conservative value as default.
  *page_size = LargePageSizeInBytes ? LargePageSizeInBytes :
       SPARC_ONLY(4 * M) IA32_ONLY(4 * M) AMD64_ONLY(2 * M)
       ARM_ONLY(2 * M);

  // ISM is available on all supported Solaris versions
  return true;
}

// Insertion sort for small arrays (descending order).
static void insertion_sort_descending(size_t* array, int len) {
  for (int i = 0; i < len; i++) {
    size_t val = array[i];
    // Bubble val toward the front while it exceeds its left neighbor.
    for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
      size_t tmp = array[key];
      array[key] = array[key - 1];
      array[key - 1] = tmp;
    }
  }
}

// Probe the kernel's supported page sizes via getpagesizes(3C) (looked
// up dynamically: it only exists where MPSS does), filter/sort them into
// os::_page_sizes, and report the chosen large page size.  Returns false
// when MPSS is unusable.
bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) {
  getpagesizes_func_type getpagesizes_func =
    CAST_TO_FN_PTR(getpagesizes_func_type, dlsym(RTLD_DEFAULT, "getpagesizes"));
  if (getpagesizes_func == NULL) {
    if (warn) {
      warning("MPSS is not supported by the operating system.");
    }
    return false;
  }

  const unsigned int usable_count = VM_Version::page_size_count();
  if (usable_count == 1) {
    return false;
  }

  // Fill the array of page sizes.
  int n = getpagesizes_func(_page_sizes, page_sizes_max);
  assert(n > 0, "Solaris bug?");
  if (n == page_sizes_max) {
    // Add a sentinel value (necessary only if the array was completely filled
    // since it is static (zeroed at initialization)).
    _page_sizes[--n] = 0;
    DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
  }
  assert(_page_sizes[n] == 0, "missing sentinel");

  if (n == 1) return false;     // Only one page size available.

  // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
  // select up to usable_count elements.  First sort the array, find the first
  // acceptable value, then copy the usable sizes to the top of the array and
  // trim the rest.  Make sure to include the default page size :-).
  //
  // A better policy could get rid of the 4M limit by taking the sizes of the
  // important VM memory regions (java heap and possibly the code cache) into
  // account.
  insertion_sort_descending(_page_sizes, n);
  const size_t size_limit =
    FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
  int beg;
  for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
  const int end = MIN2((int)usable_count, n) - 1;
  for (int cur = 0; cur < end; ++cur, ++beg) {
    _page_sizes[cur] = _page_sizes[beg];
  }
  _page_sizes[end] = vm_page_size();
  _page_sizes[end + 1] = 0;

  if (_page_sizes[end] > _page_sizes[end - 1]) {
    // Default page size is not the smallest; sort again.
    insertion_sort_descending(_page_sizes, end + 1);
  }
  *page_size = _page_sizes[0];

  return true;
}

// Decide between ISM and MPSS per the flag table above; sets
// UseISM/UseMPSS/UseLargePages and _large_page_size as side effects.
bool os::large_page_init() {
  if (!UseLargePages) {
    UseISM = false;
    UseMPSS = false;
    return false;
  }

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(UseISM)        ||
                         !FLAG_IS_DEFAULT(UseMPSS)       ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  UseISM = UseISM &&
           Solaris::ism_sanity_check(warn_on_failure, &_large_page_size);
  if (UseISM) {
    // ISM disables MPSS to be compatible with old JDK behavior
    UseMPSS = false;
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = vm_page_size();
  }

  UseMPSS = UseMPSS &&
            Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);

  UseLargePages = UseISM || UseMPSS;
  return UseLargePages;
}

// Advise the HAT (via memcntl MC_HAT_ADVISE) to use 'align'-sized pages
// for [start, start+bytes).  Best-effort: returns false on failure.
bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
  // Signal to OS that we want large pages for addresses
  // from addr, addr + bytes
  struct memcntl_mha mpss_struct;
  mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
  mpss_struct.mha_pagesize = align;
  mpss_struct.mha_flags = 0;
  if (memcntl(start, bytes, MC_HAT_ADVISE,
              (caddr_t) &mpss_struct, 0, 0) < 0) {
    debug_only(warning("Attempt to use MPSS failed."));
    return false;
  }
  return true;
}

char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
  // "exec" is passed in but not used.  Creating the shared image for
  // the code cache doesn't have an SHM_X executable permission to check.
  assert(UseLargePages && UseISM, "only for ISM large pages");

  size_t size = bytes;
  char* retAddr = NULL;
  int shmid;
  key_t ismKey;

  bool warn_on_failure = UseISM &&
                        (!FLAG_IS_DEFAULT(UseLargePages)         ||
                         !FLAG_IS_DEFAULT(UseISM)                ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes)
                        );
  char msg[128];

  ismKey = IPC_PRIVATE;

  // Create a large shared memory region to attach to based on size.
  // Currently, size is the total size of the heap
  shmid = shmget(ismKey, size, SHM_R | SHM_W | IPC_CREAT);
  if (shmid == -1){
     if (warn_on_failure) {
       jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
       warning(msg);
     }
     return NULL;
  }

  // Attach to the region
  retAddr = (char *) shmat(shmid, 0, SHM_SHARE_MMU | SHM_R | SHM_W);
  int err = errno;

  // Remove shmid. If shmat() is successful, the actual shared memory segment
  // will be deleted when it's detached by shmdt() or when the process
  // terminates. If shmat() is not successful this will remove the shared
  // segment immediately.
  shmctl(shmid, IPC_RMID, NULL);

  if (retAddr == (char *) -1) {
    if (warn_on_failure) {
      jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
      warning(msg);
    }
    return NULL;
  }

  return retAddr;
}

bool os::release_memory_special(char* base, size_t bytes) {
  // detaching the SHM segment will also delete it, see reserve_memory_special()
  int rslt = shmdt(base);
  return rslt == 0;
}

size_t os::large_page_size() {
  return _large_page_size;
}

// MPSS allows application to commit large page memory on demand; with ISM
// the entire memory region must be allocated as shared memory.
bool os::can_commit_large_page_memory() {
  return UseISM ? false : true;
}

bool os::can_execute_large_page_memory() {
  return UseISM ? false : true;
}

// Sleep for 'millis' milliseconds using poll(), restarting interrupted
// polls so the full delay elapses.  Delays above INT_MAX ms are handled
// by recursing on INT_MAX-sized chunks (poll takes an int timeout).
// Returns OS_OK, or an error/interrupt status from the poll wrappers.
static int os_sleep(jlong millis, bool interruptible) {
  const jlong limit = INT_MAX;
  jlong prevtime;
  int res;

  while (millis > limit) {
    if ((res = os_sleep(limit, interruptible)) != OS_OK)
      return res;
    millis -= limit;
  }

  // Restart interrupted polls with new parameters until the proper delay
  // has been completed.

  prevtime = getTimeMillis();

  while (millis > 0) {
    jlong newtime;

    if (!interruptible) {
      // Following assert fails for os::yield_all:
      // assert(!thread->is_Java_thread(), "must not be java thread");
      res = poll(NULL, 0, millis);
    } else {
      JavaThread *jt = JavaThread::current();

      INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
        os::Solaris::clear_interrupted);
    }

    // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
    // thread.Interrupt.

    if((res == OS_ERR) && (errno == EINTR)) {
      newtime = getTimeMillis();
      assert(newtime >= prevtime, "time moving backwards");
    /* Doing prevtime and newtime in microseconds doesn't help precision,
       and trying to round up to avoid lost milliseconds can result in a
       too-short delay. */
      millis -= newtime - prevtime;
      if(millis <= 0)
        return OS_OK;
      prevtime = newtime;
    } else
      return res;
  }

  return OS_OK;
}

// Read calls from inside the vm need to perform state transitions
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}

// Sleep 'millis' ms on behalf of 'thread'.  JavaThreads go through the
// _thread_blocked / suspend-equivalent protocol; millis <= 0 degrades to
// a plain thr_yield().  Returns 0/OS_OK or an interrupt status.
int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(),  "thread consistency check");

  // TODO-FIXME: this should be removed.
  // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock
  // situation  with a JavaThread being starved out of a lwp. The kernel doesn't seem to generate
  // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving
  // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
  // is fooled into believing that the system is making progress. In the code below we block the
  // the watcher thread while safepoint is in progress so that it would not appear as though the
  // system is making progress.
  if (!Solaris::T2_libthread() &&
      thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
    // We now try to acquire the threads lock. Since this lock is held by the VM thread during
    // the entire safepoint, the watcher thread will  line up here during the safepoint.
    Threads_lock->lock_without_safepoint_check();
    Threads_lock->unlock();
  }

  if (thread->is_Java_thread()) {
    // This is a JavaThread so we honor the _thread_blocked protocol
    // even for sleeps of 0 milliseconds. This was originally done
    // as a workaround for bug 4338139. However, now we also do it
    // to honor the suspend-equivalent protocol.

    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    int ret_code;
    if (millis <= 0) {
      thr_yield();
      ret_code = 0;
    } else {
      // The original sleep() implementation did not create an
      // OSThreadWaitState helper for sleeps of 0 milliseconds.
      // I'm preserving that decision for now.
      OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

      ret_code = os_sleep(millis, interruptible);
    }

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();

    return ret_code;
  }

  // non-JavaThread from this point on:

  if (millis <= 0) {
    thr_yield();
    return 0;
  }

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);

  return os_sleep(millis, interruptible);
}

int os::naked_sleep() {
  // %% make the sleep time an integer flag. for now use 1 millisec.
  return os_sleep(1, false);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    ::sleep(100);   // ... 100 seconds at a time
  }
}

// Used to convert frequent JVM_Yield() to nops.  Returns true (skip the
// yield) when the last real yield was less than DontYieldALotInterval
// milliseconds ago.
bool os::dont_yield() {
  if (DontYieldALot) {
    static hrtime_t last_time = 0;
    hrtime_t diff = getTimeNanos() - last_time;

    if (diff < DontYieldALotInterval * 1000000)
      return true;

    last_time += diff;

    return false;
  }
  else {
    return false;
  }
}

// Caveat: Solaris os::yield() causes a thread-state transition whereas
// the linux and win32 implementations do not.  This should be checked.

void os::yield() {
  // Yields to all threads with same or greater priority
  os::sleep(Thread::current(), 0, false);
}

// Note that yield semantics are defined by the scheduling class to which
// the thread currently belongs.  Typically, yield will _not yield to
// other equal or higher priority threads that reside on the dispatch queues
// of other CPUs.

os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }


// On Solaris we found that yield_all doesn't always yield to all other threads.
// There have been cases where there is a thread ready to execute but it doesn't
// get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
// The 1 millisecond wait doesn't seem long enough for the kernel to issue a
// SIGWAITING signal which will cause a new lwp to be created. So we count the
// number of times yield_all is called in the one loop and increase the sleep
// time after 8 attempts. If this fails too we increase the concurrency level
// so that the starving thread would get an lwp

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  if (attempts == 0) {
    os::sleep(Thread::current(), 1, false);
  } else {
    int iterations = attempts % 30;
    if (iterations == 0 && !os::Solaris::T2_libthread()) {
      // thr_setconcurrency and _getconcurrency make sense only under T1.
      // Every 30th attempt, ask libthread for one more LWP if we appear
      // short of them relative to the thread count.
      int noofLWPS = thr_getconcurrency();
      if (noofLWPS < (Threads::number_of_threads() + 2)) {
        thr_setconcurrency(thr_getconcurrency() + 1);
      }
    } else if (iterations < 25) {
      // Early attempts in each cycle of 30: short 1 ms sleep.
      os::sleep(Thread::current(), 1, false);
    } else {
      // Late attempts: back off to a 10 ms sleep.
      os::sleep(Thread::current(), 10, false);
    }
  }
}

// Called from the tight loops to possibly influence time-sharing heuristics
void os::loop_breaker(int attempts) {
  os::yield_all(attempts);
}


// Interface for setting lwp priorities.  If we are using T2 libthread,
// which forces the use of BoundThreads or we manually set UseBoundThreads,
// all of our threads will be assigned to real lwp's.  Using the thr_setprio
// function is meaningless in this mode so we must adjust the real lwp's priority.
// The routines below implement the getting and setting of lwp priorities.
//
// Note: There are three priority scales used on Solaris.  Java priorities
//       which range from 1 to 10, libthread "thr_setprio" scale which ranges
//       from 0 to 127, and the current scheduling class of the process we
//       are running in.  This is typically from -60 to +60.
//       The setting of the lwp priorities is done after a call to thr_setprio
//       so Java priorities are mapped to libthread priorities and we map from
//       the latter to lwp priorities.  We don't keep priorities stored in
//       Java priorities since some of our worker threads want to set priorities
//       higher than all Java threads.
//
// For related information:
// (1)  man -s 2 priocntl
// (2)  man -s 4 priocntl
// (3)  man dispadmin
// =    librt.so
// =    libthread/common/rtsched.c - thrp_setlwpprio().
// =    ps -cL <pid> ... to validate priority.
// =    sched_get_priority_min and _max
//              pthread_create
//              sched_setparam
//              pthread_setschedparam
//
// Assumptions:
// +    We assume that all threads in the process belong to the same
//              scheduling class.   IE. a homogeneous process.
// +    Must be root or in IA group to change the "interactive" attribute.
//              Priocntl() will fail silently.  The only indication of failure is when
//              we read-back the value and notice that it hasn't changed.
// +    Interactive threads enter the runq at the head, non-interactive at the tail.
// +    For RT, change timeslice as well.  Invariant:
//              constant "priority integral"
//              Konst == TimeSlice * (60-Priority)
//              Given a priority, compute appropriate timeslice.
// +    Higher numerical values have higher priority.
// sched class attributes
typedef struct {
  int   schedPolicy;              // classID
  int   maxPrio;
  int   minPrio;
} SchedInfo;


// Per-class priority limits, discovered at init time by lwp_priocntl_init():
// time-sharing (TS), interactive (IA) and real-time (RT) classes.
static SchedInfo tsLimits, iaLimits, rtLimits;

#ifdef ASSERT
// Non-zero => set_lwp_priority() reads back what it set (debug builds only).
static int  ReadBackValidate = 1;
#endif
// Scheduling class id and priority range of this process, filled in by
// lwp_priocntl_init().  NOTE(review): myCur appears unused in this chunk.
static int  myClass = 0;
static int  myMin   = 0;
static int  myMax   = 0;
static int  myCur   = 0;
// Set true only after lwp_priocntl_init() succeeds; gates set_lwp_priority().
static bool priocntl_enable = false;


// Call the version of priocntl suitable for all supported versions
// of Solaris.  We need to call through this wrapper so that we can
// build on Solaris 9 and run on Solaris 8, 9 and 10.
//
// This code should be removed if we ever stop supporting Solaris 8
// and earlier releases.

static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
typedef long (*priocntl_type)(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
// Starts out pointing at the lazy-binding stub; the first call through it
// replaces the pointer with the real libc entry point.
static priocntl_type priocntl_ptr = priocntl_stub;

// Stub to set the value of the real pointer, and then call the real
// function.

static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg) {
  // Try Solaris 8- name only ("__priocntl" is the pre-Solaris-9 symbol).
  priocntl_type tmp = (priocntl_type)dlsym(RTLD_DEFAULT, "__priocntl");
  guarantee(tmp != NULL, "priocntl function not found.");
  // Self-patch so subsequent calls bypass this stub entirely.
  priocntl_ptr = tmp;
  return (*priocntl_ptr)(PC_VERSION, idtype, id, cmd, arg);
}


// lwp_priocntl_init
//
// Try to determine the priority scale for our process.
//
// Return errno or 0 if OK.
//
// Probes the TS, IA and RT scheduling classes via priocntl(PC_GETCID),
// records their priority limits in tsLimits/iaLimits/rtLimits, determines
// this process's own class and priority range (myClass/myMin/myMax), and
// enables priocntl-based priority setting on success.
// Returns 0 on success (or when priorities are not in use), otherwise
// errno from a failed priocntl call or EINVAL for an unknown class.
static
int lwp_priocntl_init ()
{
  int rslt;
  pcinfo_t ClassInfo;
  pcparms_t ParmInfo;
  int i;

  if (!UseThreadPriorities) return 0;

  // We are using Bound threads, we need to determine our priority ranges
  if (os::Solaris::T2_libthread() || UseBoundThreads) {
    // If ThreadPriorityPolicy is 1, switch tables
    if (ThreadPriorityPolicy == 1) {
      for (i = 0; i < MaxPriority+1; i++)
        os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  // Not using Bound Threads, set to ThreadPolicy 1
  else {
    for (i = 0; i < MaxPriority+1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
    // Unbound threads: thr_setprio alone suffices, no lwp work needed.
    return 0;
  }


  // Get IDs for a set of well-known scheduling classes.
  // TODO-FIXME: GETCLINFO returns the current # of classes in
  // the system.  We should have a loop that iterates over the
  // classID values, which are known to be "small" integers.

  strcpy(ClassInfo.pc_clname, "TS");
  ClassInfo.pc_cid = -1;
  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
  tsLimits.schedPolicy = ClassInfo.pc_cid;
  tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
  // TS user priorities are symmetric around 0.
  tsLimits.minPrio = -tsLimits.maxPrio;

  strcpy(ClassInfo.pc_clname, "IA");
  ClassInfo.pc_cid = -1;
  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
  iaLimits.schedPolicy = ClassInfo.pc_cid;
  iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
  iaLimits.minPrio = -iaLimits.maxPrio;

  strcpy(ClassInfo.pc_clname, "RT");
  ClassInfo.pc_cid = -1;
  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
  rtLimits.schedPolicy = ClassInfo.pc_cid;
  rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
  // RT priorities start at 0 (no negative range).
  rtLimits.minPrio = 0;


  // Query our "current" scheduling class.
  // This will normally be IA, TS or, rarely, RT.
  memset (&ParmInfo, 0, sizeof(ParmInfo));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo );
  if ( rslt < 0 ) return errno;
  myClass = ParmInfo.pc_cid;

  // We now know our scheduling classId, get specific information
  // about the class.
  ClassInfo.pc_cid = myClass;
  ClassInfo.pc_clname[0] = 0;
  rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo );
  if ( rslt < 0 ) return errno;

  if (ThreadPriorityVerbose)
    tty->print_cr ("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);

  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr)(PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  // Derive our usable priority range from the class limits, clamped by
  // any per-process user priority limit (uprilim).
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    myMin = rtLimits.minPrio;
    myMax = rtLimits.maxPrio;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms;
    myMin = iaLimits.minPrio;
    myMax = iaLimits.maxPrio;
    myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms;
    myMin = tsLimits.minPrio;
    myMax = tsLimits.maxPrio;
    myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
  } else {
    // No clue - punt
    if (ThreadPriorityVerbose)
      tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
    return EINVAL;      // no clue, punt
  }

  if (ThreadPriorityVerbose)
    tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);

  priocntl_enable = true;  // Enable changing priorities
  return 0;
}

// Accessors for the class-specific parameter area of a pcparms_t.
#define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
#define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
#define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))


// scale_to_lwp_priority
//
// Convert from the libthread "thr_setprio" scale (0..127) to our current
// lwp scheduling class scale [rMin..rMax], by linear interpolation.
//
static
int scale_to_lwp_priority (int rMin, int rMax, int x)
{
  int v;

  if (x == 127) return rMax;            // avoid round-down
  v = (((x*(rMax-rMin)))/128)+rMin;
  return v;
}


// set_lwp_priority
//
// Set the priority of the lwp.  This call should only be made
// when using bound threads (T2 threads are bound by default).
// Returns 0 on success (or when deferred/unchanged), EINVAL if priority
// setting is disabled or the class is unknown, otherwise errno.
//
int set_lwp_priority (int ThreadID, int lwpid, int newPrio )
{
  int rslt;
  int Actual, Expected, prv;
  pcparms_t ParmInfo;                   // for GET-SET
#ifdef ASSERT
  pcparms_t ReadBack;                   // for readback
#endif

  // Set priority via PC_GETPARMS, update, PC_SETPARMS
  // Query current values.
  // TODO: accelerate this by eliminating the PC_GETPARMS call.
  //       Cache "pcparms_t" in global ParmCache.
  // TODO: elide set-to-same-value

  // If something went wrong on init, don't change priorities.
  if ( !priocntl_enable ) {
    if (ThreadPriorityVerbose)
      tty->print_cr("Trying to set priority but init failed, ignoring");
    return EINVAL;
  }


  // If lwp hasn't started yet, just return;
  // the _start routine will call us again.
  if ( lwpid <= 0 ) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("deferring the set_lwp_priority of thread " INTPTR_FORMAT " to %d, lwpid not set",
                     ThreadID, newPrio);
    }
    return 0;
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr ("set_lwp_priority(" INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
                   ThreadID, lwpid, newPrio);
  }

  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
    rtInfo->rt_pri     = scale_to_lwp_priority (rtLimits.minPrio, rtLimits.maxPrio, newPrio);
    // Leave the RT timeslice untouched.
    rtInfo->rt_tqsecs  = RT_NOCHANGE;
    rtInfo->rt_tqnsecs = RT_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
    }
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(iaLimits.maxPrio, (int)iaInfo->ia_uprilim);
    iaInfo->ia_upri    = scale_to_lwp_priority(iaLimits.minPrio, maxClamped, newPrio);
    iaInfo->ia_uprilim = IA_NOCHANGE;
    iaInfo->ia_mode    = IA_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr ("IA: [%d...%d] %d->%d\n",
                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
    }
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(tsLimits.maxPrio, (int)tsInfo->ts_uprilim);
    prv                = tsInfo->ts_upri;
    tsInfo->ts_upri    = scale_to_lwp_priority(tsLimits.minPrio, maxClamped, newPrio);
    // NOTE(review): uses IA_NOCHANGE rather than TS_NOCHANGE here;
    // presumably the constants are equal -- confirm against
    // <sys/tspriocntl.h> before changing.
    tsInfo->ts_uprilim = IA_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr ("TS: %d [%d...%d] %d->%d\n",
                     prv, tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
    }
    // Elide the PC_SETPARMS call when the priority did not change.
    if (prv == tsInfo->ts_upri) return 0;
  } else {
    if ( ThreadPriorityVerbose ) {
      tty->print_cr ("Unknown scheduling class\n");
    }
    return EINVAL;    // no clue, punt
  }

  rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
  if (ThreadPriorityVerbose && rslt) {
    tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
  }
  if (rslt < 0) return errno;

#ifdef ASSERT
  // Sanity check: read back what we just attempted to set.
  // In theory it could have changed in the interim ...
  //
  // The priocntl system call is tricky.
  // Sometimes it'll validate the priority value argument and
  // return EINVAL if unhappy.  At other times it fails silently.
  // Readbacks are prudent.

  if (!ReadBackValidate) return 0;

  memset(&ReadBack, 0, sizeof(pcparms_t));
  ReadBack.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
  assert(rslt >= 0, "priocntl failed");
  Actual = Expected = 0xBAD;
  assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    Actual   = RTPRI(ReadBack)->rt_pri;
    Expected = RTPRI(ParmInfo)->rt_pri;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    Actual   = IAPRI(ReadBack)->ia_upri;
    Expected = IAPRI(ParmInfo)->ia_upri;
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    Actual   = TSPRI(ReadBack)->ts_upri;
    Expected = TSPRI(ParmInfo)->ts_upri;
  } else {
    if ( ThreadPriorityVerbose ) {
      tty->print_cr("set_lwp_priority: unexpected class in readback: %d\n", ParmInfo.pc_cid);
    }
  }

  // A mismatch is only reported, not treated as an error (see comment above:
  // priocntl may fail silently and this is best-effort validation).
  if (Actual != Expected) {
    if ( ThreadPriorityVerbose ) {
      tty->print_cr ("set_lwp_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
                     lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
    }
  }
#endif

  return 0;
}



// Solaris only gives access to 128 real priorities at a time,
// so we expand Java's ten to fill this range.  This would be better
// if we dynamically adjusted relative priorities.
//
// The ThreadPriorityPolicy option allows us to select 2 different
// priority scales.
//
// ThreadPriorityPolicy=0
// Since the Solaris' default priority is MaximumPriority, we do not
// set a priority lower than Max unless a priority lower than
// NormPriority is requested.
//
// ThreadPriorityPolicy=1
// This mode causes the priority table to get filled with
// linear values.  NormPriority gets mapped to 50% of the
// Maximum priority and so on.  This will cause VM threads
// to get unfair treatment against other Solaris processes
// which do not explicitly alter their thread priorities.
//


// Maps Java priorities (index 1..10) to thr_setprio values (0..127).
int os::java_to_os_priority[MaxPriority + 1] = {
  -99999, // 0 Entry should never be used

   0,     // 1 MinPriority
   32,    // 2
   64,    // 3

   96,    // 4
  127,    // 5 NormPriority
  127,    // 6

  127,    // 7
  127,    // 8
  127,    // 9 NearMaxPriority

  127     // 10 MaxPriority
};


// Apply newpri via thr_setprio, and additionally via set_lwp_priority when
// the thread is bound to an lwp (T2 libthread, or explicitly bound).
OSReturn os::set_native_priority(Thread* thread, int newpri) {
  assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
  if ( !UseThreadPriorities ) return OS_OK;
  int status = thr_setprio(thread->osthread()->thread_id(), newpri);
  if ( os::Solaris::T2_libthread() || (UseBoundThreads && thread->osthread()->is_vm_created()) )
    status |= (set_lwp_priority (thread->osthread()->thread_id(),
                    thread->osthread()->lwp_id(), newpri ));
  return (status == 0) ? OS_OK : OS_ERR;
}


// Report the thread's libthread priority via thr_getprio.
OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  int p;
  if ( !UseThreadPriorities ) {
    *priority_ptr = NormalPriority;
    return OS_OK;
  }
  int status = thr_getprio(thread->osthread()->thread_id(), &p);
  if (status != 0) {
    return OS_ERR;
  }
  *priority_ptr = p;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {
  schedctl_start(schedctl_init());
}

// Mark the thread interrupted and wake it from any sleep/park/wait it may
// be blocked in.  The thr_kill at the end unwedges poll()-based sleeps.
void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  int isInterrupted = osthread->interrupted();
  if (!isInterrupted) {
      osthread->set_interrupted(true);
      // Publish the flag before any wakeup so the target observes it.
      OrderAccess::fence();
      // os::sleep() is implemented with either poll (NULL,0,timeout) or
      // by parking on _SleepEvent.  If the former, thr_kill will unwedge
      // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper.
      ParkEvent * const slp = thread->_SleepEvent ;
      if (slp != NULL) slp->unpark() ;
  }

  // For JSR166:  unpark after setting status but before thr_kill -dl
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  // Handle interruptible wait() ...
  ParkEvent * const ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

  // When events are used everywhere for os::sleep, then this thr_kill
  // will only be needed if UseVMInterruptibleIO is true.

  if (!isInterrupted) {
    int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
    assert_status(status == 0, status, "thr_kill");

    // Bump thread interruption counter
    RuntimeService::record_thread_interrupt_signaled_count();
  }
}


// Return (and optionally clear) the thread's interrupted flag.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  bool res = osthread->interrupted();

  // NOTE that since there is no "lock" around these two operations,
  // there is the possibility that the interrupted flag will be
  // "false" but that the interrupt event will be set.  This is
  // intentional.  The effect of this is that Object.wait() will appear
  // to have a spurious wakeup, which is not harmful, and the
  // possibility is so rare that it is not worth the added complexity
  // to add yet another lock.  It has also been recommended not to put
  // the interrupted flag into the os::Solaris::Event structure,
  // because it hides the issue.
  if (res && clear_interrupted) {
    osthread->set_interrupted(false);
  }
  return res;
}


void os::print_statistics() {
}

// Print a framed message on stderr and block until the user answers;
// returns nonzero iff the first character typed is 'y' or 'Y'.
int os::message_box(const char* title, const char* message) {
  int i;
  fdStream err(defaultStream::error_fd());
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();
  err.print_raw_cr(title);
  for (i = 0; i < 78; i++) err.print_raw("-");
  err.cr();
  err.print_raw_cr(message);
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();

  char buf[16];
  // Prevent process from exiting upon "read error" without consuming all CPU
  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }

  return buf[0] == 'y' || buf[0] == 'Y';
}

// A lightweight implementation that does not suspend the target thread and
// thus returns only a hint.  Used for profiling only!
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher and the Threads lock is owned.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  // For now, is only used to profile the VM Thread
  assert(thread->is_VM_thread(), "Can only be called for VMThread");
  ExtendedPC epc;

  GetThreadPC_Callback  cb(ProfileVM_lock);
  OSThread *osthread = thread->osthread();
  const int time_to_wait = 400; // 400ms wait for initial response
  int status = cb.interrupt(thread, time_to_wait);

  if (cb.is_done() ) {
    epc = cb.addr();
  } else {
    DEBUG_ONLY(tty->print_cr("Failed to get pc for thread: %d got %d status",
                              osthread->thread_id(), status););
    // epc is already NULL
  }
  return epc;
}


// This does not do anything on Solaris.  This is basically a hook for being
// able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
// Trivially invoke the Java call; on Solaris no exception-filter wrapping
// is needed (unlike Win32).
void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  f(value, method, args, thread);
}

// This routine may be used by user applications as a "hook" to catch signals.
// The user-defined signal handler must pass unrecognized signals to this
// routine, and if it returns true (non-zero), then the signal handler must
// return immediately.  If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine and kill the process.
//
// If this routine returns false, it is OK to call it again.  This allows
// the user-defined signal handler to perform checks either before or after
// the VM performs its own checks.  Naturally, the user code would be making
// a serious error if it tried to handle an exception (such as a null check
// or breakpoint) that the VM was generating for its own correct operation.
//
// This routine may recognize any of the following kinds of signals:
// SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
// os::Solaris::SIGasync
// It should be consulted by handlers for any of those signals.
// It explicitly does not recognize os::Solaris::SIGinterrupt
//
// The caller of this routine must pass in the three arguments supplied
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
// field of the structure passed to sigaction().  This routine assumes that
// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
//
// Note that the VM will print warnings if it detects conflicting signal
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
//
extern "C" int JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);


// The handler installed for the VM's own signals; aborts on unrecognized
// signals (abort_if_unrecognized == true).
void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
  JVM_handle_solaris_signal(sig, info, ucVoid, true);
}

/* Do not delete - if guarantee is ever removed, a signal handler (even empty)
   is needed to provoke threads blocked on IO to return an EINTR
   Note: this explicitly does NOT call JVM_handle_solaris_signal and
   does NOT participate in signal chaining due to requirement for
   NOT setting SA_RESTART to make EINTR work. */
extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
   if (UseSignalChaining) {
      struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
      if (actp && actp->sa_handler) {
        // Chaining the interrupt signal would break interruptible IO.
        vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
      }
   }
}

// This boolean allows users to forward their own non-matching signals
// to JVM_handle_solaris_signal, harmlessly.
bool os::Solaris::signal_handlers_are_installed = false;

// For signal-chaining
bool os::Solaris::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
// Resolved from libjsig's JVM_get_signal_action when libjsig is loaded.
get_signal_t os::Solaris::get_signal_action = NULL;

// Return the handler that was in place before the VM installed its own:
// first ask libjsig (if loaded), then fall back to the handler the VM
// itself saved via save_preinstalled_handler().
struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
  struct sigaction *actp = NULL;

  if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
    // Retrieve the old signal handler from libjsig
    actp = (*get_signal_action)(sig);
  }
  if (actp == NULL) {
    // Retrieve the preinstalled signal handler from jvm
    actp = get_preinstalled_handler(sig);
  }

  return actp;
}

// Invoke the previously installed (chained) handler described by actp,
// honoring its sa_mask, SA_NODEFER, SA_RESETHAND and SA_SIGINFO flags.
// Returns false for SIG_DFL (let the VM treat the signal itself),
// true otherwise (the signal has been taken care of).
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand;
    sa_sigaction_t sa;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    thr_sigsetmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}

// Forward sig to the chained (pre-VM) handler, if any.  Returns whether
// a chained handler consumed the signal.
bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  bool chained = false;
  // signal-chaining
  if (UseSignalChaining) {
    struct sigaction *actp = get_chained_signal_action(sig);
    if (actp != NULL) {
      chained = call_chained_handler(actp, sig, siginfo, context);
    }
  }
  return chained;
}

// Return the handler the VM saved for sig before overwriting it,
// or NULL if none was recorded.
struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  if (preinstalled_sigs[sig] != 0) {
    return &chainedsigactions[sig];
  }
  return NULL;
}

// Record the pre-existing handler for sig so chained_handler() can find it.
void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {

  assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  chainedsigactions[sig] = oldAct;
  preinstalled_sigs[sig] = 1;
}

// Install the VM's handler for sig.  set_installed selects the VM handler;
// oktochain controls whether a pre-existing handler may be saved for
// chaining (never allowed for the VM interrupt signal).
void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);
  void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                      : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      if (oktochain) {
        // save the old handler in jvm
        save_preinstalled_handler(sig, oldAct);
      } else {
        vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
      }
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;

  sigAct.sa_sigaction = signalHandler;
  // Handle SIGSEGV on alternate signal stack if
  // not using stack banging
  if (!UseStackBanging && sig == SIGSEGV) {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
  // Interruptible i/o requires SA_RESTART cleared so EINTR
  // is returned instead of restarting system calls
  } else if (sig == os::Solaris::SIGinterrupt()) {
    sigemptyset(&sigAct.sa_mask);
    sigAct.sa_handler = NULL;
    sigAct.sa_flags = SA_SIGINFO;
    sigAct.sa_sigaction = sigINTRHandler;
  } else {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  }
  // Remember the flags we asked for so check_signal_handler can verify them.
  os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);

  sigaction(sig, &sigAct, &oldAct);

  void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                       : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}


// Check a signal only once; check_signal_handler marks it done.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Solaris::check_signal_handler(sig)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI; we can add any periodic checks here.

void os::run_periodic_checks() {
  // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  // thereby preventing NULL checks.
  if(!check_addr0_done) check_addr0_done = check_addr0(tty);

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check for the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // See comments above for using JVM1/JVM2 and UseAltSigs
  DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  DO_SIGNAL_CHECK(os::Solaris::SIGasync());

}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// The un-interposed libc sigaction, resolved lazily via dlsym.
static os_sigaction_t os_sigaction = NULL;

// Compare the handler currently installed for sig with the one the VM
// expects; warn (once per signal) on a mismatch of handler or flags.
void os::Solaris::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler) ;


  switch(sig) {
    case SIGSEGV:
    case SIGBUS:
    case SIGFPE:
    case SIGPIPE:
    case SIGXFSZ:
    case SIGILL:
      jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      break;

    case SHUTDOWN1_SIGNAL:
    case SHUTDOWN2_SIGNAL:
    case SHUTDOWN3_SIGNAL:
    case BREAK_SIGNAL:
      jvmHandler = (address)user_handler();
      break;

    default:
      int intrsig = os::Solaris::SIGinterrupt();
      int asynsig = os::Solaris::SIGasync();

      if (sig == intrsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
      } else if (sig == asynsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      } else {
        // Not a signal the VM cares about.
        return;
      }
      break;
  }


  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
    tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Print all the signal handler state
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }

}

// Install all VM signal handlers, coordinating with libjsig (if preloaded)
// via its JVM_begin/end_signal_setting protocol so that handlers installed
// during this window are recorded by the VM rather than chained by libjsig.
void os::Solaris::install_signal_handlers() {
  bool libjsigdone = false;
  signal_handlers_are_installed = true;

  // signal-chaining
  typedef void (*signal_setting_t)();
  signal_setting_t begin_signal_setting = NULL;
  signal_setting_t end_signal_setting = NULL;
  begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  if (begin_signal_setting != NULL) {
    // The presence of JVM_begin_signal_setting implies libjsig is preloaded;
    // resolve the rest of its interface.
    end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
    get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                       dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
    get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
    libjsig_is_loaded = true;
    if (os::Solaris::get_libjsig_version != NULL) {
      libjsigversion = (*os::Solaris::get_libjsig_version)();
    }
    assert(UseSignalChaining, "should enable signal-chaining");
  }
  if (libjsig_is_loaded) {
    // Tell libjsig jvm is setting signal handlers
    (*begin_signal_setting)();
  }

  set_signal_handler(SIGSEGV, true, true);
  set_signal_handler(SIGPIPE, true, true);
  set_signal_handler(SIGXFSZ, true, true);
  set_signal_handler(SIGBUS, true, true);
  set_signal_handler(SIGILL, true, true);
  set_signal_handler(SIGFPE, true, true);


  if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {

    // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
    // can not register overridable signals which might be > 32
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
      // Tell libjsig jvm has finished setting signal handlers
      (*end_signal_setting)();
      libjsigdone = true;
    }
  }

  // Never ok to chain our SIGinterrupt
  set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
  set_signal_handler(os::Solaris::SIGasync(), true, true);

  if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig jvm finishes setting signal handlers
    (*end_signal_setting)();
  }

  // We don't activate signal checker if libjsig is in place, we trust ourselves
  // and if UserSignalHandler is installed all bets are off
  if (CheckJNICalls) {
    if (libjsig_is_loaded) {
      tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
      check_signals = false;
    }
    if (AllowUserSignalHandlers) {
      tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
      check_signals = false;
    }
  }
}


void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);

// Names for signals 0..37, indexed by signal number (Solaris numbering).
const char * signames[] = {
  "SIG0",
  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  "SIGCANCEL", "SIGLOST"
};

// Format a signal number into buf as its symbolic name (or "SIG<n>" for
// signals beyond the table).  Returns buf, or NULL for out-of-range codes.
const char* os::exception_name(int exception_code, char* buf, size_t size) {
  if (0 < exception_code && exception_code <= SIGRTMAX) {
    // signal
    if (exception_code < sizeof(signames)/sizeof(const char*)) {
       jio_snprintf(buf, size, "%s", signames[exception_code]);
    } else {
       jio_snprintf(buf, size, "SIG%d", exception_code);
    }
    return buf;
  } else {
    return NULL;
  }
}

// (Static) wrappers for the new libthread API
int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
int_fnP_thread_t_i os::Solaris::_thr_setmutator;
int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
int_fnP_thread_t os::Solaris::_thr_continue_mutator;

// (Static) wrapper for getisax(2) call.
4561 os::Solaris::getisax_func_t os::Solaris::_getisax = 0; 4562 4563 // (Static) wrappers for the liblgrp API 4564 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home; 4565 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init; 4566 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini; 4567 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root; 4568 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children; 4569 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources; 4570 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps; 4571 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale; 4572 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0; 4573 4574 // (Static) wrapper for meminfo() call. 4575 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0; 4576 4577 static address resolve_symbol_lazy(const char* name) { 4578 address addr = (address) dlsym(RTLD_DEFAULT, name); 4579 if(addr == NULL) { 4580 // RTLD_DEFAULT was not defined on some early versions of 2.5.1 4581 addr = (address) dlsym(RTLD_NEXT, name); 4582 } 4583 return addr; 4584 } 4585 4586 static address resolve_symbol(const char* name) { 4587 address addr = resolve_symbol_lazy(name); 4588 if(addr == NULL) { 4589 fatal(dlerror()); 4590 } 4591 return addr; 4592 } 4593 4594 4595 4596 // isT2_libthread() 4597 // 4598 // Routine to determine if we are currently using the new T2 libthread. 4599 // 4600 // We determine if we are using T2 by reading /proc/self/lstatus and 4601 // looking for a thread with the ASLWP bit set. If we find this status 4602 // bit set, we must assume that we are NOT using T2. The T2 team 4603 // has approved this algorithm. 4604 // 4605 // We need to determine if we are running with the new T2 libthread 4606 // since setting native thread priorities is handled differently 4607 // when using this library. All threads created using T2 are bound 4608 // threads. Calling thr_setprio is meaningless in this case. 
//
// Returns true when the process appears to be running on the T2
// libthread (no LWP has the ASLWP bit set in /proc/self/lstatus).
bool isT2_libthread() {
  static prheader_t * lwpArray = NULL;
  static int lwpSize = 0;
  static int lwpFile = -1;
  lwpstatus_t * that;
  char lwpName [128];
  bool isT2 = false;

#define ADR(x) ((uintptr_t)(x))
  // Address of the ix-th lwpstatus_t record following the prheader_t.
#define LWPINDEX(ary,ix) ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))

  lwpFile = open("/proc/self/lstatus", O_RDONLY, 0);
  if (lwpFile < 0) {
    if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
    return false;
  }
  // Start with a 16K buffer and grow it until the whole lwp status
  // snapshot fits; each retry frees the undersized buffer first.
  lwpSize = 16*1024;
  for (;;) {
    lseek (lwpFile, 0, SEEK_SET);
    lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize);
    if (read(lwpFile, lwpArray, lwpSize) < 0) {
      if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
      break;
    }
    if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
      // We got a good snapshot - now iterate over the list.
      int aslwpcount = 0;
      for (int i = 0; i < lwpArray->pr_nent; i++ ) {
        that = LWPINDEX(lwpArray,i);
        if (that->pr_flags & PR_ASLWP) {
          aslwpcount++;
        }
      }
      // No ASLWP seen anywhere => assume T2 libthread.
      if (aslwpcount == 0) isT2 = true;
      break;
    }
    lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
    FREE_C_HEAP_ARRAY(char, lwpArray); // retry.
  }

  FREE_C_HEAP_ARRAY(char, lwpArray);
  close (lwpFile);
  if (ThreadPriorityVerbose) {
    if (isT2) tty->print_cr("We are running with a T2 libthread\n");
    else tty->print_cr("We are not running with a T2 libthread\n");
  }
  return isT2;
}


// Resolve the libthread entry points the VM uses and record whether the
// T2 libthread is in use.  Missing mandatory symbols are fatal.
void os::Solaris::libthread_init() {
  address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");

  // Determine if we are running with the new T2 libthread
  os::Solaris::set_T2_libthread(isT2_libthread());

  lwp_priocntl_init();

  // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  if(func == NULL) {
    func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on an new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
    guarantee(func != NULL, "libthread.so is too old.");
  }

  // Initialize the new libthread getstate API wrappers
  func = resolve_symbol("thr_getstate");
  os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));

  func = resolve_symbol("thr_setstate");
  os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));

  func = resolve_symbol("thr_setmutator");
  os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));

  func = resolve_symbol("thr_suspend_mutator");
  os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  func = resolve_symbol("thr_continue_mutator");
  os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  // Record the address range of libthread's signal handler trampoline
  // via thr_sighndlrinfo.
  int size;
  void (*handler_info_func)(address *, int *);
  handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  handler_info_func(&handler_start, &size);
  handler_end = handler_start + size;
}


// Function pointers selected by synchronization_init() below: either the
// _lwp_*, pthread_* or default thr-library primitives.
int_fnP_mutex_tP os::Solaris::_mutex_lock;
int_fnP_mutex_tP os::Solaris::_mutex_trylock;
int_fnP_mutex_tP os::Solaris::_mutex_unlock;
int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
int_fnP_mutex_tP os::Solaris::_mutex_destroy;
int os::Solaris::_mutex_scope = USYNC_THREAD;

int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
int_fnP_cond_tP os::Solaris::_cond_signal;
int_fnP_cond_tP os::Solaris::_cond_broadcast;
int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
int_fnP_cond_tP os::Solaris::_cond_destroy;
int os::Solaris::_cond_scope = USYNC_THREAD;

// Select the mutex/condvar primitive family at startup, controlled by
// UseLWPSynchronization / UsePthreads.
void os::Solaris::synchronization_init() {
  if(UseLWPSynchronization) {
    os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
    os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
    os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
    os::Solaris::set_mutex_init(lwp_mutex_init);
    os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
    os::Solaris::set_mutex_scope(USYNC_THREAD);

    os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
    os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
    os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
    os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
    os::Solaris::set_cond_init(lwp_cond_init);
    os::Solaris::set_cond_destroy(lwp_cond_destroy);
    os::Solaris::set_cond_scope(USYNC_THREAD);
  }
  else {
    os::Solaris::set_mutex_scope(USYNC_THREAD);
    os::Solaris::set_cond_scope(USYNC_THREAD);

    if(UsePthreads) {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
      os::Solaris::set_mutex_init(pthread_mutex_default_init);
      os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
      os::Solaris::set_cond_init(pthread_cond_default_init);
      os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
    }
    else {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
      os::Solaris::set_mutex_init(::mutex_init);
      os::Solaris::set_mutex_destroy(::mutex_destroy);

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
      os::Solaris::set_cond_init(::cond_init);
      os::Solaris::set_cond_destroy(::cond_destroy);
    }
  }
}

// Load liblgrp.so.1 and resolve the locality-group API; returns false
// (leaving the wrappers unset) when the library is unavailable.
bool os::Solaris::liblgrp_init() {
  void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
  if (handle != NULL) {
    os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
    os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
    os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
    os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
    os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
    os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
    os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
    os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
                                                      dlsym(handle, "lgrp_cookie_stale")));

    lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
    set_lgrp_cookie(c);
    return true;
  }
  return false;
}

// Resolve optional libc entry points; leaves the wrappers at their
// defaults when a symbol is absent.
void os::Solaris::misc_sym_init() {
  address func;

  // getisax
  func = resolve_symbol_lazy("getisax");
  if (func != NULL) {
    os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
  }

  // meminfo
  func = resolve_symbol_lazy("meminfo");
  if (func != NULL) {
    os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
  }
}

// Forward to the dynamically resolved getisax(2); callers must check
// supports_getisax()/_getisax first.
uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  assert(_getisax != NULL, "_getisax not set");
  return _getisax(array, n);
}

// Symbol doesn't exist in Solaris 8 pset.h
#ifndef PS_MYID
#define PS_MYID -3
#endif

// int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
typedef
long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
static pset_getloadavg_type pset_getloadavg_ptr = NULL;

// Resolve pset_getloadavg at runtime; the symbol is absent on some
// releases, in which case the pointer stays NULL.
void init_pset_getloadavg_ptr(void) {
  pset_getloadavg_ptr =
    (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
  if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
    warning("pset_getloadavg function not found");
  }
}

int os::Solaris::_dev_zero_fd = -1;

// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = getpid();

  max_hrtime = first_hrtime = gethrtime();

  init_random(1234567);

  page_size = sysconf(_SC_PAGESIZE);
  if (page_size == -1)
    fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
                  strerror(errno)));
  init_page_sizes((size_t) page_size);

  Solaris::initialize_system_info();

  // Initialize misc. symbols as soon as possible, so we can use them
  // if we need them.
  Solaris::misc_sym_init();

  // Keep a /dev/zero descriptor open for the life of the VM.
  int fd = open("/dev/zero", O_RDWR);
  if (fd < 0) {
    fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
  } else {
    Solaris::set_dev_zero_fd(fd);

    // Close on exec, child won't inherit.
    fcntl(fd, F_SETFD, FD_CLOEXEC);
  }

  clock_tics_per_sec = CLK_TCK;

  // check if dladdr1() exists; dladdr1 can provide more information than
  // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  // and is available on linker patches for 5.7 and 5.8.
  // libdl.so must have been loaded, this call is just an entry lookup
  void * hdl = dlopen("libdl.so", RTLD_NOW);
  if (hdl)
    dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));

  // (Solaris only) this switches to calls that actually do locking.
  ThreadCritical::initialize();

  main_thread = thr_self();

  // Constant minimum stack size allowed. It must be at least
  // the minimum of what the OS supports (thr_min_stack()), and
  // enough to allow the thread to get to user bytecode execution.
  Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages. The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > 8*K) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
  }
}

// To install functions for atexit system call
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // try to enable extended file IO ASAP, see 6431278
  os::Solaris::try_enable_extended_io();

  // Allocate a single page and mark it as readable for safepoint polling. Also
  // use this first mmap call to check support for MAP_ALIGN.
  address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
                                                      page_size,
                                                      MAP_PRIVATE | MAP_ALIGN,
                                                      PROT_READ);
  if (polling_page == NULL) {
    // MAP_ALIGN unsupported on this release: remember that and retry
    // without it.
    has_map_align = false;
    polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
                                                PROT_READ);
  }

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    // Serialize page substitutes for a memory barrier on this platform.
    address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
                     2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
      threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
                  os::Solaris::min_stack_allowed/K);
    return JNI_ERR;
  }

  // For 64kbps there will be a 64kb page size, which makes
  // the usable default stack size quite a bit less.  Increase the
  // stack for 64kb (or any > than 8kb) pages, this increases
  // virtual memory fragmentation (since we're not creating the
  // stack on a power of 2 boundary.  The real fix for this
  // should be to fix the guard page mechanism.

  if (vm_page_size() > 8*K) {
    threadStackSizeInBytes = (threadStackSizeInBytes != 0)
       ? threadStackSizeInBytes +
         ((StackYellowPages + StackRedPages) * vm_page_size())
       : 0;
    ThreadStackSize = threadStackSizeInBytes/K;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
                                                vm_page_size()));

  Solaris::libthread_init();

  if (UseNUMA) {
    if (!Solaris::liblgrp_init()) {
      UseNUMA = false;
    } else {
      size_t lgrp_limit = os::numa_get_groups_num();
      int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
      size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
      FREE_C_HEAP_ARRAY(int, lgrp_ids);
      if (lgrp_num < 2) {
        // There's only one locality group, disable NUMA.
        UseNUMA = false;
      }
    }
    if (!UseNUMA && ForceNUMA) {
      UseNUMA = true;
    }
  }

  Solaris::signal_sets_init();
  Solaris::init_signal_mem();
  Solaris::install_signal_handlers();

  // Older libjsig only chains signals up to the pre-1.4.1 maximum.
  if (libjsigversion < JSIG_VERSION_1_4_1) {
    Maxlibjsigsigs = OLDMAXSIGNUM;
  }

  // initialize synchronization primitives to use either thread or
  // lwp synchronization (controlled by UseLWPSynchronization)
  Solaris::synchronization_init();

  if (MaxFDLimit) {
    // set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode))
        perror("os::init_2 getrlimit failed");
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode))
          perror("os::init_2 setrlimit failed");
      }
    }
  }

  // Initialize HPI.
  jint hpi_result = hpi::initialize();
  if (hpi_result != JNI_OK) {
    tty->print_cr("There was an error trying to initialize the HPI library.");
    return hpi_result;
  }

  // Calculate theoretical max. size of Threads to guard gainst
  // artifical out-of-memory situations, where all available address-
  // space has been reserved by thread stacks. Default stack size is 1Mb.
  size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
    JavaThread::stack_size_at_create() : (1*K*K);
  assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris has a maximum of 4Gb of user programs. Calculate the thread limit when
  // we should start doing Virtual Memory banging. Currently when the threads will
  // have used all but 200Mb of space.
  size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
  Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;

  // at-exit methods are called in the reverse order of their registration.
  // In Solaris 7 and earlier, atexit functions are called on return from
  // main or as a result of a call to exit(3C). There can be only 32 of
  // these functions registered and atexit() does not set errno. In Solaris
  // 8 and later, there is no limit to the number of functions registered
  // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
  // functions are called upon dlclose(3DL) in addition to return from main
  // and exit(3C).

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // Init pset_loadavg function pointer
  init_pset_getloadavg_ptr();

  return JNI_OK;
}

// Third-phase init: nothing to do on Solaris.
void os::init_3(void) {
  return;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
    fatal("Could not disable polling page");
};

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
    fatal("Could not enable polling page");
};

// OS interface.

// stat() wrapper that first normalizes the path via hpi::native_path;
// rejects over-long paths with ENAMETOOLONG.
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  hpi::native_path(strcpy(pathbuf, path));
  return ::stat(pathbuf, sbuf);
}


bool os::check_heap(bool force) { return true; }

typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
static vsnprintf_t sol_vsnprintf = NULL;

// vsnprintf replacement that lazily resolves __vsnprintf/vsnprintf from
// the dynamic linker; the result is cached in sol_vsnprintf.
int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
  if (!sol_vsnprintf) {
    //search for the named symbol in the objects that were loaded after libjvm
    void* where = RTLD_NEXT;
    if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
      sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
    if (!sol_vsnprintf){
      //search for the named symbol in the objects that were loaded before libjvm
      where = RTLD_DEFAULT;
      if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
        sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
      assert(sol_vsnprintf != NULL, "vsnprintf not found");
    }
  }
  return (*sol_vsnprintf)(buf, count, fmt, argptr);
}


// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
  DIR *dir = NULL;
  struct dirent *ptr;

  dir = opendir(path);
  // An unopenable directory is reported as empty.
  if (dir == NULL) return true;

  /* Scan the directory */
  bool result = true;
  char buf[sizeof(struct dirent) + MAX_PATH];
  struct dirent *dbuf = (struct dirent *) buf;
  // Stop at the first entry other than "." / "..".
  while (result && (ptr = readdir(dir, dbuf)) != NULL) {
    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
      result = false;
    }
  }
  closedir(dir);
  return result;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = O_WRONLY | O_CREAT;
  if (!rewrite_existing) {
    oflags |= O_EXCL;
  }
  return ::open64(path, oflags, S_IREAD | S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}

// Map a block of memory.
5158 char* os::map_memory(int fd, const char* file_name, size_t file_offset, 5159 char *addr, size_t bytes, bool read_only, 5160 bool allow_exec) { 5161 int prot; 5162 int flags; 5163 5164 if (read_only) { 5165 prot = PROT_READ; 5166 flags = MAP_SHARED; 5167 } else { 5168 prot = PROT_READ | PROT_WRITE; 5169 flags = MAP_PRIVATE; 5170 } 5171 5172 if (allow_exec) { 5173 prot |= PROT_EXEC; 5174 } 5175 5176 if (addr != NULL) { 5177 flags |= MAP_FIXED; 5178 } 5179 5180 char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags, 5181 fd, file_offset); 5182 if (mapped_address == MAP_FAILED) { 5183 return NULL; 5184 } 5185 return mapped_address; 5186 } 5187 5188 5189 // Remap a block of memory. 5190 char* os::remap_memory(int fd, const char* file_name, size_t file_offset, 5191 char *addr, size_t bytes, bool read_only, 5192 bool allow_exec) { 5193 // same as map_memory() on this OS 5194 return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only, 5195 allow_exec); 5196 } 5197 5198 5199 // Unmap a block of memory. 5200 bool os::unmap_memory(char* addr, size_t bytes) { 5201 return munmap(addr, bytes) == 0; 5202 } 5203 5204 void os::pause() { 5205 char filename[MAX_PATH]; 5206 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 5207 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 5208 } else { 5209 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 5210 } 5211 5212 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 5213 if (fd != -1) { 5214 struct stat buf; 5215 close(fd); 5216 while (::stat(filename, &buf) == 0) { 5217 (void)::poll(NULL, 0, 100); 5218 } 5219 } else { 5220 jio_fprintf(stderr, 5221 "Could not open pause file '%s', continuing immediately.\n", filename); 5222 } 5223 } 5224 5225 #ifndef PRODUCT 5226 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS 5227 // Turn this on if you need to trace synch operations. 
// Set RECORD_SYNCH_LIMIT to a large-enough value,
// and call record_synch_enable and record_synch_disable
// around the computation of interest.

void record_synch(char* name, bool returning); // defined below

// RAII tracer: records entry in the constructor and exit in the
// destructor of each interposed synch call.
class RecordSynch {
  char* _name;
public:
  RecordSynch(char* name) :_name(name)
  { record_synch(_name, false); }
  ~RecordSynch() { record_synch(_name, true); }
};

// Generates an extern "C" interposer for one synch entry point: lazily
// resolves the real function with dlsym(RTLD_NEXT), bumps a per-symbol
// call counter, runs the 'inner' check, then delegates.
#define CHECK_SYNCH_OP(ret, name, params, args, inner) \
extern "C" ret name params { \
  typedef ret name##_t params; \
  static name##_t* implem = NULL; \
  static int callcount = 0; \
  if (implem == NULL) { \
    implem = (name##_t*) dlsym(RTLD_NEXT, #name); \
    if (implem == NULL) fatal(dlerror()); \
  } \
  ++callcount; \
  RecordSynch _rs(#name); \
  inner; \
  return implem args; \
}
// in dbx, examine callcounts this way:
// for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done

// Sanity check: synch objects must live in the C heap, never in the
// Java heap.
#define CHECK_POINTER_OK(p) \
  (Universe::perm_gen() == NULL || !Universe::is_reserved_heap((oop)(p)))
#define CHECK_MU \
  if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
#define CHECK_CV \
  if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
#define CHECK_P(p) \
  if (!CHECK_POINTER_OK(p)) fatal(false, "Pointer must be in C heap only.");

#define CHECK_MUTEX(mutex_op) \
  CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);

CHECK_MUTEX(  mutex_lock)
CHECK_MUTEX( _mutex_lock)
CHECK_MUTEX(  mutex_unlock)
CHECK_MUTEX(_mutex_unlock)
CHECK_MUTEX(  mutex_trylock)
CHECK_MUTEX(_mutex_trylock)

#define CHECK_COND(cond_op) \
  CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);

CHECK_COND( cond_wait);
CHECK_COND(_cond_wait);
CHECK_COND(_cond_wait_cancel);

#define CHECK_COND2(cond_op) \
  CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);

CHECK_COND2( cond_timedwait);
CHECK_COND2(_cond_timedwait);
CHECK_COND2(_cond_timedwait_cancel);

// do the _lwp_* versions too
#define mutex_t lwp_mutex_t
#define cond_t lwp_cond_t
CHECK_MUTEX(  _lwp_mutex_lock)
CHECK_MUTEX(  _lwp_mutex_unlock)
CHECK_MUTEX(  _lwp_mutex_trylock)
CHECK_MUTEX( __lwp_mutex_lock)
CHECK_MUTEX( __lwp_mutex_unlock)
CHECK_MUTEX( __lwp_mutex_trylock)
CHECK_MUTEX(___lwp_mutex_lock)
CHECK_MUTEX(___lwp_mutex_unlock)

CHECK_COND(  _lwp_cond_wait);
CHECK_COND( __lwp_cond_wait);
CHECK_COND(___lwp_cond_wait);

CHECK_COND2(  _lwp_cond_timedwait);
CHECK_COND2( __lwp_cond_timedwait);
#undef mutex_t
#undef cond_t

CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);


// recording machinery:

enum { RECORD_SYNCH_LIMIT = 200 };
char* record_synch_name[RECORD_SYNCH_LIMIT];
void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
bool record_synch_returning[RECORD_SYNCH_LIMIT];
thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
int record_synch_count = 0;
bool record_synch_enabled = false;

// in dbx, examine recorded data this way:
// for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
// Append one trace entry (operation name, enter/return flag, calling
// thread, and the stack address of the first argument slot) if recording
// is enabled and the fixed-size buffers are not yet full.
void record_synch(char* name, bool returning) {
  if (record_synch_enabled) {
    if (record_synch_count < RECORD_SYNCH_LIMIT) {
      record_synch_name[record_synch_count] = name;
      record_synch_returning[record_synch_count] = returning;
      record_synch_thread[record_synch_count] = thr_self();
      // &name is the address of this frame's first parameter slot - a
      // cheap stand-in for "where on the stack the call happened".
      record_synch_arg0ptr[record_synch_count] = &name;
      record_synch_count++;
    }
    // put more checking code here:
    // ...
  }
}

void record_synch_enable() {
  // start collecting trace data, if not already doing so
  // (re-enabling while already enabled must NOT discard earlier entries)
  if (!record_synch_enabled) record_synch_count = 0;
  record_synch_enabled = true;
}

void record_synch_disable() {
  // stop collecting trace data
  record_synch_enabled = false;
}

#endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
#endif // PRODUCT

// Byte offset of prusage_t::pr_utime, and the size of the window from
// pr_utime up to (but not including) pr_ttime, computed with the classic
// null-pointer offsetof idiom.  Used below to pread() just the CPU-time
// fields out of /proc/<pid>/lwp/<lwpid>/lwpusage.
const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
                               (intptr_t)(&((prusage_t *)(NULL))->pr_utime);


// JVMTI & JVM monitoring and management support
// The thread_cpu_time() and current_thread_cpu_time() are only
// supported if is_thread_cpu_time_supported() returns true.
// They are not supported on Solaris T1.

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread *)
// returns the fast estimate available on the platform.

// hrtime_t gethrvtime() return value includes
// user time but does not include system time
jlong os::current_thread_cpu_time() {
  return (jlong) gethrvtime();
}

// User-level CPU time of the given thread, in nanoseconds.
jlong os::thread_cpu_time(Thread *thread) {
  // return user level CPU time only to be consistent with
  // what current_thread_cpu_time returns.
  // thread_cpu_time_info() must be changed if this changes
  return os::thread_cpu_time(thread, false /* user time only */);
}

// CPU time of the calling thread; user+system requires the slow /proc
// path, user-only uses the fast gethrvtime() path.
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  if (user_sys_cpu_time) {
    return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  } else {
    return os::current_thread_cpu_time();
  }
}

// Reads the pr_utime (and pr_stime) fields of the target LWP's prusage
// record from /proc/<pid>/lwp/<lwpid>/lwpusage.  Returns the CPU time in
// nanoseconds, or -1 if the proc file cannot be opened or read.
// The pread is retried on EINTR.
jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  char proc_name[64];
  int count;
  prusage_t prusage;
  jlong lwp_time;
  int fd;

  // NOTE(review): 64 bytes comfortably fits two decimal ints plus the
  // fixed path text here, but there is no explicit bound check - confirm
  // if the path template ever changes.
  sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
          getpid(),
          thread->osthread()->lwp_id());
  fd = open(proc_name, O_RDONLY);
  if ( fd == -1 ) return -1;

  do {
    // Read only the [pr_utime, pr_ttime) window - see thr_time_off/size.
    count = pread(fd,
                  (void *)&prusage.pr_utime,
                  thr_time_size,
                  thr_time_off);
  } while (count < 0 && errno == EINTR);
  close(fd);
  if ( count < 0 ) return -1;

  if (user_sys_cpu_time) {
    // user + system CPU time
    lwp_time = (((jlong)prusage.pr_stime.tv_sec +
                 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
                 (jlong)prusage.pr_stime.tv_nsec +
                 (jlong)prusage.pr_utime.tv_nsec;
  } else {
    // user level CPU time only
    lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
                (jlong)prusage.pr_utime.tv_nsec;
  }

  return(lwp_time);
}

// Describe the clock returned by current_thread_cpu_time() to JVMTI.
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}

// Describe the clock returned by thread_cpu_time() to JVMTI.
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}

// Per the comment block above: supported on the T2 libthread or when
// threads are bound to LWPs, not on T1.
bool os::is_thread_cpu_time_supported() {
  if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
    return true;
  } else {
    return false;
  }
}

// System loadavg support.  Returns -1 if load average cannot be obtained.
// Return the load average for our processor set if the primitive exists
// (Solaris 9 and later).  Otherwise just return system wide loadavg.
int os::loadavg(double loadavg[], int nelem) {
  if (pset_getloadavg_ptr != NULL) {
    return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
  } else {
    return ::getloadavg(loadavg, nelem);
  }
}

//---------------------------------------------------------------------------------

// Clamp x to the VM page containing y:
//  - if x is already on y's page, return x unchanged;
//  - if x is above y's page, return the first address past y's page;
//  - if x is below y's page, return the base of y's page.
// Used by os::find() to keep disassembly ranges within one mapped page.
static address same_page(address x, address y) {
  intptr_t page_bits = -os::vm_page_size();    // mask of the page-number bits
  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
    return x;
  else if (x > y)
    return (address)(intptr_t(y) | ~page_bits) + 1;
  else
    return (address)(intptr_t(y) & page_bits);
}

// Symbolize `addr` via dladdr() and print "addr: symbol+offset in library
// at base" to `st`; with -verbose also disassembles ~40 bytes either side
// of addr (clamped to the page and to the enclosing symbol).  Returns
// false if dladdr() cannot attribute the address.
bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo)) {
#ifdef _LP64
    st->print("0x%016lx: ", addr);
#else
    st->print("0x%08x: ", addr);
#endif
    if (dlinfo.dli_sname != NULL)
      st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
    else if (dlinfo.dli_fname)
      st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
    else
      st->print("<absolute address>");
    if (dlinfo.dli_fname) st->print(" in %s", dlinfo.dli_fname);
#ifdef _LP64
    if (dlinfo.dli_fbase) st->print(" at 0x%016lx", dlinfo.dli_fbase);
#else
    if (dlinfo.dli_fbase) st->print(" at 0x%08x", dlinfo.dli_fbase);
#endif
    st->cr();

    if (Verbose) {
      // decode some bytes around the PC
      address begin = same_page(addr-40, addr);
      address end   = same_page(addr+40, addr);
      address lowest = (address) dlinfo.dli_sname;
      if (!lowest)  lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest)  begin = lowest;
      // Don't decode past the start of the next symbol.
      Dl_info dlinfo2;
      if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
        end = (address) dlinfo2.dli_saddr;
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}

// Following function has been added to support HotSparc's libjvm.so running
// under Solaris production JDK 1.2.2 / 1.3.0.  These came from
// src/solaris/hpi/native_threads in the EVM codebase.
//
// NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
// libraries and should thus be removed. We will leave it behind for a while
// until we no longer want to able to run on top of 1.3.0 Solaris production
// JDK. See 4341971.

#define STACK_SLACK 0x800

extern "C" {
  // Distance (in bytes) from the current stack pointer to the bottom of
  // the thread's stack segment, minus a safety margin of STACK_SLACK.
  // Uses the current frame's address as an approximation of the SP.
  intptr_t sysThreadAvailableStackWithSlack() {
    stack_t st;
    intptr_t retval, stack_top;
    retval = thr_stksegment(&st);
    assert(retval == 0, "incorrect return value from thr_stksegment");
    // thr_stksegment returns the HIGH end of the stack in ss_sp.
    assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
    assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
    stack_top=(intptr_t)st.ss_sp-st.ss_size;
    return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  }
}

// Just to get the Kernel build to link on solaris for testing.
// KERNEL_RETURN presumably expands to an empty stub body in Kernel builds
// (defined elsewhere) - this only satisfies the linker.

extern "C" {
class ASGCT_CallTrace;
void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext)
  KERNEL_RETURN;
}


// ObjectMonitor park-unpark infrastructure ...
//
// We implement Solaris and Linux PlatformEvents with the
// obvious condvar-mutex-flag triple.
// Another alternative that works quite well is pipes:
// Each PlatformEvent consists of a pipe-pair.
// The thread associated with the PlatformEvent
// calls park(), which reads from the input end of the pipe.
// Unpark() writes into the other end of the pipe.
// The write-side of the pipe must be set NDELAY.
// Unfortunately pipes consume a large # of handles.
// Native solaris lwp_park() and lwp_unpark() work nicely, too.
// Using pipes for the 1st few threads might be workable, however.
//
// park() is permitted to return spuriously.
// Callers of park() should wrap the call to park() in
// an appropriate loop.  A litmus test for the correct
// usage of park is the following: if park() were modified
// to immediately return 0 your code should still work,
// albeit degenerating to a spin loop.
//
// An interesting optimization for park() is to use a trylock()
// to attempt to acquire the mutex.  If the trylock() fails
// then we know that a concurrent unpark() operation is in-progress.
// In that case the park() code could simply set _count to 0
// and return immediately.  The subsequent park() operation *might*
// return immediately.  That's harmless as the caller of park() is
// expected to loop.  By using trylock() we will have avoided a
// context switch caused by contention on the per-thread mutex.
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
//     objectmonitor implementation.
// 2.  Collapse the JSR166 parker event, and the
//     objectmonitor ParkEvent into a single "Event" construct.
// 3.  In park() and unpark() add:
//     assert (Thread::current() == AssociatedWith).
// 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
//     1-out-of-N park() operations will return immediately.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block
//
// _Event serves as a restricted-range semaphore.
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//
// TODO-FIXME: add DTRACE probes for:
// 1.   Tx parks
// 2.   Ty unparks Tx
// 3.   Tx resumes from park


// value determined through experimentation
#define ROUNDINGFIX 11

// utility to compute the abstime argument to timedwait.
// TODO-FIXME: switch from compute_abstime() to unpackTime().

// Converts a relative timeout in milliseconds into the absolute
// wall-clock deadline (seconds + nanoseconds) that cond_timedwait expects.
// Negative millis is treated as 0; the seconds component is clamped to a
// libthread-dependent maximum (see cond_timedwait(3T)).
static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  // millis is the relative timeout time
  // abstime will be the absolute timeout time
  if (millis < 0)  millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  jlong max_wait_period;

  if (UseLWPSynchronization) {
    // forward port of fix for 4275818 (not sleeping long enough)
    // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
    // _lwp_cond_timedwait() used a round_down algorithm rather
    // than a round_up. For millis less than our roundfactor
    // it rounded down to 0 which doesn't meet the spec.
    // For millis > roundfactor we may return a bit sooner, but
    // since we can not accurately identify the patch level and
    // this has already been fixed in Solaris 9 and 8 we will
    // leave it alone rather than always rounding down.

    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
  } else {
    max_wait_period = 50000000;
  }
  millis %= 1000;                  // sub-second remainder only; whole seconds
                                   // are already captured in `seconds`
  if (seconds > max_wait_period) { // see man cond_timedwait(3T)
    seconds = max_wait_period;
  }
  abstime->tv_sec = now.tv_sec + seconds;
  long usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    // carry into the seconds field
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}

// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).

int os::PlatformEvent::TryPark() {
  for (;;) {
    const int v = _Event ;
    guarantee ((v == 0) || (v == 1), "invariant") ;
    // CAS _Event to 0; retry if another thread raced us.
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v ;
  }
}

// Decrement _Event (see the transition table in the block comment above);
// if the result indicates no permit was available (old value 0), block on
// the condvar until unpark() raises _Event.
void os::PlatformEvent::park() {           // AKA: down()
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  int v ;
  for (;;) {
      v = _Event ;
      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v == 0) {
     // Do this the hard way by blocking ...
     // See http://monaco.sfbay/detail.jsf?cr=5094058.
     // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
     // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
     if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
     int status = os::Solaris::mutex_lock(_mutex);
     assert_status(status == 0, status, "mutex_lock");
     guarantee (_nParked == 0, "invariant") ;
     ++ _nParked ;
     while (_Event < 0) {
        // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
        // Treat this the same as if the wait was interrupted
        // With usr/lib/lwp going to kernel, always handle ETIME
        status = os::Solaris::cond_wait(_cond, _mutex);
        if (status == ETIME) status = EINTR ;
        assert_status(status == 0 || status == EINTR, status, "cond_wait");
     }
     -- _nParked ;
     _Event = 0 ;
     status = os::Solaris::mutex_unlock(_mutex);
     assert_status(status == 0, status, "mutex_unlock");
  }
}

// Timed variant of park(): returns OS_OK if a permit was consumed (either
// immediately or after a wakeup), OS_TIMEOUT if the deadline passed.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant") ;
  int v ;
  for (;;) {
      v = _Event ;
      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v != 0) return OS_OK ;        // permit was available - no blocking

  int ret = OS_TIMEOUT;
  timestruc_t abst;
  compute_abstime (&abst, millis);

  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // For Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant") ;
  ++ _nParked ;
  while (_Event < 0) {
     // NOTE: the inner `status` intentionally shadows the outer one;
     // each iteration only inspects this wait's own result.
     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
     assert_status(status == 0 || status == EINTR ||
                   status == ETIME || status == ETIMEDOUT,
                   status, "cond_timedwait");
     if (!FilterSpuriousWakeups) break ;                 // previous semantics
     if (status == ETIME || status == ETIMEDOUT) break ;
     // We consume and ignore EINTR and spurious wakeups.
  }
  -- _nParked ;
  // _Event >= 0 here means unpark() ran before the deadline.
  if (_Event >= 0) ret = OS_OK ;
  _Event = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  return ret;
}

// Raise _Event to at most 1 and, if the associated thread was parked
// (old value < 0), wake it.  Safe to call from any thread.
void os::PlatformEvent::unpark() {
  int v, AnyWaiters;

  // Increment _Event.
  // Another acceptable implementation would be to simply swap 1
  // into _Event:
  //   if (Swap (&_Event, 1) < 0) {
  //      mutex_lock (_mutex) ; AnyWaiters = nParked; mutex_unlock (_mutex) ;
  //      if (AnyWaiters) cond_signal (_cond) ;
  //   }

  for (;;) {
    v = _Event ;
    if (v > 0) {
       // The LD of _Event could have reordered or be satisfied
       // by a read-aside from this processor's write buffer.
       // To avoid problems execute a barrier and then
       // ratify the value.  A degenerate CAS() would also work.
       // Viz., CAS (v+0, &_Event, v) == v).
       OrderAccess::fence() ;
       if (_Event == v) return ;   // already signaled - nothing to do
       continue ;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
  }

  // If the thread associated with the event was parked, wake it.
  if (v < 0) {
     int status ;
     // Wait for the thread assoc with the PlatformEvent to vacate.
     // Lock/unlock with no body: this synchronizes with the parker's
     // critical section so we read a stable _nParked.
     status = os::Solaris::mutex_lock(_mutex);
     assert_status(status == 0, status, "mutex_lock");
     AnyWaiters = _nParked ;
     status = os::Solaris::mutex_unlock(_mutex);
     assert_status(status == 0, status, "mutex_unlock");
     guarantee (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
     if (AnyWaiters != 0) {
       // We intentional signal *after* dropping the lock
       // to avoid a common class of futile wakeups.
       status = os::Solaris::cond_signal(_cond);
       assert_status(status == 0, status, "cond_signal");
     }
  }
}

// JSR166
// -------------------------------------------------------

/*
 * The solaris and linux implementations of park/unpark are fairly
 * conservative for now, but can be improved. They currently use a
 * mutex/condvar pair, plus _counter.
 * Park decrements _counter if > 0, else does a condvar wait.  Unpark
 * sets count to 1 and signals condvar.  Only one thread ever waits
 * on the condvar.
 * Contention seen when trying to park implies that someone
 * is unparking you, so don't wait. And spurious returns are fine, so there
 * is no need to track notifications.
 */

#define NANOSECS_PER_SEC 1000000000
#define NANOSECS_PER_MILLISEC 1000000
#define MAX_SECS 100000000

/*
 * This code is common to linux and solaris and will be moved to a
 * common place in dolphin.
 *
 * The passed in time value is either a relative time in nanoseconds
 * or an absolute time in milliseconds. Either way it has to be unpacked
 * into suitable seconds and nanoseconds components and stored in the
 * given timespec structure.
 * Given time is a 64-bit value and the time_t used in the timespec is only
 * a signed-32-bit value (except on 64-bit Linux) we have to watch for
 * overflow if times way in the future are given. Further on Solaris versions
 * prior to 10 there is a restriction (see cond_timedwait) that the specified
 * number of seconds, in abstime, is less than current_time + 100,000,000.
 * As it will be 28 years before "now + 100000000" will overflow we can
 * ignore overflow and just impose a hard-limit on seconds using the value
 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
 * years from "now".
 */
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  // Hard ceiling on the deadline - see the 100,000,000-second restriction
  // discussed in the comment block above.
  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    // `time` is an absolute deadline in milliseconds since the epoch.
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    // `time` is a relative timeout in nanoseconds.
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}

// Block the current (Java) thread until unpark() is called, the thread is
// interrupted, or the optional deadline expires.  time == 0 means wait
// indefinitely; otherwise `time` is interpreted per unpackTime() above.
// May return spuriously (callers are expected to loop - see the JSR166
// comment block above).
void Parker::park(bool isAbsolute, jlong time) {

  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
      _counter = 0 ;
      OrderAccess::fence();
      return ;
  }

  // Optional fast-exit: Check interrupt before trying to wait
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // First, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
    return;
  }
  if (time > 0) {
    // Warning: this code might be exposed to the old Solaris time
    // round-down bugs.  Grep "roundingFix" for details.
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: _mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex.  If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking.  Also. check interrupt before trying wait
  // (trylock failure implies a concurrent unpark() holds the mutex -
  // spurious return is acceptable here).
  if (Thread::is_interrupted(thread, false) ||
      os::Solaris::mutex_trylock(_mutex) != 0) {
    return;
  }

  int status ;

  if (_counter > 0)  { // no wait needed
    _counter = 0;
    status = os::Solaris::mutex_unlock(_mutex);
    assert (status == 0, "invariant") ;
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // Do this the hard way by blocking ...
  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif

  if (time == 0) {
    status = os::Solaris::cond_wait (_cond, _mutex) ;
  } else {
    status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  }
  // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of the Solaris.
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  // Restore the signal mask saved before blocking.
  thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
#endif
  _counter = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock") ;

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
  OrderAccess::fence();
}

// Make one permit available (saturating at 1) and signal the parked
// thread, if any.  The signal is issued after dropping the mutex to
// avoid futile wakeups.
void Parker::unpark() {
  int s, status ;
  status = os::Solaris::mutex_lock (_mutex) ;
  assert (status == 0, "invariant") ;
  s = _counter;
  _counter = 1;
  status = os::Solaris::mutex_unlock (_mutex) ;
  assert (status == 0, "invariant") ;

  if (s < 1) {
    // No permit was pending, so the owner may be blocked - signal it.
    status = os::Solaris::cond_signal (_cond) ;
    assert (status == 0, "invariant") ;
  }
}

// Environment of this process, used by fork_and_exec()'s execve() below.
extern char** environ;

// Run the specified command
// in a separate process ("sh -c <cmd>").  Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  char * argv[4];
  argv[0] = (char *)"sh";
  argv[1] = (char *)"-c";
  argv[2] = cmd;
  argv[3] = NULL;

  // fork is async-safe, fork1 is not so can't use in signal handler
  pid_t pid;
  Thread* t = ThreadLocalStorage::get_thread_slow();
  if (t != NULL && t->is_inside_signal_handler()) {
    pid = fork();
  } else {
    pid = fork1();
  }

  if (pid < 0) {
    // fork failed
    warning("fork failed: %s", strerror(errno));
    return -1;

  } else if (pid == 0) {
    // child process

    // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
    execve("/usr/bin/sh", argv, environ);

    // execve failed
    // _exit (not exit): don't run atexit handlers in the forked child.
    _exit(-1);

  } else  {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit.  This returns immediately if
    // the child has already exited. */
    while (waitpid(pid, &status, 0) < 0) {
        switch (errno) {
        case ECHILD: return 0;      // already reaped elsewhere
        case EINTR: break;          // interrupted - retry the wait
        default: return -1;
        }
    }

    if (WIFEXITED(status)) {
       // The child exited normally; get its exit code.
       return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
       // The child exited because of a signal
       // The best value to return is 0x80 + signal number,
       // because that is what all Unix shells do, and because
       // it allows callers to distinguish between process exit and
       // process death by signal.
       return 0x80 + WTERMSIG(status);
    } else {
       // Unknown exit code; pass it through
       return status;
    }
  }
}

// is_headless_jre()
//
// Test for the existence of libmawt in motif21 or xawt directories
// in order to report if we are running in a headless jre
//
bool os::is_headless_jre() {
    struct stat statbuf;
    char buf[MAXPATHLEN];
    char libmawtpath[MAXPATHLEN];
    const char *xawtstr  = "/xawt/libmawt.so";
    const char *motifstr = "/motif21/libmawt.so";
    char *p;

    // Get path to libjvm.so
    os::jvm_path(buf, sizeof(buf));

    // Get rid of libjvm.so
    // (strip the trailing "/libjvm.so" path component)
    p = strrchr(buf, '/');
    if (p == NULL) return false;
    else *p = '\0';

    // Get rid of client or server
    // (strip the trailing "/client" or "/server" component)
    p = strrchr(buf, '/');
    if (p == NULL) return false;
    else *p = '\0';

    // check xawt/libmawt.so
    // NOTE(review): strcat into a MAXPATHLEN buffer with no length check -
    // presumably safe because jvm_path output plus the fixed suffix fits,
    // but worth confirming.
    strcpy(libmawtpath, buf);
    strcat(libmawtpath, xawtstr);
    if (::stat(libmawtpath, &statbuf) == 0) return false;

    // check motif21/libmawt.so
    strcpy(libmawtpath, buf);
    strcat(libmawtpath, motifstr);
    if (::stat(libmawtpath, &statbuf) == 0) return false;

    // neither toolkit library exists: headless JRE
    return true;
}