1 /* 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // no precompiled headers 26 #include "classfile/classLoader.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "classfile/vmSymbols.hpp" 29 #include "code/icBuffer.hpp" 30 #include "code/vtableStubs.hpp" 31 #include "compiler/compileBroker.hpp" 32 #include "compiler/disassembler.hpp" 33 #include "interpreter/interpreter.hpp" 34 #include "jvm_solaris.h" 35 #include "memory/allocation.inline.hpp" 36 #include "memory/filemap.hpp" 37 #include "mutex_solaris.inline.hpp" 38 #include "oops/oop.inline.hpp" 39 #include "os_share_solaris.hpp" 40 #include "prims/jniFastGetField.hpp" 41 #include "prims/jvm.h" 42 #include "prims/jvm_misc.hpp" 43 #include "runtime/arguments.hpp" 44 #include "runtime/extendedPC.hpp" 45 #include "runtime/globals.hpp" 46 #include "runtime/interfaceSupport.hpp" 47 #include "runtime/java.hpp" 48 #include "runtime/javaCalls.hpp" 49 #include "runtime/mutexLocker.hpp" 50 #include "runtime/objectMonitor.hpp" 51 #include "runtime/osThread.hpp" 52 #include "runtime/perfMemory.hpp" 53 #include "runtime/sharedRuntime.hpp" 54 #include "runtime/statSampler.hpp" 55 #include "runtime/stubRoutines.hpp" 56 #include "runtime/thread.inline.hpp" 57 #include "runtime/threadCritical.hpp" 58 #include "runtime/timer.hpp" 59 #include "services/attachListener.hpp" 60 #include "services/memTracker.hpp" 61 #include "services/runtimeService.hpp" 62 #include "utilities/decoder.hpp" 63 #include "utilities/defaultStream.hpp" 64 #include "utilities/events.hpp" 65 #include "utilities/growableArray.hpp" 66 #include "utilities/vmError.hpp" 67 68 // put OS-includes here 69 # include <dlfcn.h> 70 # include <errno.h> 71 # include <exception> 72 # include <link.h> 73 # include <poll.h> 74 # include <pthread.h> 75 # include <pwd.h> 76 # include <schedctl.h> 77 # include <setjmp.h> 78 # include <signal.h> 79 # include <stdio.h> 80 # include <alloca.h> 81 # include <sys/filio.h> 82 # include <sys/ipc.h> 83 # include <sys/lwp.h> 84 # include <sys/machelf.h> // 
for elf Sym structure used by dladdr1 85 # include <sys/mman.h> 86 # include <sys/processor.h> 87 # include <sys/procset.h> 88 # include <sys/pset.h> 89 # include <sys/resource.h> 90 # include <sys/shm.h> 91 # include <sys/socket.h> 92 # include <sys/stat.h> 93 # include <sys/systeminfo.h> 94 # include <sys/time.h> 95 # include <sys/times.h> 96 # include <sys/types.h> 97 # include <sys/wait.h> 98 # include <sys/utsname.h> 99 # include <thread.h> 100 # include <unistd.h> 101 # include <sys/priocntl.h> 102 # include <sys/rtpriocntl.h> 103 # include <sys/tspriocntl.h> 104 # include <sys/iapriocntl.h> 105 # include <sys/fxpriocntl.h> 106 # include <sys/loadavg.h> 107 # include <string.h> 108 # include <stdio.h> 109 110 # define _STRUCTURED_PROC 1 // this gets us the new structured proc interfaces of 5.6 & later 111 # include <sys/procfs.h> // see comment in <sys/procfs.h> 112 113 #define MAX_PATH (2 * K) 114 115 // for timer info max values which include all bits 116 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 117 118 #ifdef _GNU_SOURCE 119 // See bug #6514594 120 extern "C" int madvise(caddr_t, size_t, int); 121 extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg, 122 int attr, int mask); 123 #endif //_GNU_SOURCE 124 125 /* 126 MPSS Changes Start. 127 The JVM binary needs to be built and run on pre-Solaris 9 128 systems, but the constants needed by MPSS are only in Solaris 9 129 header files. They are textually replicated here to allow 130 building on earlier systems. Once building on Solaris 8 is 131 no longer a requirement, these #defines can be replaced by ordinary 132 system .h inclusion. 133 134 In earlier versions of the JDK and Solaris, we used ISM for large pages. 135 But ISM requires shared memory to achieve this and thus has many caveats. 136 MPSS is a fully transparent and is a cleaner way to get large pages. 
   Although we still require keeping ISM for backward compatibility as well as
   giving the opportunity to use large pages on older systems it is
   recommended that MPSS be used for Solaris 9 and above.

*/

// Fallback declaration of the MC_HAT_ADVISE memcntl() interface for builds
// against pre-Solaris 9 headers (see the MPSS comment above).
#ifndef MC_HAT_ADVISE

struct memcntl_mha {
  uint_t          mha_cmd;        /* command(s) */
  uint_t          mha_flags;
  size_t          mha_pagesize;
};
#define MC_HAT_ADVISE   7       /* advise hat map size */
#define MHA_MAPSIZE_VA  0x1     /* set preferred page size */
#define MAP_ALIGN       0x200   /* addr specifies alignment */

#endif
// MPSS Changes End.


// Here are some liblgrp types from sys/lgrp_user.h to be able to
// compile on older systems without this header file.

#ifndef MADV_ACCESS_LWP
# define  MADV_ACCESS_LWP   7       /* next LWP to access heavily */
#endif
#ifndef MADV_ACCESS_MANY
# define  MADV_ACCESS_MANY  8       /* many processes to access heavily */
#endif

#ifndef LGRP_RSRC_CPU
# define LGRP_RSRC_CPU      0       /* CPU resources */
#endif
#ifndef LGRP_RSRC_MEM
# define LGRP_RSRC_MEM      1       /* memory resources */
#endif

// Some more macros from sys/mman.h that are not present in Solaris 8.

#ifndef MAX_MEMINFO_CNT
/*
 * info_req request type definitions for meminfo
 * request types starting with MEMINFO_V are used for Virtual addresses
 * and should not be mixed with MEMINFO_PLGRP which is targeted for Physical
 * addresses
 */
# define MEMINFO_SHIFT           16
# define MEMINFO_MASK            (0xFF << MEMINFO_SHIFT)
# define MEMINFO_VPHYSICAL       (0x01 << MEMINFO_SHIFT) /* get physical addr */
# define MEMINFO_VLGRP           (0x02 << MEMINFO_SHIFT) /* get lgroup */
# define MEMINFO_VPAGESIZE       (0x03 << MEMINFO_SHIFT) /* size of phys page */
# define MEMINFO_VREPLCNT        (0x04 << MEMINFO_SHIFT) /* no. of replica */
# define MEMINFO_VREPL           (0x05 << MEMINFO_SHIFT) /* physical replica */
# define MEMINFO_VREPL_LGRP      (0x06 << MEMINFO_SHIFT) /* lgrp of replica */
# define MEMINFO_PLGRP           (0x07 << MEMINFO_SHIFT) /* lgroup for paddr */

/* maximum number of addresses meminfo() can process at a time */
# define MAX_MEMINFO_CNT 256

/* maximum number of request types */
# define MAX_MEMINFO_REQ 31
#endif

// see thr_setprio(3T) for the basis of these numbers
#define MinimumPriority 0
#define NormalPriority  64
#define MaximumPriority 127

// Values for ThreadPriorityPolicy == 1
// Index 0 is a sentinel that is never used as a priority; indices 1 and up
// map Java thread priorities onto the 0..127 thr_setprio() range above.
int prio_policy1[CriticalPriority+1] = {
  -99999,  0, 16,  32,  48,  64,
          80, 96, 112, 124, 127, 127 };

// System parameters used internally
static clock_t clock_tics_per_sec = 100;

// Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
static bool enabled_extended_FILE_stdio = false;

// For diagnostics to print a message once.
// see run_periodic_checks
static bool check_addr0_done = false;
static sigset_t check_signal_done;
static bool check_signals = true;

address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo

address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround


// "default" initializers for missing libc APIs
extern "C" {
  // Zero-fill the mutex; used when the real libc routine is unavailable.
  static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }

  // Zero-fill the condition variable; destroy is a no-op.
  static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
  static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
}

// "default" initializers for pthread-based synchronization
extern "C" {
  static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
}

// Forward declaration; converts a wait time into the timespec form used by
// the lwp/pthread wait primitives (definition appears later in this file).
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);

// Thread Local Storage
// This is common to all Solaris platforms so it is defined here,
// in this common file.
// The declarations are in the os_cpu threadLS*.hpp files.
//
// Static member initialization for TLS
Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};

#ifndef PRODUCT
#define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))

int ThreadLocalStorage::_tcacheHit = 0;
int ThreadLocalStorage::_tcacheMiss = 0;

// Dump hit/miss counters for the TLS thread cache (debug builds only).
void ThreadLocalStorage::print_statistics() {
  int total = _tcacheMiss+_tcacheHit;
  tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
                _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
}
#undef _PCT
#endif // PRODUCT

// Slow path for the TLS thread cache: look the current thread up the slow
// way, sanity-check that the current stack pointer lies within that thread's
// stack (unless error reporting is in progress), then prime the cache slot
// so subsequent lookups for this raw id are fast. Returns NULL if no thread
// is attached.
Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
                                                        int index) {
  Thread *thread = get_thread_slow();
  if (thread != NULL) {
    address sp = os::current_stack_pointer();
    guarantee(thread->_stack_base == NULL ||
              (sp <= thread->_stack_base &&
               sp >= thread->_stack_base - thread->_stack_size) ||
              is_error_reported(),
              "sp must be inside of selected thread stack");

    thread->set_self_raw_id(raw_id);  // mark for quick retrieval
    _get_thread_cache[ index ] = thread;
  }
  return thread;
}


// A zeroed, Thread-sized buffer; a cache slot pointing here means
// "no cached thread" without needing a NULL check at every lookup.
static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
#define NO_CACHED_THREAD ((Thread*)all_zero)

// Bind 'thread' (or clear the binding when NULL) to the current OS thread's
// TLS slot and the per-cpu cache.
void ThreadLocalStorage::pd_set_thread(Thread* thread) {

  // Store the new value before updating the cache to prevent a race
  // between get_thread_via_cache_slowly() and this store operation.
  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);

  // Update thread cache with new thread if setting on thread create,
  // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
  uintptr_t raw = pd_raw_thread_id();
  int ix = pd_cache_index(raw);
  _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
}

// Reset every cache slot to the "no cached thread" sentinel.
void ThreadLocalStorage::pd_init() {
  for (int i = 0; i < _pd_cache_size; i++) {
    _get_thread_cache[i] = NO_CACHED_THREAD;
  }
}

// Invalidate all the caches (happens to be the same as pd_init).
void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }

#undef NO_CACHED_THREAD

// END Thread Local Storage

// Clamp a stack size reported by the OS: negative-looking sizes (bug 4759953)
// are replaced with max_intx, and sizes that would wrap below address 0
// (bug 4812466) are clipped to the stack base address.
static inline size_t adjust_stack_size(address base, size_t size) {
  if ((ssize_t)size < 0) {
    // 4759953: Compensate for ridiculous stack size.
    size = max_intx;
  }
  if (size > (size_t)base) {
    // 4812466: Make sure size doesn't allow the stack to wrap the address space.
    size = (size_t)base;
  }
  return size;
}

// Query the current thread's stack segment via thr_stksegment() and sanitize
// the reported size. The asserts confirm the call succeeded and that the
// local variable 'st' actually lies within the reported stack bounds.
static inline stack_t get_stack_info() {
  stack_t st;
  int retval = thr_stksegment(&st);
  st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
  assert(retval == 0, "incorrect return value from thr_stksegment");
  assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
  assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
  return st;
}

// Return the stack base of the calling thread. For the primordial thread the
// first result is cached in _main_stack_base and reused thereafter.
address os::current_stack_base() {
  int r = thr_main() ;
  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  bool is_primordial_thread = r;

  // Workaround 4352906, avoid calls to thr_stksegment by
  // thr_main after the first one (it looks like we trash
  // some data, causing the value for ss_sp to be incorrect).
  if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
    stack_t st = get_stack_info();
    if (is_primordial_thread) {
      // cache initial value of stack base
      os::Solaris::_main_stack_base = (address)st.ss_sp;
    }
    return (address)st.ss_sp;
  } else {
    guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
    return os::Solaris::_main_stack_base;
  }
}

// Return the usable stack size of the calling thread, page-aligned at the
// bottom. The primordial thread's size comes from RLIMIT_STACK; other
// threads use thr_stksegment() via get_stack_info().
size_t os::current_stack_size() {
  size_t size;

  int r = thr_main() ;
  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  if(!r) {
    size = get_stack_info().ss_size;
  } else {
    struct rlimit limits;
    getrlimit(RLIMIT_STACK, &limits);
    size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
  }
  // base may not be page aligned
  address base = current_stack_base();
  address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());;
  return (size_t)(base - bottom);
}

// Thread-safe localtime: localtime_r writes into the caller-supplied buffer.
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

// interruptible infrastructure

// setup_interruptible saves the thread state before going into an
// interruptible system call.
// The saved state is used to restore the thread to
// its former state whether or not an interrupt is received.
// Used by classloader os::read
// os::restartable_read calls skip this layer and stay in _thread_in_native

// Save the current Java thread state on the OSThread and transition the
// thread to _thread_blocked so the VM treats the interruptible syscall as a
// blocking operation. Must not be entered from _thread_blocked or
// _thread_in_native (asserted below).
void os::Solaris::setup_interruptible(JavaThread* thread) {

  JavaThreadState thread_state = thread->thread_state();

  assert(thread_state != _thread_blocked, "Coming from the wrong thread");
  assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
  OSThread* osthread = thread->osthread();
  osthread->set_saved_interrupt_thread_state(thread_state);
  // Make the stack walkable before blocking so GC/stack walking can proceed.
  thread->frame_anchor()->make_walkable(thread);
  ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
}

// Version of setup_interruptible() for threads that are already in
// _thread_blocked. Used by os_sleep().
void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) {
  thread->frame_anchor()->make_walkable(thread);
}

// Convenience overload: fetch the current JavaThread from TLS, set it up,
// and return it so the caller can later pass it to cleanup_interruptible().
JavaThread* os::Solaris::setup_interruptible() {
  JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
  setup_interruptible(thread);
  return thread;
}

// Opt in to the extended FILE facility (>256 fds for stdio) on Solaris
// 10u4+ by looking the enabler up dynamically; silently does nothing if the
// symbol is absent or UseExtendedFileIO is off.
void os::Solaris::try_enable_extended_io() {
  typedef int (*enable_extended_FILE_stdio_t)(int, int);

  if (!UseExtendedFileIO) {
    return;
  }

  enable_extended_FILE_stdio_t enabler =
    (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
                                         "enable_extended_FILE_stdio");
  if (enabler) {
    enabler(-1, -1);
  }
}


#ifdef ASSERT

// Debug-only counterpart of setup_interruptible() for threads that stay in
// _thread_in_native; only asserts the expected state, performs no transition.
JavaThread* os::Solaris::setup_interruptible_native() {
  JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
  JavaThreadState thread_state = thread->thread_state();
  assert(thread_state == _thread_in_native, "Assumed thread_in_native");
  return thread;
}

// Debug-only check that the thread is still in _thread_in_native afterwards.
void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
  JavaThreadState thread_state = thread->thread_state();
  assert(thread_state == _thread_in_native, "Assumed thread_in_native");
}
#endif
// cleanup_interruptible reverses the effects of setup_interruptible
// setup_interruptible_already_blocked() does not need any cleanup.

void os::Solaris::cleanup_interruptible(JavaThread* thread) {
  OSThread* osthread = thread->osthread();

  // Restore the state saved by setup_interruptible().
  ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
}

// I/O interruption related counters called in _INTERRUPTIBLE

void os::Solaris::bump_interrupted_before_count() {
  RuntimeService::record_interrupted_before_count();
}

void os::Solaris::bump_interrupted_during_count() {
  RuntimeService::record_interrupted_during_count();
}

static int _processors_online = 0;

jint os::Solaris::_os_thread_limit = 0;
volatile jint os::Solaris::_os_thread_count = 0;

julong os::available_memory() {
  return Solaris::available_memory();
}

// Available physical memory = free pages * page size.
julong os::Solaris::available_memory() {
  return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
}

julong os::Solaris::_physical_memory = 0;

julong os::physical_memory() {
  return Solaris::physical_memory();
}

static hrtime_t first_hrtime = 0;
static const hrtime_t hrtime_hz = 1000*1000*1000;
const int LOCK_BUSY = 1;
const int LOCK_FREE = 0;
const int LOCK_INVALID = -1;
static volatile hrtime_t max_hrtime = 0;
static volatile int max_hrtime_lock = LOCK_FREE;  // Update counter with LSB as lock-in-progress


// Cache processor count and physical memory size from sysconf().
void os::Solaris::initialize_system_info() {
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
  _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
}

// Number of processors the process may actually run on: the size of the
// processor set the process is bound to, if any, otherwise the number of
// online cpus. Also refreshes _processors_online when a pset is found.
int os::active_processor_count() {
  int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
  pid_t pid = getpid();
  psetid_t pset = PS_NONE;
  // Are we running in a processor set or is there any processor set around?
  if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
    uint_t pset_cpus;
    // Query the number of cpus available to us.
    if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
      assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
      _processors_online = pset_cpus;
      return pset_cpus;
    }
  }
  // Otherwise return number of online cpus
  return online_cpus;
}

// Fill *id_array (C-heap allocated; caller frees) with the ids of the
// processors in 'pset' and *id_length with their count. Returns false if the
// pset could not be queried; note the array may have been allocated even on
// a false return if only the second pset_info() call failed.
static bool find_processors_in_pset(psetid_t pset,
                                    processorid_t** id_array,
                                    uint_t* id_length) {
  bool result = false;
  // Find the number of processors in the processor set.
  if (pset_info(pset, NULL, id_length, NULL) == 0) {
    // Make up an array to hold their ids.
    *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
    // Fill in the array with their processor ids.
    if (pset_info(pset, NULL, id_length, *id_array) == 0) {
      result = true;
    }
  }
  return result;
}

// Callers of find_processors_online() must tolerate imprecise results --
// the system configuration can change asynchronously because of DR
// or explicit psradm operations.
//
// We also need to take care that the loop (below) terminates as the
// number of processors online can change between the _SC_NPROCESSORS_ONLN
// request and the loop that builds the list of processor ids. Unfortunately
// there's no reliable way to determine the maximum valid processor id,
// so we use a manifest constant, MAX_PROCESSOR_ID, instead. See p_online
// man pages, which claim the processor id set is "sparse, but
// not too sparse". MAX_PROCESSOR_ID is used to ensure that we eventually
// exit the loop.
//
// In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
// not available on S8.0.

static bool find_processors_online(processorid_t** id_array,
                                   uint* id_length) {
  const processorid_t MAX_PROCESSOR_ID = 100000 ;
  // Find the number of processors online.
  *id_length = sysconf(_SC_NPROCESSORS_ONLN);
  // Make up an array to hold their ids.
  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
  // Processors need not be numbered consecutively.
  long found = 0;
  processorid_t next = 0;
  while (found < *id_length && next < MAX_PROCESSOR_ID) {
    processor_info_t info;
    if (processor_info(next, &info) == 0) {
      // NB, PI_NOINTR processors are effectively online ...
      if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
        (*id_array)[found] = next;
        found += 1;
      }
    }
    next += 1;
  }
  if (found < *id_length) {
    // The loop above didn't identify the expected number of processors.
    // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
    // and re-running the loop, above, but there's no guarantee of progress
    // if the system configuration is in flux. Instead, we just return what
    // we've got. Note that in the worst case find_processors_online() could
    // return an empty set. (As a fall-back in the case of the empty set we
    // could just return the ID of the current processor).
    *id_length = found ;
  }

  return true;
}

// Pick distribution_length processor ids out of id_array, spreading them
// across "boards" of ProcessDistributionStride processors each. Fails only
// if fewer processors are available than requested.
static bool assign_distribution(processorid_t* id_array,
                                uint id_length,
                                uint* distribution,
                                uint distribution_length) {
  // We assume we can assign processorid_t's to uint's.
  assert(sizeof(processorid_t) == sizeof(uint),
         "can't convert processorid_t to uint");
  // Quick check to see if we won't succeed.
  if (id_length < distribution_length) {
    return false;
  }
  // Assign processor ids to the distribution.
  // Try to shuffle processors to distribute work across boards,
  // assuming 4 processors per board.
  const uint processors_per_board = ProcessDistributionStride;
  // Find the maximum processor id.
  processorid_t max_id = 0;
  for (uint m = 0; m < id_length; m += 1) {
    max_id = MAX2(max_id, id_array[m]);
  }
  // The next id, to limit loops.
  const processorid_t limit_id = max_id + 1;
  // Make up markers for available processors.
  bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
  for (uint c = 0; c < limit_id; c += 1) {
    available_id[c] = false;
  }
  for (uint a = 0; a < id_length; a += 1) {
    available_id[id_array[a]] = true;
  }
  // Step by "boards", then by "slot", copying to "assigned".
  // NEEDS_CLEANUP: The assignment of processors should be stateful,
  //                remembering which processors have been assigned by
  //                previous calls, etc., so as to distribute several
  //                independent calls of this method. It would be nice to
  //                have an API that let us ask how many processes are bound
  //                to a processor, but we don't have that, either.
  //                In the short term, "board" is static so that
  //                subsequent distributions don't all start at board 0.
  static uint board = 0;
  uint assigned = 0;
  // Until we've found enough processors ....
  while (assigned < distribution_length) {
    // ... find the next available processor in the board.
    for (uint slot = 0; slot < processors_per_board; slot += 1) {
      uint try_id = board * processors_per_board + slot;
      if ((try_id < limit_id) && (available_id[try_id] == true)) {
        distribution[assigned] = try_id;
        available_id[try_id] = false;
        assigned += 1;
        break;
      }
    }
    board += 1;
    if (board * processors_per_board + 0 >= limit_id) {
      board = 0;
    }
  }
  if (available_id != NULL) {
    FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
  }
  return true;
}

void os::set_native_thread_name(const char *name) {
  // Not yet implemented.
  return;
}

// Fill 'distribution' with 'length' processor ids chosen from the processors
// currently available to this process (its pset if bound, else all online).
// Returns false if not enough processors are available. The results are
// inherently racy against dynamic reconfiguration; see the comments above.
bool os::distribute_processes(uint length, uint* distribution) {
  bool result = false;
  // Find the processor id's of all the available CPUs.
  processorid_t* id_array = NULL;
  uint id_length = 0;
  // There are some races between querying information and using it,
  // since processor sets can change dynamically.
  psetid_t pset = PS_NONE;
  // Are we running in a processor set?
  if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
    result = find_processors_in_pset(pset, &id_array, &id_length);
  } else {
    result = find_processors_online(&id_array, &id_length);
  }
  if (result == true) {
    if (id_length >= length) {
      result = assign_distribution(id_array, id_length, distribution, length);
    } else {
      result = false;
    }
  }
  if (id_array != NULL) {
    FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
  }
  return result;
}

// Bind the current LWP to the given processor; true on success.
bool os::bind_to_processor(uint processor_id) {
  // We assume that a processorid_t can be stored in a uint.
  assert(sizeof(uint) == sizeof(processorid_t),
         "can't convert uint to processorid_t");
  int bind_result =
    processor_bind(P_LWPID,                       // bind LWP.
                   P_MYID,                        // bind current LWP.
                   (processorid_t) processor_id,  // id.
                   NULL);                         // don't return old binding.
  return (bind_result == 0);
}

// Copy environment variable 'name' into 'buffer' (capacity 'len').
// Returns false — and stores an empty string if len > 0 — when the variable
// is unset or does not fit including its NUL terminator.
bool os::getenv(const char* name, char* buffer, int len) {
  char* val = ::getenv( name );
  if ( val == NULL
  ||   strlen(val) + 1  >  len ) {
    if (len > 0)  buffer[0] = 0; // return a null string
    return false;
  }
  strcpy( buffer, val );
  return true;
}


// Return true if user is running as root.
// NOTE(review): despite the comment above, this actually detects a
// setuid/setgid situation (effective ids differing from real ids), not
// literally "running as root" — confirm which semantics callers rely on.
bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}


// Derive and publish the system properties java.home, sun.boot.library.path,
// java.library.path, java.ext.dirs and java.endorsed.dirs from the location
// of libjvm.so and the runtime linker's search path.
void os::init_system_properties_values() {
  char arch[12];
  sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));

  // The next steps are taken in the product version:
  //
  // Obtain the JAVA_HOME value from the location of libjvm.so.
  // This library should be located at:
  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
  //
  // If "/jre/lib/" appears at the right place in the path, then we
  // assume libjvm.so is installed in a JDK and we use this path.
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path
  // instead of exit check for $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
  // then we append a fake suffix "hotspot/libjvm.so" to this path so
  // it looks like libjvm.so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

  // The next few definitions allow the code to be verbatim:
  // (these shadow the real malloc/free/getenv for the rest of this
  // function only; C-heap allocation is used underneath)
#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
#define free(p) FREE_C_HEAP_ARRAY(char, p, mtInternal)
#define getenv(n) ::getenv(n)

#define EXTENSIONS_DIR "/lib/ext"
#define ENDORSED_DIR "/lib/endorsed"
#define COMMON_DIR "/usr/jdk/packages"

  {
    /* sysclasspath, java_home, dll_dir */
    {
        char *home_path;
        char *dll_path;
        char *pslash;
        char buf[MAXPATHLEN];
        os::jvm_path(buf, sizeof(buf));

        // Found the full path to libjvm.so.
        // Now cut the path to <java_home>/jre if we can.
        *(strrchr(buf, '/')) = '\0';  /* get rid of /libjvm.so */
        pslash = strrchr(buf, '/');
        if (pslash != NULL)
            *pslash = '\0';           /* get rid of /{client|server|hotspot} */
        dll_path = malloc(strlen(buf) + 1);
        if (dll_path == NULL)
            return;
        strcpy(dll_path, buf);
        Arguments::set_dll_dir(dll_path);

        if (pslash != NULL) {
            pslash = strrchr(buf, '/');
            if (pslash != NULL) {
                *pslash = '\0';       /* get rid of /<arch> */
                pslash = strrchr(buf, '/');
                if (pslash != NULL)
                    *pslash = '\0';   /* get rid of /lib */
            }
        }

        home_path = malloc(strlen(buf) + 1);
        if (home_path == NULL)
            return;
        strcpy(home_path, buf);
        Arguments::set_java_home(home_path);

        if (!set_boot_path('/', ':'))
            return;
    }

    /*
     * Where to look for native libraries
     */
    {
      // Use dlinfo() to determine the correct java.library.path.
      //
      // If we're launched by the Java launcher, and the user
      // does not set java.library.path explicitly on the commandline,
      // the Java launcher sets LD_LIBRARY_PATH for us and unsets
      // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case
      // dlinfo returns LD_LIBRARY_PATH + crle settings (including
      // /usr/lib), which is exactly what we want.
      //
      // If the user does set java.library.path, it completely
      // overwrites this setting, and always has. 
      //
      // If we're not launched by the Java launcher, we may
      // get here with any/all of the LD_LIBRARY_PATH[_32|64]
      // settings. Again, dlinfo does exactly what we want.

      Dl_serinfo     _info, *info = &_info;
      Dl_serpath     *path;
      char*          library_path;
      char           *common_path;
      int            i;

      // determine search path count and required buffer size
      if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
        vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
      }

      // allocate new buffer and initialize
      info = (Dl_serinfo*)malloc(_info.dls_size);
      if (info == NULL) {
        vm_exit_out_of_memory(_info.dls_size, OOM_MALLOC_ERROR,
                              "init_system_properties_values info");
      }
      info->dls_size = _info.dls_size;
      info->dls_cnt = _info.dls_cnt;

      // obtain search path information
      if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
        free(info);
        vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
      }

      path = &info->dls_serpath[0];

      // Note: Due to a legacy implementation, most of the library path
      // is set in the launcher. This was to accomodate linking restrictions
      // on legacy Solaris implementations (which are no longer supported).
      // Eventually, all the library path setting will be done here.
      //
      // However, to prevent the proliferation of improperly built native
      // libraries, the new path component /usr/jdk/packages is added here.

      // Determine the actual CPU architecture.
      char cpu_arch[12];
      sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
      // If we are a 64-bit vm, perform the following translations:
      //   sparc   -> sparcv9
      //   i386    -> amd64
      if (strcmp(cpu_arch, "sparc") == 0)
        strcat(cpu_arch, "v9");
      else if (strcmp(cpu_arch, "i386") == 0)
        strcpy(cpu_arch, "amd64");
#endif

      // Construct the invariant part of ld_library_path. Note that the
      // space for the colon and the trailing null are provided by the
      // nulls included by the sizeof operator.
      size_t bufsize = sizeof(COMMON_DIR) + sizeof("/lib/") + strlen(cpu_arch);
      common_path = malloc(bufsize);
      if (common_path == NULL) {
        free(info);
        vm_exit_out_of_memory(bufsize, OOM_MALLOC_ERROR,
                              "init_system_properties_values common_path");
      }
      sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch);

      // struct size is more than sufficient for the path components obtained
      // through the dlinfo() call, so only add additional space for the path
      // components explicitly added here.
      bufsize = info->dls_size + strlen(common_path);
      library_path = malloc(bufsize);
      if (library_path == NULL) {
        free(info);
        free(common_path);
        vm_exit_out_of_memory(bufsize, OOM_MALLOC_ERROR,
                              "init_system_properties_values library_path");
      }
      library_path[0] = '\0';

      // Construct the desired Java library path from the linker's library
      // search path.
      //
      // For compatibility, it is optimal that we insert the additional path
      // components specific to the Java VM after those components specified
      // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
      // infrastructure.
      if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it
        strcpy(library_path, common_path);
      } else {
        int inserted = 0;
        for (i = 0; i < info->dls_cnt; i++, path++) {
          uint_t flags = path->dls_flags & LA_SER_MASK;
          // Insert our common dir just before the first non-LD_LIBRARY_PATH entry.
          if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
            strcat(library_path, common_path);
            strcat(library_path, os::path_separator());
            inserted = 1;
          }
          strcat(library_path, path->dls_name);
          strcat(library_path, os::path_separator());
        }
        // eliminate trailing path separator
        library_path[strlen(library_path)-1] = '\0';
      }

      // happens before argument parsing - can't use a trace flag
      // tty->print_raw("init_system_properties_values: native lib path: ");
      // tty->print_raw_cr(library_path);

      // callee copies into its own buffer
      Arguments::set_library_path(library_path);

      free(common_path);
      free(library_path);
      free(info);
    }

    /*
     * Extensions directories.
     *
     * Note that the space for the colon and the trailing null are provided
     * by the nulls included by the sizeof operator (so actually one byte more
     * than necessary is allocated).
     */
    {
        char *buf = (char *) malloc(strlen(Arguments::get_java_home()) +
            sizeof(EXTENSIONS_DIR) + sizeof(COMMON_DIR) +
            sizeof(EXTENSIONS_DIR));
        sprintf(buf, "%s" EXTENSIONS_DIR ":" COMMON_DIR EXTENSIONS_DIR,
            Arguments::get_java_home());
        Arguments::set_ext_dirs(buf);
    }

    /* Endorsed standards default directory. */
    {
      char * buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
      sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
      Arguments::set_endorsed_dirs(buf);
    }
  }

#undef malloc
#undef free
#undef getenv
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
#undef COMMON_DIR

}

void os::breakpoint() {
  BREAKPOINT;
}

// Recognize VM options that are accepted but ignored for compatibility.
// NOTE(review): the "-Xt" prefix test already matches any "-Xtm" string, so
// the second branch is unreachable; harmless since both return true.
bool os::obsolete_option(const JavaVMOption *option)
{
  if (!strncmp(option->optionString, "-Xt", 3)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xtm", 4)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
    return true;
  }
  return false;
}

// True when sp lies strictly below the stack base and within stack_size
// bytes of it. NOTE(review): sp == stack_base itself is rejected — confirm
// callers expect the base address to be treated as out of range.
bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
  address stackStart = (address)thread->stack_base();
  address stackEnd = (address)(stackStart - (address)thread->stack_size());
  if (sp < stackStart && sp >= stackEnd ) return true;
  return false;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

static thread_t main_thread;

// Thread start routine for all new Java threads
extern "C" void* java_start(void* thread_addr) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
997 static int counter = 0; 998 int pid = os::current_process_id(); 999 alloca(((pid ^ counter++) & 7) * 128); 1000 1001 int prio; 1002 Thread* thread = (Thread*)thread_addr; 1003 OSThread* osthr = thread->osthread(); 1004 1005 osthr->set_lwp_id( _lwp_self() ); // Store lwp in case we are bound 1006 thread->_schedctl = (void *) schedctl_init () ; 1007 1008 if (UseNUMA) { 1009 int lgrp_id = os::numa_get_group_id(); 1010 if (lgrp_id != -1) { 1011 thread->set_lgrp_id(lgrp_id); 1012 } 1013 } 1014 1015 // If the creator called set priority before we started, 1016 // we need to call set_native_priority now that we have an lwp. 1017 // We used to get the priority from thr_getprio (we called 1018 // thr_setprio way back in create_thread) and pass it to 1019 // set_native_priority, but Solaris scales the priority 1020 // in java_to_os_priority, so when we read it back here, 1021 // we pass trash to set_native_priority instead of what's 1022 // in java_to_os_priority. So we save the native priority 1023 // in the osThread and recall it here. 
1024 1025 if ( osthr->thread_id() != -1 ) { 1026 if ( UseThreadPriorities ) { 1027 int prio = osthr->native_priority(); 1028 if (ThreadPriorityVerbose) { 1029 tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is " 1030 INTPTR_FORMAT ", setting priority: %d\n", 1031 osthr->thread_id(), osthr->lwp_id(), prio); 1032 } 1033 os::set_native_priority(thread, prio); 1034 } 1035 } else if (ThreadPriorityVerbose) { 1036 warning("Can't set priority in _start routine, thread id hasn't been set\n"); 1037 } 1038 1039 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 1040 1041 // initialize signal mask for this thread 1042 os::Solaris::hotspot_sigmask(thread); 1043 1044 thread->run(); 1045 1046 // One less thread is executing 1047 // When the VMThread gets here, the main thread may have already exited 1048 // which frees the CodeHeap containing the Atomic::dec code 1049 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 1050 Atomic::dec(&os::Solaris::_os_thread_count); 1051 } 1052 1053 if (UseDetachedThreads) { 1054 thr_exit(NULL); 1055 ShouldNotReachHere(); 1056 } 1057 return NULL; 1058 } 1059 1060 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) { 1061 // Allocate the OSThread object 1062 OSThread* osthread = new OSThread(NULL, NULL); 1063 if (osthread == NULL) return NULL; 1064 1065 // Store info on the Solaris thread into the OSThread 1066 osthread->set_thread_id(thread_id); 1067 osthread->set_lwp_id(_lwp_self()); 1068 thread->_schedctl = (void *) schedctl_init () ; 1069 1070 if (UseNUMA) { 1071 int lgrp_id = os::numa_get_group_id(); 1072 if (lgrp_id != -1) { 1073 thread->set_lgrp_id(lgrp_id); 1074 } 1075 } 1076 1077 if ( ThreadPriorityVerbose ) { 1078 tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n", 1079 osthread->thread_id(), osthread->lwp_id() ); 1080 } 1081 1082 // Initial thread state is INITIALIZED, not SUSPENDED 1083 osthread->set_state(INITIALIZED); 1084 1085 
return osthread; 1086 } 1087 1088 void os::Solaris::hotspot_sigmask(Thread* thread) { 1089 1090 //Save caller's signal mask 1091 sigset_t sigmask; 1092 thr_sigsetmask(SIG_SETMASK, NULL, &sigmask); 1093 OSThread *osthread = thread->osthread(); 1094 osthread->set_caller_sigmask(sigmask); 1095 1096 thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL); 1097 if (!ReduceSignalUsage) { 1098 if (thread->is_VM_thread()) { 1099 // Only the VM thread handles BREAK_SIGNAL ... 1100 thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL); 1101 } else { 1102 // ... all other threads block BREAK_SIGNAL 1103 assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked"); 1104 thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL); 1105 } 1106 } 1107 } 1108 1109 bool os::create_attached_thread(JavaThread* thread) { 1110 #ifdef ASSERT 1111 thread->verify_not_published(); 1112 #endif 1113 OSThread* osthread = create_os_thread(thread, thr_self()); 1114 if (osthread == NULL) { 1115 return false; 1116 } 1117 1118 // Initial thread state is RUNNABLE 1119 osthread->set_state(RUNNABLE); 1120 thread->set_osthread(osthread); 1121 1122 // initialize signal mask for this thread 1123 // and save the caller's signal mask 1124 os::Solaris::hotspot_sigmask(thread); 1125 1126 return true; 1127 } 1128 1129 bool os::create_main_thread(JavaThread* thread) { 1130 #ifdef ASSERT 1131 thread->verify_not_published(); 1132 #endif 1133 if (_starting_thread == NULL) { 1134 _starting_thread = create_os_thread(thread, main_thread); 1135 if (_starting_thread == NULL) { 1136 return false; 1137 } 1138 } 1139 1140 // The primodial thread is runnable from the start 1141 _starting_thread->set_state(RUNNABLE); 1142 1143 thread->set_osthread(_starting_thread); 1144 1145 // initialize signal mask for this thread 1146 // and save the caller's signal mask 1147 os::Solaris::hotspot_sigmask(thread); 1148 1149 return true; 1150 } 1151 1152 // _T2_libthread is true if we believe we are running with the newer 1153 // 
// SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
bool os::Solaris::_T2_libthread = false;

// Create a new native thread to run 'thread'. The thread is created
// THR_SUSPENDED and is started later via os::pd_start_thread().
// 'stack_size' == 0 means "use the type-appropriate default".
// Returns false (and frees the OSThread) on any failure.
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  if ( ThreadPriorityVerbose ) {
    char *thrtyp;
    switch ( thr_type ) {
      case vm_thread:
        thrtyp = (char *)"vm";
        break;
      case cgc_thread:
        thrtyp = (char *)"cgc";
        break;
      case pgc_thread:
        thrtyp = (char *)"pgc";
        break;
      case java_thread:
        thrtyp = (char *)"java";
        break;
      case compiler_thread:
        thrtyp = (char *)"compiler";
        break;
      case watcher_thread:
        thrtyp = (char *)"watcher";
        break;
      default:
        thrtyp = (char *)"unknown";
        break;
    }
    tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  }

  // Calculate stack size if it's not specified by caller.
  if (stack_size == 0) {
    // The default stack size 1M (2M for LP64).
    // (BytesPerWord >> 2) is 1 on 32-bit and 2 on 64-bit.
    stack_size = (BytesPerWord >> 2) * K * K;

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }
  stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We got lots of threads. Check if we still have some address space left.
    // Need to be at least 5Mb of unreserved address space. We do check by
    // trying to reserve some.
    const size_t VirtualMemoryBangSize = 20*K*K;
    char* mem = os::reserve_memory(VirtualMemoryBangSize);
    if (mem == NULL) {
      delete osthread;
      return false;
    } else {
      // Release the memory again
      os::release_memory(mem, VirtualMemoryBangSize);
    }
  }

  // Setup osthread because the child thread may need it.
  thread->set_osthread(osthread);

  // Create the Solaris thread
  // explicit THR_BOUND for T2_libthread case in case
  // that assumption is not accurate, but our alternate signal stack
  // handling is based on it which must have bound threads
  thread_t tid = 0;
  long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
               | ((UseBoundThreads || os::Solaris::T2_libthread() ||
                   (thr_type == vm_thread) ||
                   (thr_type == cgc_thread) ||
                   (thr_type == pgc_thread) ||
                   (thr_type == compiler_thread && BackgroundCompilation)) ?
                  THR_BOUND : 0);
  int status;

  // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
  //
  // On multiprocessors systems, libthread sometimes under-provisions our
  // process with LWPs. On a 30-way systems, for instance, we could have
  // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
  // to our process. This can result in under utilization of PEs.
  // I suspect the problem is related to libthread's LWP
  // pool management and to the kernel's SIGBLOCKING "last LWP parked"
  // upcall policy.
  //
  // The following code is palliative -- it attempts to ensure that our
  // process has sufficient LWPs to take advantage of multiple PEs.
  // Proper long-term cures include using user-level threads bound to LWPs
  // (THR_BOUND) or using LWP-based synchronization. Note that there is a
  // slight timing window with respect to sampling _os_thread_count, but
  // the race is benign. Also, we should periodically recompute
  // _processors_online as the min of SC_NPROCESSORS_ONLN and the
  // the number of PEs in our partition. You might be tempted to use
  // THR_NEW_LWP here, but I'd recommend against it as that could
  // result in undesirable growth of the libthread's LWP pool.
  // The fix below isn't sufficient; for instance, it doesn't take into count
  // LWPs parked on IO. It does, however, help certain CPU-bound benchmarks.
  //
  // Some pathologies this scheme doesn't handle:
  // * Threads can block, releasing the LWPs. The LWPs can age out.
  //   When a large number of threads become ready again there aren't
  //   enough LWPs available to service them. This can occur when the
  //   number of ready threads oscillates.
  // * LWPs/Threads park on IO, thus taking the LWP out of circulation.
  //
  // Finally, we should call thr_setconcurrency() periodically to refresh
  // the LWP pool and thwart the LWP age-out mechanism.
  // The "+3" term provides a little slop -- we want to slightly overprovision.

  if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
    if (!(flags & THR_BOUND)) {
      thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
    }
  }
  // Although this doesn't hurt, we should warn of undefined behavior
  // when using unbound T1 threads with schedctl().  This should never
  // happen, as the compiler and VM threads are always created bound
  DEBUG_ONLY(
      if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
          (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
          ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
           (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
        warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
      }
  );


  // Mark that we don't have an lwp or thread id yet.
  // In case we attempt to set the priority before the thread starts.
  osthread->set_lwp_id(-1);
  osthread->set_thread_id(-1);

  status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  if (status != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("os::create_thread");
    }
    thread->set_osthread(NULL);
    // Need to clean up stuff we've allocated so far
    delete osthread;
    return false;
  }

  Atomic::inc(&os::Solaris::_os_thread_count);

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(tid);

  // Remember that we created this thread so we can set priority on it
  osthread->set_vm_created();

  // Set the default thread priority. If using bound threads, setting
  // lwp priority will be delayed until thread start.
  set_native_priority(thread,
                      DefaultThreadPriority == -1 ?
                        java_to_os_priority[NormPriority] :
                        DefaultThreadPriority);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}

/* defined for >= Solaris 10.
This allows builds on earlier versions 1343 * of Solaris to take advantage of the newly reserved Solaris JVM signals 1344 * With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2 1345 * and -XX:+UseAltSigs does nothing since these should have no conflict 1346 */ 1347 #if !defined(SIGJVM1) 1348 #define SIGJVM1 39 1349 #define SIGJVM2 40 1350 #endif 1351 1352 debug_only(static bool signal_sets_initialized = false); 1353 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs; 1354 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL; 1355 int os::Solaris::_SIGasync = ASYNC_SIGNAL; 1356 1357 bool os::Solaris::is_sig_ignored(int sig) { 1358 struct sigaction oact; 1359 sigaction(sig, (struct sigaction*)NULL, &oact); 1360 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction) 1361 : CAST_FROM_FN_PTR(void*, oact.sa_handler); 1362 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) 1363 return true; 1364 else 1365 return false; 1366 } 1367 1368 // Note: SIGRTMIN is a macro that calls sysconf() so it will 1369 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime 1370 static bool isJVM1available() { 1371 return SIGJVM1 < SIGRTMIN; 1372 } 1373 1374 void os::Solaris::signal_sets_init() { 1375 // Should also have an assertion stating we are still single-threaded. 1376 assert(!signal_sets_initialized, "Already initialized"); 1377 // Fill in signals that are necessarily unblocked for all threads in 1378 // the VM. Currently, we unblock the following signals: 1379 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden 1380 // by -Xrs (=ReduceSignalUsage)); 1381 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all 1382 // other threads. The "ReduceSignalUsage" boolean tells us not to alter 1383 // the dispositions or masks wrt these signals. 
1384 // Programs embedding the VM that want to use the above signals for their 1385 // own purposes must, at this time, use the "-Xrs" option to prevent 1386 // interference with shutdown hooks and BREAK_SIGNAL thread dumping. 1387 // (See bug 4345157, and other related bugs). 1388 // In reality, though, unblocking these signals is really a nop, since 1389 // these signals are not blocked by default. 1390 sigemptyset(&unblocked_sigs); 1391 sigemptyset(&allowdebug_blocked_sigs); 1392 sigaddset(&unblocked_sigs, SIGILL); 1393 sigaddset(&unblocked_sigs, SIGSEGV); 1394 sigaddset(&unblocked_sigs, SIGBUS); 1395 sigaddset(&unblocked_sigs, SIGFPE); 1396 1397 if (isJVM1available) { 1398 os::Solaris::set_SIGinterrupt(SIGJVM1); 1399 os::Solaris::set_SIGasync(SIGJVM2); 1400 } else if (UseAltSigs) { 1401 os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL); 1402 os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL); 1403 } else { 1404 os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL); 1405 os::Solaris::set_SIGasync(ASYNC_SIGNAL); 1406 } 1407 1408 sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt()); 1409 sigaddset(&unblocked_sigs, os::Solaris::SIGasync()); 1410 1411 if (!ReduceSignalUsage) { 1412 if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) { 1413 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL); 1414 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL); 1415 } 1416 if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) { 1417 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL); 1418 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL); 1419 } 1420 if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) { 1421 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL); 1422 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL); 1423 } 1424 } 1425 // Fill in signals that are blocked by all but the VM thread. 
1426 sigemptyset(&vm_sigs); 1427 if (!ReduceSignalUsage) 1428 sigaddset(&vm_sigs, BREAK_SIGNAL); 1429 debug_only(signal_sets_initialized = true); 1430 1431 // For diagnostics only used in run_periodic_checks 1432 sigemptyset(&check_signal_done); 1433 } 1434 1435 // These are signals that are unblocked while a thread is running Java. 1436 // (For some reason, they get blocked by default.) 1437 sigset_t* os::Solaris::unblocked_signals() { 1438 assert(signal_sets_initialized, "Not initialized"); 1439 return &unblocked_sigs; 1440 } 1441 1442 // These are the signals that are blocked while a (non-VM) thread is 1443 // running Java. Only the VM thread handles these signals. 1444 sigset_t* os::Solaris::vm_signals() { 1445 assert(signal_sets_initialized, "Not initialized"); 1446 return &vm_sigs; 1447 } 1448 1449 // These are signals that are blocked during cond_wait to allow debugger in 1450 sigset_t* os::Solaris::allowdebug_blocked_signals() { 1451 assert(signal_sets_initialized, "Not initialized"); 1452 return &allowdebug_blocked_sigs; 1453 } 1454 1455 1456 void _handle_uncaught_cxx_exception() { 1457 VMError err("An uncaught C++ exception"); 1458 err.report_and_die(); 1459 } 1460 1461 1462 // First crack at OS-specific initialization, from inside the new thread. 1463 void os::initialize_thread(Thread* thr) { 1464 int r = thr_main() ; 1465 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 1466 if (r) { 1467 JavaThread* jt = (JavaThread *)thr; 1468 assert(jt != NULL,"Sanity check"); 1469 size_t stack_size; 1470 address base = jt->stack_base(); 1471 if (Arguments::created_by_java_launcher()) { 1472 // Use 2MB to allow for Solaris 7 64 bit mode. 1473 stack_size = JavaThread::stack_size_at_create() == 0 1474 ? 2048*K : JavaThread::stack_size_at_create(); 1475 1476 // There are rare cases when we may have already used more than 1477 // the basic stack size allotment before this method is invoked. 1478 // Attempt to allow for a normally sized java_stack. 
1479 size_t current_stack_offset = (size_t)(base - (address)&stack_size); 1480 stack_size += ReservedSpace::page_align_size_down(current_stack_offset); 1481 } else { 1482 // 6269555: If we were not created by a Java launcher, i.e. if we are 1483 // running embedded in a native application, treat the primordial thread 1484 // as much like a native attached thread as possible. This means using 1485 // the current stack size from thr_stksegment(), unless it is too large 1486 // to reliably setup guard pages. A reasonable max size is 8MB. 1487 size_t current_size = current_stack_size(); 1488 // This should never happen, but just in case.... 1489 if (current_size == 0) current_size = 2 * K * K; 1490 stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size; 1491 } 1492 address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());; 1493 stack_size = (size_t)(base - bottom); 1494 1495 assert(stack_size > 0, "Stack size calculation problem"); 1496 1497 if (stack_size > jt->stack_size()) { 1498 NOT_PRODUCT( 1499 struct rlimit limits; 1500 getrlimit(RLIMIT_STACK, &limits); 1501 size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur); 1502 assert(size >= jt->stack_size(), "Stack size problem in main thread"); 1503 ) 1504 tty->print_cr( 1505 "Stack size of %d Kb exceeds current limit of %d Kb.\n" 1506 "(Stack sizes are rounded up to a multiple of the system page size.)\n" 1507 "See limit(1) to increase the stack size limit.", 1508 stack_size / K, jt->stack_size() / K); 1509 vm_exit(1); 1510 } 1511 assert(jt->stack_size() >= stack_size, 1512 "Attempt to map more stack than was allocated"); 1513 jt->set_stack_size(stack_size); 1514 } 1515 1516 // 5/22/01: Right now alternate signal stacks do not handle 1517 // throwing stack overflow exceptions, see bug 4463178 1518 // Until a fix is found for this, T2 will NOT imply alternate signal 1519 // stacks. 1520 // If using T2 libthread threads, install an alternate signal stack. 
1521 // Because alternate stacks associate with LWPs on Solaris, 1522 // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads 1523 // we prefer to explicitly stack bang. 1524 // If not using T2 libthread, but using UseBoundThreads any threads 1525 // (primordial thread, jni_attachCurrentThread) we do not create, 1526 // probably are not bound, therefore they can not have an alternate 1527 // signal stack. Since our stack banging code is generated and 1528 // is shared across threads, all threads must be bound to allow 1529 // using alternate signal stacks. The alternative is to interpose 1530 // on _lwp_create to associate an alt sig stack with each LWP, 1531 // and this could be a problem when the JVM is embedded. 1532 // We would prefer to use alternate signal stacks with T2 1533 // Since there is currently no accurate way to detect T2 1534 // we do not. Assuming T2 when running T1 causes sig 11s or assertions 1535 // on installing alternate signal stacks 1536 1537 1538 // 05/09/03: removed alternate signal stack support for Solaris 1539 // The alternate signal stack mechanism is no longer needed to 1540 // handle stack overflow. This is now handled by allocating 1541 // guard pages (red zone) and stackbanging. 1542 // Initially the alternate signal stack mechanism was removed because 1543 // it did not work with T1 llibthread. Alternate 1544 // signal stacks MUST have all threads bound to lwps. Applications 1545 // can create their own threads and attach them without their being 1546 // bound under T1. This is frequently the case for the primordial thread. 1547 // If we were ever to reenable this mechanism we would need to 1548 // use the dynamic check for T2 libthread. 
1549 1550 os::Solaris::init_thread_fpu_state(); 1551 std::set_terminate(_handle_uncaught_cxx_exception); 1552 } 1553 1554 1555 1556 // Free Solaris resources related to the OSThread 1557 void os::free_thread(OSThread* osthread) { 1558 assert(osthread != NULL, "os::free_thread but osthread not set"); 1559 1560 1561 // We are told to free resources of the argument thread, 1562 // but we can only really operate on the current thread. 1563 // The main thread must take the VMThread down synchronously 1564 // before the main thread exits and frees up CodeHeap 1565 guarantee((Thread::current()->osthread() == osthread 1566 || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread"); 1567 if (Thread::current()->osthread() == osthread) { 1568 // Restore caller's signal mask 1569 sigset_t sigmask = osthread->caller_sigmask(); 1570 thr_sigsetmask(SIG_SETMASK, &sigmask, NULL); 1571 } 1572 delete osthread; 1573 } 1574 1575 void os::pd_start_thread(Thread* thread) { 1576 int status = thr_continue(thread->osthread()->thread_id()); 1577 assert_status(status == 0, status, "thr_continue failed"); 1578 } 1579 1580 1581 intx os::current_thread_id() { 1582 return (intx)thr_self(); 1583 } 1584 1585 static pid_t _initial_pid = 0; 1586 1587 int os::current_process_id() { 1588 return (int)(_initial_pid ? _initial_pid : getpid()); 1589 } 1590 1591 int os::allocate_thread_local_storage() { 1592 // %%% in Win32 this allocates a memory segment pointed to by a 1593 // register. Dan Stein can implement a similar feature in 1594 // Solaris. Alternatively, the VM can do the same thing 1595 // explicitly: malloc some storage and keep the pointer in a 1596 // register (which is part of the thread's context) (or keep it 1597 // in TLS). 1598 // %%% In current versions of Solaris, thr_self and TSD can 1599 // be accessed via short sequences of displaced indirections. 1600 // The value of thr_self is available as %g7(36). 
1601 // The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4), 1602 // assuming that the current thread already has a value bound to k. 1603 // It may be worth experimenting with such access patterns, 1604 // and later having the parameters formally exported from a Solaris 1605 // interface. I think, however, that it will be faster to 1606 // maintain the invariant that %g2 always contains the 1607 // JavaThread in Java code, and have stubs simply 1608 // treat %g2 as a caller-save register, preserving it in a %lN. 1609 thread_key_t tk; 1610 if (thr_keycreate( &tk, NULL ) ) 1611 fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed " 1612 "(%s)", strerror(errno))); 1613 return int(tk); 1614 } 1615 1616 void os::free_thread_local_storage(int index) { 1617 // %%% don't think we need anything here 1618 // if ( pthread_key_delete((pthread_key_t) tk) ) 1619 // fatal("os::free_thread_local_storage: pthread_key_delete failed"); 1620 } 1621 1622 #define SMALLINT 32 // libthread allocate for tsd_common is a version specific 1623 // small number - point is NO swap space available 1624 void os::thread_local_storage_at_put(int index, void* value) { 1625 // %%% this is used only in threadLocalStorage.cpp 1626 if (thr_setspecific((thread_key_t)index, value)) { 1627 if (errno == ENOMEM) { 1628 vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR, 1629 "thr_setspecific: out of swap space"); 1630 } else { 1631 fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed " 1632 "(%s)", strerror(errno))); 1633 } 1634 } else { 1635 ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ; 1636 } 1637 } 1638 1639 // This function could be called before TLS is initialized, for example, when 1640 // VM receives an async signal or when VM causes a fatal error during 1641 // initialization. Return NULL if thr_getspecific() fails. 
void* os::thread_local_storage_at(int index) {
  // %%% this is used only in threadLocalStorage.cpp
  void* r = NULL;
  return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
}


// gethrtime can move backwards if read from one cpu and then a different cpu
// getTimeNanos is guaranteed to not move backward on Solaris
// local spinloop created as faster for a CAS on an int than
// a CAS on a 64bit jlong. Also Atomic::cmpxchg for jlong is not
// supported on sparc v8 or pre supports_cx8 intel boxes.
// oldgetTimeNanos for systems which do not support CAS on 64bit jlong
// i.e. sparc v8 and pre supports_cx8 (i486) intel boxes
inline hrtime_t oldgetTimeNanos() {
  int gotlock = LOCK_INVALID;
  hrtime_t newtime = gethrtime();

  for (;;) {
    // grab lock for max_hrtime
    int curlock = max_hrtime_lock;
    if (curlock & LOCK_BUSY)  continue;
    // NOTE(review): due to precedence this parses as
    // gotlock = (Atomic::cmpxchg(...) != LOCK_FREE); 'gotlock' therefore
    // holds a 0/1 flag, not the cmpxchg result. Behavior is unaffected
    // because 'gotlock' is never read afterwards.
    if (gotlock = Atomic::cmpxchg(LOCK_BUSY, &max_hrtime_lock, LOCK_FREE) != LOCK_FREE) continue;
    if (newtime > max_hrtime) {
      max_hrtime = newtime;
    } else {
      newtime = max_hrtime;
    }
    // release lock
    max_hrtime_lock = LOCK_FREE;
    return newtime;
  }
}
// gethrtime can move backwards if read from one cpu and then a different cpu
// getTimeNanos is guaranteed to not move backward on Solaris
inline hrtime_t getTimeNanos() {
  if (VM_Version::supports_cx8()) {
    const hrtime_t now = gethrtime();
    // Use atomic long load since 32-bit x86 uses 2 registers to keep long.
    const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime);
    if (now <= prev)  return prev;   // same or retrograde time;
    const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
    assert(obsv >= prev, "invariant");   // Monotonicity
    // If the CAS succeeded then we're done and return "now".
    // If the CAS failed and the observed value "obs" is >= now then
    // we should return "obs".  If the CAS failed and now > obs > prv then
    // some other thread raced this thread and installed a new value, in which case
    // we could either (a) retry the entire operation, (b) retry trying to install now
    // or (c) just return obs.  We use (c).   No loop is required although in some cases
    // we might discard a higher "now" value in deference to a slightly lower but freshly
    // installed obs value.   That's entirely benign -- it admits no new orderings compared
    // to (a) or (b) -- and greatly reduces coherence traffic.
    // We might also condition (c) on the magnitude of the delta between obs and now.
    // Avoiding excessive CAS operations to hot RW locations is critical.
    // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate
    return (prev == obsv) ? now : obsv ;
  } else {
    return oldgetTimeNanos();
  }
}

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
}

jlong os::elapsed_counter() {
  return (jlong)(getTimeNanos() - first_hrtime);
}

jlong os::elapsed_frequency() {
  return hrtime_hz;
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
1719 bool os::getTimesSecs(double* process_real_time, 1720 double* process_user_time, 1721 double* process_system_time) { 1722 struct tms ticks; 1723 clock_t real_ticks = times(&ticks); 1724 1725 if (real_ticks == (clock_t) (-1)) { 1726 return false; 1727 } else { 1728 double ticks_per_second = (double) clock_tics_per_sec; 1729 *process_user_time = ((double) ticks.tms_utime) / ticks_per_second; 1730 *process_system_time = ((double) ticks.tms_stime) / ticks_per_second; 1731 // For consistency return the real time from getTimeNanos() 1732 // converted to seconds. 1733 *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS); 1734 1735 return true; 1736 } 1737 } 1738 1739 bool os::supports_vtime() { return true; } 1740 1741 bool os::enable_vtime() { 1742 int fd = ::open("/proc/self/ctl", O_WRONLY); 1743 if (fd == -1) 1744 return false; 1745 1746 long cmd[] = { PCSET, PR_MSACCT }; 1747 int res = ::write(fd, cmd, sizeof(long) * 2); 1748 ::close(fd); 1749 if (res != sizeof(long) * 2) 1750 return false; 1751 1752 return true; 1753 } 1754 1755 bool os::vtime_enabled() { 1756 int fd = ::open("/proc/self/status", O_RDONLY); 1757 if (fd == -1) 1758 return false; 1759 1760 pstatus_t status; 1761 int res = os::read(fd, (void*) &status, sizeof(pstatus_t)); 1762 ::close(fd); 1763 if (res != sizeof(pstatus_t)) 1764 return false; 1765 1766 return status.pr_flags & PR_MSACCT; 1767 } 1768 1769 double os::elapsedVTime() { 1770 return (double)gethrvtime() / (double)hrtime_hz; 1771 } 1772 1773 // Used internally for comparisons only 1774 // getTimeMillis guaranteed to not move backwards on Solaris 1775 jlong getTimeMillis() { 1776 jlong nanotime = getTimeNanos(); 1777 return (jlong)(nanotime / NANOSECS_PER_MILLISEC); 1778 } 1779 1780 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis 1781 jlong os::javaTimeMillis() { 1782 timeval t; 1783 if (gettimeofday( &t, NULL) == -1) 1784 fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno))); 1785 
return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000; 1786 } 1787 1788 jlong os::javaTimeNanos() { 1789 return (jlong)getTimeNanos(); 1790 } 1791 1792 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 1793 info_ptr->max_value = ALL_64_BITS; // gethrtime() uses all 64 bits 1794 info_ptr->may_skip_backward = false; // not subject to resetting or drifting 1795 info_ptr->may_skip_forward = false; // not subject to resetting or drifting 1796 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 1797 } 1798 1799 char * os::local_time_string(char *buf, size_t buflen) { 1800 struct tm t; 1801 time_t long_time; 1802 time(&long_time); 1803 localtime_r(&long_time, &t); 1804 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 1805 t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, 1806 t.tm_hour, t.tm_min, t.tm_sec); 1807 return buf; 1808 } 1809 1810 // Note: os::shutdown() might be called very early during initialization, or 1811 // called from signal handler. Before adding something to os::shutdown(), make 1812 // sure it is async-safe and can handle partially initialized VM. 1813 void os::shutdown() { 1814 1815 // allow PerfMemory to attempt cleanup of any persistent resources 1816 perfMemory_exit(); 1817 1818 // needs to remove object in file system 1819 AttachListener::abort(); 1820 1821 // flush buffered output, finish log files 1822 ostream_abort(); 1823 1824 // Check for abort hook 1825 abort_hook_t abort_hook = Arguments::abort_hook(); 1826 if (abort_hook != NULL) { 1827 abort_hook(); 1828 } 1829 } 1830 1831 // Note: os::abort() might be called very early during initialization, or 1832 // called from signal handler. Before adding something to os::abort(), make 1833 // sure it is async-safe and can handle partially initialized VM. 
1834 void os::abort(bool dump_core) { 1835 os::shutdown(); 1836 if (dump_core) { 1837 #ifndef PRODUCT 1838 fdStream out(defaultStream::output_fd()); 1839 out.print_raw("Current thread is "); 1840 char buf[16]; 1841 jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id()); 1842 out.print_raw_cr(buf); 1843 out.print_raw_cr("Dumping core ..."); 1844 #endif 1845 ::abort(); // dump core (for debugging) 1846 } 1847 1848 ::exit(1); 1849 } 1850 1851 // Die immediately, no exit hook, no abort hook, no cleanup. 1852 void os::die() { 1853 ::abort(); // dump core (for debugging) 1854 } 1855 1856 // unused 1857 void os::set_error_file(const char *logfile) {} 1858 1859 // DLL functions 1860 1861 const char* os::dll_file_extension() { return ".so"; } 1862 1863 // This must be hard coded because it's the system's temporary 1864 // directory not the java application's temp directory, ala java.io.tmpdir. 1865 const char* os::get_temp_directory() { return "/tmp"; } 1866 1867 static bool file_exists(const char* filename) { 1868 struct stat statbuf; 1869 if (filename == NULL || strlen(filename) == 0) { 1870 return false; 1871 } 1872 return os::stat(filename, &statbuf) == 0; 1873 } 1874 1875 bool os::dll_build_name(char* buffer, size_t buflen, 1876 const char* pname, const char* fname) { 1877 bool retval = false; 1878 const size_t pnamelen = pname ? strlen(pname) : 0; 1879 1880 // Return error on buffer overflow. 
1881 if (pnamelen + strlen(fname) + 10 > (size_t) buflen) { 1882 return retval; 1883 } 1884 1885 if (pnamelen == 0) { 1886 snprintf(buffer, buflen, "lib%s.so", fname); 1887 retval = true; 1888 } else if (strchr(pname, *os::path_separator()) != NULL) { 1889 int n; 1890 char** pelements = split_path(pname, &n); 1891 if (pelements == NULL) { 1892 return false; 1893 } 1894 for (int i = 0 ; i < n ; i++) { 1895 // really shouldn't be NULL but what the heck, check can't hurt 1896 if (pelements[i] == NULL || strlen(pelements[i]) == 0) { 1897 continue; // skip the empty path values 1898 } 1899 snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname); 1900 if (file_exists(buffer)) { 1901 retval = true; 1902 break; 1903 } 1904 } 1905 // release the storage 1906 for (int i = 0 ; i < n ; i++) { 1907 if (pelements[i] != NULL) { 1908 FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal); 1909 } 1910 } 1911 if (pelements != NULL) { 1912 FREE_C_HEAP_ARRAY(char*, pelements, mtInternal); 1913 } 1914 } else { 1915 snprintf(buffer, buflen, "%s/lib%s.so", pname, fname); 1916 retval = true; 1917 } 1918 return retval; 1919 } 1920 1921 // check if addr is inside libjvm.so 1922 bool os::address_is_in_vm(address addr) { 1923 static address libjvm_base_addr; 1924 Dl_info dlinfo; 1925 1926 if (libjvm_base_addr == NULL) { 1927 dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo); 1928 libjvm_base_addr = (address)dlinfo.dli_fbase; 1929 assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm"); 1930 } 1931 1932 if (dladdr((void *)addr, &dlinfo)) { 1933 if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true; 1934 } 1935 1936 return false; 1937 } 1938 1939 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int); 1940 static dladdr1_func_type dladdr1_func = NULL; 1941 1942 bool os::dll_address_to_function_name(address addr, char *buf, 1943 int buflen, int * offset) { 1944 Dl_info dlinfo; 1945 1946 // dladdr1_func was initialized in os::init() 1947 if 
(dladdr1_func){ 1948 // yes, we have dladdr1 1949 1950 // Support for dladdr1 is checked at runtime; it may be 1951 // available even if the vm is built on a machine that does 1952 // not have dladdr1 support. Make sure there is a value for 1953 // RTLD_DL_SYMENT. 1954 #ifndef RTLD_DL_SYMENT 1955 #define RTLD_DL_SYMENT 1 1956 #endif 1957 #ifdef _LP64 1958 Elf64_Sym * info; 1959 #else 1960 Elf32_Sym * info; 1961 #endif 1962 if (dladdr1_func((void *)addr, &dlinfo, (void **)&info, 1963 RTLD_DL_SYMENT)) { 1964 if ((char *)dlinfo.dli_saddr + info->st_size > (char *)addr) { 1965 if (buf != NULL) { 1966 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) 1967 jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname); 1968 } 1969 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; 1970 return true; 1971 } 1972 } 1973 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) { 1974 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase), 1975 buf, buflen, offset, dlinfo.dli_fname)) { 1976 return true; 1977 } 1978 } 1979 if (buf != NULL) buf[0] = '\0'; 1980 if (offset != NULL) *offset = -1; 1981 return false; 1982 } else { 1983 // no, only dladdr is available 1984 if (dladdr((void *)addr, &dlinfo)) { 1985 if (buf != NULL) { 1986 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) 1987 jio_snprintf(buf, buflen, dlinfo.dli_sname); 1988 } 1989 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; 1990 return true; 1991 } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) { 1992 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase), 1993 buf, buflen, offset, dlinfo.dli_fname)) { 1994 return true; 1995 } 1996 } 1997 if (buf != NULL) buf[0] = '\0'; 1998 if (offset != NULL) *offset = -1; 1999 return false; 2000 } 2001 } 2002 2003 bool os::dll_address_to_library_name(address addr, char* buf, 2004 int buflen, int* offset) { 2005 Dl_info dlinfo; 2006 2007 if (dladdr((void*)addr, &dlinfo)){ 2008 if (buf) jio_snprintf(buf, buflen, "%s", 
dlinfo.dli_fname); 2009 if (offset) *offset = addr - (address)dlinfo.dli_fbase; 2010 return true; 2011 } else { 2012 if (buf) buf[0] = '\0'; 2013 if (offset) *offset = -1; 2014 return false; 2015 } 2016 } 2017 2018 // Prints the names and full paths of all opened dynamic libraries 2019 // for current process 2020 void os::print_dll_info(outputStream * st) { 2021 Dl_info dli; 2022 void *handle; 2023 Link_map *map; 2024 Link_map *p; 2025 2026 st->print_cr("Dynamic libraries:"); st->flush(); 2027 2028 if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) { 2029 st->print_cr("Error: Cannot print dynamic libraries."); 2030 return; 2031 } 2032 handle = dlopen(dli.dli_fname, RTLD_LAZY); 2033 if (handle == NULL) { 2034 st->print_cr("Error: Cannot print dynamic libraries."); 2035 return; 2036 } 2037 dlinfo(handle, RTLD_DI_LINKMAP, &map); 2038 if (map == NULL) { 2039 st->print_cr("Error: Cannot print dynamic libraries."); 2040 return; 2041 } 2042 2043 while (map->l_prev != NULL) 2044 map = map->l_prev; 2045 2046 while (map != NULL) { 2047 st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name); 2048 map = map->l_next; 2049 } 2050 2051 dlclose(handle); 2052 } 2053 2054 // Loads .dll/.so and 2055 // in case of error it checks if .dll/.so was built for the 2056 // same architecture as Hotspot is running on 2057 2058 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) 2059 { 2060 void * result= ::dlopen(filename, RTLD_LAZY); 2061 if (result != NULL) { 2062 // Successful loading 2063 return result; 2064 } 2065 2066 Elf32_Ehdr elf_head; 2067 2068 // Read system error message into ebuf 2069 // It may or may not be overwritten below 2070 ::strncpy(ebuf, ::dlerror(), ebuflen-1); 2071 ebuf[ebuflen-1]='\0'; 2072 int diag_msg_max_length=ebuflen-strlen(ebuf); 2073 char* diag_msg_buf=ebuf+strlen(ebuf); 2074 2075 if (diag_msg_max_length==0) { 2076 // No more space in ebuf for additional diagnostics message 2077 return NULL; 2078 } 2079 2080 2081 int 
file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK); 2082 2083 if (file_descriptor < 0) { 2084 // Can't open library, report dlerror() message 2085 return NULL; 2086 } 2087 2088 bool failed_to_read_elf_head= 2089 (sizeof(elf_head)!= 2090 (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ; 2091 2092 ::close(file_descriptor); 2093 if (failed_to_read_elf_head) { 2094 // file i/o error - report dlerror() msg 2095 return NULL; 2096 } 2097 2098 typedef struct { 2099 Elf32_Half code; // Actual value as defined in elf.h 2100 Elf32_Half compat_class; // Compatibility of archs at VM's sense 2101 char elf_class; // 32 or 64 bit 2102 char endianess; // MSB or LSB 2103 char* name; // String representation 2104 } arch_t; 2105 2106 static const arch_t arch_array[]={ 2107 {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 2108 {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 2109 {EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"}, 2110 {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"}, 2111 {EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 2112 {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 2113 {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"}, 2114 {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"}, 2115 {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"}, 2116 {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"} 2117 }; 2118 2119 #if (defined IA32) 2120 static Elf32_Half running_arch_code=EM_386; 2121 #elif (defined AMD64) 2122 static Elf32_Half running_arch_code=EM_X86_64; 2123 #elif (defined IA64) 2124 static Elf32_Half running_arch_code=EM_IA_64; 2125 #elif (defined __sparc) && (defined _LP64) 2126 static Elf32_Half running_arch_code=EM_SPARCV9; 2127 #elif (defined __sparc) && (!defined _LP64) 2128 static Elf32_Half running_arch_code=EM_SPARC; 2129 #elif (defined __powerpc64__) 2130 static Elf32_Half 
running_arch_code=EM_PPC64; 2131 #elif (defined __powerpc__) 2132 static Elf32_Half running_arch_code=EM_PPC; 2133 #elif (defined ARM) 2134 static Elf32_Half running_arch_code=EM_ARM; 2135 #else 2136 #error Method os::dll_load requires that one of following is defined:\ 2137 IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM 2138 #endif 2139 2140 // Identify compatability class for VM's architecture and library's architecture 2141 // Obtain string descriptions for architectures 2142 2143 arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL}; 2144 int running_arch_index=-1; 2145 2146 for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) { 2147 if (running_arch_code == arch_array[i].code) { 2148 running_arch_index = i; 2149 } 2150 if (lib_arch.code == arch_array[i].code) { 2151 lib_arch.compat_class = arch_array[i].compat_class; 2152 lib_arch.name = arch_array[i].name; 2153 } 2154 } 2155 2156 assert(running_arch_index != -1, 2157 "Didn't find running architecture code (running_arch_code) in arch_array"); 2158 if (running_arch_index == -1) { 2159 // Even though running architecture detection failed 2160 // we may still continue with reporting dlerror() message 2161 return NULL; 2162 } 2163 2164 if (lib_arch.endianess != arch_array[running_arch_index].endianess) { 2165 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)"); 2166 return NULL; 2167 } 2168 2169 if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) { 2170 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)"); 2171 return NULL; 2172 } 2173 2174 if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) { 2175 if ( lib_arch.name!=NULL ) { 2176 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 2177 " (Possible cause: can't load %s-bit .so on a %s-bit platform)", 2178 lib_arch.name, arch_array[running_arch_index].name); 2179 } else { 2180 
::snprintf(diag_msg_buf, diag_msg_max_length-1, 2181 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)", 2182 lib_arch.code, 2183 arch_array[running_arch_index].name); 2184 } 2185 } 2186 2187 return NULL; 2188 } 2189 2190 void* os::dll_lookup(void* handle, const char* name) { 2191 return dlsym(handle, name); 2192 } 2193 2194 int os::stat(const char *path, struct stat *sbuf) { 2195 char pathbuf[MAX_PATH]; 2196 if (strlen(path) > MAX_PATH - 1) { 2197 errno = ENAMETOOLONG; 2198 return -1; 2199 } 2200 os::native_path(strcpy(pathbuf, path)); 2201 return ::stat(pathbuf, sbuf); 2202 } 2203 2204 static bool _print_ascii_file(const char* filename, outputStream* st) { 2205 int fd = ::open(filename, O_RDONLY); 2206 if (fd == -1) { 2207 return false; 2208 } 2209 2210 char buf[32]; 2211 int bytes; 2212 while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) { 2213 st->print_raw(buf, bytes); 2214 } 2215 2216 ::close(fd); 2217 2218 return true; 2219 } 2220 2221 void os::print_os_info_brief(outputStream* st) { 2222 os::Solaris::print_distro_info(st); 2223 2224 os::Posix::print_uname_info(st); 2225 2226 os::Solaris::print_libversion_info(st); 2227 } 2228 2229 void os::print_os_info(outputStream* st) { 2230 st->print("OS:"); 2231 2232 os::Solaris::print_distro_info(st); 2233 2234 os::Posix::print_uname_info(st); 2235 2236 os::Solaris::print_libversion_info(st); 2237 2238 os::Posix::print_rlimit_info(st); 2239 2240 os::Posix::print_load_average(st); 2241 } 2242 2243 void os::Solaris::print_distro_info(outputStream* st) { 2244 if (!_print_ascii_file("/etc/release", st)) { 2245 st->print("Solaris"); 2246 } 2247 st->cr(); 2248 } 2249 2250 void os::Solaris::print_libversion_info(outputStream* st) { 2251 if (os::Solaris::T2_libthread()) { 2252 st->print(" (T2 libthread)"); 2253 } 2254 else { 2255 st->print(" (T1 libthread)"); 2256 } 2257 st->cr(); 2258 } 2259 2260 static bool check_addr0(outputStream* st) { 2261 jboolean status = false; 2262 int fd = 
::open("/proc/self/map",O_RDONLY); 2263 if (fd >= 0) { 2264 prmap_t p; 2265 while(::read(fd, &p, sizeof(p)) > 0) { 2266 if (p.pr_vaddr == 0x0) { 2267 st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname); 2268 st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname); 2269 st->print("Access:"); 2270 st->print("%s",(p.pr_mflags & MA_READ) ? "r" : "-"); 2271 st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-"); 2272 st->print("%s",(p.pr_mflags & MA_EXEC) ? "x" : "-"); 2273 st->cr(); 2274 status = true; 2275 } 2276 ::close(fd); 2277 } 2278 } 2279 return status; 2280 } 2281 2282 void os::pd_print_cpu_info(outputStream* st) { 2283 // Nothing to do for now. 2284 } 2285 2286 void os::print_memory_info(outputStream* st) { 2287 st->print("Memory:"); 2288 st->print(" %dk page", os::vm_page_size()>>10); 2289 st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10); 2290 st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10); 2291 st->cr(); 2292 (void) check_addr0(st); 2293 } 2294 2295 // Taken from /usr/include/sys/machsig.h Supposed to be architecture specific 2296 // but they're the same for all the solaris architectures that we support. 
2297 const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR", 2298 "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG", 2299 "ILL_COPROC", "ILL_BADSTK" }; 2300 2301 const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV", 2302 "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES", 2303 "FPE_FLTINV", "FPE_FLTSUB" }; 2304 2305 const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" }; 2306 2307 const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" }; 2308 2309 void os::print_siginfo(outputStream* st, void* siginfo) { 2310 st->print("siginfo:"); 2311 2312 const int buflen = 100; 2313 char buf[buflen]; 2314 siginfo_t *si = (siginfo_t*)siginfo; 2315 st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen)); 2316 char *err = strerror(si->si_errno); 2317 if (si->si_errno != 0 && err != NULL) { 2318 st->print("si_errno=%s", err); 2319 } else { 2320 st->print("si_errno=%d", si->si_errno); 2321 } 2322 const int c = si->si_code; 2323 assert(c > 0, "unexpected si_code"); 2324 switch (si->si_signo) { 2325 case SIGILL: 2326 st->print(", si_code=%d (%s)", c, c > 8 ? "" : ill_names[c]); 2327 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2328 break; 2329 case SIGFPE: 2330 st->print(", si_code=%d (%s)", c, c > 9 ? "" : fpe_names[c]); 2331 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2332 break; 2333 case SIGSEGV: 2334 st->print(", si_code=%d (%s)", c, c > 2 ? "" : segv_names[c]); 2335 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2336 break; 2337 case SIGBUS: 2338 st->print(", si_code=%d (%s)", c, c > 3 ? 
"" : bus_names[c]); 2339 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2340 break; 2341 default: 2342 st->print(", si_code=%d", si->si_code); 2343 // no si_addr 2344 } 2345 2346 if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) && 2347 UseSharedSpaces) { 2348 FileMapInfo* mapinfo = FileMapInfo::current_info(); 2349 if (mapinfo->is_in_shared_space(si->si_addr)) { 2350 st->print("\n\nError accessing class data sharing archive." \ 2351 " Mapped file inaccessible during execution, " \ 2352 " possible disk/network problem."); 2353 } 2354 } 2355 st->cr(); 2356 } 2357 2358 // Moved from whole group, because we need them here for diagnostic 2359 // prints. 2360 #define OLDMAXSIGNUM 32 2361 static int Maxsignum = 0; 2362 static int *ourSigFlags = NULL; 2363 2364 extern "C" void sigINTRHandler(int, siginfo_t*, void*); 2365 2366 int os::Solaris::get_our_sigflags(int sig) { 2367 assert(ourSigFlags!=NULL, "signal data structure not initialized"); 2368 assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range"); 2369 return ourSigFlags[sig]; 2370 } 2371 2372 void os::Solaris::set_our_sigflags(int sig, int flags) { 2373 assert(ourSigFlags!=NULL, "signal data structure not initialized"); 2374 assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range"); 2375 ourSigFlags[sig] = flags; 2376 } 2377 2378 2379 static const char* get_signal_handler_name(address handler, 2380 char* buf, int buflen) { 2381 int offset; 2382 bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset); 2383 if (found) { 2384 // skip directory names 2385 const char *p1, *p2; 2386 p1 = buf; 2387 size_t len = strlen(os::file_separator()); 2388 while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len; 2389 jio_snprintf(buf, buflen, "%s+0x%x", p1, offset); 2390 } else { 2391 jio_snprintf(buf, buflen, PTR_FORMAT, handler); 2392 } 2393 return buf; 2394 } 2395 2396 static void print_signal_handler(outputStream* st, int sig, 2397 char* buf, size_t buflen) { 
2398 struct sigaction sa; 2399 2400 sigaction(sig, NULL, &sa); 2401 2402 st->print("%s: ", os::exception_name(sig, buf, buflen)); 2403 2404 address handler = (sa.sa_flags & SA_SIGINFO) 2405 ? CAST_FROM_FN_PTR(address, sa.sa_sigaction) 2406 : CAST_FROM_FN_PTR(address, sa.sa_handler); 2407 2408 if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) { 2409 st->print("SIG_DFL"); 2410 } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) { 2411 st->print("SIG_IGN"); 2412 } else { 2413 st->print("[%s]", get_signal_handler_name(handler, buf, buflen)); 2414 } 2415 2416 st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask); 2417 2418 address rh = VMError::get_resetted_sighandler(sig); 2419 // May be, handler was resetted by VMError? 2420 if(rh != NULL) { 2421 handler = rh; 2422 sa.sa_flags = VMError::get_resetted_sigflags(sig); 2423 } 2424 2425 st->print(", sa_flags=" PTR32_FORMAT, sa.sa_flags); 2426 2427 // Check: is it our handler? 2428 if(handler == CAST_FROM_FN_PTR(address, signalHandler) || 2429 handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) { 2430 // It is our signal handler 2431 // check for flags 2432 if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) { 2433 st->print( 2434 ", flags was changed from " PTR32_FORMAT ", consider using jsig library", 2435 os::Solaris::get_our_sigflags(sig)); 2436 } 2437 } 2438 st->cr(); 2439 } 2440 2441 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 2442 st->print_cr("Signal Handlers:"); 2443 print_signal_handler(st, SIGSEGV, buf, buflen); 2444 print_signal_handler(st, SIGBUS , buf, buflen); 2445 print_signal_handler(st, SIGFPE , buf, buflen); 2446 print_signal_handler(st, SIGPIPE, buf, buflen); 2447 print_signal_handler(st, SIGXFSZ, buf, buflen); 2448 print_signal_handler(st, SIGILL , buf, buflen); 2449 print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen); 2450 print_signal_handler(st, ASYNC_SIGNAL, buf, buflen); 2451 print_signal_handler(st, BREAK_SIGNAL, buf, buflen); 2452 
print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen); 2453 print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen); 2454 print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen); 2455 print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen); 2456 print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen); 2457 } 2458 2459 static char saved_jvm_path[MAXPATHLEN] = { 0 }; 2460 2461 // Find the full path to the current module, libjvm.so 2462 void os::jvm_path(char *buf, jint buflen) { 2463 // Error checking. 2464 if (buflen < MAXPATHLEN) { 2465 assert(false, "must use a large-enough buffer"); 2466 buf[0] = '\0'; 2467 return; 2468 } 2469 // Lazy resolve the path to current module. 2470 if (saved_jvm_path[0] != 0) { 2471 strcpy(buf, saved_jvm_path); 2472 return; 2473 } 2474 2475 Dl_info dlinfo; 2476 int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo); 2477 assert(ret != 0, "cannot locate libjvm"); 2478 realpath((char *)dlinfo.dli_fname, buf); 2479 2480 if (Arguments::created_by_gamma_launcher()) { 2481 // Support for the gamma launcher. Typical value for buf is 2482 // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". If "/jre/lib/" appears at 2483 // the right place in the string, then assume we are installed in a JDK and 2484 // we're done. Otherwise, check for a JAVA_HOME environment variable and fix 2485 // up the path so it looks like libjvm.so is installed there (append a 2486 // fake suffix hotspot/libjvm.so). 2487 const char *p = buf + strlen(buf) - 1; 2488 for (int count = 0; p > buf && count < 5; ++count) { 2489 for (--p; p > buf && *p != '/'; --p) 2490 /* empty */ ; 2491 } 2492 2493 if (strncmp(p, "/jre/lib/", 9) != 0) { 2494 // Look for JAVA_HOME in the environment. 
2495 char* java_home_var = ::getenv("JAVA_HOME"); 2496 if (java_home_var != NULL && java_home_var[0] != 0) { 2497 char cpu_arch[12]; 2498 char* jrelib_p; 2499 int len; 2500 sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch)); 2501 #ifdef _LP64 2502 // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9. 2503 if (strcmp(cpu_arch, "sparc") == 0) { 2504 strcat(cpu_arch, "v9"); 2505 } else if (strcmp(cpu_arch, "i386") == 0) { 2506 strcpy(cpu_arch, "amd64"); 2507 } 2508 #endif 2509 // Check the current module name "libjvm.so". 2510 p = strrchr(buf, '/'); 2511 assert(strstr(p, "/libjvm") == p, "invalid library name"); 2512 2513 realpath(java_home_var, buf); 2514 // determine if this is a legacy image or modules image 2515 // modules image doesn't have "jre" subdirectory 2516 len = strlen(buf); 2517 jrelib_p = buf + len; 2518 snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch); 2519 if (0 != access(buf, F_OK)) { 2520 snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch); 2521 } 2522 2523 if (0 == access(buf, F_OK)) { 2524 // Use current module name "libjvm.so" 2525 len = strlen(buf); 2526 snprintf(buf + len, buflen-len, "/hotspot/libjvm.so"); 2527 } else { 2528 // Go back to path of .so 2529 realpath((char *)dlinfo.dli_fname, buf); 2530 } 2531 } 2532 } 2533 } 2534 2535 strcpy(saved_jvm_path, buf); 2536 } 2537 2538 2539 void os::print_jni_name_prefix_on(outputStream* st, int args_size) { 2540 // no prefix required, not even "_" 2541 } 2542 2543 2544 void os::print_jni_name_suffix_on(outputStream* st, int args_size) { 2545 // no suffix required 2546 } 2547 2548 // This method is a copy of JDK's sysGetLastErrorString 2549 // from src/solaris/hpi/src/system_md.c 2550 2551 size_t os::lasterror(char *buf, size_t len) { 2552 2553 if (errno == 0) return 0; 2554 2555 const char *s = ::strerror(errno); 2556 size_t n = ::strlen(s); 2557 if (n >= len) { 2558 n = len - 1; 2559 } 2560 ::strncpy(buf, s, n); 2561 buf[n] = '\0'; 2562 return n; 2563 } 2564 2565 2566 // 
sun.misc.Signal 2567 2568 extern "C" { 2569 static void UserHandler(int sig, void *siginfo, void *context) { 2570 // Ctrl-C is pressed during error reporting, likely because the error 2571 // handler fails to abort. Let VM die immediately. 2572 if (sig == SIGINT && is_error_reported()) { 2573 os::die(); 2574 } 2575 2576 os::signal_notify(sig); 2577 // We do not need to reinstate the signal handler each time... 2578 } 2579 } 2580 2581 void* os::user_handler() { 2582 return CAST_FROM_FN_PTR(void*, UserHandler); 2583 } 2584 2585 class Semaphore : public StackObj { 2586 public: 2587 Semaphore(); 2588 ~Semaphore(); 2589 void signal(); 2590 void wait(); 2591 bool trywait(); 2592 bool timedwait(unsigned int sec, int nsec); 2593 private: 2594 sema_t _semaphore; 2595 }; 2596 2597 2598 Semaphore::Semaphore() { 2599 sema_init(&_semaphore, 0, NULL, NULL); 2600 } 2601 2602 Semaphore::~Semaphore() { 2603 sema_destroy(&_semaphore); 2604 } 2605 2606 void Semaphore::signal() { 2607 sema_post(&_semaphore); 2608 } 2609 2610 void Semaphore::wait() { 2611 sema_wait(&_semaphore); 2612 } 2613 2614 bool Semaphore::trywait() { 2615 return sema_trywait(&_semaphore) == 0; 2616 } 2617 2618 bool Semaphore::timedwait(unsigned int sec, int nsec) { 2619 struct timespec ts; 2620 unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec); 2621 2622 while (1) { 2623 int result = sema_timedwait(&_semaphore, &ts); 2624 if (result == 0) { 2625 return true; 2626 } else if (errno == EINTR) { 2627 continue; 2628 } else if (errno == ETIME) { 2629 return false; 2630 } else { 2631 return false; 2632 } 2633 } 2634 } 2635 2636 extern "C" { 2637 typedef void (*sa_handler_t)(int); 2638 typedef void (*sa_sigaction_t)(int, siginfo_t *, void *); 2639 } 2640 2641 void* os::signal(int signal_number, void* handler) { 2642 struct sigaction sigAct, oldSigAct; 2643 sigfillset(&(sigAct.sa_mask)); 2644 sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND; 2645 sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler); 2646 2647 if 
(sigaction(signal_number, &sigAct, &oldSigAct)) 2648 // -1 means registration failed 2649 return (void *)-1; 2650 2651 return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler); 2652 } 2653 2654 void os::signal_raise(int signal_number) { 2655 raise(signal_number); 2656 } 2657 2658 /* 2659 * The following code is moved from os.cpp for making this 2660 * code platform specific, which it is by its very nature. 2661 */ 2662 2663 // a counter for each possible signal value 2664 static int Sigexit = 0; 2665 static int Maxlibjsigsigs; 2666 static jint *pending_signals = NULL; 2667 static int *preinstalled_sigs = NULL; 2668 static struct sigaction *chainedsigactions = NULL; 2669 static sema_t sig_sem; 2670 typedef int (*version_getting_t)(); 2671 version_getting_t os::Solaris::get_libjsig_version = NULL; 2672 static int libjsigversion = NULL; 2673 2674 int os::sigexitnum_pd() { 2675 assert(Sigexit > 0, "signal memory not yet initialized"); 2676 return Sigexit; 2677 } 2678 2679 void os::Solaris::init_signal_mem() { 2680 // Initialize signal structures 2681 Maxsignum = SIGRTMAX; 2682 Sigexit = Maxsignum+1; 2683 assert(Maxsignum >0, "Unable to obtain max signal number"); 2684 2685 Maxlibjsigsigs = Maxsignum; 2686 2687 // pending_signals has one int per signal 2688 // The additional signal is for SIGEXIT - exit signal to signal_thread 2689 pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal); 2690 memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1))); 2691 2692 if (UseSignalChaining) { 2693 chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction) 2694 * (Maxsignum + 1), mtInternal); 2695 memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1))); 2696 preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal); 2697 memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1))); 2698 } 2699 ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ), mtInternal); 2700 memset(ourSigFlags, 0, sizeof(int) * 
(Maxsignum + 1)); 2701 } 2702 2703 void os::signal_init_pd() { 2704 int ret; 2705 2706 ret = ::sema_init(&sig_sem, 0, NULL, NULL); 2707 assert(ret == 0, "sema_init() failed"); 2708 } 2709 2710 void os::signal_notify(int signal_number) { 2711 int ret; 2712 2713 Atomic::inc(&pending_signals[signal_number]); 2714 ret = ::sema_post(&sig_sem); 2715 assert(ret == 0, "sema_post() failed"); 2716 } 2717 2718 static int check_pending_signals(bool wait_for_signal) { 2719 int ret; 2720 while (true) { 2721 for (int i = 0; i < Sigexit + 1; i++) { 2722 jint n = pending_signals[i]; 2723 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { 2724 return i; 2725 } 2726 } 2727 if (!wait_for_signal) { 2728 return -1; 2729 } 2730 JavaThread *thread = JavaThread::current(); 2731 ThreadBlockInVM tbivm(thread); 2732 2733 bool threadIsSuspended; 2734 do { 2735 thread->set_suspend_equivalent(); 2736 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 2737 while((ret = ::sema_wait(&sig_sem)) == EINTR) 2738 ; 2739 assert(ret == 0, "sema_wait() failed"); 2740 2741 // were we externally suspended while we were waiting? 2742 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2743 if (threadIsSuspended) { 2744 // 2745 // The semaphore has been incremented, but while we were waiting 2746 // another thread suspended us. We don't want to continue running 2747 // while suspended because that would surprise the thread that 2748 // suspended us. 
        //
        ret = ::sema_post(&sig_sem);
        assert(ret == 0, "sema_post() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

// Non-blocking probe: returns a consumed signal number, or -1 if none pending.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Blocking wait: returns the next consumed signal number.
int os::signal_wait() {
  return check_pending_signals(true);
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

// System page size, cached by os::init (asserted below); -1 until then.
static int page_size = -1;

// The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
// clear this var if support is not available.
static bool has_map_align = true;

int os::vm_page_size() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}

// Solaris allocates memory by pages.
int os::vm_allocation_granularity() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}

static bool recoverable_mmap_error(int err) {
  // See if the error is one we can let the caller handle. This
  // list of errno values comes from the Solaris mmap(2) man page.
  switch (err) {
  case EBADF:
  case EINVAL:
  case ENOTSUP:
    // let the caller deal with these errors
    return true;

  default:
    // Any remaining errors on this OS can cause our reserved mapping
    // to be lost. That can cause confusion where different data
    // structures think they have the same memory mapped. The worst
    // scenario is if both the VM and a library think they have the
    // same memory mapped.
    return false;
  }
}

// Warn about a failed commit of a 3-arg (no alignment hint) region.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
          strerror(err), err);
}

// Warn about a failed commit of a region with an alignment hint.
static void warn_fail_commit_memory(char* addr, size_t bytes,
                                    size_t alignment_hint, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
          alignment_hint, exec, strerror(err), err);
}

// Commit (back with swap/physical) a previously reserved range by remapping it
// read/write (and exec if requested).  Returns 0 on success, else the errno
// from mmap; aborts the VM on non-recoverable mmap errors.
int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  size_t size = bytes;
  char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
  if (res != NULL) {
    if (UseNUMAInterleaving) {
      numa_make_global(addr, bytes);
    }
    return 0;
  }

  int err = errno;  // save errno from mmap() call in mmap_chunk()

  if (!recoverable_mmap_error(err)) {
    warn_fail_commit_memory(addr, bytes, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
  }

  return err;
}

bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
}

// Commit, exiting the VM with 'mesg' on any failure (even recoverable ones).
void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, bytes, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
  }
}

int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
                                    size_t alignment_hint,
                                    bool exec) {
  // Commit as above, then (if MPSS is in use) advise the kernel of the
  // preferred page size for the range via set_mpss_range().
  int err = Solaris::commit_memory_impl(addr, bytes, exec);
  if (err == 0) {
    if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
      // If the large page size has been set and the VM
      // is using large pages, use the large page size
      // if it is smaller than the alignment hint. This is
      // a case where the VM wants to use a larger alignment size
      // for its own reasons but still want to use large pages
      // (which is what matters to setting the mpss range.
      size_t page_size = 0;  // NOTE: shadows the file-scope 'page_size' static
      if (large_page_size() < alignment_hint) {
        assert(UseLargePages, "Expected to be here for large page use only");
        page_size = large_page_size();
      } else {
        // If the alignment hint is less than the large page
        // size, the VM wants a particular alignment (thus the hint)
        // for internal reasons. Try to set the mpss range using
        // the alignment_hint.
        page_size = alignment_hint;
      }
      // Since this is a hint, ignore any failures.
      (void)Solaris::set_mpss_range(addr, bytes, page_size);
    }
  }
  return err;
}

bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
                          bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
}

// Hinted commit, exiting the VM with 'mesg' on any failure.
void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
  }
}

// Uncommit the pages in a specified region.
// Release the physical backing of the range (pages remain mapped) via
// madvise(MADV_FREE).
void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
  if (madvise(addr, bytes, MADV_FREE) < 0) {
    debug_only(warning("MADV_FREE failed."));
    return;
  }
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Change the page size in a given range.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
  assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
  if (UseLargePages && UseMPSS) {
    Solaris::set_mpss_range(addr, bytes, alignment_hint);
  }
}

// Tell the OS to make the range local to the first-touching LWP
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
  // lgrp_hint is unused on Solaris; placement follows the first-touching LWP.
  assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
  if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
    debug_only(warning("MADV_ACCESS_LWP failed."));
  }
}

// Tell the OS that this range would be accessed from different LWPs.
void os::numa_make_global(char *addr, size_t bytes) {
  assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
  if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
    debug_only(warning("MADV_ACCESS_MANY failed."));
  }
}

// Get the number of the locality groups.
size_t os::numa_get_groups_num() {
  // lgrp_nlgrps returns -1 on error; report a single (UMA) group in that case.
  size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
  return n != -1 ? n : 1;
}

// Get a list of leaf locality groups. A leaf lgroup is group that
// doesn't have any children. Typical leaf group is a CPU or a CPU/memory
// board.
An LWP is assigned to one of these groups upon creation. 2954 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 2955 if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) { 2956 ids[0] = 0; 2957 return 1; 2958 } 2959 int result_size = 0, top = 1, bottom = 0, cur = 0; 2960 for (int k = 0; k < size; k++) { 2961 int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur], 2962 (Solaris::lgrp_id_t*)&ids[top], size - top); 2963 if (r == -1) { 2964 ids[0] = 0; 2965 return 1; 2966 } 2967 if (!r) { 2968 // That's a leaf node. 2969 assert (bottom <= cur, "Sanity check"); 2970 // Check if the node has memory 2971 if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur], 2972 NULL, 0, LGRP_RSRC_MEM) > 0) { 2973 ids[bottom++] = ids[cur]; 2974 } 2975 } 2976 top += r; 2977 cur++; 2978 } 2979 if (bottom == 0) { 2980 // Handle a situation, when the OS reports no memory available. 2981 // Assume UMA architecture. 2982 ids[0] = 0; 2983 return 1; 2984 } 2985 return bottom; 2986 } 2987 2988 // Detect the topology change. Typically happens during CPU plugging-unplugging. 2989 bool os::numa_topology_changed() { 2990 int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie()); 2991 if (is_stale != -1 && is_stale) { 2992 Solaris::lgrp_fini(Solaris::lgrp_cookie()); 2993 Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER); 2994 assert(c != 0, "Failure to initialize LGRP API"); 2995 Solaris::set_lgrp_cookie(c); 2996 return true; 2997 } 2998 return false; 2999 } 3000 3001 // Get the group id of the current LWP. 3002 int os::numa_get_group_id() { 3003 int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID); 3004 if (lgrp_id == -1) { 3005 return 0; 3006 } 3007 const int size = os::numa_get_groups_num(); 3008 int *ids = (int*)alloca(size * sizeof(int)); 3009 3010 // Get the ids of all lgroups with memory; r is the count. 
  int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
                                  (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
  if (r <= 0) {
    return 0;
  }
  // Pick one of the memory-bearing groups at random to spread allocations.
  return ids[os::random() % r];
}

// Request information about the page.
// Fills info->lgrp_id and info->size for the page containing 'start' using
// meminfo(); returns false if the address is invalid (validity bit 0 unset)
// or the meminfo call fails.
bool os::get_page_info(char *start, page_info* info) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  uint64_t addr = (uintptr_t)start;
  uint64_t outdata[2];
  uint_t validity = 0;

  if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
    return false;
  }

  info->size = 0;
  info->lgrp_id = -1;

  // validity bit 0: address valid; bit 1: VLGRP valid; bit 2: VPAGESIZE valid.
  if ((validity & 1) != 0) {
    if ((validity & 2) != 0) {
      info->lgrp_id = outdata[0];
    }
    if ((validity & 4) != 0) {
      info->size = outdata[1];
    }
    return true;
  }
  return false;
}

// Scan the pages from start to end until a page different than
// the one described in the info parameter is encountered.
// Batches up to MAX_MEMINFO_CNT page addresses per meminfo() call.  Returns
// the address of the first page differing from *page_expected (recording its
// attributes in *page_found), 'end' if the whole range matches, or NULL on a
// meminfo failure / invalid address.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  const size_t types = sizeof(info_types) / sizeof(info_types[0]);
  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT];
  uint_t validity[MAX_MEMINFO_CNT];

  // Step by the larger of the expected page size and the base page size.
  size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
  uint64_t p = (uint64_t)start;
  while (p < (uint64_t)end) {
    addrs[0] = p;
    size_t addrs_count = 1;
    while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
      addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
      addrs_count++;
    }

    if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
      return NULL;
    }

    // Find the first page in this batch that differs from the expectation.
    size_t i = 0;
    for (; i < addrs_count; i++) {
      if ((validity[i] & 1) != 0) {
        if ((validity[i] & 4) != 0) {
          if (outdata[types * i + 1] != page_expected->size) {
            break;
          }
        } else
          if (page_expected->size != 0) {
            break;
          }

        if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
          if (outdata[types * i] != page_expected->lgrp_id) {
            break;
          }
        }
      } else {
        return NULL;
      }
    }

    if (i != addrs_count) {
      // A mismatching page was found; report its attributes.
      if ((validity[i] & 2) != 0) {
        page_found->lgrp_id = outdata[types * i];
      } else {
        page_found->lgrp_id = -1;
      }
      if ((validity[i] & 4) != 0) {
        page_found->size = outdata[types * i + 1];
      } else {
        page_found->size = 0;
      }
      return (char*)addrs[i];
    }

    p = addrs[addrs_count - 1] + page_size;
  }
  return end;
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  size_t size = bytes;
  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page.
  // Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
  return
    NULL != Solaris::mmap_chunk(addr, size,
                                MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
                                PROT_NONE);
}

// Thin wrapper over mmap() against /dev/zero; returns NULL instead of
// MAP_FAILED on error (errno is left set by mmap for the caller).
char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
  char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);

  if (b == MAP_FAILED) {
    return NULL;
  }
  return b;
}

// Reserve (but do not commit) an anonymous PROT_NONE mapping.  With 'fixed',
// maps exactly at requested_addr; otherwise may use MAP_ALIGN to let the
// kernel pick an address aligned to alignment_hint.
char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
  char* addr = requested_addr;
  int flags = MAP_PRIVATE | MAP_NORESERVE;

  assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");

  if (fixed) {
    flags |= MAP_FIXED;
  } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
    // With MAP_ALIGN the addr argument carries the desired alignment.
    flags |= MAP_ALIGN;
    addr = (char*) alignment_hint;
  }

  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page. Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
  return mmap_chunk(addr, bytes, flags, PROT_NONE);
}

char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
  char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));

  guarantee(requested_addr == NULL || requested_addr == addr,
            "OS failed to return requested mmap address.");
  return addr;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  const int max_tries = 10;
  char* base[max_tries];
  size_t size[max_tries];

  // Solaris adds a gap between mmap'ed regions.
  // The size of the gap
  // is dependent on the requested size and the MMU.  Our initial gap
  // value here is just a guess and will be corrected later.
  bool had_top_overlap = false;
  bool have_adjusted_gap = false;
  size_t gap = 0x400000;

  // Assert only that the size is a multiple of the page size, since
  // that's all that mmap requires, and since that's all we really know
  // about at this low abstraction level.  If we need higher alignment,
  // we can either pass an alignment to this method or verify alignment
  // in one of the methods further up the call chain.  See bug 5044738.
  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");

  // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
  // Give it a try, if the kernel honors the hint we can return immediately.
  char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);

  volatile int err = errno;
  if (addr == requested_addr) {
    return addr;
  } else if (addr != NULL) {
    // The kernel gave us something else; release it before falling back.
    pd_unmap_memory(addr, bytes);
  }

  if (PrintMiscellaneous && Verbose) {
    char buf[256];
    buf[0] = '\0';
    if (addr == NULL) {
      jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
    }
    warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
            PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
            "%s", bytes, requested_addr, addr, buf);
  }

  // Address hint method didn't work.  Fall back to the old method.
  // In theory, once SNV becomes our oldest supported platform, this
  // code will no longer be needed.
  //
  // Repeatedly allocate blocks until the block is allocated at the
  // right spot. Give up after max_tries.
  int i;
  for (i = 0; i < max_tries; ++i) {
    base[i] = reserve_memory(bytes);

    if (base[i] != NULL) {
      // Is this the block we wanted?
      if (base[i] == requested_addr) {
        size[i] = bytes;
        break;
      }

      // check that the gap value is right
      if (had_top_overlap && !have_adjusted_gap) {
        size_t actual_gap = base[i-1] - base[i] - bytes;
        if (gap != actual_gap) {
          // adjust the gap value and retry the last 2 allocations
          assert(i > 0, "gap adjustment code problem");
          have_adjusted_gap = true;  // adjust the gap only once, just in case
          gap = actual_gap;
          if (PrintMiscellaneous && Verbose) {
            warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
          }
          // Undo the last two allocations and redo them with the new gap.
          unmap_memory(base[i], bytes);
          unmap_memory(base[i-1], size[i-1]);
          i-=2;
          continue;
        }
      }

      // Does this overlap the block we wanted? Give back the overlapped
      // parts and try again.
      //
      // There is still a bug in this code: if top_overlap == bytes,
      // the overlap is offset from requested region by the value of gap.
      // In this case giving back the overlapped part will not work,
      // because we'll give back the entire block at base[i] and
      // therefore the subsequent allocation will not generate a new gap.
      // This could be fixed with a new algorithm that used larger
      // or variable size chunks to find the requested region -
      // but such a change would introduce additional complications.
      // It's rare enough that the planets align for this bug,
      // so we'll just wait for a fix for 6204603/5003415 which
      // will provide a mmap flag to allow us to avoid this business.
      size_t top_overlap = requested_addr + (bytes + gap) - base[i];
      // NOTE(review): top_overlap is size_t, so 'top_overlap >= 0' is always
      // true; the effective guard is the '< bytes' test (underflow wraps to a
      // huge value). Same applies to bottom_overlap below. Confirm intent.
      if (top_overlap >= 0 && top_overlap < bytes) {
        had_top_overlap = true;
        unmap_memory(base[i], top_overlap);
        base[i] += top_overlap;
        size[i] = bytes - top_overlap;
      } else {
        size_t bottom_overlap = base[i] + bytes - requested_addr;
        if (bottom_overlap >= 0 && bottom_overlap < bytes) {
          if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
            warning("attempt_reserve_memory_at: possible alignment bug");
          }
          unmap_memory(requested_addr, bottom_overlap);
          size[i] = bytes - bottom_overlap;
        } else {
          size[i] = bytes;
        }
      }
    }
  }

  // Give back the unused reserved pieces.

  for (int j = 0; j < i; ++j) {
    if (base[j] != NULL) {
      unmap_memory(base[j], size[j]);
    }
  }

  return (i < max_tries) ? requested_addr : NULL;
}

bool os::pd_release_memory(char* addr, size_t bytes) {
  size_t size = bytes;
  return munmap(addr, size) == 0;
}

// Page-aligned mprotect wrapper; returns true on success.
static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
  assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
         "addr must be page aligned");
  int retVal = mprotect(addr, bytes, prot);
  return retVal == 0;
}

// Protect memory (Used to pass readonly pages through
// JNI GetArray<type>Elements with empty arrays.)
// Also, used for serialization page and for compressed oops null pointer
// checking.
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  // Translate the platform-independent ProtType to mprotect bits.
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return solaris_mprotect(addr, bytes, p);
}

// guard_memory and unguard_memory only happens within stack guard pages.
// Since ISM pertains only to the heap, guard and unguard memory should not
// happen with an ISM region.
bool os::guard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_NONE);
}

bool os::unguard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
}

// Large page support

// UseLargePages is the master flag to enable/disable large page memory.
// UseMPSS and UseISM are supported for compatibility reasons. Their combined
// effects can be described in the following table:
//
// UseLargePages UseMPSS UseISM
//    false         *       *   => UseLargePages is the master switch, turning
//                                 it off will turn off both UseMPSS and
//                                 UseISM. VM will not use large page memory
//                                 regardless the settings of UseMPSS/UseISM.
//     true      false    false => Unless future Solaris provides other
//                                 mechanism to use large page memory, this
//                                 combination is equivalent to -UseLargePages,
//                                 VM will not use large page memory
//     true      true     false => JVM will use MPSS for large page memory.
//                                 This is the default behavior.
//     true      false    true  => JVM will use ISM for large page memory.
//     true      true     true  => JVM will use ISM if it is available.
//                                 Otherwise, JVM will fall back to MPSS.
//                                 Because ISM is now available on all
//                                 supported Solaris versions, this combination
//                                 is equivalent to +UseISM -UseMPSS.

// Cached large page size chosen by ism/mpss_sanity_check, see large_page_init.
static size_t _large_page_size = 0;

bool os::Solaris::ism_sanity_check(bool warn, size_t * page_size) {
  // x86 uses either 2M or 4M page, depending on whether PAE (Physical Address
  // Extensions) mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. Sparc
  // can support multiple page sizes.
  // Don't bother to probe page size because getpagesizes() comes with MPSS.
  // ISM is only recommended on old Solaris where there is no MPSS support.
  // Simply choose a conservative value as default.
  *page_size = LargePageSizeInBytes ? LargePageSizeInBytes :
               SPARC_ONLY(4 * M) IA32_ONLY(4 * M) AMD64_ONLY(2 * M)
               ARM_ONLY(2 * M);

  // ISM is available on all supported Solaris versions
  return true;
}

// Insertion sort for small arrays (descending order).
static void insertion_sort_descending(size_t* array, int len) {
  for (int i = 0; i < len; i++) {
    size_t val = array[i];
    // Shift val left past any smaller elements.
    for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
      size_t tmp = array[key];
      array[key] = array[key - 1];
      array[key - 1] = tmp;
    }
  }
}

// Probe the kernel's supported page sizes (via getpagesizes[2]) and pick the
// large page size to use.  Returns false when MPSS is unusable.
bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) {
  const unsigned int usable_count = VM_Version::page_size_count();
  if (usable_count == 1) {
    return false;
  }

  // Find the right getpagesizes interface.  When solaris 11 is the minimum
  // build platform, getpagesizes() (without the '2') can be called directly.
  typedef int (*gps_t)(size_t[], int);
  gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
  if (gps_func == NULL) {
    gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
    if (gps_func == NULL) {
      if (warn) {
        warning("MPSS is not supported by the operating system.");
      }
      return false;
    }
  }

  // Fill the array of page sizes.
  int n = (*gps_func)(_page_sizes, page_sizes_max);
  assert(n > 0, "Solaris bug?");

  if (n == page_sizes_max) {
    // Add a sentinel value (necessary only if the array was completely filled
    // since it is static (zeroed at initialization)).
    _page_sizes[--n] = 0;
    DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
  }
  assert(_page_sizes[n] == 0, "missing sentinel");
  trace_page_sizes("available page sizes", _page_sizes, n);

  if (n == 1) return false;     // Only one page size available.

  // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
  // select up to usable_count elements.  First sort the array, find the first
  // acceptable value, then copy the usable sizes to the top of the array and
  // trim the rest.  Make sure to include the default page size :-).
  //
  // A better policy could get rid of the 4M limit by taking the sizes of the
  // important VM memory regions (java heap and possibly the code cache) into
  // account.
  insertion_sort_descending(_page_sizes, n);
  const size_t size_limit =
    FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
  int beg;
  // 'beg' indexes the first size not exceeding the limit (array is descending).
  for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
  const int end = MIN2((int)usable_count, n) - 1;
  for (int cur = 0; cur < end; ++cur, ++beg) {
    _page_sizes[cur] = _page_sizes[beg];
  }
  // Always keep the default page size as the last usable entry, then re-add
  // the zero sentinel.
  _page_sizes[end] = vm_page_size();
  _page_sizes[end + 1] = 0;

  if (_page_sizes[end] > _page_sizes[end - 1]) {
    // Default page size is not the smallest; sort again.
    insertion_sort_descending(_page_sizes, end + 1);
  }
  // Report the largest usable size as the large page size.
  *page_size = _page_sizes[0];

  trace_page_sizes("usable page sizes", _page_sizes, end + 1);
  return true;
}

// Reconcile UseLargePages/UseISM/UseMPSS with what the OS actually supports;
// see the combination table above.
void os::large_page_init() {
  if (!UseLargePages) {
    UseISM = false;
    UseMPSS = false;
    return;
  }

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
                         !FLAG_IS_DEFAULT(UseISM)               ||
                         !FLAG_IS_DEFAULT(UseMPSS)              ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  UseISM = UseISM &&
           Solaris::ism_sanity_check(warn_on_failure, &_large_page_size);
  if (UseISM) {
    // ISM disables MPSS to be compatible with old JDK behavior
    UseMPSS = false;
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = vm_page_size();
  }

  UseMPSS = UseMPSS &&
            Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);

  UseLargePages = UseISM || UseMPSS;
}

// Advise the kernel (memcntl MC_HAT_ADVISE) of the preferred page size for
// [start, start+bytes).  Best effort; returns false if the advice fails.
bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
  // Signal to OS that we want large pages for addresses
  // from addr, addr + bytes
  struct memcntl_mha mpss_struct;
  mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
  mpss_struct.mha_pagesize = align;
  mpss_struct.mha_flags = 0;
  if (memcntl(start, bytes, MC_HAT_ADVISE,
              (caddr_t) &mpss_struct, 0, 0) < 0) {
    debug_only(warning("Attempt to use MPSS failed."));
    return false;
  }
  return true;
}

char* os::reserve_memory_special(size_t size, char* addr, bool exec) {
  // "exec" is passed in but not used.  Creating the shared image for
  // the code cache doesn't have an SHM_X executable permission to check.
  assert(UseLargePages && UseISM, "only for ISM large pages");

  char* retAddr = NULL;
  int shmid;
  key_t ismKey;

  bool warn_on_failure = UseISM &&
                        (!FLAG_IS_DEFAULT(UseLargePages)         ||
                         !FLAG_IS_DEFAULT(UseISM)                ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes)
                        );
  char msg[128];

  ismKey = IPC_PRIVATE;

  // Create a large shared memory region to attach to based on size.
  // Currently, size is the total size of the heap
  shmid = shmget(ismKey, size, SHM_R | SHM_W | IPC_CREAT);
  if (shmid == -1){
    if (warn_on_failure) {
      jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
      // NOTE(review): msg is passed as the format string; safe here since it
      // contains no user-controlled '%', but warning("%s", msg) would be safer.
      warning(msg);
    }
    return NULL;
  }

  // Attach to the region
  retAddr = (char *) shmat(shmid, 0, SHM_SHARE_MMU | SHM_R | SHM_W);
  int err = errno;

  // Remove shmid. If shmat() is successful, the actual shared memory segment
  // will be deleted when it's detached by shmdt() or when the process
  // terminates. If shmat() is not successful this will remove the shared
  // segment immediately.
  shmctl(shmid, IPC_RMID, NULL);

  if (retAddr == (char *) -1) {
    if (warn_on_failure) {
      jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
      warning(msg);
    }
    return NULL;
  }
  if ((retAddr != NULL) && UseNUMAInterleaving) {
    numa_make_global(retAddr, size);
  }

  // The memory is committed
  MemTracker::record_virtual_memory_reserve_and_commit((address)retAddr, size, mtNone, CURRENT_PC);

  return retAddr;
}

bool os::release_memory_special(char* base, size_t bytes) {
  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  // detaching the SHM segment will also delete it, see reserve_memory_special()
  int rslt = shmdt(base);
  if (rslt == 0) {
    tkr.record((address)base, bytes);
    return true;
  } else {
    tkr.discard();
    return false;
  }
}

size_t os::large_page_size() {
  return _large_page_size;
}

// MPSS allows application to commit large page memory on demand; with ISM
// the entire memory region must be allocated as shared memory.
bool os::can_commit_large_page_memory() {
  return UseISM ? false : true;
}

bool os::can_execute_large_page_memory() {
  return UseISM ? false : true;
}

// Sleep for 'millis' using poll(), recursing to split sleeps longer than
// INT_MAX ms.  Returns OS_OK, OS_INTRPT (interruptible path), or an error.
static int os_sleep(jlong millis, bool interruptible) {
  const jlong limit = INT_MAX;
  jlong prevtime;
  int res;

  // poll() takes an int timeout; split oversized sleeps into INT_MAX chunks.
  while (millis > limit) {
    if ((res = os_sleep(limit, interruptible)) != OS_OK)
      return res;
    millis -= limit;
  }

  // Restart interrupted polls with new parameters until the proper delay
  // has been completed.
  prevtime = getTimeMillis();

  while (millis > 0) {
    jlong newtime;

    if (!interruptible) {
      // Following assert fails for os::yield_all:
      // assert(!thread->is_Java_thread(), "must not be java thread");
      res = poll(NULL, 0, millis);
    } else {
      JavaThread *jt = JavaThread::current();

      INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
        os::Solaris::clear_interrupted);
    }

    // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
    // thread.Interrupt.

    // See c/r 6751923. Poll can return 0 before time
    // has elapsed if time is set via clock_settime (as NTP does).
    // res == 0 if poll timed out (see man poll RETURN VALUES)
    // using the logic below checks that we really did
    // sleep at least "millis" if not we'll sleep again.
    if( ( res == 0 ) || ((res == OS_ERR) && (errno == EINTR))) {
      newtime = getTimeMillis();
      assert(newtime >= prevtime, "time moving backwards");
    /* Doing prevtime and newtime in microseconds doesn't help precision,
       and trying to round up to avoid lost milliseconds can result in a
       too-short delay. */
      millis -= newtime - prevtime;
      if(millis <= 0)
        return OS_OK;
      prevtime = newtime;
    } else
      return res;
  }

  return OS_OK;
}

// Read calls from inside the vm need to perform state transitions
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}

// Like os::read but restarts on EINTR without a VM state transition.
size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}

int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(), "thread consistency check");

  // TODO-FIXME: this should be removed.
  // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock
  // situation with a JavaThread being starved out of a lwp. The kernel doesn't seem to generate
  // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving
  // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
  // is fooled into believing that the system is making progress. In the code below we block the
  // the watcher thread while safepoint is in progress so that it would not appear as though the
  // system is making progress.
  if (!Solaris::T2_libthread() &&
      thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
    // We now try to acquire the threads lock. Since this lock is held by the VM thread during
    // the entire safepoint, the watcher thread will line up here during the safepoint.
    Threads_lock->lock_without_safepoint_check();
    Threads_lock->unlock();
  }

  if (thread->is_Java_thread()) {
    // This is a JavaThread so we honor the _thread_blocked protocol
    // even for sleeps of 0 milliseconds. This was originally done
    // as a workaround for bug 4338139. However, now we also do it
    // to honor the suspend-equivalent protocol.

    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    int ret_code;
    if (millis <= 0) {
      // A sleep of zero is treated as a yield.
      thr_yield();
      ret_code = 0;
    } else {
      // The original sleep() implementation did not create an
      // OSThreadWaitState helper for sleeps of 0 milliseconds.
      // I'm preserving that decision for now.
3671 OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */); 3672 3673 ret_code = os_sleep(millis, interruptible); 3674 } 3675 3676 // were we externally suspended while we were waiting? 3677 jt->check_and_wait_while_suspended(); 3678 3679 return ret_code; 3680 } 3681 3682 // non-JavaThread from this point on: 3683 3684 if (millis <= 0) { 3685 thr_yield(); 3686 return 0; 3687 } 3688 3689 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); 3690 3691 return os_sleep(millis, interruptible); 3692 } 3693 3694 int os::naked_sleep() { 3695 // %% make the sleep time an integer flag. for now use 1 millisec. 3696 return os_sleep(1, false); 3697 } 3698 3699 // Sleep forever; naked call to OS-specific sleep; use with CAUTION 3700 void os::infinite_sleep() { 3701 while (true) { // sleep forever ... 3702 ::sleep(100); // ... 100 seconds at a time 3703 } 3704 } 3705 3706 // Used to convert frequent JVM_Yield() to nops 3707 bool os::dont_yield() { 3708 if (DontYieldALot) { 3709 static hrtime_t last_time = 0; 3710 hrtime_t diff = getTimeNanos() - last_time; 3711 3712 if (diff < DontYieldALotInterval * 1000000) 3713 return true; 3714 3715 last_time += diff; 3716 3717 return false; 3718 } 3719 else { 3720 return false; 3721 } 3722 } 3723 3724 // Caveat: Solaris os::yield() causes a thread-state transition whereas 3725 // the linux and win32 implementations do not. This should be checked. 3726 3727 void os::yield() { 3728 // Yields to all threads with same or greater priority 3729 os::sleep(Thread::current(), 0, false); 3730 } 3731 3732 // Note that yield semantics are defined by the scheduling class to which 3733 // the thread currently belongs. Typically, yield will _not yield to 3734 // other equal or higher priority threads that reside on the dispatch queues 3735 // of other CPUs. 
// Naked, transition-free yield; the YIELD_UNKNOWN result reflects that
// thr_yield() gives no feedback about whether anything was actually yielded to.
os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }


// On Solaris we found that yield_all doesn't always yield to all other threads.
// There have been cases where there is a thread ready to execute but it doesn't
// get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
// The 1 millisecond wait doesn't seem long enough for the kernel to issue a
// SIGWAITING signal which will cause a new lwp to be created. So we count the
// number of times yield_all is called in the one loop and increase the sleep
// time after 8 attempts. If this fails too we increase the concurrency level
// so that the starving thread would get an lwp

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  if (attempts == 0) {
    os::sleep(Thread::current(), 1, false);
  } else {
    int iterations = attempts % 30;
    if (iterations == 0 && !os::Solaris::T2_libthread()) {
      // thr_setconcurrency and _getconcurrency make sense only under T1.
      // Every 30th attempt under T1: ask the threads library for one more
      // lwp if ready threads may outnumber the available lwps.
      int noofLWPS = thr_getconcurrency();
      if (noofLWPS < (Threads::number_of_threads() + 2)) {
        thr_setconcurrency(thr_getconcurrency() + 1);
      }
    } else if (iterations < 25) {
      os::sleep(Thread::current(), 1, false);
    } else {
      // Escalate to a longer sleep late in each 30-attempt cycle.
      os::sleep(Thread::current(), 10, false);
    }
  }
}

// Called from the tight loops to possibly influence time-sharing heuristics
void os::loop_breaker(int attempts) {
  os::yield_all(attempts);
}


// Interface for setting lwp priorities.  If we are using T2 libthread,
// which forces the use of BoundThreads or we manually set UseBoundThreads,
// all of our threads will be assigned to real lwp's.  Using the thr_setprio
// function is meaningless in this mode so we must adjust the real lwp's priority
// The routines below implement the getting and setting of lwp priorities.
//
// Note: There are three priority scales used on Solaris.  Java priorities
// which range from 1 to 10, libthread "thr_setprio" scale which range
// from 0 to 127, and the current scheduling class of the process we
// are running in.  This is typically from -60 to +60.
// The setting of the lwp priorities is done after a call to thr_setprio
// so Java priorities are mapped to libthread priorities and we map from
// the latter to lwp priorities.  We don't keep priorities stored in
// Java priorities since some of our worker threads want to set priorities
// higher than all Java threads.
//
// For related information:
// (1) man -s 2 priocntl
// (2) man -s 4 priocntl
// (3) man dispadmin
// =  librt.so
// =  libthread/common/rtsched.c - thrp_setlwpprio().
// =  ps -cL <pid> ... to validate priority.
// =  sched_get_priority_min and _max
//              pthread_create
//              sched_setparam
//              pthread_setschedparam
//
// Assumptions:
// +    We assume that all threads in the process belong to the same
//              scheduling class.   IE. an homogenous process.
// +    Must be root or in IA group to change "interactive" attribute.
//              Priocntl() will fail silently.  The only indication of failure is when
//              we read-back the value and notice that it hasn't changed.
// +    Interactive threads enter the runq at the head, non-interactive at the tail.
// +    For RT, change timeslice as well.  Invariant:
//              constant "priority integral"
//              Konst == TimeSlice * (60-Priority)
//              Given a priority, compute appropriate timeslice.
// +    Higher numerical values have higher priority.
// sched class attributes
typedef struct {
  int   schedPolicy;              // classID
  int   maxPrio;
  int   minPrio;
} SchedInfo;


// Per-scheduling-class priority limits, filled in by lwp_priocntl_init().
static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;

#ifdef ASSERT
// When non-zero, set_lwp_class_and_priority() re-reads the priority after
// setting it, since priocntl can fail silently.
static int  ReadBackValidate = 1;
#endif
static int  myClass     = 0;    // scheduling class id of this process
static int  myMin       = 0;    // priority range of our class, with myMax
static int  myMax       = 0;    //   clamped by the class's current upper limit
static int  myCur       = 0;    // NOTE(review): not updated in visible code -- confirm use
static bool priocntl_enable = false;  // true only after lwp_priocntl_init() succeeds

static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
static int java_MaxPriority_to_os_priority = 0; // Saved mapping

// Call the version of priocntl suitable for all supported versions
// of Solaris. We need to call through this wrapper so that we can
// build on Solaris 9 and run on Solaris 8, 9 and 10.
//
// This code should be removed if we ever stop supporting Solaris 8
// and earlier releases.

static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
typedef long (*priocntl_type)(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
// Starts pointing at the stub; the first call rebinds it to the real function.
static priocntl_type priocntl_ptr = priocntl_stub;

// Stub to set the value of the real pointer, and then call the real
// function.

static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg) {
  // Try Solaris 8- name only.
  priocntl_type tmp = (priocntl_type)dlsym(RTLD_DEFAULT, "__priocntl");
  guarantee(tmp != NULL, "priocntl function not found.");
  // All subsequent calls through priocntl_ptr go straight to __priocntl.
  priocntl_ptr = tmp;
  return (*priocntl_ptr)(PC_VERSION, idtype, id, cmd, arg);
}


// lwp_priocntl_init
//
// Try to determine the priority scale for our process.
//
// Return errno or 0 if OK.
3866 // 3867 static 3868 int lwp_priocntl_init () 3869 { 3870 int rslt; 3871 pcinfo_t ClassInfo; 3872 pcparms_t ParmInfo; 3873 int i; 3874 3875 if (!UseThreadPriorities) return 0; 3876 3877 // We are using Bound threads, we need to determine our priority ranges 3878 if (os::Solaris::T2_libthread() || UseBoundThreads) { 3879 // If ThreadPriorityPolicy is 1, switch tables 3880 if (ThreadPriorityPolicy == 1) { 3881 for (i = 0 ; i < CriticalPriority+1; i++) 3882 os::java_to_os_priority[i] = prio_policy1[i]; 3883 } 3884 if (UseCriticalJavaThreadPriority) { 3885 // MaxPriority always maps to the FX scheduling class and criticalPrio. 3886 // See set_native_priority() and set_lwp_class_and_priority(). 3887 // Save original MaxPriority mapping in case attempt to 3888 // use critical priority fails. 3889 java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority]; 3890 // Set negative to distinguish from other priorities 3891 os::java_to_os_priority[MaxPriority] = -criticalPrio; 3892 } 3893 } 3894 // Not using Bound Threads, set to ThreadPolicy 1 3895 else { 3896 for ( i = 0 ; i < CriticalPriority+1; i++ ) { 3897 os::java_to_os_priority[i] = prio_policy1[i]; 3898 } 3899 return 0; 3900 } 3901 3902 // Get IDs for a set of well-known scheduling classes. 3903 // TODO-FIXME: GETCLINFO returns the current # of classes in the 3904 // the system. We should have a loop that iterates over the 3905 // classID values, which are known to be "small" integers. 
3906 3907 strcpy(ClassInfo.pc_clname, "TS"); 3908 ClassInfo.pc_cid = -1; 3909 rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3910 if (rslt < 0) return errno; 3911 assert(ClassInfo.pc_cid != -1, "cid for TS class is -1"); 3912 tsLimits.schedPolicy = ClassInfo.pc_cid; 3913 tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri; 3914 tsLimits.minPrio = -tsLimits.maxPrio; 3915 3916 strcpy(ClassInfo.pc_clname, "IA"); 3917 ClassInfo.pc_cid = -1; 3918 rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3919 if (rslt < 0) return errno; 3920 assert(ClassInfo.pc_cid != -1, "cid for IA class is -1"); 3921 iaLimits.schedPolicy = ClassInfo.pc_cid; 3922 iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri; 3923 iaLimits.minPrio = -iaLimits.maxPrio; 3924 3925 strcpy(ClassInfo.pc_clname, "RT"); 3926 ClassInfo.pc_cid = -1; 3927 rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3928 if (rslt < 0) return errno; 3929 assert(ClassInfo.pc_cid != -1, "cid for RT class is -1"); 3930 rtLimits.schedPolicy = ClassInfo.pc_cid; 3931 rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri; 3932 rtLimits.minPrio = 0; 3933 3934 strcpy(ClassInfo.pc_clname, "FX"); 3935 ClassInfo.pc_cid = -1; 3936 rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3937 if (rslt < 0) return errno; 3938 assert(ClassInfo.pc_cid != -1, "cid for FX class is -1"); 3939 fxLimits.schedPolicy = ClassInfo.pc_cid; 3940 fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri; 3941 fxLimits.minPrio = 0; 3942 3943 // Query our "current" scheduling class. 3944 // This will normally be IA, TS or, rarely, FX or RT. 
3945 memset(&ParmInfo, 0, sizeof(ParmInfo)); 3946 ParmInfo.pc_cid = PC_CLNULL; 3947 rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo); 3948 if (rslt < 0) return errno; 3949 myClass = ParmInfo.pc_cid; 3950 3951 // We now know our scheduling classId, get specific information 3952 // about the class. 3953 ClassInfo.pc_cid = myClass; 3954 ClassInfo.pc_clname[0] = 0; 3955 rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo); 3956 if (rslt < 0) return errno; 3957 3958 if (ThreadPriorityVerbose) { 3959 tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname); 3960 } 3961 3962 memset(&ParmInfo, 0, sizeof(pcparms_t)); 3963 ParmInfo.pc_cid = PC_CLNULL; 3964 rslt = (*priocntl_ptr)(PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo); 3965 if (rslt < 0) return errno; 3966 3967 if (ParmInfo.pc_cid == rtLimits.schedPolicy) { 3968 myMin = rtLimits.minPrio; 3969 myMax = rtLimits.maxPrio; 3970 } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) { 3971 iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms; 3972 myMin = iaLimits.minPrio; 3973 myMax = iaLimits.maxPrio; 3974 myMax = MIN2(myMax, (int)iaInfo->ia_uprilim); // clamp - restrict 3975 } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) { 3976 tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms; 3977 myMin = tsLimits.minPrio; 3978 myMax = tsLimits.maxPrio; 3979 myMax = MIN2(myMax, (int)tsInfo->ts_uprilim); // clamp - restrict 3980 } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) { 3981 fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms; 3982 myMin = fxLimits.minPrio; 3983 myMax = fxLimits.maxPrio; 3984 myMax = MIN2(myMax, (int)fxInfo->fx_uprilim); // clamp - restrict 3985 } else { 3986 // No clue - punt 3987 if (ThreadPriorityVerbose) 3988 tty->print_cr ("Unknown scheduling class: %s ... 
\n", ClassInfo.pc_clname); 3989 return EINVAL; // no clue, punt 3990 } 3991 3992 if (ThreadPriorityVerbose) { 3993 tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax); 3994 } 3995 3996 priocntl_enable = true; // Enable changing priorities 3997 return 0; 3998 } 3999 4000 #define IAPRI(x) ((iaparms_t *)((x).pc_clparms)) 4001 #define RTPRI(x) ((rtparms_t *)((x).pc_clparms)) 4002 #define TSPRI(x) ((tsparms_t *)((x).pc_clparms)) 4003 #define FXPRI(x) ((fxparms_t *)((x).pc_clparms)) 4004 4005 4006 // scale_to_lwp_priority 4007 // 4008 // Convert from the libthread "thr_setprio" scale to our current 4009 // lwp scheduling class scale. 4010 // 4011 static 4012 int scale_to_lwp_priority (int rMin, int rMax, int x) 4013 { 4014 int v; 4015 4016 if (x == 127) return rMax; // avoid round-down 4017 v = (((x*(rMax-rMin)))/128)+rMin; 4018 return v; 4019 } 4020 4021 4022 // set_lwp_class_and_priority 4023 // 4024 // Set the class and priority of the lwp. This call should only 4025 // be made when using bound threads (T2 threads are bound by default). 4026 // 4027 int set_lwp_class_and_priority(int ThreadID, int lwpid, 4028 int newPrio, int new_class, bool scale) { 4029 int rslt; 4030 int Actual, Expected, prv; 4031 pcparms_t ParmInfo; // for GET-SET 4032 #ifdef ASSERT 4033 pcparms_t ReadBack; // for readback 4034 #endif 4035 4036 // Set priority via PC_GETPARMS, update, PC_SETPARMS 4037 // Query current values. 4038 // TODO: accelerate this by eliminating the PC_GETPARMS call. 4039 // Cache "pcparms_t" in global ParmCache. 4040 // TODO: elide set-to-same-value 4041 4042 // If something went wrong on init, don't change priorities. 4043 if ( !priocntl_enable ) { 4044 if (ThreadPriorityVerbose) 4045 tty->print_cr("Trying to set priority but init failed, ignoring"); 4046 return EINVAL; 4047 } 4048 4049 // If lwp hasn't started yet, just return 4050 // the _start routine will call us again. 
4051 if ( lwpid <= 0 ) { 4052 if (ThreadPriorityVerbose) { 4053 tty->print_cr ("deferring the set_lwp_class_and_priority of thread " 4054 INTPTR_FORMAT " to %d, lwpid not set", 4055 ThreadID, newPrio); 4056 } 4057 return 0; 4058 } 4059 4060 if (ThreadPriorityVerbose) { 4061 tty->print_cr ("set_lwp_class_and_priority(" 4062 INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ", 4063 ThreadID, lwpid, newPrio); 4064 } 4065 4066 memset(&ParmInfo, 0, sizeof(pcparms_t)); 4067 ParmInfo.pc_cid = PC_CLNULL; 4068 rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo); 4069 if (rslt < 0) return errno; 4070 4071 int cur_class = ParmInfo.pc_cid; 4072 ParmInfo.pc_cid = (id_t)new_class; 4073 4074 if (new_class == rtLimits.schedPolicy) { 4075 rtparms_t *rtInfo = (rtparms_t*)ParmInfo.pc_clparms; 4076 rtInfo->rt_pri = scale ? scale_to_lwp_priority(rtLimits.minPrio, 4077 rtLimits.maxPrio, newPrio) 4078 : newPrio; 4079 rtInfo->rt_tqsecs = RT_NOCHANGE; 4080 rtInfo->rt_tqnsecs = RT_NOCHANGE; 4081 if (ThreadPriorityVerbose) { 4082 tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri); 4083 } 4084 } else if (new_class == iaLimits.schedPolicy) { 4085 iaparms_t* iaInfo = (iaparms_t*)ParmInfo.pc_clparms; 4086 int maxClamped = MIN2(iaLimits.maxPrio, 4087 cur_class == new_class 4088 ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio); 4089 iaInfo->ia_upri = scale ? scale_to_lwp_priority(iaLimits.minPrio, 4090 maxClamped, newPrio) 4091 : newPrio; 4092 iaInfo->ia_uprilim = cur_class == new_class 4093 ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio; 4094 iaInfo->ia_mode = IA_NOCHANGE; 4095 if (ThreadPriorityVerbose) { 4096 tty->print_cr("IA: [%d...%d] %d->%d\n", 4097 iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri); 4098 } 4099 } else if (new_class == tsLimits.schedPolicy) { 4100 tsparms_t* tsInfo = (tsparms_t*)ParmInfo.pc_clparms; 4101 int maxClamped = MIN2(tsLimits.maxPrio, 4102 cur_class == new_class 4103 ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio); 4104 tsInfo->ts_upri = scale ? 
scale_to_lwp_priority(tsLimits.minPrio, 4105 maxClamped, newPrio) 4106 : newPrio; 4107 tsInfo->ts_uprilim = cur_class == new_class 4108 ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio; 4109 if (ThreadPriorityVerbose) { 4110 tty->print_cr("TS: [%d...%d] %d->%d\n", 4111 tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri); 4112 } 4113 } else if (new_class == fxLimits.schedPolicy) { 4114 fxparms_t* fxInfo = (fxparms_t*)ParmInfo.pc_clparms; 4115 int maxClamped = MIN2(fxLimits.maxPrio, 4116 cur_class == new_class 4117 ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio); 4118 fxInfo->fx_upri = scale ? scale_to_lwp_priority(fxLimits.minPrio, 4119 maxClamped, newPrio) 4120 : newPrio; 4121 fxInfo->fx_uprilim = cur_class == new_class 4122 ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio; 4123 fxInfo->fx_tqsecs = FX_NOCHANGE; 4124 fxInfo->fx_tqnsecs = FX_NOCHANGE; 4125 if (ThreadPriorityVerbose) { 4126 tty->print_cr("FX: [%d...%d] %d->%d\n", 4127 fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri); 4128 } 4129 } else { 4130 if (ThreadPriorityVerbose) { 4131 tty->print_cr("Unknown new scheduling class %d\n", new_class); 4132 } 4133 return EINVAL; // no clue, punt 4134 } 4135 4136 rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo); 4137 if (ThreadPriorityVerbose && rslt) { 4138 tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno); 4139 } 4140 if (rslt < 0) return errno; 4141 4142 #ifdef ASSERT 4143 // Sanity check: read back what we just attempted to set. 4144 // In theory it could have changed in the interim ... 4145 // 4146 // The priocntl system call is tricky. 4147 // Sometimes it'll validate the priority value argument and 4148 // return EINVAL if unhappy. At other times it fails silently. 4149 // Readbacks are prudent. 
4150 4151 if (!ReadBackValidate) return 0; 4152 4153 memset(&ReadBack, 0, sizeof(pcparms_t)); 4154 ReadBack.pc_cid = PC_CLNULL; 4155 rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack); 4156 assert(rslt >= 0, "priocntl failed"); 4157 Actual = Expected = 0xBAD; 4158 assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match"); 4159 if (ParmInfo.pc_cid == rtLimits.schedPolicy) { 4160 Actual = RTPRI(ReadBack)->rt_pri; 4161 Expected = RTPRI(ParmInfo)->rt_pri; 4162 } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) { 4163 Actual = IAPRI(ReadBack)->ia_upri; 4164 Expected = IAPRI(ParmInfo)->ia_upri; 4165 } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) { 4166 Actual = TSPRI(ReadBack)->ts_upri; 4167 Expected = TSPRI(ParmInfo)->ts_upri; 4168 } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) { 4169 Actual = FXPRI(ReadBack)->fx_upri; 4170 Expected = FXPRI(ParmInfo)->fx_upri; 4171 } else { 4172 if (ThreadPriorityVerbose) { 4173 tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n", 4174 ParmInfo.pc_cid); 4175 } 4176 } 4177 4178 if (Actual != Expected) { 4179 if (ThreadPriorityVerbose) { 4180 tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n", 4181 lwpid, newPrio, ReadBack.pc_cid, Actual, Expected); 4182 } 4183 } 4184 #endif 4185 4186 return 0; 4187 } 4188 4189 // Solaris only gives access to 128 real priorities at a time, 4190 // so we expand Java's ten to fill this range. This would be better 4191 // if we dynamically adjusted relative priorities. 4192 // 4193 // The ThreadPriorityPolicy option allows us to select 2 different 4194 // priority scales. 4195 // 4196 // ThreadPriorityPolicy=0 4197 // Since the Solaris' default priority is MaximumPriority, we do not 4198 // set a priority lower than Max unless a priority lower than 4199 // NormPriority is requested. 
4200 // 4201 // ThreadPriorityPolicy=1 4202 // This mode causes the priority table to get filled with 4203 // linear values. NormPriority get's mapped to 50% of the 4204 // Maximum priority an so on. This will cause VM threads 4205 // to get unfair treatment against other Solaris processes 4206 // which do not explicitly alter their thread priorities. 4207 // 4208 4209 int os::java_to_os_priority[CriticalPriority + 1] = { 4210 -99999, // 0 Entry should never be used 4211 4212 0, // 1 MinPriority 4213 32, // 2 4214 64, // 3 4215 4216 96, // 4 4217 127, // 5 NormPriority 4218 127, // 6 4219 4220 127, // 7 4221 127, // 8 4222 127, // 9 NearMaxPriority 4223 4224 127, // 10 MaxPriority 4225 4226 -criticalPrio // 11 CriticalPriority 4227 }; 4228 4229 OSReturn os::set_native_priority(Thread* thread, int newpri) { 4230 OSThread* osthread = thread->osthread(); 4231 4232 // Save requested priority in case the thread hasn't been started 4233 osthread->set_native_priority(newpri); 4234 4235 // Check for critical priority request 4236 bool fxcritical = false; 4237 if (newpri == -criticalPrio) { 4238 fxcritical = true; 4239 newpri = criticalPrio; 4240 } 4241 4242 assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping"); 4243 if (!UseThreadPriorities) return OS_OK; 4244 4245 int status = 0; 4246 4247 if (!fxcritical) { 4248 // Use thr_setprio only if we have a priority that thr_setprio understands 4249 status = thr_setprio(thread->osthread()->thread_id(), newpri); 4250 } 4251 4252 if (os::Solaris::T2_libthread() || 4253 (UseBoundThreads && osthread->is_vm_created())) { 4254 int lwp_status = 4255 set_lwp_class_and_priority(osthread->thread_id(), 4256 osthread->lwp_id(), 4257 newpri, 4258 fxcritical ? 
fxLimits.schedPolicy : myClass, 4259 !fxcritical); 4260 if (lwp_status != 0 && fxcritical) { 4261 // Try again, this time without changing the scheduling class 4262 newpri = java_MaxPriority_to_os_priority; 4263 lwp_status = set_lwp_class_and_priority(osthread->thread_id(), 4264 osthread->lwp_id(), 4265 newpri, myClass, false); 4266 } 4267 status |= lwp_status; 4268 } 4269 return (status == 0) ? OS_OK : OS_ERR; 4270 } 4271 4272 4273 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) { 4274 int p; 4275 if ( !UseThreadPriorities ) { 4276 *priority_ptr = NormalPriority; 4277 return OS_OK; 4278 } 4279 int status = thr_getprio(thread->osthread()->thread_id(), &p); 4280 if (status != 0) { 4281 return OS_ERR; 4282 } 4283 *priority_ptr = p; 4284 return OS_OK; 4285 } 4286 4287 4288 // Hint to the underlying OS that a task switch would not be good. 4289 // Void return because it's a hint and can fail. 4290 void os::hint_no_preempt() { 4291 schedctl_start(schedctl_init()); 4292 } 4293 4294 static void resume_clear_context(OSThread *osthread) { 4295 osthread->set_ucontext(NULL); 4296 } 4297 4298 static void suspend_save_context(OSThread *osthread, ucontext_t* context) { 4299 osthread->set_ucontext(context); 4300 } 4301 4302 static Semaphore sr_semaphore; 4303 4304 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) { 4305 // Save and restore errno to avoid confusing native code with EINTR 4306 // after sigsuspend. 
4307 int old_errno = errno; 4308 4309 OSThread* osthread = thread->osthread(); 4310 assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread"); 4311 4312 os::SuspendResume::State current = osthread->sr.state(); 4313 if (current == os::SuspendResume::SR_SUSPEND_REQUEST) { 4314 suspend_save_context(osthread, uc); 4315 4316 // attempt to switch the state, we assume we had a SUSPEND_REQUEST 4317 os::SuspendResume::State state = osthread->sr.suspended(); 4318 if (state == os::SuspendResume::SR_SUSPENDED) { 4319 sigset_t suspend_set; // signals for sigsuspend() 4320 4321 // get current set of blocked signals and unblock resume signal 4322 thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set); 4323 sigdelset(&suspend_set, os::Solaris::SIGasync()); 4324 4325 sr_semaphore.signal(); 4326 // wait here until we are resumed 4327 while (1) { 4328 sigsuspend(&suspend_set); 4329 4330 os::SuspendResume::State result = osthread->sr.running(); 4331 if (result == os::SuspendResume::SR_RUNNING) { 4332 sr_semaphore.signal(); 4333 break; 4334 } 4335 } 4336 4337 } else if (state == os::SuspendResume::SR_RUNNING) { 4338 // request was cancelled, continue 4339 } else { 4340 ShouldNotReachHere(); 4341 } 4342 4343 resume_clear_context(osthread); 4344 } else if (current == os::SuspendResume::SR_RUNNING) { 4345 // request was cancelled, continue 4346 } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) { 4347 // ignore 4348 } else { 4349 // ignore 4350 } 4351 4352 errno = old_errno; 4353 } 4354 4355 4356 void os::interrupt(Thread* thread) { 4357 assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer"); 4358 4359 OSThread* osthread = thread->osthread(); 4360 4361 int isInterrupted = osthread->interrupted(); 4362 if (!isInterrupted) { 4363 osthread->set_interrupted(true); 4364 OrderAccess::fence(); 4365 // os::sleep() is implemented with either poll (NULL,0,timeout) or 4366 // by parking on _SleepEvent. 
If the former, thr_kill will unwedge 4367 // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper. 4368 ParkEvent * const slp = thread->_SleepEvent ; 4369 if (slp != NULL) slp->unpark() ; 4370 } 4371 4372 // For JSR166: unpark after setting status but before thr_kill -dl 4373 if (thread->is_Java_thread()) { 4374 ((JavaThread*)thread)->parker()->unpark(); 4375 } 4376 4377 // Handle interruptible wait() ... 4378 ParkEvent * const ev = thread->_ParkEvent ; 4379 if (ev != NULL) ev->unpark() ; 4380 4381 // When events are used everywhere for os::sleep, then this thr_kill 4382 // will only be needed if UseVMInterruptibleIO is true. 4383 4384 if (!isInterrupted) { 4385 int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt()); 4386 assert_status(status == 0, status, "thr_kill"); 4387 4388 // Bump thread interruption counter 4389 RuntimeService::record_thread_interrupt_signaled_count(); 4390 } 4391 } 4392 4393 4394 bool os::is_interrupted(Thread* thread, bool clear_interrupted) { 4395 assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer"); 4396 4397 OSThread* osthread = thread->osthread(); 4398 4399 bool res = osthread->interrupted(); 4400 4401 // NOTE that since there is no "lock" around these two operations, 4402 // there is the possibility that the interrupted flag will be 4403 // "false" but that the interrupt event will be set. This is 4404 // intentional. The effect of this is that Object.wait() will appear 4405 // to have a spurious wakeup, which is not harmful, and the 4406 // possibility is so rare that it is not worth the added complexity 4407 // to add yet another lock. It has also been recommended not to put 4408 // the interrupted flag into the os::Solaris::Event structure, 4409 // because it hides the issue. 
4410 if (res && clear_interrupted) { 4411 osthread->set_interrupted(false); 4412 } 4413 return res; 4414 } 4415 4416 4417 void os::print_statistics() { 4418 } 4419 4420 int os::message_box(const char* title, const char* message) { 4421 int i; 4422 fdStream err(defaultStream::error_fd()); 4423 for (i = 0; i < 78; i++) err.print_raw("="); 4424 err.cr(); 4425 err.print_raw_cr(title); 4426 for (i = 0; i < 78; i++) err.print_raw("-"); 4427 err.cr(); 4428 err.print_raw_cr(message); 4429 for (i = 0; i < 78; i++) err.print_raw("="); 4430 err.cr(); 4431 4432 char buf[16]; 4433 // Prevent process from exiting upon "read error" without consuming all CPU 4434 while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); } 4435 4436 return buf[0] == 'y' || buf[0] == 'Y'; 4437 } 4438 4439 static int sr_notify(OSThread* osthread) { 4440 int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync()); 4441 assert_status(status == 0, status, "thr_kill"); 4442 return status; 4443 } 4444 4445 // "Randomly" selected value for how long we want to spin 4446 // before bailing out on suspending a thread, also how often 4447 // we send a signal to a thread we want to resume 4448 static const int RANDOMLY_LARGE_INTEGER = 1000000; 4449 static const int RANDOMLY_LARGE_INTEGER2 = 100; 4450 4451 static bool do_suspend(OSThread* osthread) { 4452 assert(osthread->sr.is_running(), "thread should be running"); 4453 assert(!sr_semaphore.trywait(), "semaphore has invalid state"); 4454 4455 // mark as suspended and send signal 4456 if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) { 4457 // failed to switch, state wasn't running? 
4458 ShouldNotReachHere(); 4459 return false; 4460 } 4461 4462 if (sr_notify(osthread) != 0) { 4463 ShouldNotReachHere(); 4464 } 4465 4466 // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED 4467 while (true) { 4468 if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) { 4469 break; 4470 } else { 4471 // timeout 4472 os::SuspendResume::State cancelled = osthread->sr.cancel_suspend(); 4473 if (cancelled == os::SuspendResume::SR_RUNNING) { 4474 return false; 4475 } else if (cancelled == os::SuspendResume::SR_SUSPENDED) { 4476 // make sure that we consume the signal on the semaphore as well 4477 sr_semaphore.wait(); 4478 break; 4479 } else { 4480 ShouldNotReachHere(); 4481 return false; 4482 } 4483 } 4484 } 4485 4486 guarantee(osthread->sr.is_suspended(), "Must be suspended"); 4487 return true; 4488 } 4489 4490 static void do_resume(OSThread* osthread) { 4491 assert(osthread->sr.is_suspended(), "thread should be suspended"); 4492 assert(!sr_semaphore.trywait(), "invalid semaphore state"); 4493 4494 if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) { 4495 // failed to switch to WAKEUP_REQUEST 4496 ShouldNotReachHere(); 4497 return; 4498 } 4499 4500 while (true) { 4501 if (sr_notify(osthread) == 0) { 4502 if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) { 4503 if (osthread->sr.is_running()) { 4504 return; 4505 } 4506 } 4507 } else { 4508 ShouldNotReachHere(); 4509 } 4510 } 4511 4512 guarantee(osthread->sr.is_running(), "Must be running!"); 4513 } 4514 4515 void os::SuspendedThreadTask::internal_do_task() { 4516 if (do_suspend(_thread->osthread())) { 4517 SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext()); 4518 do_task(context); 4519 do_resume(_thread->osthread()); 4520 } 4521 } 4522 4523 class PcFetcher : public os::SuspendedThreadTask { 4524 public: 4525 PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {} 4526 ExtendedPC result(); 4527 protected: 4528 void 
do_task(const os::SuspendedThreadTaskContext& context); 4529 private: 4530 ExtendedPC _epc; 4531 }; 4532 4533 ExtendedPC PcFetcher::result() { 4534 guarantee(is_done(), "task is not done yet."); 4535 return _epc; 4536 } 4537 4538 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) { 4539 Thread* thread = context.thread(); 4540 OSThread* osthread = thread->osthread(); 4541 if (osthread->ucontext() != NULL) { 4542 _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext()); 4543 } else { 4544 // NULL context is unexpected, double-check this is the VMThread 4545 guarantee(thread->is_VM_thread(), "can only be called for VMThread"); 4546 } 4547 } 4548 4549 // A lightweight implementation that does not suspend the target thread and 4550 // thus returns only a hint. Used for profiling only! 4551 ExtendedPC os::get_thread_pc(Thread* thread) { 4552 // Make sure that it is called by the watcher and the Threads lock is owned. 4553 assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock"); 4554 // For now, is only used to profile the VM Thread 4555 assert(thread->is_VM_thread(), "Can only be called for VMThread"); 4556 PcFetcher fetcher(thread); 4557 fetcher.run(); 4558 return fetcher.result(); 4559 } 4560 4561 4562 // This does not do anything on Solaris. This is basically a hook for being 4563 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32. 4564 void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) { 4565 f(value, method, args, thread); 4566 } 4567 4568 // This routine may be used by user applications as a "hook" to catch signals. 4569 // The user-defined signal handler must pass unrecognized signals to this 4570 // routine, and if it returns true (non-zero), then the signal handler must 4571 // return immediately. 
// If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
//
// If this routine returns false, it is OK to call it again. This allows
// the user-defined signal handler to perform checks either before or after
// the VM performs its own checks. Naturally, the user code would be making
// a serious error if it tried to handle an exception (such as a null check
// or breakpoint) that the VM was generating for its own correct operation.
//
// This routine may recognize any of the following kinds of signals:
// SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
// os::Solaris::SIGasync
// It should be consulted by handlers for any of those signals.
// It explicitly does not recognize os::Solaris::SIGinterrupt
//
// The caller of this routine must pass in the three arguments supplied
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
// field of the structure passed to sigaction(). This routine assumes that
// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
//
// Note that the VM will print warnings if it detects conflicting signal
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
//
extern "C" JNIEXPORT int
JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
                          int abort_if_unrecognized);


// The VM-wide sa_sigaction entry point for the signals the VM installs
// handlers for (see install_signal_handlers).
void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
  int orig_errno = errno;  // Preserve errno value over signal handler.
  JVM_handle_solaris_signal(sig, info, ucVoid, true);
  errno = orig_errno;
}

/* Do not delete - if guarantee is ever removed, a signal handler (even empty)
   is needed to provoke threads blocked on IO to return an EINTR
   Note: this explicitly does NOT call JVM_handle_solaris_signal and
   does NOT participate in signal chaining due to requirement for
   NOT setting SA_RESTART to make EINTR work. */
extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
  if (UseSignalChaining) {
    struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
    if (actp && actp->sa_handler) {
      vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
    }
  }
}

// This boolean allows users to forward their own non-matching signals
// to JVM_handle_solaris_signal, harmlessly.
bool os::Solaris::signal_handlers_are_installed = false;

// For signal-chaining
bool os::Solaris::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Solaris::get_signal_action = NULL;

// Returns the handler that was installed for sig before the VM's own one:
// first ask libjsig (if loaded and sig is within its supported range), then
// fall back to the VM's own record of pre-installed handlers.
struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
  struct sigaction *actp = NULL;

  if ((libjsig_is_loaded) && (sig <= Maxlibjsigsigs)) {
    // Retrieve the old signal handler from libjsig
    actp = (*get_signal_action)(sig);
  }
  if (actp == NULL) {
    // Retrieve the preinstalled signal handler from jvm
    actp = get_preinstalled_handler(sig);
  }

  return actp;
}

// Invokes the chained (pre-existing) handler described by actp, emulating the
// kernel's treatment of sa_mask / SA_NODEFER / SA_RESETHAND / SA_SIGINFO as
// closely as possible. Returns true if the chained handler took the signal.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand;
    sa_sigaction_t sa;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    thr_sigsetmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}

// Gives any chained (user / libjsig) handler a chance at sig before the VM's
// default treatment. Returns true if a chained handler consumed the signal.
bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  bool chained = false;
  // signal-chaining
  if (UseSignalChaining) {
    struct sigaction *actp = get_chained_signal_action(sig);
    if (actp != NULL) {
      chained = call_chained_handler(actp, sig, siginfo, context);
    }
  }
  return chained;
}

// Returns the sigaction the VM displaced for sig (recorded by
// save_preinstalled_handler), or NULL if none was saved.
struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  if (preinstalled_sigs[sig] != 0) {
    return &chainedsigactions[sig];
  }
  return NULL;
}

// Records the handler that was installed for sig before the VM overwrote it,
// so the chaining machinery can invoke it later.
void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {

  assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");

  chainedsigactions[sig] = oldAct;
  preinstalled_sigs[sig] = 1;
}

// Installs the VM's handler for sig. If a foreign handler is already present,
// behavior depends on AllowUserSignalHandlers / UseSignalChaining / oktochain:
// leave it alone, save it for chaining, or abort initialization.
void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);
  void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                      : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      if (oktochain) {
        // save the old handler in jvm
        save_preinstalled_handler(sig, oldAct);
      } else {
        vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
      }
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;

  sigAct.sa_sigaction = signalHandler;
  // Handle SIGSEGV on alternate signal stack if
  // not using stack banging
  if (!UseStackBanging && sig == SIGSEGV) {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
    // Interruptible i/o requires SA_RESTART cleared so EINTR
    // is returned instead of restarting system calls
  } else if (sig == os::Solaris::SIGinterrupt()) {
    sigemptyset(&sigAct.sa_mask);
    sigAct.sa_handler = NULL;
    sigAct.sa_flags = SA_SIGINFO;
    sigAct.sa_sigaction = sigINTRHandler;
  } else {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  }
  os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);

  sigaction(sig, &sigAct, &oldAct);

  void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                       : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}


// Checks a signal's installed handler unless it has already been flagged as
// bad (recorded in check_signal_done).
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Solaris::check_signal_handler(sig)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {
  // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  // thereby preventing NULL checks.
  if(!check_addr0_done) check_addr0_done = check_addr0(tty);

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check for the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // See comments above for using JVM1/JVM2 and UseAltSigs
  DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  DO_SIGNAL_CHECK(os::Solaris::SIGasync());

}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

static os_sigaction_t os_sigaction = NULL;

// Warns (once per signal) if the handler or flags currently installed for sig
// differ from what the VM installed, i.e. some code replaced our handler.
void os::Solaris::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ?
      CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler) ;


  switch(sig) {
    case SIGSEGV:
    case SIGBUS:
    case SIGFPE:
    case SIGPIPE:
    case SIGXFSZ:
    case SIGILL:
      jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      break;

    case SHUTDOWN1_SIGNAL:
    case SHUTDOWN2_SIGNAL:
    case SHUTDOWN3_SIGNAL:
    case BREAK_SIGNAL:
      jvmHandler = (address)user_handler();
      break;

    default:
      int intrsig = os::Solaris::SIGinterrupt();
      int asynsig = os::Solaris::SIGasync();

      if (sig == intrsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
      } else if (sig == asynsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      } else {
        return;
      }
      break;
  }


  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
    tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Print all the signal handler state
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }

}

// Installs the VM's handlers for every signal it needs, bracketing the work
// with libjsig's begin/end callbacks when signal chaining is active.
void os::Solaris::install_signal_handlers() {
  bool libjsigdone = false;
  signal_handlers_are_installed = true;

  // signal-chaining
  typedef void (*signal_setting_t)();
  signal_setting_t begin_signal_setting = NULL;
  signal_setting_t end_signal_setting = NULL;
  // libjsig (if preloaded) interposes these JVM_* symbols; finding the first
  // one is how we detect that libjsig is in place.
  begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  if (begin_signal_setting != NULL) {
    end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
    get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                       dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
    get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
    libjsig_is_loaded = true;
    if (os::Solaris::get_libjsig_version != NULL) {
      libjsigversion = (*os::Solaris::get_libjsig_version)();
    }
    assert(UseSignalChaining, "should enable signal-chaining");
  }
  if (libjsig_is_loaded) {
    // Tell libjsig jvm is setting signal handlers
    (*begin_signal_setting)();
  }

  set_signal_handler(SIGSEGV, true, true);
  set_signal_handler(SIGPIPE, true, true);
  set_signal_handler(SIGXFSZ, true, true);
  set_signal_handler(SIGBUS, true, true);
  set_signal_handler(SIGILL, true, true);
  set_signal_handler(SIGFPE, true, true);


  if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {

    // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
    // can not register overridable signals which might be > 32
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
      // Tell libjsig jvm has finished setting signal handlers
      (*end_signal_setting)();
      libjsigdone = true;
    }
  }

  // Never ok to chain our SIGinterrupt
  set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
  set_signal_handler(os::Solaris::SIGasync(), true, true);

  if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig jvm finishes setting signal handlers
    (*end_signal_setting)();
  }

  // We don't activate signal checker if libjsig is in place, we trust ourselves
  // and if UserSignalHandler is installed all bets are off.
  // Log that signal checking is off only if -verbose:jni is specified.
  if (CheckJNICalls) {
    if (libjsig_is_loaded) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
    if (AllowUserSignalHandlers) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
  }
}


void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);

// Signal names indexed by signal number, 0 .. SIGLOST.
const char * signames[] = {
  "SIG0",
  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  "SIGCANCEL", "SIGLOST"
};

// Formats a human-readable name ("SIGSEGV", or "SIG<n>" for numbers beyond
// the table) for a signal into buf; returns buf, or NULL when
// exception_code is not a valid signal number.
const char* os::exception_name(int exception_code, char* buf, size_t size) {
  if (0 < exception_code && exception_code <= SIGRTMAX) {
    // signal
    if (exception_code < sizeof(signames)/sizeof(const char*)) {
      jio_snprintf(buf, size, "%s", signames[exception_code]);
    } else {
      jio_snprintf(buf, size, "SIG%d", exception_code);
    }
    return buf;
  } else {
    return NULL;
  }
}

// (Static) wrappers for the new libthread API
int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
int_fnP_thread_t_i os::Solaris::_thr_setmutator;
int_fnP_thread_t
os::Solaris::_thr_suspend_mutator;
int_fnP_thread_t os::Solaris::_thr_continue_mutator;

// (Static) wrapper for getisax(2) call.
os::Solaris::getisax_func_t os::Solaris::_getisax = 0;

// (Static) wrappers for the liblgrp API
os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;

// (Static) wrapper for meminfo() call.
os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;

// Looks up name via dlsym; returns NULL when not found (no error reported).
static address resolve_symbol_lazy(const char* name) {
  address addr = (address) dlsym(RTLD_DEFAULT, name);
  if(addr == NULL) {
    // RTLD_DEFAULT was not defined on some early versions of 2.5.1
    addr = (address) dlsym(RTLD_NEXT, name);
  }
  return addr;
}

// Looks up name via dlsym; aborts the VM if the symbol cannot be resolved.
static address resolve_symbol(const char* name) {
  address addr = resolve_symbol_lazy(name);
  if(addr == NULL) {
    fatal(dlerror());
  }
  return addr;
}



// isT2_libthread()
//
// Routine to determine if we are currently using the new T2 libthread.
//
// We determine if we are using T2 by reading /proc/self/lstatus and
// looking for a thread with the ASLWP bit set. If we find this status
// bit set, we must assume that we are NOT using T2. The T2 team
// has approved this algorithm.
//
// We need to determine if we are running with the new T2 libthread
// since setting native thread priorities is handled differently
// when using this library. All threads created using T2 are bound
// threads. Calling thr_setprio is meaningless in this case.
//
bool isT2_libthread() {
  static prheader_t * lwpArray = NULL;
  static int lwpSize = 0;
  static int lwpFile = -1;
  lwpstatus_t * that;
  char lwpName [128];  // NOTE(review): appears unused - candidate for removal.
  bool isT2 = false;

#define ADR(x)  ((uintptr_t)(x))
#define LWPINDEX(ary,ix)   ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))

  lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
  if (lwpFile < 0) {
    if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
    return false;
  }
  // Start with a 16K buffer; grow it to the size the kernel reports until the
  // whole LWP status snapshot fits.
  lwpSize = 16*1024;
  for (;;) {
    ::lseek64 (lwpFile, 0, SEEK_SET);
    lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
    if (::read(lwpFile, lwpArray, lwpSize) < 0) {
      if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
      break;
    }
    if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
      // We got a good snapshot - now iterate over the list.
      int aslwpcount = 0;
      for (int i = 0; i < lwpArray->pr_nent; i++ ) {
        that = LWPINDEX(lwpArray,i);
        if (that->pr_flags & PR_ASLWP) {
          aslwpcount++;
        }
      }
      // No ASLWP found => we must be on the T2 libthread.
      if (aslwpcount == 0) isT2 = true;
      break;
    }
    lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
    FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);  // retry.
  }

  FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
  ::close (lwpFile);
  if (ThreadPriorityVerbose) {
    if (isT2) tty->print_cr("We are running with a T2 libthread\n");
    else tty->print_cr("We are not running with a T2 libthread\n");
  }
  return isT2;
}


// Resolves the libthread entry points the VM needs (getstate/setstate and the
// mutator suspend/resume API), verifying the installed libthread is new
// enough (Solaris 2.6 or later).
void os::Solaris::libthread_init() {
  address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");

  // Determine if we are running with the new T2 libthread
  os::Solaris::set_T2_libthread(isT2_libthread());

  lwp_priocntl_init();

  // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  if(func == NULL) {
    func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on an new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
    guarantee(func != NULL, "libthread.so is too old.");
  }

  // Initialize the new libthread getstate API wrappers
  func = resolve_symbol("thr_getstate");
  os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));

  func = resolve_symbol("thr_setstate");
  os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));

  func = resolve_symbol("thr_setmutator");
  os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));

  func = resolve_symbol("thr_suspend_mutator");
  os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  func = resolve_symbol("thr_continue_mutator");
  os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  // Record the address range of libthread's signal handling code so faults
  // inside it can be recognized.
  int size;
  void (*handler_info_func)(address *, int *);
  handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  handler_info_func(&handler_start, &size);
  handler_end = handler_start + size;
}


int_fnP_mutex_tP
os::Solaris::_mutex_lock;
int_fnP_mutex_tP os::Solaris::_mutex_trylock;
int_fnP_mutex_tP os::Solaris::_mutex_unlock;
int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
int_fnP_mutex_tP os::Solaris::_mutex_destroy;
int os::Solaris::_mutex_scope = USYNC_THREAD;

int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
int_fnP_cond_tP os::Solaris::_cond_signal;
int_fnP_cond_tP os::Solaris::_cond_broadcast;
int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
int_fnP_cond_tP os::Solaris::_cond_destroy;
int os::Solaris::_cond_scope = USYNC_THREAD;

// Binds the VM's mutex/condvar function pointers to one of three backends:
// raw LWP primitives (UseLWPSynchronization), pthreads (UsePthreads), or the
// default Solaris thread library.
void os::Solaris::synchronization_init() {
  if(UseLWPSynchronization) {
    os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
    os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
    os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
    os::Solaris::set_mutex_init(lwp_mutex_init);
    os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
    os::Solaris::set_mutex_scope(USYNC_THREAD);

    os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
    os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
    os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
    os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
    os::Solaris::set_cond_init(lwp_cond_init);
    os::Solaris::set_cond_destroy(lwp_cond_destroy);
    os::Solaris::set_cond_scope(USYNC_THREAD);
  }
  else {
    os::Solaris::set_mutex_scope(USYNC_THREAD);
    os::Solaris::set_cond_scope(USYNC_THREAD);

    if(UsePthreads) {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
      os::Solaris::set_mutex_init(pthread_mutex_default_init);
      os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
      os::Solaris::set_cond_init(pthread_cond_default_init);
      os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
    }
    else {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
      os::Solaris::set_mutex_init(::mutex_init);
      os::Solaris::set_mutex_destroy(::mutex_destroy);

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
      os::Solaris::set_cond_init(::cond_init);
      os::Solaris::set_cond_destroy(::cond_destroy);
    }
  }
}

// Loads liblgrp.so.1 and resolves the locality-group (NUMA) API; returns
// true on success, false if the library is unavailable.
bool os::Solaris::liblgrp_init() {
  void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
  if (handle != NULL) {
    os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
    os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
    os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
    os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
    os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
    os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
    os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
    os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
                                                      dlsym(handle, "lgrp_cookie_stale")));

    lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
    set_lgrp_cookie(c);
    return true;
  }
  return false;
}

// Resolves optional libc entry points (getisax, meminfo) that may be absent
// on older Solaris releases; leaves the pointers NULL when missing.
void os::Solaris::misc_sym_init() {
  address func;

  // getisax
  func = resolve_symbol_lazy("getisax");
  if (func != NULL) {
    os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
  }

  // meminfo
  func = resolve_symbol_lazy("meminfo");
  if (func != NULL) {
    os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
  }
}

// Forwards to getisax(2); only valid after misc_sym_init found the symbol.
uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  assert(_getisax != NULL, "_getisax not set");
  return _getisax(array, n);
}

// Symbol doesn't exist in Solaris 8 pset.h
#ifndef PS_MYID
#define PS_MYID -3
#endif

// int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
static pset_getloadavg_type pset_getloadavg_ptr = NULL;

// Resolves pset_getloadavg lazily; the pointer stays NULL (with an optional
// warning) when the libc entry point does not exist.
void init_pset_getloadavg_ptr(void) {
  pset_getloadavg_ptr =
    (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
  if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
    warning("pset_getloadavg function not found");
  }
}

int os::Solaris::_dev_zero_fd = -1;

// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = getpid();

  max_hrtime = first_hrtime = gethrtime();

  init_random(1234567);

  page_size = sysconf(_SC_PAGESIZE);
  if (page_size == -1)
    fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
                  strerror(errno)));
  init_page_sizes((size_t) page_size);

  Solaris::initialize_system_info();

  // Initialize misc. symbols as soon as possible, so we can use them
  // if we need them.
  Solaris::misc_sym_init();

  int fd = ::open("/dev/zero", O_RDWR);
  if (fd < 0) {
    fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
  } else {
    Solaris::set_dev_zero_fd(fd);

    // Close on exec, child won't inherit.
    fcntl(fd, F_SETFD, FD_CLOEXEC);
  }

  clock_tics_per_sec = CLK_TCK;

  // check if dladdr1() exists; dladdr1 can provide more information than
  // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  // and is available on linker patches for 5.7 and 5.8.
  // libdl.so must have been loaded, this call is just an entry lookup
  void * hdl = dlopen("libdl.so", RTLD_NOW);
  if (hdl)
    dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));

  // (Solaris only) this switches to calls that actually do locking.
5307 ThreadCritical::initialize(); 5308 5309 main_thread = thr_self(); 5310 5311 // Constant minimum stack size allowed. It must be at least 5312 // the minimum of what the OS supports (thr_min_stack()), and 5313 // enough to allow the thread to get to user bytecode execution. 5314 Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed); 5315 // If the pagesize of the VM is greater than 8K determine the appropriate 5316 // number of initial guard pages. The user can change this with the 5317 // command line arguments, if needed. 5318 if (vm_page_size() > 8*K) { 5319 StackYellowPages = 1; 5320 StackRedPages = 1; 5321 StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size(); 5322 } 5323 } 5324 5325 // To install functions for atexit system call 5326 extern "C" { 5327 static void perfMemory_exit_helper() { 5328 perfMemory_exit(); 5329 } 5330 } 5331 5332 // this is called _after_ the global arguments have been parsed 5333 jint os::init_2(void) { 5334 // try to enable extended file IO ASAP, see 6431278 5335 os::Solaris::try_enable_extended_io(); 5336 5337 // Allocate a single page and mark it as readable for safepoint polling. Also 5338 // use this first mmap call to check support for MAP_ALIGN. 
5339 address polling_page = (address)Solaris::mmap_chunk((char*)page_size, 5340 page_size, 5341 MAP_PRIVATE | MAP_ALIGN, 5342 PROT_READ); 5343 if (polling_page == NULL) { 5344 has_map_align = false; 5345 polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE, 5346 PROT_READ); 5347 } 5348 5349 os::set_polling_page(polling_page); 5350 5351 #ifndef PRODUCT 5352 if( Verbose && PrintMiscellaneous ) 5353 tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page); 5354 #endif 5355 5356 if (!UseMembar) { 5357 address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE ); 5358 guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page"); 5359 os::set_memory_serialize_page( mem_serialize_page ); 5360 5361 #ifndef PRODUCT 5362 if(Verbose && PrintMiscellaneous) 5363 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page); 5364 #endif 5365 } 5366 5367 os::large_page_init(); 5368 5369 // Check minimum allowable stack size for thread creation and to initialize 5370 // the java system classes, including StackOverflowError - depends on page 5371 // size. Add a page for compiler2 recursion in main thread. 5372 // Add in 2*BytesPerWord times page size to account for VM stack during 5373 // class initialization depending on 32 or 64 bit VM. 
5374 os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed, 5375 (size_t)(StackYellowPages+StackRedPages+StackShadowPages+ 5376 2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size); 5377 5378 size_t threadStackSizeInBytes = ThreadStackSize * K; 5379 if (threadStackSizeInBytes != 0 && 5380 threadStackSizeInBytes < os::Solaris::min_stack_allowed) { 5381 tty->print_cr("\nThe stack size specified is too small, Specify at least %dk", 5382 os::Solaris::min_stack_allowed/K); 5383 return JNI_ERR; 5384 } 5385 5386 // For 64kbps there will be a 64kb page size, which makes 5387 // the usable default stack size quite a bit less. Increase the 5388 // stack for 64kb (or any > than 8kb) pages, this increases 5389 // virtual memory fragmentation (since we're not creating the 5390 // stack on a power of 2 boundary. The real fix for this 5391 // should be to fix the guard page mechanism. 5392 5393 if (vm_page_size() > 8*K) { 5394 threadStackSizeInBytes = (threadStackSizeInBytes != 0) 5395 ? threadStackSizeInBytes + 5396 ((StackYellowPages + StackRedPages) * vm_page_size()) 5397 : 0; 5398 ThreadStackSize = threadStackSizeInBytes/K; 5399 } 5400 5401 // Make the stack size a multiple of the page size so that 5402 // the yellow/red zones can be guarded. 5403 JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, 5404 vm_page_size())); 5405 5406 Solaris::libthread_init(); 5407 5408 if (UseNUMA) { 5409 if (!Solaris::liblgrp_init()) { 5410 UseNUMA = false; 5411 } else { 5412 size_t lgrp_limit = os::numa_get_groups_num(); 5413 int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal); 5414 size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit); 5415 FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal); 5416 if (lgrp_num < 2) { 5417 // There's only one locality group, disable NUMA. 5418 UseNUMA = false; 5419 } 5420 } 5421 // ISM is not compatible with the NUMA allocator - it always allocates 5422 // pages round-robin across the lgroups. 
5423 if (UseNUMA && UseLargePages && UseISM) { 5424 if (!FLAG_IS_DEFAULT(UseNUMA)) { 5425 if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseISM)) { 5426 UseLargePages = false; 5427 } else { 5428 warning("UseNUMA is not compatible with ISM large pages, disabling NUMA allocator"); 5429 UseNUMA = false; 5430 } 5431 } else { 5432 UseNUMA = false; 5433 } 5434 } 5435 if (!UseNUMA && ForceNUMA) { 5436 UseNUMA = true; 5437 } 5438 } 5439 5440 Solaris::signal_sets_init(); 5441 Solaris::init_signal_mem(); 5442 Solaris::install_signal_handlers(); 5443 5444 if (libjsigversion < JSIG_VERSION_1_4_1) { 5445 Maxlibjsigsigs = OLDMAXSIGNUM; 5446 } 5447 5448 // initialize synchronization primitives to use either thread or 5449 // lwp synchronization (controlled by UseLWPSynchronization) 5450 Solaris::synchronization_init(); 5451 5452 if (MaxFDLimit) { 5453 // set the number of file descriptors to max. print out error 5454 // if getrlimit/setrlimit fails but continue regardless. 5455 struct rlimit nbr_files; 5456 int status = getrlimit(RLIMIT_NOFILE, &nbr_files); 5457 if (status != 0) { 5458 if (PrintMiscellaneous && (Verbose || WizardMode)) 5459 perror("os::init_2 getrlimit failed"); 5460 } else { 5461 nbr_files.rlim_cur = nbr_files.rlim_max; 5462 status = setrlimit(RLIMIT_NOFILE, &nbr_files); 5463 if (status != 0) { 5464 if (PrintMiscellaneous && (Verbose || WizardMode)) 5465 perror("os::init_2 setrlimit failed"); 5466 } 5467 } 5468 } 5469 5470 // Calculate theoretical max. size of Threads to guard gainst 5471 // artifical out-of-memory situations, where all available address- 5472 // space has been reserved by thread stacks. Default stack size is 1Mb. 5473 size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ? 5474 JavaThread::stack_size_at_create() : (1*K*K); 5475 assert(pre_thread_stack_size != 0, "Must have a stack"); 5476 // Solaris has a maximum of 4Gb of user programs. Calculate the thread limit when 5477 // we should start doing Virtual Memory banging. 
Currently when the threads will 5478 // have used all but 200Mb of space. 5479 size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K); 5480 Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size; 5481 5482 // at-exit methods are called in the reverse order of their registration. 5483 // In Solaris 7 and earlier, atexit functions are called on return from 5484 // main or as a result of a call to exit(3C). There can be only 32 of 5485 // these functions registered and atexit() does not set errno. In Solaris 5486 // 8 and later, there is no limit to the number of functions registered 5487 // and atexit() sets errno. In addition, in Solaris 8 and later, atexit 5488 // functions are called upon dlclose(3DL) in addition to return from main 5489 // and exit(3C). 5490 5491 if (PerfAllowAtExitRegistration) { 5492 // only register atexit functions if PerfAllowAtExitRegistration is set. 5493 // atexit functions can be delayed until process exit time, which 5494 // can be problematic for embedded VM situations. Embedded VMs should 5495 // call DestroyJavaVM() to assure that VM resources are released. 5496 5497 // note: perfMemory_exit_helper atexit function may be removed in 5498 // the future if the appropriate cleanup code can be added to the 5499 // VM_Exit VMOperation's doit method. 
    // Register the perf-memory cleanup hook; failure is non-fatal, the VM
    // merely loses the atexit-time cleanup of the perf data file.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // Init pset_loadavg function pointer
  init_pset_getloadavg_ptr();

  return JNI_OK;
}

// Third phase of platform initialization: nothing to do on Solaris.
void os::init_3(void) {
  return;
}

// Mark the polling page as unreadable, so that a safepoint poll faults.
// Failure to change the protection is unrecoverable, hence fatal().
void os::make_polling_page_unreadable(void) {
  if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
    fatal("Could not disable polling page");
};

// Mark the polling page as readable again (safepoint over).
void os::make_polling_page_readable(void) {
  if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
    fatal("Could not enable polling page");
};

// OS interface.

// No C-heap consistency check is implemented on Solaris; always reports OK.
bool os::check_heap(bool force) { return true; }

typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
// Lazily-resolved pointer to the libc vsnprintf implementation (see below).
static vsnprintf_t sol_vsnprintf = NULL;

// vsnprintf that resolves the real libc symbol via dlsym() on first use.
// Tries "__vsnprintf" then "vsnprintf", first in objects loaded after
// libjvm (RTLD_NEXT), then in objects loaded before it (RTLD_DEFAULT).
int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
  if (!sol_vsnprintf) {
    //search for the named symbol in the objects that were loaded after libjvm
    void* where = RTLD_NEXT;
    if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
      sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
    if (!sol_vsnprintf){
      //search for the named symbol in the objects that were loaded before libjvm
      where = RTLD_DEFAULT;
      if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
        sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
      assert(sol_vsnprintf != NULL, "vsnprintf not found");
    }
  }
  return (*sol_vsnprintf)(buf, count, fmt, argptr);
}


// Is a (classpath) directory empty?
// Returns true if 'path' cannot be opened or contains no entries other
// than "." and "..".  Note: an unreadable directory reports as empty.
bool os::dir_is_empty(const char* path) {
  DIR *dir = NULL;
  struct dirent *ptr;

  dir = opendir(path);
  if (dir == NULL) return true;

  /* Scan the directory */
  bool result = true;
  char buf[sizeof(struct dirent) + MAX_PATH];
  struct dirent *dbuf = (struct dirent *) buf;
  // Two-argument readdir is the Solaris reentrant variant taking a
  // caller-supplied dirent buffer (mapped via the platform headers).
  while (result && (ptr = readdir(dir, dbuf)) != NULL) {
    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
      result = false;
    }
  }
  closedir(dir);
  return result;
}

// This code originates from JDK's sysOpen and open64_w
// from src/solaris/hpi/src/system_md.c

#ifndef O_DELETE
#define O_DELETE 0x10000
#endif

// Open a file. Unlink the file immediately after open returns
// if the specified oflag has the O_DELETE flag set.
// O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c

int os::open(const char *path, int oflag, int mode) {
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;
  // O_DELETE is a JVM-private flag: strip it before the real open(2).
  int o_delete = (oflag & O_DELETE);
  oflag = oflag & ~O_DELETE;

  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  //If the open succeeded, the file might still be a directory
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        // Opening a directory is rejected: callers expect a regular file.
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      ::close(fd);
      return -1;
    }
  }
  /*
   * 32-bit Solaris systems suffer from:
   *
   * - an historical default soft limit of 256 per-process file
   *   descriptors that is too low for many Java programs.
   *
   * - a design flaw where file descriptors created using stdio
   *   fopen must be less than 256, _even_ when the first limit above
   *   has been raised.  This can cause calls to fopen (but not calls to
   *   open, for example) to fail mysteriously, perhaps in 3rd party
   *   native code (although the JDK itself uses fopen).  One can hardly
   *   criticize them for using this most standard of all functions.
   *
   * We attempt to make everything work anyways by:
   *
   * - raising the soft limit on per-process file descriptors beyond
   *   256
   *
   * - As of Solaris 10u4, we can request that Solaris raise the 256
   *   stdio fopen limit by calling function enable_extended_FILE_stdio.
   *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
   *
   * - If we are stuck on an old (pre 10u4) Solaris system, we can
   *   workaround the bug by remapping non-stdio file descriptors below
   *   256 to ones beyond 256, which is done below.
   *
   * See:
   * 1085341: 32-bit stdio routines should support file descriptors >255
   * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
   * 6431278: Netbeans crash on 32 bit Solaris: need to call
   *          enable_extended_FILE_stdio() in VM initialisation
   * Giri Mandalika's blog
   * http://technopark02.blogspot.com/2005_05_01_archive.html
   */
#ifndef _LP64
  if ((!enabled_extended_FILE_stdio) && fd < 256) {
    // F_DUPFD returns the lowest free descriptor >= 256; on success the
    // original low descriptor is released so stdio can use it.
    int newfd = ::fcntl(fd, F_DUPFD, 256);
    if (newfd != -1) {
      ::close(fd);
      fd = newfd;
    }
  }
#endif // 32-bit Solaris
  /*
   * All file descriptors that are opened in the JVM and not
   * specifically destined for a subprocess should have the
   * close-on-exec flag set.  If we don't set it, then careless 3rd
   * party native code might fork and exec without closing all
   * appropriate file descriptors (e.g. as we do in closeDescriptors in
   * UNIXProcess.c), and this in turn might:
   *
   * - cause end-of-file to fail to be detected on some file
   *   descriptors, resulting in mysterious hangs, or
   *
   * - might cause an fopen in the subprocess to fail on a system
   *   suffering from bug 1085341.
   *
   * (Yes, the default setting of the close-on-exec flag is a Unix
   * design flaw)
   *
   * See:
   * 1085341: 32-bit stdio routines should support file descriptors >255
   * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
   * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
   */
#ifdef FD_CLOEXEC
  {
    // Best effort: a failed F_GETFD/F_SETFD is silently ignored.
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1)
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif

  if (o_delete != 0) {
    // Unlink immediately; the open descriptor keeps the inode alive.
    ::unlink(path);
  }
  return fd;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = O_WRONLY | O_CREAT;
  if (!rewrite_existing) {
    // O_EXCL makes the open fail if the file already exists.
    oflags |= O_EXCL;
  }
  return ::open64(path, oflags, S_IREAD | S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}

// 64-bit-clean lseek wrapper; returns the resulting offset or -1.
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::lseek64(fd, offset, whence);
}

// No path translation needed on Solaris; returned pointer is the input.
char * os::native_path(char *path) {
  return path;
}

int os::ftruncate(int fd, jlong length) {
  return ::ftruncate64(fd, length);
}

// fsync, retried on EINTR via the RESTARTABLE_RETURN_INT wrapper.
int os::fsync(int fd) {
  RESTARTABLE_RETURN_INT(::fsync(fd));
}

// Store the number of bytes available to read without blocking in *bytes.
// Returns 1 on success, 0 on failure.  For character devices, FIFOs and
// sockets FIONREAD is used; for seekable files the end-cur distance.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      /*
       * XXX: is the following call interruptible? If so, this might
       * need to go through the INTERRUPT_IO() wrapper as for other
       * blocking, interruptible calls in this file.
       */
      int n,ioctl_return;

      INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n),ioctl_return,os::Solaris::clear_interrupted);
      if (ioctl_return>= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  // Fall back to measuring (end - current) via three lseeks; the third
  // restores the original position.
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}

// Map a block of memory.
// read_only selects PROT_READ/MAP_SHARED vs PROT_READ|PROT_WRITE/MAP_PRIVATE;
// a non-NULL addr is honored exactly via MAP_FIXED.  Returns NULL on failure.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  int prot;
  int flags;

  if (read_only) {
    prot = PROT_READ;
    flags = MAP_SHARED;
  } else {
    prot = PROT_READ | PROT_WRITE;
    flags = MAP_PRIVATE;
  }

  if (allow_exec) {
    prot |= PROT_EXEC;
  }

  if (addr != NULL) {
    flags |= MAP_FIXED;
  }

  char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
                                     fd, file_offset);
  if (mapped_address == MAP_FAILED) {
    return NULL;
  }
  return mapped_address;
}


// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}


// Unmap a block of memory.
5803 bool os::pd_unmap_memory(char* addr, size_t bytes) { 5804 return munmap(addr, bytes) == 0; 5805 } 5806 5807 void os::pause() { 5808 char filename[MAX_PATH]; 5809 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 5810 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 5811 } else { 5812 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 5813 } 5814 5815 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 5816 if (fd != -1) { 5817 struct stat buf; 5818 ::close(fd); 5819 while (::stat(filename, &buf) == 0) { 5820 (void)::poll(NULL, 0, 100); 5821 } 5822 } else { 5823 jio_fprintf(stderr, 5824 "Could not open pause file '%s', continuing immediately.\n", filename); 5825 } 5826 } 5827 5828 #ifndef PRODUCT 5829 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS 5830 // Turn this on if you need to trace synch operations. 5831 // Set RECORD_SYNCH_LIMIT to a large-enough value, 5832 // and call record_synch_enable and record_synch_disable 5833 // around the computation of interest. 

void record_synch(char* name, bool returning);  // defined below

// RAII tracer: records entry to a synch primitive in its constructor and
// the matching return in its destructor.
class RecordSynch {
  char* _name;
 public:
  RecordSynch(char* name) :_name(name)
                 { record_synch(_name, false); }
  ~RecordSynch() { record_synch(_name, true); }
};

// Defines an extern "C" interposer for a libc/libthread synch function.
// On first call the real implementation is resolved with dlsym(RTLD_NEXT);
// every call bumps a per-function counter, runs the 'inner' sanity check,
// traces entry/exit via RecordSynch, then tail-calls the real function.
#define CHECK_SYNCH_OP(ret, name, params, args, inner) \
extern "C" ret name params { \
  typedef ret name##_t params; \
  static name##_t* implem = NULL; \
  static int callcount = 0; \
  if (implem == NULL) { \
    implem = (name##_t*) dlsym(RTLD_NEXT, #name); \
    if (implem == NULL) fatal(dlerror()); \
  } \
  ++callcount; \
  RecordSynch _rs(#name); \
  inner; \
  return implem args; \
}
// in dbx, examine callcounts this way:
// for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done

// A synch object passed to these primitives must live in the C heap,
// never in the (movable) Java heap.
#define CHECK_POINTER_OK(p) \
  (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
#define CHECK_MU \
  if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
#define CHECK_CV \
  if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
// NOTE(review): the extra 'false' argument to fatal() below does not match
// the single-argument fatal() used by CHECK_MU/CHECK_CV above — presumably
// a leftover from an older fatal() overload; confirm against debug.hpp.
// (Debug-only code: this block is compiled only when
// INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS is defined.)
#define CHECK_P(p) \
  if (!CHECK_POINTER_OK(p)) fatal(false, "Pointer must be in C heap only.");

#define CHECK_MUTEX(mutex_op) \
  CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);

CHECK_MUTEX(  mutex_lock)
CHECK_MUTEX( _mutex_lock)
CHECK_MUTEX(  mutex_unlock)
CHECK_MUTEX(_mutex_unlock)
CHECK_MUTEX(  mutex_trylock)
CHECK_MUTEX(_mutex_trylock)

#define CHECK_COND(cond_op) \
  CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);

CHECK_COND( cond_wait);
CHECK_COND(_cond_wait);
CHECK_COND(_cond_wait_cancel);

#define CHECK_COND2(cond_op) \
  CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);

CHECK_COND2( cond_timedwait);

CHECK_COND2(_cond_timedwait);
CHECK_COND2(_cond_timedwait_cancel);

// do the _lwp_* versions too
#define mutex_t lwp_mutex_t
#define cond_t lwp_cond_t
CHECK_MUTEX(  _lwp_mutex_lock)
CHECK_MUTEX(  _lwp_mutex_unlock)
CHECK_MUTEX(  _lwp_mutex_trylock)
CHECK_MUTEX( __lwp_mutex_lock)
CHECK_MUTEX( __lwp_mutex_unlock)
CHECK_MUTEX( __lwp_mutex_trylock)
CHECK_MUTEX(___lwp_mutex_lock)
CHECK_MUTEX(___lwp_mutex_unlock)

CHECK_COND(  _lwp_cond_wait);
CHECK_COND( __lwp_cond_wait);
CHECK_COND(___lwp_cond_wait);

CHECK_COND2( _lwp_cond_timedwait);
CHECK_COND2( __lwp_cond_timedwait);
#undef mutex_t
#undef cond_t

CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);


// recording machinery:

// Ring-less trace buffer: recording simply stops once the limit is hit.
enum { RECORD_SYNCH_LIMIT = 200 };
char* record_synch_name[RECORD_SYNCH_LIMIT];
void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
bool record_synch_returning[RECORD_SYNCH_LIMIT];
thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
int record_synch_count = 0;
bool record_synch_enabled = false;

// in dbx, examine recorded data this way:
// for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done

// Append one trace record (entry or exit of a synch op) if enabled and
// below the limit.  Note: not itself synchronized — debug-only machinery.
void record_synch(char* name, bool returning) {
  if (record_synch_enabled) {
    if (record_synch_count < RECORD_SYNCH_LIMIT) {
      record_synch_name[record_synch_count] = name;
      record_synch_returning[record_synch_count] = returning;
      record_synch_thread[record_synch_count] = thr_self();
      record_synch_arg0ptr[record_synch_count] = &name;
      record_synch_count++;
    }
    // put more checking code here:
    // ...
  }
}

void record_synch_enable() {
  // start collecting trace data, if not already doing so
  if (!record_synch_enabled) record_synch_count = 0;
  record_synch_enabled = true;
}

void record_synch_disable() {
  // stop collecting trace data
  record_synch_enabled = false;
}

#endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
#endif // PRODUCT

// Byte offset of pr_utime within prusage_t, and the length of the
// pr_utime..pr_ttime span — used below to pread() just the time fields
// out of /proc/<pid>/lwp/<lwpid>/lwpusage.
const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
                               (intptr_t)(&((prusage_t *)(NULL))->pr_utime);


// JVMTI & JVM monitoring and management support
// The thread_cpu_time() and current_thread_cpu_time() are only
// supported if is_thread_cpu_time_supported() returns true.
// They are not supported on Solaris T1.

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread *)
// returns the fast estimate available on the platform.

// hrtime_t gethrvtime() return value includes
// user time but does not include system time
jlong os::current_thread_cpu_time() {
  return (jlong) gethrvtime();
}

jlong os::thread_cpu_time(Thread *thread) {
  // return user level CPU time only to be consistent with
  // what current_thread_cpu_time returns.
  // thread_cpu_time_info() must be changed if this changes
  return os::thread_cpu_time(thread, false /* user time only */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  if (user_sys_cpu_time) {
    return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  } else {
    // Fast path: gethrvtime() (user time only), see above.
    return os::current_thread_cpu_time();
  }
}

// CPU time of an arbitrary thread, read from its lwpusage file in /proc.
// Returns nanoseconds, or -1 if the file cannot be opened or read.
jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  char proc_name[64];
  int count;
  prusage_t prusage;
  jlong lwp_time;
  int fd;

  sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
          getpid(),
          thread->osthread()->lwp_id());
  fd = ::open(proc_name, O_RDONLY);
  if ( fd == -1 ) return -1;

  // Read only the pr_utime..pr_ttime window of the prusage struct,
  // retrying on EINTR.
  do {
    count = ::pread(fd,
                    (void *)&prusage.pr_utime,
                    thr_time_size,
                    thr_time_off);
  } while (count < 0 && errno == EINTR);
  ::close(fd);
  if ( count < 0 ) return -1;

  if (user_sys_cpu_time) {
    // user + system CPU time
    lwp_time = (((jlong)prusage.pr_stime.tv_sec +
                 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
                 (jlong)prusage.pr_stime.tv_nsec +
                 (jlong)prusage.pr_utime.tv_nsec;
  } else {
    // user level CPU time only
    lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
                (jlong)prusage.pr_utime.tv_nsec;
  }

  return(lwp_time);
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}

// Per-thread CPU time needs either the T2 libthread or bound threads.
bool os::is_thread_cpu_time_supported() {
  if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
    return true;
  } else {
    return false;
  }
}

// System loadavg support. Returns -1 if load average cannot be obtained.
// Return the load average for our processor set if the primitive exists
// (Solaris 9 and later). Otherwise just return system wide loadavg.
int os::loadavg(double loadavg[], int nelem) {
  if (pset_getloadavg_ptr != NULL) {
    return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
  } else {
    return ::getloadavg(loadavg, nelem);
  }
}

//---------------------------------------------------------------------------------

// Symbolize 'addr' via dladdr() and print "address: symbol+offset in lib"
// to 'st'.  With -verbose also disassembles ~80 bytes around the address.
// Returns false if dladdr() cannot attribute the address.
bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo)) {
#ifdef _LP64
    st->print("0x%016lx: ", addr);
#else
    st->print("0x%08x: ", addr);
#endif
    if (dlinfo.dli_sname != NULL)
      st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
    else if (dlinfo.dli_fname)
      st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
    else
      st->print("<absolute address>");
    if (dlinfo.dli_fname) st->print(" in %s", dlinfo.dli_fname);
#ifdef _LP64
    if (dlinfo.dli_fbase) st->print(" at 0x%016lx", dlinfo.dli_fbase);
#else
    if (dlinfo.dli_fbase) st->print(" at 0x%08x", dlinfo.dli_fbase);
#endif
    st->cr();

    if (Verbose) {
      // decode some bytes around the PC
      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
      address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
      address lowest = (address) dlinfo.dli_sname;
      if (!lowest)  lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest)  begin = lowest;
      // If the window crosses into the next symbol, stop at its start.
      Dl_info dlinfo2;
      if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
        end = (address) dlinfo2.dli_saddr;
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}

// Following function has been added to support HotSparc's libjvm.so running
// under Solaris production JDK 1.2.2 / 1.3.0.  These came from
// src/solaris/hpi/native_threads in the EVM codebase.
//
// NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
// libraries and should thus be removed. We will leave it behind for a while
// until we no longer want to able to run on top of 1.3.0 Solaris production
// JDK. See 4341971.

#define STACK_SLACK 0x800

extern "C" {
  // Bytes of stack remaining below the current frame, minus a safety slack.
  intptr_t sysThreadAvailableStackWithSlack() {
    stack_t st;
    intptr_t retval, stack_top;
    retval = thr_stksegment(&st);
    assert(retval == 0, "incorrect return value from thr_stksegment");
    assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
    assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
    stack_top=(intptr_t)st.ss_sp-st.ss_size;
    return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  }
}

// ObjectMonitor park-unpark infrastructure ...
//
// We implement Solaris and Linux PlatformEvents with the
// obvious condvar-mutex-flag triple.
// Another alternative that works quite well is pipes:
// Each PlatformEvent consists of a pipe-pair.
// The thread associated with the PlatformEvent
// calls park(), which reads from the input end of the pipe.
// Unpark() writes into the other end of the pipe.
// The write-side of the pipe must be set NDELAY.
// Unfortunately pipes consume a large # of handles.
6153 // Native solaris lwp_park() and lwp_unpark() work nicely, too. 6154 // Using pipes for the 1st few threads might be workable, however. 6155 // 6156 // park() is permitted to return spuriously. 6157 // Callers of park() should wrap the call to park() in 6158 // an appropriate loop. A litmus test for the correct 6159 // usage of park is the following: if park() were modified 6160 // to immediately return 0 your code should still work, 6161 // albeit degenerating to a spin loop. 6162 // 6163 // An interesting optimization for park() is to use a trylock() 6164 // to attempt to acquire the mutex. If the trylock() fails 6165 // then we know that a concurrent unpark() operation is in-progress. 6166 // in that case the park() code could simply set _count to 0 6167 // and return immediately. The subsequent park() operation *might* 6168 // return immediately. That's harmless as the caller of park() is 6169 // expected to loop. By using trylock() we will have avoided a 6170 // avoided a context switch caused by contention on the per-thread mutex. 6171 // 6172 // TODO-FIXME: 6173 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the 6174 // objectmonitor implementation. 6175 // 2. Collapse the JSR166 parker event, and the 6176 // objectmonitor ParkEvent into a single "Event" construct. 6177 // 3. In park() and unpark() add: 6178 // assert (Thread::current() == AssociatedWith). 6179 // 4. add spurious wakeup injection on a -XX:EarlyParkReturn=N switch. 6180 // 1-out-of-N park() operations will return immediately. 6181 // 6182 // _Event transitions in park() 6183 // -1 => -1 : illegal 6184 // 1 => 0 : pass - return immediately 6185 // 0 => -1 : block 6186 // 6187 // _Event serves as a restricted-range semaphore. 6188 // 6189 // Another possible encoding of _Event would be with 6190 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits. 6191 // 6192 // TODO-FIXME: add DTRACE probes for: 6193 // 1. Tx parks 6194 // 2. Ty unparks Tx 6195 // 3. 
// Tx resumes from park


// value determined through experimentation
#define ROUNDINGFIX 11

// utility to compute the abstime argument to timedwait.
// TODO-FIXME: switch from compute_abstime() to unpackTime().

// Converts a relative timeout in milliseconds into the absolute
// timestruc_t that cond_timedwait() expects.  Negative timeouts are
// clamped to 0; very large ones to the platform's max wait period.
static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  // millis is the relative timeout time
  // abstime will be the absolute timeout time
  if (millis < 0) millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  jlong max_wait_period;

  if (UseLWPSynchronization) {
    // forward port of fix for 4275818 (not sleeping long enough)
    // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
    // _lwp_cond_timedwait() used a round_down algorithm rather
    // than a round_up. For millis less than our roundfactor
    // it rounded down to 0 which doesn't meet the spec.
    // For millis > roundfactor we may return a bit sooner, but
    // since we can not accurately identify the patch level and
    // this has already been fixed in Solaris 9 and 8 we will
    // leave it alone rather than always rounding down.

    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
  } else {
    max_wait_period = 50000000;
  }
  millis %= 1000;
  if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
    seconds = max_wait_period;
  }
  abstime->tv_sec = now.tv_sec + seconds;
  long usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    // Carry microsecond overflow into the seconds field.
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}

// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).

int os::PlatformEvent::TryPark() {
  for (;;) {
    const int v = _Event ;
    guarantee ((v == 0) || (v == 1), "invariant") ;
    // CAS loop: retry until we atomically swap the observed value for 0.
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v ;
  }
}

void os::PlatformEvent::park() {           // AKA: down()
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Atomically decrement _Event; a prior unpark (v == 1) lets us return
  // without blocking, otherwise (v == 0) we must block on the condvar.
  int v ;
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v == 0) {
    // Do this the hard way by blocking ...
    // See http://monaco.sfbay/detail.jsf?cr=5094058.
    // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
    // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
    if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
    int status = os::Solaris::mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee (_nParked == 0, "invariant") ;
    ++ _nParked ;
    while (_Event < 0) {
      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
      // Treat this the same as if the wait was interrupted
      // With usr/lib/lwp going to kernel, always handle ETIME
      status = os::Solaris::cond_wait(_cond, _mutex);
      if (status == ETIME) status = EINTR ;
      assert_status(status == 0 || status == EINTR, status, "cond_wait");
    }
    -- _nParked ;
    _Event = 0 ;
    status = os::Solaris::mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
  }
}

// Timed park; returns OS_OK if signalled, OS_TIMEOUT otherwise.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant") ;
  int v ;
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v != 0) return OS_OK ;

  int ret = OS_TIMEOUT;
  timestruc_t abst;
  compute_abstime (&abst, millis);

  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // For Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant") ;
  ++ _nParked ;
  while (_Event < 0) {
    int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == EINTR ||
                  status == ETIME || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break ;   // previous semantics
    if (status == ETIME || status == ETIMEDOUT) break ;
    // We consume and ignore EINTR and spurious wakeups.
  }
  -- _nParked ;
  if (_Event >= 0) ret = OS_OK ;
  _Event = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other.
  OrderAccess::fence();
  return ret;
}

void os::PlatformEvent::unpark() {
  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //          That is, we can safely transition _Event from -1 to either
  //          0 or 1.
  // Forcing 1 is slightly more efficient for back-to-back
  // unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  if (Atomic::xchg(1, &_Event) >= 0) return;

  // If the thread associated with the event was parked, wake it.
  // Wait for the thread assoc with the PlatformEvent to vacate.
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int AnyWaiters = _nParked;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
  if (AnyWaiters != 0) {
    // We intentionally signal *after* dropping the lock
    // to avoid a common class of futile wakeups.
    status = os::Solaris::cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
  }
}

// JSR166
// -------------------------------------------------------

/*
 * The solaris and linux implementations of park/unpark are fairly
 * conservative for now, but can be improved. They currently use a
 * mutex/condvar pair, plus _counter.
 * Park decrements _counter if > 0, else does a condvar wait.  Unpark
 * sets count to 1 and signals condvar.  Only one thread ever waits
 * on the condvar. Contention seen when trying to park implies that someone
 * is unparking you, so don't wait. And spurious returns are fine, so there
 * is no need to track notifications.
 */

#define MAX_SECS 100000000
/*
 * This code is common to linux and solaris and will be moved to a
 * common place in dolphin.
 *
 * The passed in time value is either a relative time in nanoseconds
 * or an absolute time in milliseconds. Either way it has to be unpacked
 * into suitable seconds and nanoseconds components and stored in the
 * given timespec structure.
 * Given time is a 64-bit value and the time_t used in the timespec is only
 * a signed-32-bit value (except on 64-bit Linux) we have to watch for
 * overflow if times way in the future are given. Further on Solaris versions
 * prior to 10 there is a restriction (see cond_timedwait) that the specified
 * number of seconds, in abstime, is less than current_time  + 100,000,000.
 * As it will be 28 years before "now + 100000000" will overflow we can
 * ignore overflow and just impose a hard-limit on seconds using the value
 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
 * years from "now".
6406 */ 6407 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) { 6408 assert (time > 0, "convertTime"); 6409 6410 struct timeval now; 6411 int status = gettimeofday(&now, NULL); 6412 assert(status == 0, "gettimeofday"); 6413 6414 time_t max_secs = now.tv_sec + MAX_SECS; 6415 6416 if (isAbsolute) { 6417 jlong secs = time / 1000; 6418 if (secs > max_secs) { 6419 absTime->tv_sec = max_secs; 6420 } 6421 else { 6422 absTime->tv_sec = secs; 6423 } 6424 absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC; 6425 } 6426 else { 6427 jlong secs = time / NANOSECS_PER_SEC; 6428 if (secs >= MAX_SECS) { 6429 absTime->tv_sec = max_secs; 6430 absTime->tv_nsec = 0; 6431 } 6432 else { 6433 absTime->tv_sec = now.tv_sec + secs; 6434 absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000; 6435 if (absTime->tv_nsec >= NANOSECS_PER_SEC) { 6436 absTime->tv_nsec -= NANOSECS_PER_SEC; 6437 ++absTime->tv_sec; // note: this must be <= max_secs 6438 } 6439 } 6440 } 6441 assert(absTime->tv_sec >= 0, "tv_sec < 0"); 6442 assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs"); 6443 assert(absTime->tv_nsec >= 0, "tv_nsec < 0"); 6444 assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec"); 6445 } 6446 6447 void Parker::park(bool isAbsolute, jlong time) { 6448 // Ideally we'd do something useful while spinning, such 6449 // as calling unpackTime(). 6450 6451 // Optional fast-path check: 6452 // Return immediately if a permit is available. 6453 // We depend on Atomic::xchg() having full barrier semantics 6454 // since we are doing a lock-free update to _counter. 
6455 if (Atomic::xchg(0, &_counter) > 0) return; 6456 6457 // Optional fast-exit: Check interrupt before trying to wait 6458 Thread* thread = Thread::current(); 6459 assert(thread->is_Java_thread(), "Must be JavaThread"); 6460 JavaThread *jt = (JavaThread *)thread; 6461 if (Thread::is_interrupted(thread, false)) { 6462 return; 6463 } 6464 6465 // First, demultiplex/decode time arguments 6466 timespec absTime; 6467 if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all 6468 return; 6469 } 6470 if (time > 0) { 6471 // Warning: this code might be exposed to the old Solaris time 6472 // round-down bugs. Grep "roundingFix" for details. 6473 unpackTime(&absTime, isAbsolute, time); 6474 } 6475 6476 // Enter safepoint region 6477 // Beware of deadlocks such as 6317397. 6478 // The per-thread Parker:: _mutex is a classic leaf-lock. 6479 // In particular a thread must never block on the Threads_lock while 6480 // holding the Parker:: mutex. If safepoints are pending both the 6481 // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock. 6482 ThreadBlockInVM tbivm(jt); 6483 6484 // Don't wait if cannot get lock since interference arises from 6485 // unblocking. Also. check interrupt before trying wait 6486 if (Thread::is_interrupted(thread, false) || 6487 os::Solaris::mutex_trylock(_mutex) != 0) { 6488 return; 6489 } 6490 6491 int status ; 6492 6493 if (_counter > 0) { // no wait needed 6494 _counter = 0; 6495 status = os::Solaris::mutex_unlock(_mutex); 6496 assert (status == 0, "invariant") ; 6497 // Paranoia to ensure our locked and lock-free paths interact 6498 // correctly with each other and Java-level accesses. 6499 OrderAccess::fence(); 6500 return; 6501 } 6502 6503 #ifdef ASSERT 6504 // Don't catch signals while blocked; let the running threads have the signals. 6505 // (This allows a debugger to break into the running thread.) 
6506 sigset_t oldsigs; 6507 sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals(); 6508 thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs); 6509 #endif 6510 6511 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); 6512 jt->set_suspend_equivalent(); 6513 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 6514 6515 // Do this the hard way by blocking ... 6516 // See http://monaco.sfbay/detail.jsf?cr=5094058. 6517 // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking. 6518 // Only for SPARC >= V8PlusA 6519 #if defined(__sparc) && defined(COMPILER2) 6520 if (ClearFPUAtPark) { _mark_fpu_nosave() ; } 6521 #endif 6522 6523 if (time == 0) { 6524 status = os::Solaris::cond_wait (_cond, _mutex) ; 6525 } else { 6526 status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime); 6527 } 6528 // Note that an untimed cond_wait() can sometimes return ETIME on older 6529 // versions of the Solaris. 6530 assert_status(status == 0 || status == EINTR || 6531 status == ETIME || status == ETIMEDOUT, 6532 status, "cond_timedwait"); 6533 6534 #ifdef ASSERT 6535 thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL); 6536 #endif 6537 _counter = 0 ; 6538 status = os::Solaris::mutex_unlock(_mutex); 6539 assert_status(status == 0, status, "mutex_unlock") ; 6540 // Paranoia to ensure our locked and lock-free paths interact 6541 // correctly with each other and Java-level accesses. 
6542 OrderAccess::fence(); 6543 6544 // If externally suspended while waiting, re-suspend 6545 if (jt->handle_special_suspend_equivalent_condition()) { 6546 jt->java_suspend_self(); 6547 } 6548 } 6549 6550 void Parker::unpark() { 6551 int s, status ; 6552 status = os::Solaris::mutex_lock (_mutex) ; 6553 assert (status == 0, "invariant") ; 6554 s = _counter; 6555 _counter = 1; 6556 status = os::Solaris::mutex_unlock (_mutex) ; 6557 assert (status == 0, "invariant") ; 6558 6559 if (s < 1) { 6560 status = os::Solaris::cond_signal (_cond) ; 6561 assert (status == 0, "invariant") ; 6562 } 6563 } 6564 6565 extern char** environ; 6566 6567 // Run the specified command in a separate process. Return its exit value, 6568 // or -1 on failure (e.g. can't fork a new process). 6569 // Unlike system(), this function can be called from signal handler. It 6570 // doesn't block SIGINT et al. 6571 int os::fork_and_exec(char* cmd) { 6572 char * argv[4]; 6573 argv[0] = (char *)"sh"; 6574 argv[1] = (char *)"-c"; 6575 argv[2] = cmd; 6576 argv[3] = NULL; 6577 6578 // fork is async-safe, fork1 is not so can't use in signal handler 6579 pid_t pid; 6580 Thread* t = ThreadLocalStorage::get_thread_slow(); 6581 if (t != NULL && t->is_inside_signal_handler()) { 6582 pid = fork(); 6583 } else { 6584 pid = fork1(); 6585 } 6586 6587 if (pid < 0) { 6588 // fork failed 6589 warning("fork failed: %s", strerror(errno)); 6590 return -1; 6591 6592 } else if (pid == 0) { 6593 // child process 6594 6595 // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris 6596 execve("/usr/bin/sh", argv, environ); 6597 6598 // execve failed 6599 _exit(-1); 6600 6601 } else { 6602 // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't 6603 // care about the actual exit code, for now. 6604 6605 int status; 6606 6607 // Wait for the child process to exit. This returns immediately if 6608 // the child has already exited. 
*/ 6609 while (waitpid(pid, &status, 0) < 0) { 6610 switch (errno) { 6611 case ECHILD: return 0; 6612 case EINTR: break; 6613 default: return -1; 6614 } 6615 } 6616 6617 if (WIFEXITED(status)) { 6618 // The child exited normally; get its exit code. 6619 return WEXITSTATUS(status); 6620 } else if (WIFSIGNALED(status)) { 6621 // The child exited because of a signal 6622 // The best value to return is 0x80 + signal number, 6623 // because that is what all Unix shells do, and because 6624 // it allows callers to distinguish between process exit and 6625 // process death by signal. 6626 return 0x80 + WTERMSIG(status); 6627 } else { 6628 // Unknown exit code; pass it through 6629 return status; 6630 } 6631 } 6632 } 6633 6634 // is_headless_jre() 6635 // 6636 // Test for the existence of xawt/libmawt.so or libawt_xawt.so 6637 // in order to report if we are running in a headless jre 6638 // 6639 // Since JDK8 xawt/libmawt.so was moved into the same directory 6640 // as libawt.so, and renamed libawt_xawt.so 6641 // 6642 bool os::is_headless_jre() { 6643 struct stat statbuf; 6644 char buf[MAXPATHLEN]; 6645 char libmawtpath[MAXPATHLEN]; 6646 const char *xawtstr = "/xawt/libmawt.so"; 6647 const char *new_xawtstr = "/libawt_xawt.so"; 6648 char *p; 6649 6650 // Get path to libjvm.so 6651 os::jvm_path(buf, sizeof(buf)); 6652 6653 // Get rid of libjvm.so 6654 p = strrchr(buf, '/'); 6655 if (p == NULL) return false; 6656 else *p = '\0'; 6657 6658 // Get rid of client or server 6659 p = strrchr(buf, '/'); 6660 if (p == NULL) return false; 6661 else *p = '\0'; 6662 6663 // check xawt/libmawt.so 6664 strcpy(libmawtpath, buf); 6665 strcat(libmawtpath, xawtstr); 6666 if (::stat(libmawtpath, &statbuf) == 0) return false; 6667 6668 // check libawt_xawt.so 6669 strcpy(libmawtpath, buf); 6670 strcat(libmawtpath, new_xawtstr); 6671 if (::stat(libmawtpath, &statbuf) == 0) return false; 6672 6673 return true; 6674 } 6675 6676 size_t os::write(int fd, const void *buf, unsigned int nBytes) { 
6677 INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted); 6678 } 6679 6680 int os::close(int fd) { 6681 return ::close(fd); 6682 } 6683 6684 int os::socket_close(int fd) { 6685 return ::close(fd); 6686 } 6687 6688 int os::recv(int fd, char* buf, size_t nBytes, uint flags) { 6689 INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted); 6690 } 6691 6692 int os::send(int fd, char* buf, size_t nBytes, uint flags) { 6693 INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted); 6694 } 6695 6696 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { 6697 RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags)); 6698 } 6699 6700 // As both poll and select can be interrupted by signals, we have to be 6701 // prepared to restart the system call after updating the timeout, unless 6702 // a poll() is done with timeout == -1, in which case we repeat with this 6703 // "wait forever" value. 
// Wait for fd to become readable (POLLIN) within 'timeout' milliseconds,
// restarting poll() after EINTR with the remaining time. timeout == -1
// means wait forever.
int os::timeout(int fd, long timeout) {
  int res;
  struct timeval t;
  julong prevtime, newtime;
  static const char* aNull = 0;
  struct pollfd pfd;
  pfd.fd = fd;
  pfd.events = POLLIN;

  gettimeofday(&t, &aNull);
  prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;

  for(;;) {
    INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
    if(res == OS_ERR && errno == EINTR) {
      if(timeout != -1) {
        // Deduct the elapsed milliseconds and retry with what is left.
        gettimeofday(&t, &aNull);
        newtime = ((julong)t.tv_sec * 1000) + t.tv_usec /1000;
        timeout -= newtime - prevtime;
        if(timeout <= 0)
          return OS_OK;
        prevtime = newtime;
      }
    } else return res;
  }
}

// Interruptible connect(2) wrapper with EINTR-specific errno fixups.
int os::connect(int fd, struct sockaddr *him, socklen_t len) {
  int _result;
  INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\
                          os::Solaris::clear_interrupted);

  // Depending on when thread interruption is reset, _result could be
  // one of two values when errno == EINTR

  if (((_result == OS_INTRPT) || (_result == OS_ERR))
      && (errno == EINTR)) {
    /* restarting a connect() changes its errno semantics */
    INTERRUPTIBLE(::connect(fd, him, len), _result,\
                  os::Solaris::clear_interrupted);
    /* undo these changes */
    if (_result == OS_ERR) {
      if (errno == EALREADY) {
        errno = EINPROGRESS; /* fall through */
      } else if (errno == EISCONN) {
        errno = 0;
        return OS_OK;
      }
    }
  }
  return _result;
}

// Interruptible accept(2) wrapper; rejects invalid descriptors up front.
int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
  if (fd < 0) {
    return OS_ERR;
  }
  INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\
                           os::Solaris::clear_interrupted);
}

// Interruptible recvfrom(2) wrapper.
int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
                 sockaddr* from, socklen_t* fromlen) {
  INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\
                           os::Solaris::clear_interrupted);
}

// Interruptible sendto(2) wrapper.
int os::sendto(int fd, char* buf, size_t len, uint flags,
               struct sockaddr* to, socklen_t tolen) {
  INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\
                           os::Solaris::clear_interrupted);
}

// Query the number of readable bytes on fd via FIONREAD into *pbytes.
int os::socket_available(int fd, jint *pbytes) {
  if (fd < 0) {
    return OS_OK;
  }
  int ret;
  RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
  // note: ioctl can return 0 when successful, JVM_SocketAvailable
  // is expected to return 0 on failure and 1 on success to the jdk.
  return (ret == OS_ERR) ? 0 : 1;
}

// Interruptible, non-restarting bind(2) wrapper.
int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
                                     os::Solaris::clear_interrupted);
}

// Get the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {
  const char* p = get_current_directory(buffer, bufferSize);

  if (p == NULL) {
    // Trip the assert in debug builds; release builds report length 0.
    assert(p != NULL, "failed to get current directory");
    return 0;
  }

  return strlen(buffer);
}